code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Crime Analysis
# ## Team 6
# ### <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# # Introduction
# In this notebook, we explore several components of crime, touching upon questions of both personal and business safety. Our analysis is divided into questions relevant to individuals, questions relevant to the police force, and questions relevant to business. Of course, all crime-related questions may be relevant to all groups.
#
# Our personal safety questions are:
# - What factors contribute to the number of crimes that occur on a given day?
# - Do some areas have more crime than others?
# - Given that there has been a crime, what factors contribute to whether or not a shooting occurs with the crime?
#
# Our enforcement question is:
# - Can crimes be classified by neighborhood, time of day, or time of year?
#
# Our business question is:
# - In each of Boston's neighborhoods, which streets are most likely to see crimes of particular concern to businesses, such as burglary and vandalism?
#
# For each question, we follow a consistent pattern. We begin with data wrangling and exploratory data analysis (EDA). In most cases, we proceed to apply a statistical model. Finally, in some cases, we evaluate the model with residual charts or confusion matrices.
# # Part I: Load and clean the data
# Load modules, apply settings
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
import statsmodels.api as sm
import requests
import json
from statsmodels.formula.api import ols
from statsmodels.formula.api import logit
import datetime
import calendar
from sklearn.cross_validation import train_test_split
from sklearn.cluster import KMeans
from math import sqrt
# %matplotlib inline
mpl.style.use('fivethirtyeight')
pd.options.mode.chained_assignment = None
# Load the primary crime data
base_url = 'https://raw.githubusercontent.com/aisaacso/SafeBoss/master/'
crime_url = base_url + 'Crime_Incident_Reports.csv'
crime = pd.read_csv(crime_url, low_memory = False)
# Create column that is guaranteed to have no NANs, for pivot table counts throughout the notebook
crime['indexer'] = 1
# +
# Date clean-up
# Converts FROMDATE from str to datetime
crime['FROMDATE'] = pd.to_datetime(crime.FROMDATE, format = '%m/%d/%Y %I:%M:%S %p')
# Original range is Jul-2012 to Aug-2015; because in some cases we analyze crime counts, exclude first month of data
crime = crime[crime.FROMDATE > '2012-08-10 00:00:00']
#Add a date column
crime['Date'] = crime.FROMDATE.dt.date
# +
# Convert police district codes to neighborhoods
# Drop rows with no district, and drop the 'HTU' (Human Trafficking Unit) district code
crime = crime[crime.REPTDISTRICT.notnull()]
crime = crime[crime.REPTDISTRICT != 'HTU']  # '!=' replaces the Python-2-only '<>' operator
def get_neighborhood(d):
    """Translate a Boston police district code into its neighborhood name.

    Unknown codes map to the sentinel '???'.
    """
    district_to_neighborhood = {
        'A1': 'Downtown', 'A15': 'Charlestown', 'A7': 'EastBoston',
        'B2': 'Roxbury', 'B3': 'Mattapan', 'C6': 'SouthBoston',
        'C11': 'Dorchester', 'D4': 'SouthEnd', 'D14': 'Brighton',
        'E5': 'WestRoxbury', 'E13': 'JamaicaPlain', 'E18': 'HydePark',
    }
    return district_to_neighborhood.get(d, '???')
crime['Neighborhood'] = crime['REPTDISTRICT'].map(get_neighborhood)
# -
# Load in weather data
weather_url = base_url + 'weather.csv' # From http://www.ncdc.noaa.gov/cdo-web/datasets
weather = pd.read_csv(weather_url)
# +
# Prepare weather data for adding to crime data
# Include only Boston Logan weather station (has most consistent data)
weather = weather[weather.STATION == 'GHCND:USW00014739']
#Match date format to crime dataset's date format
weather['Date'] = pd.to_datetime(weather.DATE, format = '%Y%m%d')
weather['Date'] = weather.Date.dt.date
# Add temp categories
median = int(weather.TMAX.median())
lower = weather.TMAX.quantile(q = 0.25).astype(int)
upper = weather.TMAX.quantile(q = 0.75).astype(int)
def tmax_groups(t):
    """Bucket a daily max temperature into one of four quartile bands.

    Relies on the module-level quantiles computed above:
    lower (25th pct), median, upper (75th pct).
    """
    if t <= lower:
        return 'Cold'
    if t <= median:
        return 'SortaCold'
    if t <= upper:
        return 'SortaHot'
    return 'Hot'
def prcp_groups(p):
    """Return 1 when any precipitation fell (p > 0), otherwise 0."""
    return 1 if p > 0 else 0
weather['TempGroups'] = weather['TMAX'].map(tmax_groups)
weather['Precip_Bool'] = weather['PRCP'].map(prcp_groups)
# -
# # Part II : Personal Safety
# In this section, we analyze three questions:
# - What factors contribute to the number of crimes that occur on a given day?
# - Do some areas have more crime than others?
# - Given that there has been a crime, what factors contribute to whether or not a shooting occurs with the crime?
#
# We expect that these questions will be especially relevant to Boston residents who wish to be aware of their personal risks of facing crime in the city.
# ## Part II Question 1: What factors contribute to crime per day?
# EDA for seasonal variation
# Count crimes per (Date, Month); 'indexer' is the all-ones column built earlier
dates = pd.pivot_table(crime, values = ['indexer'], index = ['Date', 'Month'], aggfunc = 'count')
dates.rename(columns={'indexer': 'CrimeCount'}, inplace=True) #Rename for more logical referencing
min_crimes = dates.CrimeCount.min()
dates = dates[dates.CrimeCount != min_crimes] # Removes an outlier in Aug, 2015 (the lowest daily count)
dates.plot(xticks = None, title = 'Crimes per day varies by season')
# +
# EDA for season
def season_groups(m):
    """Map a month number (1-12) to its meteorological season name."""
    month_to_season = {
        12: 'Winter', 1: 'Winter', 2: 'Winter',
        3: 'Spring', 4: 'Spring', 5: 'Spring',
        6: 'Summer', 7: 'Summer', 8: 'Summer',
    }
    return month_to_season.get(m, 'Fall')
dates = pd.DataFrame(dates)
dates['Month'] = dates.index.get_level_values(1)
dates['Season'] = dates['Month'].map(season_groups)
seasonal = pd.pivot_table(dates, index = 'Season', values = 'CrimeCount', aggfunc = 'sum')
seasonal.plot(kind = 'bar')
# -
# EDA for month
months = pd.pivot_table(dates, index = 'Month', values = 'CrimeCount', aggfunc = 'sum')
months.plot(kind = 'bar')
# +
# EDA for temp
dates['Date'] = dates.index.get_level_values(0)
add_weather = pd.merge(dates, weather, how = 'inner', on = 'Date') # inner join excludes 10 dates
add_weather.plot(kind = 'scatter', x = 'CrimeCount', y = 'TMAX', title = 'Crime increases with temp')
# -
# EDA for precipitation
add_weather['Raining_or_Snowing?'] = add_weather['Precip_Bool'].map({0:'No', 1:'Yes'})
crime_precip = pd.pivot_table(add_weather, index = 'Raining_or_Snowing?', values = 'CrimeCount', aggfunc = 'count')
crime_precip.plot(kind = 'bar', title = 'Fewer crimes when raining or snowing')
# +
#EDA for day of the week
def get_week_day(d):
    """Return the abbreviated weekday name ('Mon'..'Sun') for date d."""
    names = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun')
    return names[d.weekday()]
add_weather['DayWeek'] = add_weather['Date'].map(get_week_day)
weekdays = pd.pivot_table(add_weather, index = 'DayWeek', values = 'CrimeCount', aggfunc = 'sum')
weekdays.plot(kind = 'bar')
# -
# Build model
# Removed variables: Spring
season_dummies = pd.get_dummies(add_weather['Season']).iloc[:, 1:]
day_dummies = pd.get_dummies(add_weather['DayWeek']).iloc[:, 1:]
temp_dummies = pd.get_dummies(add_weather['TempGroups']).iloc[:, 1:]
dates_dummy_df = add_weather.join([day_dummies, season_dummies, temp_dummies])
train, test = train_test_split(dates_dummy_df, test_size = 0.2)
perday_model = ols(data=train, formula='CrimeCount ~ Summer + Winter + Hot + SortaCold + SortaHot +\
Mon + Sat + Sun + Thurs + Tues + Wed + Precip_Bool')
perday_result = perday_model.fit()
perday_result.summary()
# +
# Analyze the model
residuals = perday_result.resid
fig = sns.distplot(residuals)
# -
# Create prediction for test data
test['Prediction'] = perday_result.predict(test)
# Plot the prediction against the actual
test.plot(kind = 'scatter', x='CrimeCount', y = 'Prediction')
# Linear regression on correlation between prediction and actual
model_test = ols(data=test, formula = 'Prediction ~ CrimeCount')
test_result = model_test.fit()
# Checking residuals of test regression
test_resid = test_result.resid
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,5))
sns.distplot(test_resid, ax=axes[0]);
sm.qqplot(test_resid, fit=True, line='s', ax=axes[1]);
# # Part II Question 2: Do some areas have more crimes than others?
# First, we examine crime overall.
#
# Null hypothesis: Numbers of crime per day do not differ by neighborhood.
#EDA for crime overall
reptd = pd.pivot_table(crime, values = 'indexer', index = 'Neighborhood', aggfunc = 'count')
reptd.plot(kind = 'bar', sort_columns = True, title = 'Crime totals', figsize = (8,7))
# +
#Hypothesis testing
# Set up dataframes
date_by_neigh = pd.pivot_table(crime, index = 'Date', columns = 'Neighborhood', \
values = 'indexer', aggfunc = 'count')
date_by_neigh = date_by_neigh[date_by_neigh.SouthEnd != 167] # removes southbos outlier
date_by_neigh_melt = pd.melt(date_by_neigh).dropna()
# Pop standard deviation
pop_sd = date_by_neigh_melt.std()
# Pop average
pop_avg = date_by_neigh_melt.mean()
# Sample size
sample_size = len(date_by_neigh) # All neighborhoods have the same number of entries +/- 3
# Standard error
st_err = pop_sd / sqrt(sample_size)
date_by_neigh_p = pd.DataFrame(date_by_neigh.mean())
date_by_neigh_p['mean'] = date_by_neigh_p.loc[:,0]
date_by_neigh_p['zscore'] = (date_by_neigh.mean() - pop_avg[0])/st_err[0]
date_by_neigh_p['pscore'] = stats.norm.sf(abs((date_by_neigh_p['zscore'])))
print 'Population average crimes per day: ', pop_avg[0]
date_by_neigh_p
# -
# Null hypothesis is rejected.
# Next, we examine crime per capita. The chart below shows that the distribution of crime is very different when examined on a per capita basis.
# load in pop file
pop_url = base_url + 'pop.csv' # From our own web research
pop_df = pd.read_csv(pop_url)
# +
# EDA for crime per capita
reptd = pd.DataFrame(reptd)
reptd.rename(columns={'indexer': 'CrimeCount'}, inplace=True) #Rename for more logical referencing
reptd['Neighborhood'] = reptd.index.get_level_values(0)
add_pop = pd.merge(reptd, pop_df, how = 'inner', on = 'Neighborhood')
add_pop['percapita'] = add_pop.CrimeCount / add_pop.Population
add_pop.plot(kind = 'bar', x='Neighborhood', y = 'percapita', sort_columns = True, title = 'Crime per capita', figsize = (8,7))
# -
# # Part II Question 3: Given a crime, what factors contribute to crime including a shooting?
# Dummy for shooting
crime['Shoot_Status']=crime['Shooting'].map({'No':0,'Yes':1}).astype(int)
# EDA for day of the week
shoot = crime[crime.Shoot_Status==1]
days = pd.pivot_table(shoot, values = 'indexer', index = 'DAY_WEEK', aggfunc = 'count')
days.plot(kind = 'bar')
# EDA for month
months = pd.pivot_table(shoot, values = 'indexer', index = 'Month', aggfunc = 'count')
months.plot(kind = 'bar')
# EDA for weather
weather_shoot = pd.merge(shoot, weather, how = 'inner', on = 'Date')
temps = pd.pivot_table(weather_shoot, index = 'TempGroups', values = 'indexer', aggfunc = 'count')
temps.plot(kind = 'bar')
# Add in weather data to crime dataset
crime_weather = pd.merge(crime, weather, how = 'outer', on = 'Date')
# +
#Add a column for the month name (regression can't handle numbers as col names)
def mo_as_name(mo):
    """Return the English month name for month number mo (e.g. 3 -> 'March')."""
    month_names = calendar.month_name
    return month_names[mo]
crime_weather['MoName'] = crime_weather['Month'].map(mo_as_name)
# -
# Data prep
week_dummies = pd.get_dummies(crime_weather['DAY_WEEK']).iloc[:, 1:]
month_dummies = pd.get_dummies(crime_weather['MoName']).iloc[:, 1:]
neigh_dummies = pd.get_dummies(crime_weather['Neighborhood']).iloc[:,1:]
temp_dummies = pd.get_dummies(crime_weather['TempGroups']).iloc[:,1:]
crtype_dummies = pd.get_dummies(crime_weather['MAIN_CRIMECODE']).iloc[:,1:]
shoot_df = crime_weather.join([week_dummies, month_dummies, neigh_dummies, temp_dummies, crtype_dummies])
# +
#Regression
# Removed variables: + July + December + August + Downtown + Precip_Bool + Thursday + November + WestRoxbury + SortaCold
# # + October + March + June + May + Wednesday + Tuesday
train, test = train_test_split(shoot_df, test_size = 0.2)
model_logistic = logit(data=train, formula='Shoot_Status ~ Monday + Sunday + February + Hot + SortaHot \
+ SouthEnd + Roxbury + HydePark + JamaicaPlain + SouthBoston + Dorchester + Mattapan + EastBoston + Charlestown'
)
result_logistic = model_logistic.fit()
# Function for analyzing p_values; used for removing p values one by one
def analyze_p(res):
    """Print the pseudo R-squared and the p-values of a fitted statsmodels
    logit result, largest p-value first, to guide stepwise variable removal.
    (Python 2 print statements, matching this notebook's python2 kernel.)"""
    p = res.pvalues
    p.sort_values(ascending = False, inplace = True)
    print res.prsquared
    print p
#analyze_p(result_logistic)
result_logistic.summary()
# -
residuals = result_logistic.resid_dev
fig = sns.distplot(residuals)
# ## Part II Conclusion
# Of these three questions, the results of Question 1 are most promising. The adjusted r-squared is high enough to make the model worthwhile, several of the variables show sufficiently low p-values, and the residuals are evenly distributed. The other models need further refinement in order to provide meaningful insights.
#
# Our analysis of Question 1 reveals that, holding all else constant, summer, winter, temperature level, precipitation, and day of the week all have a statistically significant correlation with the number of crimes on a given day. Notably, when all these other factors are held constant, there are between 43 and 54 fewer crimes on Sundays. Hot days have between 27 and 40 more crimes, again holding all else constant. These figures all use a 95% confidence interval.
# # Part III: Enforcement
# In this section, we analyze whether crimes can be classified by neighborhood, time of day, or time of year. We imagine that these questions could be particularly relevant to Boston's police department, as they prepare for the city's varying enforcement needs. In this section, we examine only the ten most common crime types.
# ## Part III Question 1: Do certain kinds of common crimes happen in certain neighborhoods?
# +
# Data wrangling
# Find the most common crime types
cr_counts = pd.DataFrame(pd.pivot_table(crime, index = 'MAIN_CRIMECODE', values = 'DAY_WEEK', aggfunc = 'count'))
cr_counts.sort_values('DAY_WEEK', ascending = False, inplace = True)
cr_counts = cr_counts.head(10)
top_crimes = cr_counts.index.tolist()
# Prep the data
districts = neigh_dummies.columns.tolist()
dist_cols = ['Neighborhood'] + top_crimes
neigh_classes = shoot_df[dist_cols].dropna()
# -
# EDA
districts_crimes = pd.pivot_table(neigh_classes, index = 'Neighborhood', values = top_crimes, aggfunc = 'sum')
districts_crimes.plot(kind = 'bar')
# Build the model
model_distcr = KMeans( n_clusters = len(districts))
model_distcr = model_distcr.fit(neigh_classes.iloc[:,1:])
neigh_classes['kmeans_class'] = model_distcr.labels_
# +
# Analyze the model
#Plot the classification
plt.figure(figsize=(7,6))
sns.stripplot(x='Neighborhood', y='kmeans_class', data=neigh_classes, jitter= True)
#Confusion matrix
pd.pivot_table(neigh_classes, index='Neighborhood', columns = 'kmeans_class', values = '11xx', aggfunc = 'count')
# -
# ## Part III Question 2: Do certain kinds of common crimes happen at certain times of day?
# +
# Data wrangling
# Adds Hour column for each crime
# Adds Hour column for each crime
crime['Hour'] = crime.FROMDATE.dt.hour
# Removes the preponderance of rows whose time is the 00:00:00 placeholder:
# flag exact-midnight rows, left-merge the flag back, then keep unflagged rows
crime_no_time = crime[(crime.FROMDATE.dt.hour == 0) & (crime.FROMDATE.dt.minute == 0)]
crime_no_time['no_time'] = 'indicator'
crime_time = crime.merge(crime_no_time, how='left')
crime_time = crime_time[crime_time.no_time != 'indicator']  # '!=' replaces Python-2-only '<>'
def time_groups(t):
    """Bucket an hour of the day (0-23) into Night/Morning/Midday/Evening."""
    if t in (0, 1, 2, 3, 4, 23):
        return "Night"
    if t in (5, 6, 7, 8, 9, 10):
        return "Morning"
    if t in (11, 12, 13, 14, 15, 16):
        return "Midday"
    return "Evening"
periods = crime_time.join(crtype_dummies)
periods['timegroup'] = periods['Hour'].map(time_groups)
time_cols = ['timegroup', 'Hour'] + top_crimes
periods_classes = periods[time_cols].dropna()
# -
# EDA
hours_crimes = pd.pivot_table(periods_classes, index = 'Hour', values = top_crimes, aggfunc = 'sum')
hours_crimes.plot(kind = 'line')
# Build the model
model_periocr = KMeans( n_clusters = 4)
model_periocr = model_periocr.fit(periods_classes.iloc[:,2:])
periods_classes['kmeans_class'] = model_periocr.labels_
# +
# Analyze the model
#Plot the classification
plt.figure(figsize=(7,6))
sns.stripplot(x='timegroup', y='kmeans_class', data=periods_classes, jitter= True)
#Confusion matrix
pd.pivot_table(periods_classes, index='timegroup', columns = 'kmeans_class', values = '11xx', aggfunc = 'count')
# -
# ## Part III Question 3: Do certain types of crimes happen at certain times of year?
# Data wrangling
shoot_df['Season'] = shoot_df['Month'].map(season_groups)
seas_cols = ['Season'] + top_crimes
seasons_classes = shoot_df[seas_cols].dropna()
# EDA
seasons_crimes = pd.pivot_table(seasons_classes, index = 'Season', values = top_crimes, aggfunc = 'sum')
seasons_crimes.plot(kind = 'line')
# +
# Build the model
model_seascr = KMeans( n_clusters = 4)
model_seascr = model_seascr.fit(seasons_classes.iloc[:,2:])
seasons_classes['kmeans_class'] = model_seascr.labels_
# +
# Analyze the model
#Plot the classification
plt.figure(figsize=(7,6))
sns.stripplot(x='Season', y='kmeans_class', data=seasons_classes, jitter= True)
#Confusion matrix
pd.pivot_table(seasons_classes, index='Season', columns = 'kmeans_class', values = '11xx', aggfunc = 'count')
# -
# ## Part III Conclusion
# In this section, we attempted to classify crimes by a variety of metrics. None of these variables were effective classifiers for crime. However, as shown here, this model could be used across a number of variables to find crime categories. Again, we believe that, with further refinement, this model could be useful to law enforcement.
# # Part IV: Crimes of Concern to Business-Owners
# In this section, we list crimes that may be of particular concern to business owners, and we show which streets have the highest occurrences of these crimes in each of Boston's neighborhoods. We imagine that entrepreneurs could examine this data when siting new businesses.
# +
# create a list of business crimes
# BUG FIX: the original list was missing a comma after 'Larceny', so Python's
# implicit string concatenation silently produced the bogus single entry
# 'LarcenyOther Burglary' — neither 'Larceny' nor 'Other Burglary' matched.
business_crimes = ['COMMERCIAL BURGLARY', 'VANDALISM', 'ROBBERY', 'OTHER LARCENY', 'BurgTools', 'ARSON',
                   'Larceny', 'Other Burglary', 'PROSTITUTION CHARGES', 'PubDrink']
# Classify crimes based on whether or not they are business-relevant crimes
def is_bus_cr(c):
    """Return 1 if incident-type description c is a business-relevant crime, else 0."""
    if c in business_crimes:
        return 1
    else:
        return 0
crime['BusCr'] = crime['INCIDENT_TYPE_DESCRIPTION'].map(is_bus_cr)
dists = crime['Neighborhood'].unique().tolist()
# Create a chart of the top five streets in each district in Boston
for d in dists:
var = crime.loc[crime.Neighborhood == d]
streets = pd.DataFrame(pd.pivot_table(var, values = 'BusCr', index = 'STREETNAME', aggfunc = 'sum'))
streets.sort_values('BusCr', ascending = False, inplace = True)
top_five = streets.head(5)
top_five.plot(kind = 'bar', title = d)
print
# -
# # Conclusion
# In this notebook, we analyzed Boston's crime data from a variety of perspectives. We examined questions relevant to individuals, police officers, and business owners. These models and this approach could be extended to explore questions relevant to other groups of stakeholders, including youth, the elderly, minorities, and city administration and leadership. Each group has its own interests and questions with respect to crime. Other data, such as economic data, unemployment data, and demographic could also be incorporated into our models to provide further crime-related insights.
|
crimes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %config Application.log_level="DEBUG"
from structure_comp.remove_duplicates import RemoveDuplicates
# Build the duplicate finder over a folder of CIF structures;
# try_supercell=True also flags structures that are supercells of one another
# (assumes the local path exists — TODO confirm on the target machine)
rd = RemoveDuplicates.from_folder(
    '/Users/kevinmaikjablonka/Downloads/CURATED-COFs-master/cifs/', try_supercell=True)
# + jupyter={"outputs_hidden": true}
rd.run_filtering()
# -
rd.duplicates
rd.inspect_duplicates()
|
examples/cofs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ANB3P8Einwt2"
# # Shape Gen Graph Generative Models
# + [markdown] id="plUqwYF5oAEW"
# ## Environments
# + id="JMG2HDDft2f6"
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
# + [markdown] id="-YxZ0wKxeezq"
# ### Tensorboard
# + [markdown] id="udRQvHQte6gc"
# ## Tensorboard
#
# First we need install a tensorboard, so we can track a progress and generation.
#
# + id="8xTQvFTVeizq"
#docker run -it -p 8888:8888 -p 6006:6006 \
#tensorflow/tensorflow:nightly-py3-jupyter
# + [markdown] id="jRifOdD4ekNb"
# ### Git
#
# To get the shape gen abstract model and trainer, please git clone the repo. When it finishes, expand the folder structure; it contains all the source code.
# + colab={"base_uri": "https://localhost:8080/"} id="5f0SoVMboFNb" outputId="cf396c56-603e-47d5-d091-ca06bb7e9d8d"
# !git clone https://github.com/spyroot/shapenet
# %cd shapenet
# !ls
# + id="Bp20QOqBoMXe"
# !apt-get update
# !apt install software-properties-common
# !apt-get upgrade
# !apt install -y build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev wget libbz2-dev
# !pip3 install --upgrade pip
# !apt install python3.8
# !apt install python3-pip
# !apt-get install -y python3-dev
# !apt-get install -y python3.8-dev
# !apt-get install -y libpython3.8-dev
# !pip3 install docutils
# !pip3 install pygments
# !pip3 install twine
# !pip3 install wheel
# !pip3 install pyemd
# !pip3 install rich
# !pip3 install python-louvain
# + id="CdxpQfd_9I3M"
# !update-alternatives --install /usr/bin/python3 python /usr/bin/python3.8 1
# !update-alternatives --list python3
# !sudo update-alternatives --config python3
# !apt install python3-pip
# !python -m pip install --upgrade pip
# !python3 -m pip install --upgrade pip
# !python --version
# !pip3 --version
# + id="_Me-woxJ4ER8"
# !pip3 install networkx==2.6.3
# !pip3 install zict==2.0.0
# !pip3 install pyemd
# !pip3 install yaml
# !pip3 install pandas~=1.3.4
# !pip3 install tqdm~=4.62.3
# !pip3 install pydot
# !pip3 install pyvista==0.32.1
# !pip3 install pyviz-comms==2.1.0
# !pip3 install fvcore
# !pip3 install pickleshare
# !pip3 install protobuf
# !pip3 install vtk
# !pip3 install PyYAML
# !pip3 install torch==1.10.0 -f https://data.pyg.org/whl/torch-1.10.0+cu112.html
# !pip3 install torchaudio==0.10.0 -f https://data.pyg.org/whl/torch-1.10.0+cu112.html
# !pip3 install torchvision==0.11.1 -f https://data.pyg.org/whl/torch-1.10.0+cu112.html
# !pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu112.html
# !pip3 install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu112.html
# !pip3 install torch-geometric
# !pip3 install tensorboard
# !pip3 matplotlib==3.4.3
# !pip3 matplotlib-inline==0.1.3
# !pip3 install scikit-learn
# !pip3 install scipy
# !pip3 install scooby
# !pip3 install seaborn
# !pip3 install pyyaml
# !pip3 install networkx==2.6.3
# !pip3 install zict==2.0.0
# !pip3 install pyemd
# !pip3 install yaml
# !pip3 install pyyaml
# !pip3 install pandas
# !pip3 install tqdm~=4.62.3
# !pip3 install pydot
# !pip3 install pyvista==0.32.1
# !pip3 install pyviz-comms==2.1.0
# !pip3 install fvcore
# !pip3 install pickleshare
# !pip3 install protobuf
# !pip3 install vtk
# !pip3 install PyYAML
# !pip3 install torch-geometric
# !pip3 matplotlib
# !pip3 matplotlib-inline
# !pip3 install scikit-learn
# !pip3 install scipy
# !pip3 install scooby
# !pip3 install seaborn
# !python -c "import torch; print(torch.version.cuda)"
# widges fix
# !pip3 install tqdm
# !pip3 install ipywidgets
# #!jupyter nbextension enable --py widgetsnbextension
# #!jupyter labextension install @jupyter-widgets/jupyterlab-manager
# + [markdown] id="<KEY>"
# ### GPU
#
# We really need a GPU to train these models
# + id="TuTM56BtpRMi" colab={"base_uri": "https://localhost:8080/"} outputId="1b1d57f5-e628-4bef-b686-1c19b227be93"
# gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Not connected to a GPU')
else:
print(gpu_info)
# !nvcc --version
# !python -c "import torch; print(torch.version.cuda)"
# !python3 -c "import torch; print(torch.version.cuda)"
# + [markdown] id="mbRJx17FdO6x"
# ### Graph Encoder and Decoder
#
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="10pnpc53ngXa" outputId="48f75fc9-3836-4cd2-a25b-dd36fb8abc0f"
import networkx as nx
import numpy as np
import torch.utils as tutil
np.random.seed(1234)
random_graph = nx.erdos_renyi_graph(10, 0.4)
original_graph = nx.to_numpy_array(random_graph)
nx.draw(random_graph, with_labels=True)
original_graph
# + [markdown] id="hApMvHs6ngXd"
# ## Encoder
#
# Let's create a test encoder and encode our random graph first.
# + pycharm={"name": "#%%\n"} colab={"base_uri": "https://localhost:8080/"} id="-285-Lm2ngXe" outputId="e01c122a-bae5-4b01-a34c-80b582aa91e7"
from shapegnet.models.adjacency_encoder import AdjacencyEncoder
encoder = AdjacencyEncoder()
encoded_adj = encoder.encode(original_graph.copy(), 10)
encoded_adj
# + [markdown] id="H-SzzmA5ngXe"
# ## Decoder
#
# Now let's decode our graph back and visualize it and verify
# that we don't have any strange issues.
# + pycharm={"name": "#%%\n"} id="x1NHeelgngXf" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="03d5a261-8306-4935-885f-c0b5e90129c8"
from shapegnet.models.adjacency_decoder import AdjacencyDecoder
decoder = AdjacencyDecoder()
decoded = decoder.decode(encoded_adj)
decoded
decoded_graph = nx.from_numpy_array(decoded, create_using=nx.MultiGraph)
nx.draw(decoded_graph, with_labels=True)
# + [markdown] id="k_B6Ge_qngXf"
# ## Metric Computation
# Now let's compute metrics between two graphs, in a similar way to what is described in the GraphRNN
# and NetGAN paper
# + pycharm={"name": "#%%\n"} id="LJtj0BTrngXg" colab={"base_uri": "https://localhost:8080/", "height": 384} outputId="71d1ac4a-f760-497a-9794-6c14952f57ae"
from shapegnet.external.stats import degree_stats, clustering_stats, orbit_stats_all
mmd_degree = degree_stats([random_graph], [decoded_graph])
mmd_degree
# This two computation are heavy and I ported C++ code but for now it suffice
# to understand we want compute degree , clustering and orbits between graphs.
# we will use it later to compute metrics.
# mmd_clustering = clustering_stats(original_graph, [decoded_graph])
# mmd_clustering
# mmd_4orbits = orbit_stats_all(original_graph, [decoded_graph])
# mmd_clustering
#
# + [markdown] id="sZKPBUhxngXg"
# ## Graph Rnn BFS Ordering.
#
# - In case of Graph RNN we need compute BFS order.
# - First let create sample graph lader topology and
#
# Note there are many generators we can use. Please check for details
# https://networkx.org/documentation/stable/reference/generators.html
#
# For this step we only need a small graph to understand how we compute
# BFS order at each step.
# + pycharm={"name": "#%%\n"} colab={"base_uri": "https://localhost:8080/", "height": 1000} id="jc6SeWtEngXh" outputId="e7305904-24e1-4b0d-958e-e9c023a1aa5c"
from queue import LifoQueue as stack
from queue import SimpleQueue as queue
import networkx as nx
import pylab as plt
from IPython.core.display import HTML, display
# import pygraphviz
# from networkx.drawing.nx_agraph import graphviz_layout
def bfs_seq(input_graph, start_id):
    """Return the nodes of input_graph in BFS level order starting at start_id.

    :param input_graph: a networkx graph
    :param start_id: node to start the traversal from
    :return: list of node ids, level by level
    """
    successors = dict(nx.bfs_successors(input_graph, start_id))
    order = [start_id]
    frontier = [start_id]
    while frontier:
        next_frontier = []
        for node in frontier:
            children = successors.get(node)
            if children is not None:
                next_frontier = next_frontier + children
        order = order + next_frontier
        frontier = next_frontier
    return order
def gc(qe):
    """Drain every pending item from the queue/stack qe.

    The empty-queue case falls straight through the loop, so no
    separate guard is needed.
    """
    while not qe.empty():
        qe.get()
def bdfs(graph : nx.classes.graph.Graph, start, goal, search='dfs'):
    """
    This is a template. Taking fringe = stack() gives DFS and
    fringe = queue() gives BFS. We need to add a priority function to get UCS.
    Usage: bp = bdfs(graph, start, goal, queue_or_stack = stack()) (this is dfs)
           bp = bdfs(graph, start, goal, queue_or_stack = queue()) (this is bfs)

    Returns a back-pointer dict (node -> predecessor on the discovered path),
    or None when the fringe empties before the goal is reached.
    """
    depth = {}
    # DFS: LIFO fringe with weight -1 (deeper nodes get smaller depth values);
    # BFS: FIFO fringe with weight +1 (standard level-order depth).
    if search == 'dfs':
        queue_or_stack = stack()
        weight = -1
    else:
        queue_or_stack = queue()
        weight = 1
    gc(queue_or_stack)  # defensive drain of the freshly created fringe
    current = start
    closed = set()       # nodes already expanded
    back_pointer = {}
    depth[start] = 0
    queue_or_stack.put(current)
    while True:
        if queue_or_stack.empty():
            return None  # fringe exhausted: goal unreachable
        # pop until we find a node that has not been expanded yet
        while True:
            current = queue_or_stack.get()
            if current not in closed:
                break
            if queue_or_stack.empty():
                return None
        closed.add(current)
        if current == goal:
            return back_pointer
        if graph[current]:
            for node in graph[current]:
                if node not in closed:
                    node_depth = depth[current] + weight
                    # keep only the better (smaller) depth and its back-pointer
                    if node not in depth or node_depth < depth[node]:
                        back_pointer[node] = current
                        depth[node] = node_depth
                        queue_or_stack.put(node)
def dfs(graph : nx.classes.graph.Graph, start, goal):
    """Depth-first search: thin wrapper delegating to bdfs with a LIFO fringe."""
    return bdfs(graph, start, goal, search='dfs')


def bfs(graph : nx.classes.graph.Graph, start, goal):
    """Breadth-first search: thin wrapper delegating to bdfs with a FIFO fringe."""
    return bdfs(graph, start, goal, search='bfs')
def get_gr(digraph=True):
    """Return an empty nx.DiGraph when digraph is True, else an empty nx.Graph."""
    return nx.DiGraph() if digraph else nx.Graph()
def adj2graph(graph : nx.classes.graph.Graph, digraph=True):
    """Build a networkx (Di)Graph from an adjacency mapping.

    graph is a mapping node -> {neighbor: weight}; each neighbor entry
    becomes an edge carrying a 'weight' attribute.
    """
    result = get_gr(digraph=digraph)
    for node in graph:
        result.add_node(node)
        if not graph[node]:
            continue
        for neighbor in graph[node]:
            result.add_edge(node, neighbor)
            result[node][neighbor]['weight'] = graph[node][neighbor]
    return result
def edges_color(graph, bfs_edge):
    """
    Return (edge_colors, edge_widths) for drawing graph with the BFS path emphasized.

    graph    any object exposing .edges() (e.g. a networkx graph)
    bfs_edge collection of edge tuples lying on the BFS path

    Path edges are 'purple' with width 3; all others 'orange' with width 1.
    NOTE: the original expression contained an unreachable 'blue' branch
    guarded by the same ``e in bfs_edge`` test; removing it changes nothing.
    """
    edge_col = ['purple' if e in bfs_edge else 'orange' for e in graph.edges()]
    edge_width = [3 if e in bfs_edge else 1 for e in graph.edges()]
    return edge_col, edge_width
def show_graph(graph : nx.classes.graph.Graph, start: int, goal: int, node_labels='default',
               node_pos='neato', plot_size=(14, 14), file_name=None, is_digraph=True):
    """
    Draw graph with the BFS path from start to goal highlighted.

    node_labels label to use: 'default', 'none', or a list of labels to use.
    node_pos    graphviz layout program name, or 'project_layout' for grid graphs
                whose nodes are (row, col) tuples.
    plot_size   figure size in inches.
    file_name   a file name, e.g. 'my_graph.png'; when given the figure is saved.
    is_digraph  whether to treat edges as directed (affects label placement and
                which orientations count as path edges).
    """
    fig, ax = plt.subplots(figsize=plot_size)
    # BUG FIX: the original assigned ``Gr = g`` — the module-level demo graph —
    # silently ignoring the ``graph`` parameter. It only worked because the
    # demo loop happens to call show_graph(g, ...). Use the parameter.
    Gr = graph
    if node_pos == 'project_layout':
        # assumes nodes are (row, col) tuples laid out on a grid — TODO confirm
        node_pos = dict(zip(Gr.nodes(), [(b, 9 - a) for a, b in Gr.nodes()]))
    else:
        node_pos = nx.nx_pydot.graphviz_layout(Gr, prog=node_pos, root=start)
    edge_weight = nx.get_edge_attributes(Gr, 'weight')
    def path_edges(_path):
        """Return (edge list, cost) for a node path; cost is a placeholder 1."""
        edges = list(zip(_path[:-1], _path[1:]))
        if not is_digraph:
            # undirected graphs may report edges in either orientation
            edges += list(zip(_path[1:], _path[:-1]))
        return edges, 1
    bfs_path = getPath(bdfs(graph, start, goal, search='bfs'), start, goal)
    bfs_edge, bfs_cost = path_edges(bfs_path)  # bfs_cost is currently unused
    node_col = ['red' if node in bfs_path else 'lightgray' for node in Gr.nodes()]
    if node_labels == 'default':
        nodes = nx.draw_networkx_nodes(Gr, node_pos, ax=ax, node_color=node_col, node_size=400)
        nodes.set_edgecolor('k')
        nx.draw_networkx_labels(Gr, node_pos, ax=ax, font_size=8)
    elif node_labels == 'none':
        nodes = nx.draw_networkx_nodes(Gr, node_pos, ax=ax, node_color=node_col, node_size=50)
    else:
        # node_labels must be a list; it is zipped onto nodes in iteration order
        nodes = nx.draw_networkx_nodes(Gr, node_pos, ax=ax, node_color=node_col, node_size=400)
        nodes.set_edgecolor('k')
        mapping = dict(zip(Gr.nodes, node_labels))
        nx.draw_networkx_labels(Gr, node_pos, labels=mapping, ax=ax, font_size=8)
    edge_col, edge_width = edges_color(Gr, bfs_edge)
    if is_digraph:
        nx.draw_networkx_edge_labels(Gr, node_pos, ax=ax, label_pos=0.3, edge_labels=edge_weight)
    else:
        nx.draw_networkx_edge_labels(Gr, node_pos, ax=ax, edge_labels=edge_weight)
    nx.draw_networkx_edges(Gr, node_pos, ax=ax, edge_color=edge_col, width=edge_width, alpha=.3)
    if file_name:
        plt.savefig(file_name)
    plt.show()
    display(HTML())  # no-op placeholder display call kept from the original
def getPath(bp, start: int, goal : int):
    """Reconstruct the start -> goal node path from a back-pointer dict.

    @param bp: back-pointer dict mapping node -> predecessor
    @param start: first node of the path
    @param goal: last node of the path
    @return: list of nodes from start to goal inclusive
    """
    path = [goal]
    node = goal
    while node != start:
        node = bp[node]
        path.append(node)
    path.reverse()
    return path
# create test graph
g = nx.ladder_graph(4)
# get ordered at level bfs
# NOTE(review): presumably bfs_seq returns the BFS visit order from node 0 — confirm
paths = bfs_seq(g, 0)
# display each step
for path in paths:
    # each visited node is used as the goal of the highlighted BFS path
    show_graph(g, 0, path, plot_size=(6, 6))
# ladder_graph(n)
print(paths)
# + [markdown] id="oHzMEN2AngXj"
# ### Trainer configuration
# + pycharm={"name": "#%%\n"} id="7QRvg0XrngXj"
trainer_config ="""
train: True # train or not, default is True for generation we only need load pre-trained model
active: 'grid_small' # dataset set generated.
use_model: 'GraphGruRnn' # model to use , it must be defined in models section.
draw_prediction: True # at the of training draw. (TODO here now it will draw last epocs)
load_model: True # load model or not, and what
load_epoch: 500 # load model. last epoch
save_model: True # save model,
regenerate: True # regenerated, factor when indicated by epochs_save
active_setting: mini # indicate what setting to use, so we can switch from debug to production
evaluate: True
early_stopping:
monitor: loss
min_delta:
patience: 100
mode: max
settings:
# debug mode
debug:
epochs_log: 1000
start_test: 10
epochs_test: 10
epochs_save: 10
# baseline
mini:
# if we need enable early stopping
early_stopping: True
epochs_log: 1000
start_test: 10
epochs_test: 10
epochs_save: 50
# baseline
baseline:
early_stopping: True
epochs_log: 1000
start_test: 100
epochs_test: 100
epochs_save: 100
debug:
# debug graph generation
graph_generator: True
# benchmark dataset loader and sampler, if it true it will return after benchmark
benchmark_read: False
# debug model creation
model_creation: False
# debug training loops
train_verbose: False
# trace early stopping
trace_early: False
training:
train_ratio: 0.8
test_ration: 0.8
validation_ratio: 0.2 # validation ration
num_workers: 1 # num workers to load data, default 4
batch_ratio: 32 # num batches of samples per each epoch, 1 epoch = n batches
sample_time: 1 # default num sample, note each dataset can overwrite
optimizers:
node_optimizer:
eps: 1e-8
weight_decay: 0
amsgrad: False
momentum=0:
betas: [0.9, 0.999]
type: Adam
edge_optimizer:
eps: 1e-8
weight_decay: 0
amsgrad: False
momentum=0:
betas: [ 0.9, 0.999 ]
type: Adam
# lr_schedulers definition
lr_schedulers:
- type: multistep
milestones: [ 400, 1000 ]
name: main_lr_scheduler
- type: secondary
milestones: [ 400, 1000 ]
name: secondary
# Model definition
models:
# this pure model specific, single model can describe both edges and nodes
# in case we need use single model for edge and node prediction task ,
# use keyword single_model: model_name
GraphGruRnn:
node_model:
model: GraphGRU
optimizer: node_optimizer
lr_scheduler: main_lr_scheduler
has_input: True
has_output: True
edge_model:
model: GraphGRU
optimizer: edge_optimizer
lr_scheduler: main_lr_scheduler
input_size: 1
GraphLstmRnn:
node_model:
model: GraphLSTM
optimizer: node_optimizer
lr_scheduler: main_lr_scheduler
has_input: True
has_output: True
edge_model:
model: GraphLSTM
optimizer: edge_optimizer
lr_scheduler: main_lr_scheduler
input_size: 1
plots:
limit: 100
metrics:
degree: True
orbits: True
clustering: True
trace_prediction_timer: True
trace_training_timer: True
trace_epocs: 1
graph:
# multiplied (640x10 and 15x32)
# Generated Grid
grid:
# https://networkx.org/documentation/stable/reference/generated/networkx.generators.lattice.grid_2d_graph.html
epochs: 100
parameter_shrink: 1
batch_size: 32
test_batch_size: 32
test_total_size: 1000
num_layers: 4
lr: 0.003
milestones: [ 400, 1000 ]
lr_rate: 0.3
graph_spec:
grid_n: [ 10, 20 ]
grid_m: [ 10, 20 ]
max_num_node: 0
max_prev_node: 40
# just to test code logic
grid_min:
epochs: 100
parameter_shrink: 2
batch_size: 32
test_batch_size: 32
test_total_size: 1000
num_layers: 4
lr: 0.003
milestones: [ 400, 1000 ]
lr_rate: 0.3
graph_spec:
grid_n: [ 2, 5 ]
grid_m: [ 2, 6 ]
# max_num_node: 10
max_prev_node: 15
grid_small:
epochs: 500
parameter_shrink: 2
batch_size: 32
test_batch_size: 32
test_total_size: 1000
num_layers: 4
lr: 0.003
milestones: [ 400, 1000 ]
lr_rate: 0.3
graph_spec:
grid_n: [ 2, 5 ]
grid_m: [ 2, 6 ]
# max_num_node: 10
max_prev_node: 15
# Generated Community
caveman:
epochs: 20
parameter_shrink: 1
batch_size: 32
test_batch_size: 32
test_total_size: 1000
num_layers: 4
lr: 0.003
milestones: [ 400, 1000 ]
lr_rate: 0.3
graph_spec:
size_of_cliques: 10
num_of_cliques_i: [ 2, 3 ]
num_of_cliques_j: [ 30, 81 ]
p_edge: 0.8
# max number num nodes
max_num_node: 100
# max nodes
max_prev_node: 100
# small caveman community network.
# check networkx doc for details
caveman_small:
epochs: 20
num_layers: 4
parameter_shrink: 2
test_batch_size: 32
test_total_size: 1000
batch_size: 32
milestones: [ 400, 1000 ]
lr: 0.003
lr_rate: 0.3
# graph specs
graph_spec:
size_of_cliques: 20
num_of_cliques_i: [2, 3]
num_of_cliques_j: [6, 11]
p_edge: 0.3
# max number num nodes
max_num_node: 20
# max nodes
max_prev_node: 20
root_dir: "."
log_dir: "logs"
nil_dir: "timing"
graph_dir: "graphs"
results_dir: "results"
timing_dir: "timing"
figures_dir: "figures"
prediction_dir: "prediction" # where we save prediction
model_save_dir: "model_save" # where we save model
#figures_prediction_dir: "prediction_figures" #
"""
# + [markdown] id="UZwMX09TngXl"
# Let's create model trainer class
#
# + id="h6s9y9eZwOHt"
# !ls shapegnet/
# !pip install ipykernel
# !pip install ipython
# !pip install ipykernel
# !pip install tornado
# !pip install prompt-toolkit
# !pip install pyzmq
# !git pull
# !python main.py
# + id="FvcYZfREHn0P"
# these are the pyemd requirements
# !apt-get install -y python3-dev
# !apt-get install -y python3.8-dev
# !apt-get install -y libpython3.8-dev
# !pip install docutils
# !pip install pygments
# !pip install twine
# !pip install wheel
# !pip install pyemd
# !pip install rich
# !pip install python-louvain
# widges fix
# !pip install tqdm
# !pip install ipywidgets
# #!jupyter nbextension enable --py widgetsnbextension
# #!jupyter labextension install @jupyter-widgets/jupyterlab-manager
# !pip install PyYAML
# + pycharm={"name": "#%%\n"} colab={"base_uri": "https://localhost:8080/"} id="8eRa6f7CngXl" outputId="80f2a9e4-a040-4cb5-9422-b134c4bc2593"
import io
from typing import Dict, List, Set, Tuple
from shapegnet import create_graphs
from shapegnet.external.graphrnn_eval.stats import degree_stats, clustering_stats, orbit_stats_all
from shapegnet.generator_trainer import GeneratorTrainer
from shapegnet.model_config import ModelSpecs
from shapegnet.models.adjacency_decoder import AdjacencyDecoder
from shapegnet.models.sampler.GraphSeqSampler import GraphSeqSampler
from shapegnet.plotlib import plot
from shapegnet.plotlib.plot import draw_single_graph
from shapegnet.utils import fmt_print, fmtl_print
# parse the in-notebook YAML configuration string into a ModelSpecs object
trainer_spec = ModelSpecs(template_file_name=io.StringIO(trainer_config), verbose=True)
# + [markdown] id="v730OkhEngXm"
# Let's define the main training loop
# + pycharm={"name": "#%%\n"} id="uJRUupuEngXm"
import sys
from shapegnet.models.sampler.GraphSeqSampler import GraphSeqSampler
import argparse
import random
import sys
import time
from datetime import time
from datetime import timedelta
#from typing import Final
import numpy as np
import torch
import torch.utils as tutil
import pandas as pd
from shapegnet import create_graphs
from shapegnet.external.graphrnn_eval.stats import degree_stats, clustering_stats, orbit_stats_all
from shapegnet.generator_trainer import GeneratorTrainer
from shapegnet.model_config import ModelSpecs
from shapegnet.model_creator import ModelCreator
from shapegnet.models.adjacency_decoder import AdjacencyDecoder
from shapegnet.models.sampler.GraphSeqSampler import GraphSeqSampler
from shapegnet.plotlib import plot
from shapegnet.plotlib.plot import draw_single_graph
from shapegnet.utils import fmt_print, fmtl_print
# run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# graph-source selectors (see draw_samples(graph_type=...))
TRAIN = 1
TEST = 2
PREDICTION = 3
def generate_train_test(g, specs: ModelSpecs, is_fix_seed=True, is_shuffled=True):
    """
    Generate test, train, validation split.

    @param g: list of graphs to split (shuffled in place when is_shuffled)
    @param specs: model specification providing test/train/validation ratios
    @param is_fix_seed: when True, seed the RNG so the split is reproducible
    @param is_shuffled: when True, shuffle the graph list in place before splitting
    @return: (test, train, validation) lists of graphs
    """
    # split datasets
    if is_fix_seed:
        random.seed(123)
    if is_shuffled:
        random.shuffle(g)
    graphs_len = len(g)
    # NOTE(review): train and validation slices both start at index 0 and
    # therefore overlap; test may also overlap train. This mirrors the
    # GraphRNN-style split — confirm the overlap is intended.
    return g[int(specs.test_ratio() * graphs_len):], \
           g[0:int(specs.train_ratio() * graphs_len)], \
           g[0:int(specs.validation_ratio() * graphs_len)]
def compute_graph_split_len(gv, gt):
    """
    Return the average node count of each of two graph collections.

    @param gv: first collection of graphs (e.g. validation split)
    @param gt: second collection of graphs (e.g. test split)
    @return: (mean node count of gv, mean node count of gt)
    """
    mean_nodes_v = sum(graph.number_of_nodes() for graph in gv) / len(gv)
    mean_nodes_t = sum(graph.number_of_nodes() for graph in gt) / len(gt)
    return mean_nodes_v, mean_nodes_t
def draw_samples(trainer_spec: ModelSpecs, from_epoch=None, limit=None, graph_type=TRAIN,
                 num_samples=10):
    """
    Reads spool dir for generated sample and plot each.

    @param trainer_spec: trainer specification; locates the prediction files
    @param from_epoch: NOTE(review): currently unused — confirm whether
        epoch filtering was intended
    @param limit: NOTE(review): currently unused
    @param graph_type: NOTE(review): currently unused (TRAIN/TEST/PREDICTION)
    @param num_samples: NOTE(review): currently unused; all graphs from the
        last saved stat are drawn
    """
    fmtl_print("Train graph", trainer_spec.get_active_train_graph())
    fmtl_print("Train graph spec", trainer_spec.get_active_train_graph_spec())
    fmtl_print("Prediction files", trainer_spec.get_active_model_prediction_files())
    fmtl_print("Last saved epoch", trainer_spec.get_last_saved_epoc())
    fmtl_print("Last graph stat", trainer_spec.get_last_graph_stat())
    # plot every graph recorded for the node model's last saved epoch
    last_saved_epoch = trainer_spec.get_last_saved_epoc()['node_model']
    graphs = trainer_spec.get_last_graph_stat()
    for i, g in enumerate(graphs):
        file_name = trainer_spec.generate_prediction_figure_name(last_saved_epoch, sample_time=1, gid=i)
        draw_single_graph(g, file_name=file_name, plot_type='prediction', graph_name="test")
def prepare(trainer_spec: ModelSpecs):
    """
    Prepare output directories and logging before training.

    @param trainer_spec: trainer specification describing the directory layout
    """
    # create the directory tree declared in the spec (logs, results, figures, ...)
    trainer_spec.build_dir()
    # NOTE(review): name says "tensorflow" — presumably sets up tensorboard
    # logging; confirm against ModelSpecs.
    trainer_spec.setup_tensorflow()
def create_dataset_sampler(trainer_spec: ModelSpecs, graphs, num_workers=None):
    """
    Build a torch DataLoader over a GraphSeqSampler dataset using weighted
    random sampling with replacement.

    @param trainer_spec: trainer specification; supplies max_prev_node /
        max_num_node, batch size, samples per epoch and default worker count
    @param graphs: list of graphs used to train the network
    @param num_workers: optional override of trainer_spec.num_workers()
    @return: torch.utils.data.DataLoader
    """
    # dataset initialization; only pass size limits when max_prev_node is set
    if trainer_spec.max_prev_node() > 0:
        dataset = GraphSeqSampler(graphs,
                                  max_prev_node=trainer_spec.max_prev_node(),
                                  max_num_node=trainer_spec.max_num_node())
    else:
        dataset = GraphSeqSampler(graphs)
    # uniform weights: WeightedRandomSampler here is effectively uniform
    # sampling with replacement over the dataset
    normalized_weight = [1.0 / len(dataset) for i in range(len(dataset))]
    sample_strategy = tutil.data.sampler.WeightedRandomSampler(normalized_weight,
                                                               num_samples=trainer_spec.compute_num_samples(),
                                                               replacement=True)
    # explicit argument wins over the spec's default worker count
    _num_workers = trainer_spec.num_workers()
    if num_workers is not None:
        _num_workers = num_workers
    dataset_loader = tutil.data.DataLoader(dataset,
                                           batch_size=trainer_spec.batch_size(),
                                           num_workers=_num_workers,
                                           sampler=sample_strategy,
                                           pin_memory=False)
    return dataset_loader
def clean_graphs(graph_real, graph_pred, is_shuffle=True):
    """
    Selecting graphs generated that have the similar sizes.
    It is usually necessary for GraphRNN-S version, but not the full GraphRNN model.

    NOTE(review): the size-matching selection below is commented out, so this
    function currently only (optionally) shuffles both lists in place and
    prints their sizes; it returns None and the caller in evaluate() ignores
    the result — confirm whether the selection should be re-enabled.
    """
    #
    if is_shuffle:
        random.shuffle(graph_real)
        random.shuffle(graph_pred)
    # get length
    real_graph_len = np.array([len(graph_real[i]) for i in range(len(graph_real))])
    pred_graph_len = np.array([len(graph_pred[i]) for i in range(len(graph_pred))])
    fmt_print("Real graph size", real_graph_len)
    fmt_print("Prediction graph size", pred_graph_len)
    # # select pred samples
    # # The number of nodes are sampled from the similar distribution as the training set
    # pred_graph_new = []
    # pred_graph_len_new = []
    # for value in real_graph_len:
    #     pred_idx = find_nearest_idx(pred_graph_len, value)
    #     pred_graph_new.append(graph_pred[pred_idx])
    #     pred_graph_len_new.append(pred_graph_len[pred_idx])
    # return graph_real, pred_graph_new
def compute_generic_stats(epoch_predicted):
    """
    Print the average node count over a list of predicted graphs.

    @param epoch_predicted: list of networkx graphs predicted for one epoch
    """
    # guard against an empty prediction list (previously raised ZeroDivisionError)
    if not epoch_predicted:
        fmt_print('Prediction average number of nodes', 0)
        return
    graph_pred_aver = sum(graph.number_of_nodes() for graph in epoch_predicted)
    graph_pred_aver /= len(epoch_predicted)
    fmt_print('Prediction average number of nodes', graph_pred_aver)
def evaluate(cmds, trainer_spec: ModelSpecs,
             epoch_start=1,
             epoch_step=1):
    """
    Evaluate generated (predicted) graphs against the saved test graphs using
    MMD statistics (degree / clustering / orbit counts), as enabled in the spec.

    @param cmds: unused; kept for interface parity with main_train
    @param trainer_spec: trainer specification (provides graphs, flags, files)
    @param epoch_start: unused; kept for interface parity
    @param epoch_step: unused; kept for interface parity
    """
    # load the ground-truth graphs saved during training
    try:
        train_graph, graph_in_test = trainer_spec.load_train_test()
        graph_test_len = len(train_graph)
    except FileNotFoundError:
        print("No graph file found.")
        return
    graph_train = train_graph[0:int(0.8 * graph_test_len)]  # train slice (currently unused)
    # report the average node count of the test graphs
    graph_test_aver = 0
    for graph in graph_in_test:
        graph_test_aver += graph.number_of_nodes()
    graph_test_aver /= len(graph_in_test)
    print('test average len', graph_test_aver)
    predictions = trainer_spec.get_prediction_graph()
    for i, (file_name, epoch_predicted) in enumerate(predictions):
        # NOTE(review): hard-coded resume point skipping the first 27
        # prediction files — confirm it is still wanted.
        if i < 27:
            continue
        print(i, file_name)
        print("Computing statistic for", file_name)
        fmt_print('Graph in test:', len(graph_in_test))
        fmt_print('Graph in prediction:', len(epoch_predicted))
        compute_generic_stats(epoch_predicted)
        # shuffles/prints sizes; size-matching selection is currently disabled
        clean_graphs(graph_in_test, epoch_predicted)
        # evaluate MMD statistics, -1 means "not computed"
        mmd_degree = -1
        if trainer_spec.mmd_degree():
            mmd_degree = degree_stats(graph_in_test, epoch_predicted)
            fmt_print('Evaluated MMD:', mmd_degree)
        mmd_clustering = -1
        if trainer_spec.mmd_clustering():
            mmd_clustering = clustering_stats(graph_in_test, epoch_predicted)
            fmt_print('Graph clustering:', mmd_clustering)
        mmd_4orbits = -1
        if trainer_spec.mmd_orbits():
            # BUG FIX: the result was assigned to a different name
            # (`mmd_orbits`) while the initialized -1 was printed; store and
            # report the actual orbit statistic.
            mmd_4orbits = orbit_stats_all(graph_in_test, epoch_predicted)
            fmt_print('Graph orbit:', mmd_4orbits)
        print('degree', mmd_degree, 'clustering', mmd_clustering, 'orbits', mmd_4orbits)
def main_train(cmds, trainer_spec: ModelSpecs):
    """
    End-to-end training entry point: build the dataset, split it, save the
    ground-truth graphs, create the model and run the trainer.

    @param cmds: optional argparse namespace; only `cmds.plot` is consulted
    @param trainer_spec: full trainer/model specification
    """
    # prepare test environment (directory layout, logging)
    prepare(trainer_spec)
    # create model creator
    model_creator = ModelCreator(trainer_spec, device)
    # model graph specs
    print("###############################################")
    fmtl_print("Creating graphs type", trainer_spec.active)
    fmtl_print("Maximum previous node to track", trainer_spec.max_prev_node())
    fmtl_print("Maximum nodes to track", trainer_spec.max_num_node())
    # create dataset based on specs in config.yaml
    graphs = create_graphs.create(trainer_spec)
    max_num_node = max(graph.number_of_nodes() for graph in graphs)
    min_num_edge = min(graph.number_of_edges() for graph in graphs)
    max_num_edge = max(graph.number_of_edges() for graph in graphs)
    # NOTE(review): assigning set_max_num_node's return back onto
    # trainer_spec.trainer_spec looks suspicious — confirm against ModelSpecs.
    trainer_spec.trainer_spec = trainer_spec.set_max_num_node(max_num_node)
    #
    fmtl_print('train/test/val ratio', trainer_spec.train_ratio(),
               trainer_spec.test_ratio(), trainer_spec.validation_ratio())
    fmtl_print('max previous node', trainer_spec.max_prev_node())
    fmtl_print('max number node', trainer_spec.max_num_node())
    fmtl_print('max/min number edge', max_num_edge, min_num_edge)
    # compute splits
    graphs_test, graphs_train, graphs_validate = generate_train_test(graphs, trainer_spec)
    graph_validate_len, graph_test_len = compute_graph_split_len(graphs_validate, graphs_test)
    fmtl_print('total/train/test/validate sizes', len(graphs), len(graphs_train), len(graphs_test),
               len(graphs_validate))
    fmtl_print('validation/test', graph_validate_len, graph_test_len)
    fmtl_print('total graph number, training subset', len(graphs), len(graphs_train))
    print("###############################################")
    # save ground truth graphs; the full list is written to both files.
    # To get train and test set, after loading you need to manually slice.
    GeneratorTrainer.save_graphs(graphs, str(trainer_spec.train_graph_file()))
    GeneratorTrainer.save_graphs(graphs, str(trainer_spec.test_graph_file()))
    # plot training set if needed
    if cmds is not None and cmds.plot == 'train':
        plot.draw_samples_from_file(trainer_spec.train_graph_file(), plot_type='train',
                                    file_prefix=trainer_spec.train_plot_filename(),
                                    num_samples=10)
    dataset_loader = create_dataset_sampler(trainer_spec, graphs_train)
    if trainer_spec.is_read_benchmark():
        # BUG FIX: the module-level `from datetime import time` shadows the
        # `time` module, so `time.monotonic()` raised AttributeError; import
        # the clock explicitly here.
        from time import monotonic
        read_start_timer = monotonic()
        for _, _ in enumerate(dataset_loader):
            pass
        read_stop_timer = monotonic()
        fmt_print("Dataset read time", timedelta(seconds=read_stop_timer - read_start_timer), "sec")
        return
    models = model_creator.create_model(verbose=True)
    if trainer_spec.is_train_network():
        decoder = AdjacencyDecoder()
        trainer = model_creator.create_trainer(dataset_loader, models, decoder)
        trainer.train()
def main_trainer(trainer_spec: ModelSpecs):
    """
    Main entry point: train, draw generated samples and/or evaluate, driven
    by the flags in the configuration.

    @param trainer_spec: full trainer/model specification
    @return: None
    """
    if trainer_spec.is_train_network():
        main_train(None, trainer_spec)
    if trainer_spec.is_draw_samples():
        if not trainer_spec.is_trainer():
            sys.exit("Check configuration file, it looks like model {} "
                     "is untrained.".format(trainer_spec.get_active_model()))
        draw_samples(trainer_spec)
    if trainer_spec.is_evaluate():
        if not trainer_spec.is_trainer():
            # BUG FIX: get_active_model was not called, so the bound-method
            # repr was formatted into the message instead of the model name.
            sys.exit("Check configuration file, it looks like model {} "
                     "is untrained.".format(trainer_spec.get_active_model()))
        evaluate(None, trainer_spec)
# + [markdown] id="p3fUlwNrngXm"
#
# + pycharm={"name": "#%%\n", "is_executing": true} colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["1ffa5aac1f694d62b70886f14b1ec139", "5233d8222a69403eb0b2babce82da41c", "37f1299eeef848359cd71eab95309890", "9735f1aa127942c98ff07362e7766646", "644b300f5eda4b5ca488dcc42c1c5286", "bd05ad9fc8eb4b4a8c1ba0ee5957aafe", "<KEY>", "<KEY>", "d66a5240e52247d38bae20b5ef61c070", "5ebd9ef4f1d94af4a21c744c546f66e3", "39d76c8dd1dd4b7fba7ec98b9b0b75bb"]} id="VC9U4yHengXn" outputId="a613054c-2e5f-4292-d4e0-f1792417071f"
# re-create the trainer specification from the in-notebook YAML config
trainer_spec = ModelSpecs(template_file_name=io.StringIO(trainer_config), verbose=True)
model_specs = trainer_spec.get_active_model_spec()
model_specs
fmtl_print("Torch cudnn backend version: ", torch.backends.cudnn.version())
fmtl_print("Model in training mode", trainer_spec.is_train_network())
fmtl_print("Model in evaluate mode", trainer_spec.is_evaluate())
fmtl_print("Model in generate sample", trainer_spec.is_draw_samples())
# NOTE(review): the next three labels all read "Model active dataset" but
# print the active dataset, epochs and batch size respectively — the labels
# look copy-pasted.
fmtl_print("Model active dataset", trainer_spec.active)
fmtl_print("Model active dataset", trainer_spec.epochs())
fmtl_print("Model active dataset", trainer_spec.batch_size())
fmtl_print("Model number of layers", trainer_spec.num_layers())
fmtl_print("Active model", trainer_spec.active_model)
# run
main_trainer(trainer_spec)
# + pycharm={"name": "#%%\n", "is_executing": true} id="mK7SGSebngXn"
|
colab_generative_v1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tillaczel/Machine-learning-workshop/blob/resturcture/Q_learning/Q_Learning_tic_tac_toe.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="nR0W_JJTx90P" colab_type="text"
# # Tic tac toe with Q learning
#
# This notebook contains an implementation of the Q-learning algorithm applied to the 3x3 tic tac toe game. I created an environment and an agent object. For the environment object I used the tic tac toe written by <NAME> (https://github.com/kenwalger/Python-Tic-Tac-Toe/blob/master/tic-tac-toe2.py). I extended the tic tac toe implementation with the step function, used for experience gathering. The agent uses 1 time step bootstrapping and an $\epsilon$ greedy strategy.
# + id="8OZm30wG-IJU" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
# + id="1Q6BQ8Ly6IS9" colab_type="code" colab={}
class environment():
    """
    3x3 tic-tac-toe environment.

    Wraps an interactive console game (X goes first) and exposes a gym-like
    step() used for Q-learning experience gathering, where the learning agent
    plays the `human` side and a fixed heuristic plays the `computer` side.

    Board squares are ' ' (empty), 'X' or 'O', numbered 0-8 row-major.
    NOTE(review): __init__ calls pieces(), which prompts on stdin; creating
    an instance is therefore interactive.
    """
    def __init__(self):
        # piece / outcome markers
        self.X = "X"
        self.O = "O"
        self.EMPTY = " "
        self.TIE = "TIE"
        self.NUM_SQUARES = 9
        # interactive prompt decides who plays X (X always moves first)
        self.computer, self.human = self.pieces()
        self.turn = self.X
        self.board = self.new_board()
    def display_instruct(self):
        """Display game instructions."""
        print(
            """
    Welcome to the greatest intellectual challenge of all time: Tic-Tac-Toe
    This will be a showdown between your human brain and my silicon processor.
    You will make your move known by entering a number, 0 - 8. The number
    will corresponde to the board position as illustrated:
                    0 | 1 | 2
                    ---------
                    3 | 4 | 5
                    ---------
                    6 | 7 | 8
    Prepare yourself, human. The ultimate battle is about to begin. \n
    """
        )
    def ask_yes_no(self, question):
        """Ask a yes or no question; re-prompt until 'y' or 'n' is entered."""
        response = None
        while response not in ("y", "n"):
            response = input(question).lower()
        return response
    def ask_number(self, question, low, high):
        """Ask for a number within a range (low inclusive, high exclusive)."""
        response = None
        while response not in range(low,high):
            response = int(input(question))
        return response
    def pieces(self):
        """Determine if player or computer goes first; return (computer, human) marks."""
        go_first = self.ask_yes_no("Do you require the first move? (y/n): ")
        if go_first == "y":
            print("\nThen take the first move. You will need it.")
            human = self.X
            computer = self.O
        else:
            print("\nYour bravery will be your undoing... I will go first.")
            computer = self.X
            human = self.O
        return computer, human
    def new_board(self):
        """Create new game board (list of 9 empty squares)."""
        board = []
        for square in range(self.NUM_SQUARES):
            board.append(self.EMPTY)
        return board
    def display_board(self, board):
        """Display game board on screen."""
        print("\n\t", board[0], "|", board[1], "|", board[2])
        print("\t", "---------")
        print("\n\t", board[3], "|", board[4], "|", board[5])
        print("\t", "---------")
        print("\n\t", board[6], "|", board[7], "|", board[8], "\n")
    def legal_moves(self, board):
        """Create list of legal moves (indices of empty squares)."""
        moves = []
        for square in range(self.NUM_SQUARES):
            if board[square] == self.EMPTY:
                moves.append(square)
        return moves
    def winner(self, board):
        """Return the winning mark, self.TIE when the board is full, else None."""
        # all eight winning lines: rows, columns, diagonals
        WAYS_TO_WIN = ((0, 1, 2),
                       (3, 4, 5),
                       (6, 7, 8),
                       (0, 3, 6),
                       (1, 4, 7),
                       (2, 5, 8),
                       (0, 4, 8),
                       (2, 4, 6))
        for row in WAYS_TO_WIN:
            if board[row[0]] == board[row[1]] == board[row[2]] != self.EMPTY:
                winner = board[row[0]]
                return winner
        if self.EMPTY not in board:
            return self.TIE
        return None
    def human_move(self, board, human):
        """Get human move from stdin; re-prompt until a legal square is chosen."""
        legal = self.legal_moves(board)
        move = None
        while move not in legal:
            move = self.ask_number("Where will you move? (0-8): ", 0, self.NUM_SQUARES)
            if move not in legal:
                print("\nThat square is already occupied, foolish human. Choose another.\n")
        print("Fine...")
        return move
    def computer_move(self, board, computer, human):
        """Make computer move: win if possible, else block, else best open square."""
        # make a copy to work with since function will be changing list
        board = board[:]
        # the best positions to have, in order (center, corners, edges)
        BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
        # if computer can win, take that move
        for move in self.legal_moves(board):
            board[move] = computer
            if self.winner(board) == computer:
                return move
            # done checking this move, undo it
            board[move] = self.EMPTY
        # if human can win, block that move
        for move in self.legal_moves(board):
            board[move] = human
            if self.winner(board) == human:
                return move
            # done checking this move, undo it
            board[move] = self.EMPTY
        # since no one can win on next move, pick best open square
        for move in BEST_MOVES:
            if move in self.legal_moves(board):
                return move
    def next_turn(self, turn):
        """Switch turns."""
        if turn == self.X:
            return self.O
        else:
            return self.X
    def congrat_winner(self, the_winner, computer, human):
        """Congratulate the winner (or announce the tie)."""
        if the_winner != self.TIE:
            print(the_winner, "won!\n")
        else:
            print("It's a tie!\n")
        if the_winner == computer:
            print("As I predicted, human, I am triumphant once more. \n"
                  "Proof that computers are superior to humans in all regards.")
        elif the_winner == human:
            print("No, no! It cannot be! Somehow you tricked me, human. \n"
                  "But never again! I, the computer, so swear it!")
        elif the_winner == self.TIE:
            print("You were most lucky, human, and somehow managed to tie me. \n"
                  "Celebrate today... for this is the best you will ever achieve.")
    def main(self):
        """Play one full interactive game against the heuristic computer player."""
        self.display_instruct()
        # NOTE(review): pieces() is asked again here even though __init__
        # already prompted — the second answer wins for this game.
        computer, human = self.pieces()
        turn = self.X
        board = self.new_board()
        self.display_board(board)
        while not self.winner(board):
            if turn == human:
                move = self.human_move(board, human)
                board[move] = human
            else:
                move = self.computer_move(board, computer, human)
                board[move] = computer
            self.display_board(board)
            turn = self.next_turn(turn)
        the_winner = self.winner(board)
        self.congrat_winner(the_winner, computer, human)
    def step(self, state, action):
        """
        Apply one agent action (as the `human` mark) and return a transition.

        If it is the computer's turn, the computer moves first. An illegal
        agent action yields reward -1 and resets the episode. Otherwise the
        agent's mark is placed, the computer replies, and terminal outcomes
        give reward +1 (agent wins), -1 (computer wins) or 0 (tie), each
        resetting the board.

        @param state: board list to restore before acting (copied)
        @param action: square index 0-8 chosen by the agent
        @return: (state, action, reward, next_state, done)
        NOTE(review): if the board is already terminal when the outer
        `if not self.winner(...)` fails, `next_state` is never bound and the
        return raises UnboundLocalError — confirm this path is unreachable.
        """
        reward = 0
        done = 0
        self.board = state.copy()
        if self.turn == self.computer:
            move = self.computer_move(self.board, self.computer, self.human)
            self.board[move] = self.computer
            self.turn = self.next_turn(self.turn)
        if not self.winner(self.board):
            move = action
            if move not in self.legal_moves(self.board):
                # illegal move: punish and reset the episode
                reward = -1
                done = 1
                self.turn = self.X
                self.board = self.new_board()
            else:
                self.board[move] = self.human
                self.turn = self.next_turn(self.turn)
                if not self.winner(self.board):
                    # computer replies immediately within the same step
                    move = self.computer_move(self.board, self.computer, self.human)
                    self.board[move] = self.computer
                    self.turn = self.next_turn(self.turn)
            next_state = self.board.copy()
            if self.winner(self.board)==self.human:
                reward = 1
                done = 1
                self.turn = self.X
                self.board = self.new_board()
            elif self.winner(self.board)==self.computer:
                reward = -1
                done = 1
                self.turn = self.X
                self.board = self.new_board()
            elif self.winner(self.board)==self.TIE:
                reward = 0
                done = 1
                self.turn = self.X
                self.board = self.new_board()
        return state, action, reward, next_state, done
# + id="PwnrkVtY-Mx4" colab_type="code" colab={}
class agent():
    """
    Tabular Q-learning agent for 3x3 tic-tac-toe.

    The Q table is indexed by the nine board squares (each encoded
    ' '->0, 'X'->1, 'O'->2) plus the action, i.e. shape (3,)*9 + (9,).
    """
    def __init__(self, alpha, gamma):
        # Q[board_state][action]
        self.Q = np.zeros((3, 3, 3, 3, 3, 3, 3, 3, 3, 9))
        self.alpha = alpha  # learning rate
        self.gamma = gamma  # discount factor
    def transform_board(self, board):
        """Map a board (list of ' '/'X'/'O') to an integer index tuple for Q."""
        result = np.zeros((len(board)))
        for i, b in enumerate(board):
            if b == ' ':
                result[i] = 0
            elif b == 'X':
                result[i] = 1
            elif b == 'O':
                result[i] = 2
        return tuple(result.astype(int))
    def act(self, state, epsilon=1):
        """
        Pick an action for *state*.

        NOTE(review): here *epsilon* is the probability of acting greedily
        (a random move is taken when epsilon < rand()) — the inverse of the
        usual epsilon-greedy convention. train() grows epsilon towards 1,
        which is consistent with this reading; confirm it is intended.
        """
        if epsilon < np.random.rand():
            action = np.random.randint(9)
        else:
            action = np.argmax(self.Q[self.transform_board(state)])
        return action
    def train(self, env, iters, epsilon, epsilon_decay):
        """
        Run *iters* one-step Q-learning updates against *env*.

        @param env: environment providing board/step()
        @param epsilon: initial greediness (see act()); grows by 1/epsilon_decay
        @param epsilon_decay: per-step decay divisor (< 1 grows epsilon)
        @return: numpy array of per-step rewards
        """
        reward_hist = np.zeros((iters))
        for iter in range(iters):
            state = env.board.copy()
            action = self.act(state, epsilon)
            epsilon *= 1/epsilon_decay
            state, action, reward, next_state, done = env.step(state, action)
            # BUG FIX: the TD target must bootstrap on the *value* of the best
            # next action (np.max), not its index (np.argmax).
            target = reward + (1 - done) * self.gamma * np.max(self.Q[self.transform_board(next_state)])
            self.Q[self.transform_board(state)][action] += self.alpha * (target - self.Q[self.transform_board(state)][action])
            reward_hist[iter] = reward
        return reward_hist
    def play(self, env):
        """Play one displayed game: this agent (greedy) vs env's computer player."""
        env.turn = env.X
        env.board = env.new_board()
        env.display_board(env.board)
        while not env.winner(env.board):
            if env.turn == env.human:
                move = self.act(env.board)
                # NOTE(review): if the greedy move targets an occupied square,
                # the agent's turn is skipped without placing a piece.
                if env.board[move]==env.EMPTY:
                    env.board[move] = env.human
            else:
                move = env.computer_move(env.board, env.computer, env.human)
                env.board[move] = env.computer
            env.display_board(env.board)
            env.turn = env.next_turn(env.turn)
        env.the_winner = env.winner(env.board)
        env.congrat_winner(env.the_winner, env.computer, env.human)
# + id="UOM9hBoG94IF" colab_type="code" outputId="0fb86871-9237-49d4-d5f6-12e46590480b" colab={"base_uri": "https://localhost:8080/", "height": 67}
# hyper-parameters for the tabular Q-learning agent
alpha = 0.1  # learning rate
gamma = 1  # discount factor (undiscounted episodes)
env = environment()  # NOTE: prompts on stdin for who moves first
ag = agent(alpha, gamma)
# + id="aQ-Zd46N_tr1" colab_type="code" outputId="79328cf8-1c69-4040-9e83-2d8096ad9cde" colab={"base_uri": "https://localhost:8080/", "height": 717}
# train for 10k steps, starting 90% random (see agent.act for epsilon meaning)
reward_hist = ag.train(env, 10000, 0.1, 0.9999)
print(np.mean(reward_hist))
# + id="0d5H2iA-GsMv" colab_type="code" outputId="54e02d99-1def-4b47-c94c-c2679e0cc05e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# watch the trained agent play against the heuristic computer
ag.play(env)
# + id="BV9T6W1_LJez" colab_type="code" outputId="f6c13c51-583c-4b88-b724-9162222e3d56" colab={"base_uri": "https://localhost:8080/", "height": 740}
# play the interactive console game yourself
env.main()
|
Q_learning/Q_Learning_tic_tac_toe.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
display(HTML("<style>.container { width:100% !important; }</style>"))
# #### MIT License (c) 2019 by <NAME>
# #### Jupyter notebook written in Julia 1.1.0. It illustrates the construction of univariate histograms and the Monte Carlo simulation technique for multivariate Gaussian samples with a given covariance matrix. See (3.67), p. 91, in "Stochastic Methods in Asset Pricing."
using LinearAlgebra
using SpecialFunctions
using StatsBase
using Random
using Plots
pyplot()
# We write our own histogram function. It takes as an input a single 1-dimensional array of data. The number of bins in the histogram is determined automatically by using the Diaconis-Friedman rule. The function returns two arrays: the mid-points of the bins and the (unnormalized) heights of the bars.
# Histogram of a 1-D sample. The bin width follows the Freedman–Diaconis
# rule (2*IQR*n^(-1/3)); returns (bin mid-points, raw per-bin counts).
function hstgram(data_sample::Array{Float64,1})
    # NOTE(review): `first` and `last` shadow the Base functions of the same
    # name inside this function.
    data_sorted=sort(data_sample)
    first=data_sorted[1]
    last=data_sorted[end]
    nmb=length(data_sorted)
    # inter-quartile range drives the Freedman–Diaconis bin width
    IQR=percentile(data_sorted,75)-percentile(data_sorted,25)
    bin_size_loc = 2*IQR*(nmb^(-1.0/3))
    num_bins=Int(floor((last-first)/bin_size_loc))
    # recompute the width so num_bins bins exactly tile [first, last]
    bin_size=(last-first)/(num_bins)
    bin_end_points=[first+(i-1)*bin_size for i=1:(num_bins+1)]
    # cumulative counts strictly below each boundary ...
    ahist_val=[length(data_sorted[data_sorted .< u]) for u in bin_end_points]
    # ... differenced to per-bin counts
    hist_val=[ahist_val[i+1]-ahist_val[i] for i=1:num_bins]
    mid_bins=[first-bin_size/2+i*bin_size for i=1:num_bins]
    return mid_bins, hist_val
end
# First, create data sampled from the standard univariate normal density.
val=(x->((2*π)^(-1/2)*exp(-x^2/2))).(-3.3:0.05:3.3);
# Now create 10000 randomly sampled points from the standard univariate Gaussian law. Note that the random numbers generator is called in such a way that it generates the same sample each time it is called. Notice also the normalization of the heights of the bars meant to ensure that the empirical density integrates to 1 (i.e., the area of the histogram equals 1).
# +
#Random.seed!(0xabcdef12); # if needed to recover the same data
# -
nval=randn!(zeros(10000));
U,V=hstgram(nval);
VV=V/(sum(V)*(U[2]-U[1]));
# See the total number of bins:
length(VV)
# We now plot the histogram created from the data against the standard normal density.
plot(U.+(U[2]-U[1])/2,VV,line=(:steppre,1),linewidth=0.05,label="histogram")
xlabel!("samples")
ylabel!("frequency")
plot!(-3.3:0.05:3.3,val,label="normal density")
# Generate another sample:
nval=randn(10000);
U,V=hstgram(nval);
VV=V/(sum(V)*(U[2]-U[1]));
plot(U.+(U[2]-U[1])/2,VV,line=(:steppre,1),linewidth=0.05,label="histogram")
xlabel!("samples")
ylabel!("frequency")
plot!(-3.3:0.05:3.3,val,label="normal density")
# Univariate Gaussian samples can also be generated by transforming a uniformly distributed sample in $[0,1[$ (this method may be less reliable).
uval=rand(10000);
nval=(x->sqrt(2)*erfinv(2*x-1)).(uval);
U,V=hstgram(nval);
VV=V/(sum(V)*(U[2]-U[1]));
plot(U.+(U[2]-U[1])/2,VV,line=(:steppre,1),linewidth=0.05,label="histogram")
xlabel!("samples")
ylabel!("frequency")
plot!(-3.3:0.05:3.3,val,label="normal density")
# Now we generate 10000 samples from the bi-variate Gaussian distribution with independent and standard normal marginals.
nnval=randn!(zeros(10000));
scatter(nval,nnval,ratio=1,markersize=1,label="")
# First, create a 2x2 matrix that is positive definite and symmetric (a candidate covariance matrix). Check if it is indeed positive definite by computing the eigenvalues.
A=rand(2,2);
#Cov=A'A; # another alternative that always yields a positive definite matrix
Cov=Symmetric(A); # may not produce a positive definite matrix
eigvals(Cov) # repeat the previous cell if the eigenvalues are not positive
# Now generate the "square-root" of the covariance matrix by using either the spectral decomposition or the Cholesky factorization.
eigdCov=Diagonal(eigvals(Cov).^0.5)
eigCov=eigvecs(Cov); # matrix of eigen vectors (as columns), so Cov = eigCov*Diagonal(eigvals(Cov))*eigCov'
chlCov=cholesky(Cov); # Cholesky "square root" of Cov
# Spectral "square root": MM = D^(1/2)*V' so that MM'*MM = V*D*V' = Cov.
# (Fixed: the original used MM=eigdCov*eigCov, i.e. D^(1/2)*V, whose MM'*MM equals
# V'*D*V, which differs from Cov in general — the residual check below was non-zero.)
MM=eigdCov*eigCov'; # Spectral "square root" of Cov
# Check that the factorizations give what is expected (each residual should be ~0 up to round-off):
Cov-MM'*MM
Cov-chlCov.L*chlCov.U
Cov-(chlCov.U)'chlCov.U
eigCov'eigCov # eigCov should be an orthogonal matrix
# Transform the randomly generated standard bi-variate sample through the "square root" of the covariance matrix.
#method 1
NN=hcat(nval,nnval)';
data_2_dim=MM'NN; # columns of NN are i.i.d. standard normal pairs; the result has covariance MM'*MM = Cov
scatter(data_2_dim[1,:],data_2_dim[2,:],ratio=1,markersize=1,label="")
#method 2
data_2_dim=(chlCov.L)*NN; # Cov = L*L', so L*NN also has covariance Cov
scatter(data_2_dim[1,:],data_2_dim[2,:],ratio=1,markersize=1,label="")
|
Multivariate_Normal_Dist_Examples_Julia.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
# Load the Wiki-Vote edge list from disk as a directed graph with integer node ids.
g = nx.read_edgelist("Wiki-Vote.txt",create_using=nx.DiGraph(),nodetype=int)
# +
def plot_fun():
    """Plot the degree distribution of the module-level directed graph ``g`` on log-log axes.
    Plots the empirical probability P(k) that a node has (total, in + out) degree k,
    overlays a k**-3 power-law reference curve on k = 1..9, and prints the slope of a
    straight-line fit through the reference points.
    """
    node_cnt = g.number_of_nodes()
    # Count how many nodes have each total degree.
    # Fixed: the original computed max_degree as max(max_degree, i) over node *ids*
    # (the largest node label, not the largest degree) and rescanned every node once
    # per candidate degree value (O(n * max_degree)); this builds the counts in one pass.
    degree_counts = {}
    for node in g.nodes():
        d = g.degree(node)
        degree_counts[d] = degree_counts.get(d, 0) + 1
    max_degree = max(degree_counts) if degree_counts else 0
    x = list(range(max_degree + 1))
    y = [degree_counts.get(k, 0) for k in x]
    # Normalise counts to an empirical probability distribution.
    y1 = [cnt / node_cnt for cnt in y]
    plt.plot(x, y1, 'o')
    plt.xscale('log')
    plt.yscale('log')
    plt.title('Degree Distribution(log-log scale)')
    # Reference power law P(k) ~ k**-3 on k = 1..9.
    y2 = [i for i in range(1, 10)]
    x1 = []
    for i in y2:
        j = (i ** -3)
        x1.append(j)
    plt.plot(y2, x1)
    plt.xlabel('k')
    plt.ylabel('P(k)')  # fixed: label was the unbalanced string 'P(k'
    plt.show()
    plt.hist(x1)
    y2.sort()
    x1.sort()
    # NOTE(review): sorting x1 ascending pairs the largest k with the largest P(k);
    # presumably intended for the reference curve — confirm before relying on the slope.
    slope, intercept = np.polyfit(y2, x1, 1)
    print(slope)
    plt.show()
# -
|
real_world.py.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="xXHciiU1ob3I" colab_type="text"
# # TP2: Logistic Regression as your first simple Neural Network
#
# You will build a NN to recognize cats (actually it is a logistic regression classifier, as you have done in the last exercise, but with this assignment, you'll get familiar with some terms used in neural networks).
#
# - Do not use loops (for/while) in your code, unless you are asked to do so.
#
# <font color='blue'> There is a slight changing in notations in pratical assignments (compared to those in lecture). In lecture $t$ is used for target (true "label") and from this assignment, we will note $y$ as true label (for convinence with the variables in model of librarys that will be used). We will note $a$ as the output of the activation function. Actually you should get familar with the "activation" term which is an important term in neural network.
#
# + [markdown] id="HgniQMKgob3L" colab_type="text"
#
#
# First, import all the packages that you will need.
# + id="XUE40mpMob3N" colab_type="code" outputId="547d12de-bdf5-4e6a-e7aa-33a5f6cc3d95" executionInfo={"status": "ok", "timestamp": 1580911168888, "user_tz": -60, "elapsed": 623, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 36}
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
# TODO: change the path
import os
os.chdir('/content/drive/My Drive/Option_AI_2nd/TP2-todo')
# + id="Ii51uGl0ob3U" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
from IPython.display import Image
# + [markdown] id="_I-b2LRBob3Y" colab_type="text"
# ## 1 - Problem##
#
# You are given a dataset containing:
#
# - a training set of N_train images labeled as cat (y=1) or non-cat (y=0)
#
# - a test set of N_test images labeled as cat or non-cat
#
# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
#
#
# Let's look at the dataset.
# + id="oJRLTE6Dob3Z" colab_type="code" colab={}
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# + [markdown] id="NS9d_lKLob3c" colab_type="text"
# We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
#
# Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code.
#
# <font color='red'>Change the `index` value and re-run to see other images.
# + id="_5e3U1Qsob3d" colab_type="code" outputId="e32852f5-51c7-4d21-df95-24543f993ae9" executionInfo={"status": "ok", "timestamp": 1580917276822, "user_tz": -60, "elapsed": 728, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 287}
# Example of a picture
index = 3
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
# + [markdown] id="3ERyRd4nob3h" colab_type="text"
# <font color='red'>**Exercise:** Find the values for:
#
# - N_train (number of training examples)
#
# - N_test (number of test examples)
#
# - num_px (= height = width of a training image)
#
# </font>
# `train_set_x_orig` is a numpy-array of shape (N_train, num_px, num_px, 3). For instance, you can access `N_train` by writing `train_set_x_orig.shape[0]`.
# + id="w-2CjHeXp0Js" colab_type="code" colab={}
### START CODE HERE ### (≈ 3 lines of code)
N_train = ... ### COMPLETE YOUR CODE
N_test = ...
num_px = ...
### END CODE HERE ###
print ("Number of training examples: N_train = " + str(N_train))
print ("Number of testing examples: N_test = " + str(N_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# + [markdown] id="U2VkO_P3ob3k" colab_type="text"
# **Expected Output**:
# <table style="width:15%">
# <tr>
# <td>**N_train**</td>
# <td> 209 </td>
# </tr>
#
# <tr>
# <td>**N_test**</td>
# <td> 50 </td>
# </tr>
#
# <tr>
# <td>**num_px**</td>
# <td> 64 </td>
# </tr>
#
# </table>
#
# + [markdown] id="Pn0G2DECob3l" colab_type="text"
# For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be N_train (respectively N_test) columns.
#
# <font color='red'> **Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
# </font>
# A trick: when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
# ```python
# X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
# ```
# + id="S1hgAgyFs1wS" colab_type="code" colab={}
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = ...  # TODO (student): flatten to shape (num_px*num_px*3, N_train)
test_set_x_flatten = ...  # TODO (student): flatten to shape (num_px*num_px*3, N_test); fixed "...." (a SyntaxError) to "..."
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))# some error in the expected output?
# + [markdown] id="pjDFe_AJob3p" colab_type="text"
# **Expected Output**:
#
# <table style="width:35%">
# <tr>
# <td>**train_set_x_flatten shape**</td>
# <td> (12288, 209)</td>
# </tr>
# <tr>
# <td>**train_set_y shape**</td>
# <td>(1, 209)</td>
# </tr>
# <tr>
# <td>**test_set_x_flatten shape**</td>
# <td>(12288, 50)</td>
# </tr>
# <tr>
# <td>**test_set_y shape**</td>
# <td>(1, 50)</td>
# </tr>
# <tr>
# <td>**sanity check after reshaping**</td>
# <td>[17 31 56 22 33]</td>
# </tr>
# </table>
# + [markdown] id="rj1DfN6JtEHR" colab_type="text"
# #<font color='red'> Question: explain the values of 12288, 209, 50?
# + [markdown] id="GZTuhDT2ob3p" colab_type="text"
# Preprocessing: one common preprocessing step in ML is to center and standardize your dataset (as in the last exercise). But for picture datasets, it is simpler and more convenient and works almost as well: to divide every row of the dataset by 255 (the maximum value of a pixel channel).
#
# <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
#
# <font color='red'> TODO: Let's standardize our dataset.
# + id="boPEMMk8ob3q" colab_type="code" colab={}
#TODO
train_set_x = ...### WRITE CODE HERE ###
test_set_x = ...### WRITE CODE HERE ###
# + [markdown] id="PxQS3ueUob3s" colab_type="text"
# ## 2 - Learning algorithm ##
#
# The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
#
# + id="fySojfopob3t" colab_type="code" outputId="af98fae0-b5ed-4ea1-c06b-036aca0b4780" executionInfo={"status": "ok", "timestamp": 1580915796596, "user_tz": -60, "elapsed": 2458, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 465}
Image("images/LogReg_kiank.png",width=600,height=450)
# + [markdown] id="IbD0mBazob3v" colab_type="text"
# **Mathematical expression of the algorithm**:
#
# For one example $x^{(i)}$ (or $x_{n}$ in the lecture)
# $$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
# $$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
# $$ \mathcal{E}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
# (recall in lecture $ \mathcal{E}(y^{(i)}, t^{(i)}) = - t^{(i)} \log(y^{(i)}) - (1-t^{(i)} ) \log(1-y^{(i)})\tag{4}$, here we use $\mathcal {y}$ for the true label or target instead of $\mathcal {t}$, we use $\mathcal {a}$ for activation function, you should get familar with "activation" term, an important term in neural network.)
#
# The cost is then computed by summing over all training examples:
# $$ E = \frac{1}{N} \sum_{i=1}^N \mathcal{E}(a^{(i)}, y^{(i)})\tag{5}$$
#
# #Note that $\mathcal{b}$ is the bias ($\mathcal{w_0}$ in the last exercise). Once again, we change the notation to make you get familar to the notation in Neural network.
#
#
#
# #In this exercise, you will carry out the following steps:
#
# - Initialize the parameters of the model
#
# - Learn the parameters for the model by minimizing the cost
#
# - Use the learned parameters to make predictions (on the test set)
#
# - Analyse the results and conclude
# + [markdown] id="C-sICo7Mob3w" colab_type="text"
# ## 3 - Building the parts of our algorithm ##
#
# The main steps for building a Neural Network are:
# 1. Define the model structure (such as number of input features)
# 2. Initialize the model's parameters
# 3. Loop:
# - Calculate current loss (forward propagation)
# - Calculate current gradient (backward propagation)
# - Update parameters (gradient descent)
#
#
# ### 3.1 - Helper functions
#
# **Exercise**: Implement `sigmoid()`. Use np.exp().
# + id="TzJj4v6mob3w" colab_type="code" colab={}
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """Compute the element-wise sigmoid 1 / (1 + exp(-z)).
    Arguments:
    z -- a scalar or numpy array of any size
    Returns:
    s -- sigmoid(z), with the same shape as z
    """
    ### START CODE HERE ### (≈ 1 line of code)
    s = 1 / (1 + np.exp(-z))
    ### END CODE HERE ###
    return s
# + id="mR3O5fwqob3y" colab_type="code" outputId="06d62c58-ab85-4e0e-ac47-c6e76b6e3bd9" executionInfo={"status": "ok", "timestamp": 1580912750711, "user_tz": -60, "elapsed": 528, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 36}
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
# + [markdown] id="9me6wu8Kob30" colab_type="text"
# **Expected Output**:
#
# <table>
# <tr>
# <td>**sigmoid([0, 2])**</td>
# <td> [ 0.5 0.88079708]</td>
# </tr>
# </table>
# + [markdown] id="_WUISbLJob30" colab_type="text"
# ### 3.2 - Initializing parameters
#
# **Exercise:** Implement parameter initialization in the cell below (np.zeros() in the Numpy).
# + id="pn6XTnTn81Hh" colab_type="code" colab={}
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)
    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    ### START CODE HERE ###
    w = np.zeros((dim, 1))  # one weight per input feature, all starting at 0
    b = 0                   # scalar bias; int 0 satisfies the assert below and prints as "0"
    ### END CODE HERE ###
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
# + id="bvxVejF8ob32" colab_type="code" outputId="a62b40a4-f61e-48e8-b4ce-8cc6dff691a6" executionInfo={"status": "ok", "timestamp": 1580916171435, "user_tz": -60, "elapsed": 1263, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 76}
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# + [markdown] id="iyoNwx7-ob33" colab_type="text"
# **Expected Output**:
#
#
# <table style="width:15%">
# <tr>
# <td> ** w ** </td>
# <td> [[ 0.]
# [ 0.]] </td>
# </tr>
# <tr>
# <td> ** b ** </td>
# <td> 0 </td>
# </tr>
# </table>
#
# For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
# + [markdown] id="i0CwqO4Mob34" colab_type="text"
# ### 3.3 - Forward and Backward propagation
#
# Do the "forward" and "backward" propagation steps for learning the parameters.
#
# **Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
#
# **Hints**: ($y$: true label, $a$: activation output, here the output of sigmoid function)
#
# Forward Propagation:
# - You get X
# - You compute $A = \sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(N-1)}, a^{(N)})$
# - You calculate the cost function: $E = -\frac{1}{N}\sum_{i=1}^{N}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
#
# Here are the two formulas you will be using:
#
# $$ \frac{\partial E}{\partial w} = \frac{1}{N}X(A-Y)^T\tag{7}$$
# $$ \frac{\partial E}{\partial b} = \frac{1}{N} \sum_{i=1}^N (a^{(i)}-y^{(i)})\tag{8}$$
# + id="0yI5kD1-ob35" colab_type="code" colab={}
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b
    """
    m = X.shape[1]
    # FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
    A = sigmoid(np.dot(w.T, X) + b)                              # compute activation, shape (1, m)
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m  # compute cost (eq. 5)
    ### END CODE HERE ###
    # BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
    dw = np.dot(X, (A - Y).T) / m  # eq. 7
    db = np.sum(A - Y) / m         # eq. 8
    ### END CODE HERE ###
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())
    grads = {"dw": dw,
             "db": db}
    return grads, cost
# + id="Alck-BQeob36" colab_type="code" outputId="a236ab9c-3556-4a0e-9f41-36130357c483" executionInfo={"status": "ok", "timestamp": 1580917358665, "user_tz": -60, "elapsed": 1285, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 96}
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
# + [markdown] id="ixtwcbPZob38" colab_type="text"
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td> ** dw ** </td>
# <td> [[ 0.99845601]
# [ 2.39507239]]</td>
# </tr>
# <tr>
# <td> ** db ** </td>
# <td> 0.00145557813678 </td>
# </tr>
# <tr>
# <td> ** cost ** </td>
# <td> 5.801545319394553 </td>
# </tr>
#
# </table>
# + [markdown] id="9fRV6Ujfob38" colab_type="text"
# ### 3.4 Optimization
# You want to update the parameters using gradient descent.
#
# <font color='red'> **Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $E$.
# + id="xwJ2vNOsob39" colab_type="code" colab={}
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, iters, eta, print_cost = False):
    """
    This function optimizes w and b by running a gradient descent algorithm
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    iters -- number of iterations of the optimization loop
    eta -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps
    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
    """
    costs = []
    for i in range(iters):
        # Cost and gradient calculation
        ### START CODE HERE ###
        grads, cost = propagate(w, b, X, Y)  # gradients and cost at the current (w, b)
        ### END CODE HERE ###
        # Retrieve derivatives from grads
        dw = grads["dw"]
        db = grads["db"]
        # update rule (≈ 2 lines of code)
        ### START CODE HERE ###
        w = w - eta * dw  # step downhill along the gradient
        b = b - eta * db
        ### END CODE HERE ###
        # Record the costs
        if i % 100 == 0:
            costs.append(cost)
        # Print the cost every 100 training examples
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs
# + id="sS-P01kQob3-" colab_type="code" outputId="eeb52194-2d2f-4518-fa53-692455dac18c" executionInfo={"status": "ok", "timestamp": 1580917368904, "user_tz": -60, "elapsed": 880, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
params, grads, costs = optimize(w, b, X, Y, iters= 100, eta = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# + [markdown] id="_XGHIWNWob4A" colab_type="text"
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td> **w** </td>
# <td>[[ 0.19033591]
# [ 0.12259159]] </td>
# </tr>
#
# <tr>
# <td> **b** </td>
# <td> 1.92535983008 </td>
# </tr>
# <tr>
# <td> **dw** </td>
# <td> [[ 0.67752042]
# [ 1.41625495]] </td>
# </tr>
# <tr>
# <td> **db** </td>
# <td> 0.219194504541 </td>
# </tr>
#
# </table>
#
# + [markdown] id="8ncuRMySob4A" colab_type="text"
# <font color='red'> **Exercise:** We can now use w and b to predict the labels for a dataset X. Implement the `predict()` function. There is two steps to computing predictions:
# </font>
# 1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
#
# 2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
# + id="RDgU8X--ob4B" colab_type="code" colab={}
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1,m))
    w = w.reshape(X.shape[0], 1)
    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    ### START CODE HERE ### (≈ 1 line of code)
    A = sigmoid(np.dot(w.T, X) + b)
    ### END CODE HERE ###
    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        ### START CODE HERE ### (≈ 4 lines of code)
        if A[0,i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0
        ### END CODE HERE ###
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
# + id="Yts2YPHGob4C" colab_type="code" outputId="6fa6e71b-1cec-49a0-cf8a-ecc4dbee64d7" executionInfo={"status": "ok", "timestamp": 1580917381324, "user_tz": -60, "elapsed": 988, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 36}
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
# + [markdown] id="Tp7pPY3sob4D" colab_type="text"
# **Expected Output**:
#
# <table style="width:30%">
# <tr>
# <td>
# **predictions**
# </td>
# <td>
# [[ 1. 1. 0.]]
# </td>
# </tr>
#
# </table>
#
# + [markdown] id="3pKzmhupob4E" colab_type="text"
# ## 4 - Build the overal model ##
#
# You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts), in the right order.
#
# <font color='red'> **Exercise:** Implement the model function. Use the following notation:
# - Y_prediction for your predictions on the test set
# - Y_prediction_train for your predictions on the train set
# - w, costs, grads for the outputs of optimize()
# + id="V6_rdxPTob4E" colab_type="code" colab={}
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the function you've implemented previously
    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter: number of gradient-descent iterations
    learning_rate -- hyperparameter: gradient-descent step size
    print_cost -- Set to true to print the cost every 100 iterations
    Returns:
    d -- dictionary containing information about the model.
    """
    # Fixed: the parameter was declared as `iters` while the body and every call
    # site in this notebook use `num_iterations`, which raised a NameError / TypeError.
    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    w_old, b_old = initialize_with_zeros(X_train.shape[0])
    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w_old, b_old, X_train, Y_train, num_iterations, learning_rate, print_cost)
    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]
    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    ### END CODE HERE ###
    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    return d
# + [markdown] id="X6CfoQO8ob4F" colab_type="text"
# Run the following cell to train your model.
# + id="QlHpW3QUob4F" colab_type="code" outputId="379f2020-4b3a-4e2f-b6d1-9928e8e3ae33" executionInfo={"status": "ok", "timestamp": 1580917407290, "user_tz": -60, "elapsed": 7336, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 456}
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# + [markdown] id="fmOFzDzZob4H" colab_type="text"
# **Expected Output**:
#
# <table style="width:40%">
#
# <tr>
# <td> **Cost after iteration 0 ** </td>
# <td> 0.693147 </td>
# </tr>
# <tr>
# <td> <center> $\vdots$ </center> </td>
# <td> <center> $\vdots$ </center> </td>
# </tr>
# <tr>
# <td> **Train Accuracy** </td>
# <td> 99.04306220095694 % </td>
# </tr>
#
# <tr>
# <td>**Test Accuracy** </td>
# <td> 70.0 % </td>
# </tr>
# </table>
#
#
#
# + [markdown] id="uaQoEUcwob4H" colab_type="text"
# **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is only 70%, much lower than the training accuracy. You'll build an even better classifier in the next assignment!
#
# <font color='red'> Question: What is problem here? How can we avoid it?
# + id="nzWNxUpCob4J" colab_type="code" outputId="2f4a4357-9db2-4800-e024-3c4677a01aa5" executionInfo={"status": "ok", "timestamp": 1580918165788, "user_tz": -60, "elapsed": 1013, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 327}
# Example of a picture that was wrongly classified.
index = 28
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]))
t=d["Y_prediction_test"]
print("you predicted that it is ")
print(t[0,index])
# + [markdown] id="oGs_B7eDob4K" colab_type="text"
# Let's also plot the cost function and the gradients.
# + id="Os_kYEcJob4K" colab_type="code" outputId="515c12bb-35e3-4708-c402-da1581b968fa" executionInfo={"status": "ok", "timestamp": 1580918191159, "user_tz": -60, "elapsed": 962, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09950164676861804363"}} colab={"base_uri": "https://localhost:8080/", "height": 294}
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
# + [markdown] id="h5QA0fmNob4L" colab_type="text"
# **Interpretation**:
# You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
# + [markdown] id="5wKXxivhob4L" colab_type="text"
# ## 5 - Further analysis ##
#
# + [markdown] id="mKtd5hwCob4M" colab_type="text"
# #### Choice of learning rate ####
#
# <font color='red'> Question: what is the problem if the learning rate is too small or too large </font>.
#
#
# Let's compare the learning curve of our model with several choices of learning rates. <font color='red'> TODO: try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
# + id="puRUUJGOob4M" colab_type="code" outputId="51ec51c9-0c55-4b34-d816-870ba86e7a69" colab={}
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# + [markdown] id="cfkZn872ob4N" colab_type="text"
# **Interpretation**:
# - Different learning rates give different costs and thus different predictions results.
# - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
# - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
#
# + [markdown] id="2jEbETCZob4O" colab_type="text"
# ## 6 - Test with your own image##
#
# To do that:
#
# 1. Add your image to the "images" folder
#
# 2. Change your image's name in the following code and run the code.
# + id="gqf49AR6ob4O" colab_type="code" colab={}
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = ...
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
# + [markdown] id="ybGKit4lob4P" colab_type="text"
# <font color='red'> TODO:
# Try different things on this Notebook: </font>
#
# - Play with the learning rate and the number of iterations
#
# - Try different initialization methods and compare the results
#
# - Test other preprocessings (center the data, or divide each row by its standard deviation)
# + id="20E6c0Xxob4P" colab_type="code" colab={}
###WRITE YOUR CODE HERE####
|
TensorFlow/TP2 LR as NN/First NN with LR_todo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Traveling Companions
#
# In this activity you will be taking three separate csvs that were gathered by Tourism Malaysia, merging them together, and then creating charts to visualize a country's change of traveling companions to Malaysia over the course of three years.
#
# ### Part 1 - Merging Companions
#
# * You will likely need to perform two different merges over the course of this activity, changing the names of your columns as you go along.
# +
# Import the necessary modules
# +
# Bring each CSV into a separate data frame
# -
# Merge the first two datasets on "COUNTRY OF NATIONALITY" so that no data is lost (should be 44 rows)
# +
# Rename our _x columns to "2016 Alone", "2016 With Spouse", "2016 With Children", "2016 With Family/Relatives",
# "2016 Student Group", "2016 With Friends", "2016 With Business Associate", "2016 With Incentive Group",
# and "2016 Others"
# Rename our _y columns to "2017 Alone", "2017 With Spouse", "2017 With Children", "2017 With Family/Relatives",
# "2017 Student Group", "2017 With Friends", "2017 With Business Associate", "2017 With Incentive Group",
# and "2017 Others"
# -
# Merge our newly combined dataframe with the 2018 dataframe
# Rename "ALONE", "WITH SPOUSE", "WITH CHILDREN", "WITH FAMILY/RELATIVES", "STUDENT GROUP", "WITH FRIENDS",
# "WITH BUSINESS ACCOCIATE","WITH INCENTIVE GROUP", "OTHERS" to
# "2018 Alone", "2018 With Spouse", "2018 With Children", "2018 With Family/Relatives", "2018 Student Group",
# "2018 With Friends", "2018 With Business Associate", "2018 With Incentive Group", and "2018 Others"
|
01-Lesson-Plans/05-Matplotlib/2/Activities/08-Stu_Travel-Part1/Unsolved/traveling_companions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
# %matplotlib inline
from plotting_helper import plot_intervention_range
# -
# # Plotting
# +
# Index of the datum to visualise.
idx = 0
# Name of the trained model whose counterfactuals are plotted.
model_name = 'ConditionalVISEM'
# Interventions to apply, one entry per panel; keys name the intervened
# variable (presumably Morpho-MNIST stroke thickness / image intensity —
# TODO confirm units against plotting_helper).
interventions = [
    {'thickness': 0.5},
    {'thickness': 1.5},
    {'thickness': 5.},
    {'thickness': 7.},
    {'intensity': 224.},
    {'intensity': 64.}
]
# normalise_all shares one colour scale across panels; num_samples is the
# number of Monte Carlo samples drawn per intervention.
plot_intervention_range(model_name, interventions, idx, normalise_all=True, num_samples=32)
|
deepscm/experiments/plotting/morphomnist/interactive_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis Frequentist Approach
# This tutorial shows how to perform post-test analysis of an A/B test experiment with two variants, so called control and
# treatment groups, using frequentist statistics. It handles both the case of means comparison and conversions comparison
# with closed-form-solutions. It assumes that sample data are normally distributed.
#
# Let's import first the tools needed.
import numpy as np
from abexp.core.analysis_frequentist import FrequentistAnalyzer
from abexp.visualization.analysis_plots import AnalysisPlot
# ## Compare means
# Here we want to compare the mean of the control group versus the mean of the treatment group given the sample
# observations.
# Define the analyzer
analyzer = FrequentistAnalyzer()
# We will compare the *average revenue per user* of the control group versus the treatment group, making separate
# analysis for standard and premium users.
# +
# Simulated per-user revenue observations, drawn from normal distributions.
# The seed is fixed so the tutorial is reproducible.
np.random.seed(42)
# Standard users: control vs. treatment
revenueS_contr = np.random.normal(loc=270, scale=200, size=1000)
revenueS_treat = np.random.normal(loc=300, scale=200, size=1000)
# Premium users: control vs. treatment
revenueP_contr = np.random.normal(loc=300, scale=200, size=1000)
revenueP_treat = np.random.normal(loc=310, scale=200, size=1000)
# +
# Two-sided comparison of group means at significance level alpha=0.05.
# Returns the p-value plus one confidence interval per group (presumably
# 95% CIs of the mean, matching alpha — confirm against FrequentistAnalyzer).
pval_S, ciS_contr, ciS_treat = analyzer.compare_mean_obs(obs_contr=revenueS_contr,
                                                         obs_treat=revenueS_treat,
                                                         alpha=0.05)
pval_P, ciP_contr, ciP_treat = analyzer.compare_mean_obs(obs_contr=revenueP_contr,
                                                         obs_treat=revenueP_treat,
                                                         alpha=0.05)
# -
print('Standard users: p-value = {:.6f}'.format(pval_S))
print('Premium users: p-value = {:.6f}'.format(pval_P))
# If ``p-value`` $\leq$``0.05`` the test result is statistically significant. There is a significative difference between
# control and treatment groups.
#
# Otherwise if ``p-value`` $>$ ``0.05`` the test result is not statistically significant. There is not a statistical
# significant difference between control and treatment groups.
# Compute the observed mean revenue of each group.
meanS_contr = revenueS_contr.mean()
meanS_treat = revenueS_treat.mean()
meanP_contr = revenueP_contr.mean()
meanP_treat = revenueP_treat.mean()
# Display test results in barplots.
# +
# Bar heights for the control group (standard, premium)
bars_contr = [meanS_contr, meanP_contr]
# Bar heights for the treatment group (standard, premium)
bars_treat = [meanS_treat, meanP_treat]
# Lower/upper limits of the control-group error bars
ci_contr = [[ciS_contr[0], ciP_contr[0]], # 2.5 percentiles
            [ciS_contr[1], ciP_contr[1]]] # 97.5 percentiles
# Lower/upper limits of the treatment-group error bars
ci_treat = [[ciS_treat[0], ciP_treat[0]], # 2.5 percentiles
            [ciS_treat[1], ciP_treat[1]]] # 97.5 percentiles
bars = [bars_contr, bars_treat]
ci = [ci_contr, ci_treat]
# Grouped barplot: x-axis groups are user tiers, series are control/treatment.
fig = AnalysisPlot.barplot(bars, ci, title='Barplot',
                           ylabel='average revenue per user',
                           xlabel=['standard', 'premium'],
                           groupslabel=['control', 'treatment'])
# -
# ## Compare conversions
# Here we want to compare the number of user that made a purchase in the control group versus the treatment group.
# +
# Number of users that made a purchase
purchase_contr = 400
purchase_treat = 470
# Total number of users
total_usr_treat = 5000
total_usr_contr = 5000
# -
# Fix: the original call passed total_usr_treat as nobs_contr and vice versa.
# Numerically harmless here only because both totals happen to be 5000, but
# wrong for any unequal split.
p_val, ci_contr, ci_treat = analyzer.compare_conv_stats(conv_contr=purchase_contr,
                                                        conv_treat=purchase_treat,
                                                        nobs_contr=total_usr_contr,
                                                        nobs_treat=total_usr_treat)
print('p-value = {:.6f}'.format(p_val))
# In this case ``p-value`` $\leq$``0.05``, the test result is statistically significant. There is a significative
# difference between control and treatment groups. The treatment applied on the test group was successful.
|
docs/src/tutorials/AnalysisFrequentistApproach.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
import json
# %run ../Python_files/util.py
# %run ../Python_files/load_dicts.py
data_folder = '/home/jzh/INRIX/All_INRIX_2012_filtered_journal/'
def aggregate_speed_data(month):
    """Aggregate AM-peak (7:00-8:59) speeds and travel times per TMC per day.

    Reads the filtered per-minute dictionary for ``month`` from JSON, builds
    one TMC_Day_Speed record per (TMC, day) pair, and pickles the resulting
    dict to ../temp_files/<Month>_AM/tmc_day_speed_dict_journal.pkz.

    Relies on module-level names loaded via %run: data_folder,
    tmc_ref_speed_dict_journal, days, TMC_Day_Speed, month_to_str, zdump.
    """
    tmc_day_speed_AM_dict = {}
    # Load the filtered per-minute data for this month.
    input_file_AM = data_folder + 'filtered_month_%s_AM_dict_journal.json' %(month)
    with open(input_file_AM, 'r') as json_file_AM:
        filtered_month_AM_dict = json.load(json_file_AM)
    days_ = days(month)  # loop-invariant: number of days in the month
    # TMCs with no usable data of their own; they borrow records below.
    missing_tmcs = ['129P05793', '129+14189', '129-14188']
    for tmc in tmc_ref_speed_dict_journal.keys():
        if tmc not in missing_tmcs:
            # Day numbering starts at 1 (was range(days_)[1:] in the original).
            for day in range(1, days_):
                speed_AM = []
                travel_time_AM = []
                for hour in [7, 8]:
                    for minute in range(60):
                        key = tmc + '_' + str(month) + '_' + str(day) + '_' + str(hour) + '_' + str(minute)
                        # Placeholder for missing observations.
                        if filtered_month_AM_dict[key] == '_':
                            filtered_month_AM_dict[key] = '0.1_0.01'
                        # Value format is "<speed>_<travel_time>": split once.
                        parts = filtered_month_AM_dict[key].split('_')
                        speed_AM.append(float(parts[0]))
                        travel_time_AM.append(float(parts[1]))
                tmc_day_speed_AM_dict[tmc + str(day)] = TMC_Day_Speed(tmc, day, speed_AM, travel_time_AM)
        else:
            # No data for these TMCs: reuse the records of TMC '129+04217'.
            # NOTE(review): assumes '129+04217' was processed earlier in the
            # dict iteration order -- TODO confirm (fragile under Python 2
            # dict ordering).
            for day in range(1, days_):
                tmc_day_speed_AM_dict[tmc + str(day)] = tmc_day_speed_AM_dict['129+04217' + str(day)]
    zdump(tmc_day_speed_AM_dict, '../temp_files/%s_AM/tmc_day_speed_dict_journal.pkz' %(month_to_str(month)))
# -
# Aggregate one representative month per season: Jan, Apr, Jul, Oct.
aggregate_speed_data(1)
aggregate_speed_data(4)
aggregate_speed_data(7)
aggregate_speed_data(10)
|
01_INRIX_data_preprocessing_journal18/INRIX_data_preprocessing_08_aggregate_speed_data_journal_AM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="3VLQuhT3XOh1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b5a88130-b10c-48f3-d4e4-7dcc887b9d9f" executionInfo={"status": "ok", "timestamp": 1583255799755, "user_tz": -60, "elapsed": 30309, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# !pip install datadotworld
# !pip install datadotworld[pandas]
# + id="OPkKh-Jal7Qe" colab_type="code" colab={}
# # !dw configure
# + id="XaPIUOuKWVqh" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="yrmMebVrl9mc" colab_type="code" colab={}
# drive.mount("/content/drive")
# + [markdown] id="Azwsi2X6mtoK" colab_type="text"
#
# + id="c764bxS0mWlD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="3d65e946-8a0e-4e55-b99b-9c046e9d38b2" executionInfo={"status": "ok", "timestamp": 1583259253875, "user_tz": -60, "elapsed": 587, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# cd "drive/My Drive/Colab Notebooks/dw_matrix"
# + id="3mxe2Yekmvnr" colab_type="code" colab={}
# !mkdir data
# + id="NX6yh4OHmzsa" colab_type="code" colab={}
# !echo data > .gitignore
# + id="R56u6iQEnCvZ" colab_type="code" colab={}
# !git add .gitignore
# + id="kYVaw8A1nKW2" colab_type="code" colab={}
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="9Je6MpcUndPd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="24ae379b-f8d9-4ba0-93a9-28ad144462ed" executionInfo={"status": "ok", "timestamp": 1583259685250, "user_tz": -60, "elapsed": 1948, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df = data.dataframes['7004_1']
# + id="03LvARwPoYy4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 652} outputId="9c0b6a28-21c9-4ff0-fe73-248ed343977d" executionInfo={"status": "ok", "timestamp": 1583259707272, "user_tz": -60, "elapsed": 2789, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df.sample(5)
# + id="STOQsKoRodkm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="d6ba961b-6ba6-4b03-85ef-0bfd870aac08" executionInfo={"status": "ok", "timestamp": 1583259722443, "user_tz": -60, "elapsed": 926, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df.columns
# + id="aZ8LEDY0oh77" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="e6a6e545-79d2-4c7d-88ff-026244b04a7d" executionInfo={"status": "ok", "timestamp": 1583259748259, "user_tz": -60, "elapsed": 882, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df.prices_currency.unique( )
# + id="Uh4oIVT4ooQD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="39eef9d8-7ccc-4d37-9af4-00e81c2a068a" executionInfo={"status": "ok", "timestamp": 1583259840581, "user_tz": -60, "elapsed": 708, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df.prices_currency.value_counts(normalize=True)
# + id="a0rCO17ro4km" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1a85bbf2-e1e2-40af-a205-39e0aceca7d9" executionInfo={"status": "ok", "timestamp": 1583260381406, "user_tz": -60, "elapsed": 1342, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df_usd = df[df.prices_currency == 'USD'].copy()
df_usd.shape
# + id="KS0OQNk8pKeB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="17f8c330-1bbb-4d2d-f7ab-f3c0f3843b05" executionInfo={"status": "ok", "timestamp": 1583260410807, "user_tz": -60, "elapsed": 5339, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# Cast prices to float before plotting. np.float was deprecated in NumPy 1.20
# and removed in 1.24; the builtin float is the documented replacement.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()
# + id="f0gswQ_DqCLh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6f61cfed-bda0-472d-feeb-1d257c4fe9d7" executionInfo={"status": "ok", "timestamp": 1583260454181, "user_tz": -60, "elapsed": 1232, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
filter_max = np.percentile(df_usd['prices_amountmin'],99)
filter_max
# + id="ByKToYr_q6O1" colab_type="code" colab={}
df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]
# + id="T1oPGbWprg4y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="78e3fa5d-79bd-442f-ebbd-69ae9edb690b" executionInfo={"status": "ok", "timestamp": 1583260629941, "user_tz": -60, "elapsed": 1079, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="azxcEkQ1r_c4" colab_type="code" colab={}
# !git add matrix_one/day3.ipynb
# + id="mv9xIp3Bsowl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="19c5e986-0251-433a-fb18-d1044f170285" executionInfo={"status": "ok", "timestamp": 1583261847967, "user_tz": -60, "elapsed": 3361, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# !git commit -m "Read Men's Shoe Prices dataset from data.world without keys"
# + id="Qgao0vobtPn3" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "pkownacka"
# + id="at30v2I4tZWW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="64f2395c-2596-45b5-c7da-839476f508b3" executionInfo={"status": "ok", "timestamp": 1583261719728, "user_tz": -60, "elapsed": 2201, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# !git push origin master
# + id="IeYgFe8Lt4rL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ff69dae1-453e-4f00-9dd4-a3a24e1b9437" executionInfo={"status": "ok", "timestamp": 1583261831146, "user_tz": -60, "elapsed": 2418, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11383590561543950654"}}
# ls
|
matrix_one/day3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NEURAL NETWORKS
#
# This notebook covers the neural network algorithms from chapter 18 of the book *Artificial Intelligence: A Modern Approach*, by <NAME> and <NAME>. The code in the notebook can be found in [learning.py](https://github.com/aimacode/aima-python/blob/master/learning.py).
#
# Execute the below cell to get started:
# +
from learning import *
from notebook import psource, pseudocode
# -
# ## NEURAL NETWORK ALGORITHM
#
# ### Overview
#
# Although the Perceptron may seem like a good way to make classifications, it is a linear classifier (which, roughly, means it can only draw straight lines to divide spaces) and therefore it can be stumped by more complex problems. To solve this issue we can extend Perceptron by employing multiple layers of its functionality. The construct we are left with is called a Neural Network, or a Multi-Layer Perceptron, and it is a non-linear classifier. It achieves that by combining the results of linear functions on each layer of the network.
#
# Similar to the Perceptron, this network also has an input and output layer; however, it can also have a number of hidden layers. These hidden layers are responsible for the non-linearity of the network. The layers are comprised of nodes. Each node in a layer (excluding the input one), holds some values, called *weights*, and takes as input the output values of the previous layer. The node then calculates the dot product of its inputs and its weights and then activates it with an *activation function* (e.g. sigmoid activation function). Its output is then fed to the nodes of the next layer. Note that sometimes the output layer does not use an activation function, or uses a different one from the rest of the network. The process of passing the outputs down the layer is called *feed-forward*.
#
# After the input values are fed-forward into the network, the resulting output can be used for classification. The problem at hand now is how to train the network (i.e. adjust the weights in the nodes). To accomplish that we utilize the *Backpropagation* algorithm. In short, it does the opposite of what we were doing up to this point. Instead of feeding the input forward, it will track the error backwards. So, after we make a classification, we check whether it is correct or not, and how far off we were. We then take this error and propagate it backwards in the network, adjusting the weights of the nodes accordingly. We will run the algorithm on the given input/dataset for a fixed amount of time, or until we are satisfied with the results. The number of times we will iterate over the dataset is called *epochs*. In a later section we take a detailed look at how this algorithm works.
#
# NOTE: Sometimes we add another node to the input of each layer, called *bias*. This is a constant value that will be fed to the next layer, usually set to 1. The bias generally helps us "shift" the computed function to the left or right.
# 
# ### Implementation
#
# The `NeuralNetLearner` function takes as input a dataset to train upon, the learning rate (in (0, 1]), the number of epochs and finally the size of the hidden layers. This last argument is a list, with each element corresponding to one hidden layer.
#
# After that we will create our neural network in the `network` function. This function will make the necessary connections between the input layer, hidden layer and output layer. With the network ready, we will use the `BackPropagationLearner` to train the weights of our network for the examples provided in the dataset.
#
# The NeuralNetLearner returns the `predict` function which, in short, can receive an example and feed-forward it into our network to generate a prediction.
#
# In more detail, the example values are first passed to the input layer and then they are passed through the rest of the layers. Each node calculates the dot product of its inputs and its weights, activates it and pushes it to the next layer. The final prediction is the node in the output layer with the maximum value.
psource(NeuralNetLearner)
# ## BACKPROPAGATION
#
# ### Overview
#
# In both the Perceptron and the Neural Network, we are using the Backpropagation algorithm to train our model by updating the weights. This is achieved by propagating the errors from our last layer (output layer) back to our first layer (input layer), this is why it is called Backpropagation. In order to use Backpropagation, we need a cost function. This function is responsible for indicating how good our neural network is for a given example. One common cost function is the *Mean Squared Error* (MSE). This cost function has the following format:
#
# $$MSE=\frac{1}{n} \sum_{i=1}^{n}(y - \hat{y})^{2}$$
#
# Where `n` is the number of training examples, $\hat{y}$ is our prediction and $y$ is the correct prediction for the example.
#
# The algorithm combines the concept of partial derivatives and the chain rule to generate the gradient for each weight in the network based on the cost function.
#
# For example, if we are using a Neural Network with three layers, the sigmoid function as our activation function and the MSE cost function, we want to find the gradient for the a given weight $w_{j}$, we can compute it like this:
#
# $$\frac{\partial MSE(\hat{y}, y)}{\partial w_{j}} = \frac{\partial MSE(\hat{y}, y)}{\partial \hat{y}}\times\frac{\partial\hat{y}(in_{j})}{\partial in_{j}}\times\frac{\partial in_{j}}{\partial w_{j}}$$
#
# Solving this equation, we have:
#
# $$\frac{\partial MSE(\hat{y}, y)}{\partial w_{j}} = (\hat{y} - y)\times{\hat{y}}'(in_{j})\times a_{j}$$
#
# Remember that $\hat{y}$ is the activation function applied to a neuron in our hidden layer, therefore $$\hat{y} = sigmoid(\sum_{i=1}^{num\_neurons}w_{i}\times a_{i})$$
#
# Also $a$ is the input generated by feeding the input layer variables into the hidden layer.
#
# We can use the same technique for the weights in the input layer as well. After we have the gradients for both weights, we use gradient descent to update the weights of the network.
# ### Pseudocode
pseudocode('Back-Prop-Learning')
# ### Implementation
#
# First, we feed-forward the examples in our neural network. After that, we calculate the gradient for each layers' weights by using the chain rule. Once that is complete, we update all the weights using gradient descent. After running these for a given number of epochs, the function returns the trained Neural Network.
psource(BackPropagationLearner)
# +
# Load the iris dataset and convert class labels to integer indices,
# as required by the network's output encoding.
iris = DataSet(name="iris")
iris.classes_to_numbers()
# Train a neural-net learner with default hyperparameters.
nNL = NeuralNetLearner(iris)
# Classify one sample; prints the predicted class index (result can vary
# because the initial weights are random).
print(nNL([5, 3, 1, 0.1]))
# + [markdown] pycharm={"name": "#%% md\n"}
# The output should be 0, which means the item should get classified in the first class, "setosa". Note that since the algorithm is non-deterministic (because of the random initial weights) the classification might be wrong. Usually though, it should be correct.
#
# To increase accuracy, you can (most of the time) add more layers and nodes. Unfortunately, increasing the number of layers or nodes also increases the computation cost and might result in overfitting.
#
#
|
neural_nets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from XRD import crystal, Element, XRD
import sys
import os
from similarity import Similarity
def testFWHM(structure, max2theta, ext1, ext2, profiling):
    """Simulate a PXRD profile, plot it against a reference pattern, and
    print their cosine similarity.

    Parameters
    ----------
    structure : str
        Base name of the structure file under ./data/ (also the base name
        of the reference pattern file in the working directory).
    max2theta : float
        Upper 2-theta limit for the simulated pattern (presumably degrees
        -- confirm against the XRD class).
    ext1 : str
        Structure file format; 'POSCAR' files are named '<structure>-POSCAR',
        anything else '<structure>.<ext1>'.
    ext2 : str
        Extension of the reference pattern file (e.g. 'tsv' from Mercury,
        'gpd' from VESTA), two columns: 2-theta, intensity.
    profiling : dict
        Keyword arguments forwarded to xrd1.get_profile (peak-profile shape).

    Notes
    -----
    Uses the module-level constant N (number of profile points), which is
    defined later in this notebook -- late binding, so N must exist before
    the first call.
    """
    wavelength = 1.54056  # Cu K-alpha1, Angstrom
    path = './data/'
    # POSCAR files follow a different naming convention than other formats.
    if ext1 == 'POSCAR':
        struct = crystal(ext1,filename=path+structure+'-'+ext1)
    else:
        struct = crystal(ext1,filename=path+structure+'.'+ext1)
    # Simulated powder pattern, intensities normalised to a max of 1.
    xrd1 = XRD(struct, wavelength, max2theta, False, 0.55)
    xrd1.get_profile(xrd1.theta2,xrd1.xrd_intensity/np.max(xrd1.xrd_intensity),N,**profiling)
    # Reference pattern exported from Mercury/VESTA; normalised the same way.
    # NOTE(review): loaded from the working directory, not from ./data/ --
    # confirm this is intentional.
    merc_pattern = np.loadtxt(structure+'.'+ext2)
    mx = merc_pattern[:,0]
    my = merc_pattern[:,1]/ np.max(merc_pattern[:,1])
    patterng = np.vstack((mx,my))
    # Overlay both patterns for visual comparison.
    plt.figure(figsize=(15,4))
    plt.plot(xrd1.spectra[0],xrd1.spectra[1],'b--',label = 'pxrd')
    plt.plot(mx,my, 'orange',label = 'vesta')
    plt.legend()
    plt.show()
    # Quantitative agreement via cosine similarity of the two spectra.
    S = Similarity(xrd1.spectra, patterng, N, None, 1, 'cosine')
    print('Similarity = ',S.calculate())
N = 10000
U = 5.776410E-03 # FWHM parameter, U
V = -1.673830E-03 # FWHM parameter, V
W = 5.668770E-03 # FWHM parameter, W
A = 1.03944 # Asymmetry parameter, a1
eta_h = 0.504656 # Mixing parameter, eta_H0
eta_l = 0.611844 # Mixing parameter, eta_L0
# mercury
profiling = {'function':'split-type', 'FWHM': 0.05, 'A':A, 'eta_h':eta_h, 'eta_l':eta_l}
testFWHM('A2=a', 90,'POSCAR', 'tsv',profiling)
# mercury
testFWHM('Bba2', 90,'POSCAR', 'tsv',profiling)
# mercury
testFWHM('F-43c', 90,'POSCAR', 'tsv',profiling)
# mercury
testFWHM('I23', 90,'POSCAR', 'tsv',profiling)
# mercury
testFWHM('Pnna', 90,'POSCAR', 'tsv',profiling)
# vesta
profiling = {'function':'split-type', 'theta_dependence': True, 'U': U, 'V':V, 'W':W, 'A':A, 'eta_h':eta_h, 'eta_l':eta_l}
testFWHM('A-1', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('B-1',90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('Cm', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('Cccm', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('I2mb', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('Pcab', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('P-43n', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('R3m', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('P4_2=ncm', 90, 'POSCAR', 'gpd',profiling)
# vesta
testFWHM('Pmma', 90, 'POSCAR', 'gpd',profiling)
|
notebooks/FWHM-angle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nonrad_py3
# language: python
# name: nonrad_py3
# ---
# # NONRAD Tutorial
#
# This notebook serves as a tutorial for how to use the NONRAD code to compute the nonradiative capture coefficient for a given defect. In this tutorial, we will examine the capture of a hole by the negatively charge C substiution on the N site in wurtzite GaN.
#
#
# **Recommendation**: For every function provided by NONRAD, read the docstring to understand how the function behaves. This can be done using `function?` in a notebook or `print(function.__doc__)`.
# ## 0. First-Principles Defect Calculation
#
# Before we begin using the code provided by NONRAD, we must perform a first-principles calculation to obtain the equilibrium structures and thermodynamic level for our defect. This results in a formation energy plot such as the following.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Fermi-level axis spanning 0 to 3.5 eV.
Efermi = np.linspace(0., 3.5, 10)
fig, ax = plt.subplots(figsize=(5, 5))
# Negative charge state: slope -1 vs Fermi level, crossing zero at the
# (0/-) thermodynamic transition level (1.058 eV).
ax.plot(Efermi, - Efermi + 1.058)
# Neutral charge state: constant reference at zero.
ax.plot(Efermi, np.zeros(10))
# Mark the transition level on the plot.
ax.scatter(1.058, 0., color='r', marker='|', zorder=10)
# Raw string: '\e' is an invalid escape sequence in a normal string literal.
ax.text(1.058, 0., r'$\epsilon (0/-)$ = 1.058 eV', color='r', va='bottom')
ax.set_xlabel(r'$E_{\rm{Fermi}}$ [eV]')
ax.set_ylabel(r'$E_f - E_f(q=0)$ [eV]')
plt.show()
# -
# The formation energy plot tells us the most stable charge state as a function of the Fermi level. The blue line corresponds to the C substitution being in the negative charge state, and the orange line corresponds to the neutral charge state. The thermodynamic transition level is the crossing between these two lines and for this defect, we find a value of 1.058 eV. This will be one input parameter to the calculation of the nonradiative capture coefficient, `dE`. Let's save this value for later:
dE = 1.058 # eV
# ## 1. Compute Configuration Coordinate Diagram
#
# #### Preparing the CCD Calculations
# We now are ready to prepare our configuration coordinate diagram. The configuration coordinate diagram gives us a practical method to depict the coupling between electron and phonon degrees of freedom. The potential energy surface in each charge state is plotted as a function of displacement. The displacement is generated by a linear interpolation between the ground and excited configurations and also corresponds to the special phonon mode used in our calculation of the nonradiative recombination rates.
#
# The following code can be used to prepare the input files for the ab initio calculation of the configuration coordinate diagram (example for `VASP` is shown below).
# +
import os
from pathlib import Path
from shutil import copyfile
from pymatgen import Structure
from nonrad.ccd import get_cc_structures
# equilibrium structures from your first-principles calculation
ground_files = Path('/path/to/C0/relax/')
ground_struct = Structure.from_file(str(ground_files / 'CONTCAR'))
excited_files = Path('/path/to/C-/relax/')
excited_struct = Structure.from_file(str(excited_files / 'CONTCAR'))
# output directory that will contain the input files for the CC diagram
cc_dir = Path('/path/to/cc_dir')
os.mkdir(str(cc_dir))
os.mkdir(str(cc_dir / 'ground'))
os.mkdir(str(cc_dir / 'excited'))
# displacements as a percentage, this will generate the displacements
# -50%, -37.5%, -25%, -12.5%, 0%, 12.5%, 25%, 37.5%, 50%
displacements = np.linspace(-0.5, 0.5, 9)
# note: the returned structures won't include the 0% displacement, this is intended
# it can be included by specifying remove_zero=False
ground, excited = get_cc_structures(ground_struct, excited_struct, displacements)
for i, struct in enumerate(ground):
working_dir = cc_dir / 'ground' / str(i)
os.mkdir(str(working_dir))
# write structure and copy necessary input files
struct.to(filename=str(working_dir / 'POSCAR'), fmt='poscar')
for f in ['KPOINTS', 'POTCAR', 'INCAR', 'submit.job']:
copyfile(str(ground_files / f), str(working_dir / f))
for i, struct in enumerate(excited):
working_dir = cc_dir / 'excited' / str(i)
os.mkdir(str(working_dir))
# write structure and copy necessary input files
struct.to(filename=str(working_dir / 'POSCAR'), fmt='poscar')
for f in ['KPOINTS', 'POTCAR', 'INCAR', 'submit.job']:
copyfile(str(excited_files / f), str(working_dir / f))
# -
# Before submitting the calculations prepared above, the INCAR files should be modified to remove the `NSW` flag (no relaxation should be performed).
#
# One of the nice features provided by the NONRAD code is the `get_Q_from_struct` function, which can determine the Q value from the interpolated structure and the endpoints. Therefore, we don't need any fancy naming schemes or tricks to prepare our potential energy surfaces.
#
# #### Extracting the Potential Energy Surface and Relevant Parameters
#
# Once the calculations have completed, we can extract the potential energy surface using the functions provided by NONRAD. The below code extracts the potential energy surfaces and plots them. Furthermore, it will extract the dQ value and the phonon frequencies of the potential energy surfaces. These are 3 input parameters for the calculation of the nonradiative capture coefficient.
# +
from glob import glob
from nonrad.ccd import get_dQ, get_PES_from_vaspruns, get_omega_from_PES
# calculate dQ
dQ = get_dQ(ground_struct, excited_struct) # amu^{1/2} Angstrom
# this prepares a list of all vasprun.xml's from the CCD calculations
ground_vaspruns = glob(str(cc_dir / 'ground' / '*' / 'vasprun.xml'))
excited_vaspruns = glob(str(cc_dir / 'excited' / '*' / 'vasprun.xml'))
# remember that the 0% displacement was removed before? we need to add that back in here
ground_vaspruns = ground_vaspruns + [str(ground_files / 'vasprun.xml')]
excited_vaspruns = excited_vaspruns + [str(excited_files / 'vasprun.xml')]
# extract the potential energy surface
Q_ground, E_ground = get_PES_from_vaspruns(ground_struct, excited_struct, ground_vaspruns)
Q_excited, E_excited = get_PES_from_vaspruns(ground_struct, excited_struct, excited_vaspruns)
# the energy surfaces are referenced to the minimums, so we need to add dE (defined before) to E_excited
E_excited = dE + E_excited
fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(Q_ground, E_ground, s=10)
ax.scatter(Q_excited, E_excited, s=10)
# by passing in the axis object, it also plots the fitted curve
q = np.linspace(-1.0, 3.5, 100)
ground_omega = get_omega_from_PES(Q_ground, E_ground, ax=ax, q=q)
excited_omega = get_omega_from_PES(Q_excited, E_excited, ax=ax, q=q)
# Raw string: '\A' is an invalid escape sequence in a normal string literal.
ax.set_xlabel(r'$Q$ [amu$^{1/2}$ $\AA$]')
ax.set_ylabel('$E$ [eV]')
plt.show()
# -
# The resulting input parameters that we have extracted for our calculation of the nonradiative recombination coefficient are below.
print(f'dQ = {dQ:7.05f} amu^(1/2) Angstrom, ground_omega = {ground_omega:7.05f} eV, excited_omega = {excited_omega:7.05f} eV')
# ## 2. Calculate the Electron-Phonon Coupling Matrix Element
#
# Before computing the el-ph matrix elements, it is highly suggested that you re-read the [original methodology paper](https://doi.org/10.1103/PhysRevB.90.075202) and the [code implementation paper](https://doi.org/10.1016/j.cpc.2021.108056) to make sure you understand the details.
#
# The most important criteria for selecting the geometry in which the el-ph matrix elements are calculated is the presence of a Kohn-Sham level associated with the defect in the gap. For the C substitution we are considering, when the geometry of the defect ($\{Q_0\}$) corresponds to the neutral charge state, a well-defined Kohn-Sham state associated with the defect is clear and sits in the gap. Therefore, we compute the el-ph matrix elements by expanding around this configuration.
#
# To perform this calculation with `VASP`, access to version 5.4.4 or greater is necessary. The calculation amounts to calculating the overlap $\langle \psi_i (0) \vert \psi_f (Q) \rangle$ (where $Q = 0$ corresponds to the geometry $\{Q_0\}$ described above) as a function of $Q$ and computing the slope with respect to $Q$. The el-ph matrix element is then $W_{if} = (\epsilon_f - \epsilon_i) \langle \psi_i (0) \vert \delta \psi_f (Q) \rangle$. For each $Q$, one sets up the calculation by copying the `INCAR`, `POSCAR`, `POTCAR`, `KPOINTS`, and `WAVECAR` from $Q = 0$ to a new directory and sets `LWSWQ = True` in the `INCAR` file. The `WAVECAR` from the $Q$ configuration is copied to `WAVECAR.qqq`. This calculation produces the file `WSWQ`, which includes the overlap information for all bands and kpoints. These files can then be parsed to obtain the matrix element using NONRAD as below.
# +
from nonrad.ccd import get_Q_from_struct
from nonrad.elphon import get_Wif_from_WSWQ
# this generates a list of tuples where the first value of the tuple is a Q value
# and the second is the path to the WSWQ file that corresponds to that tuple
WSWQs = []
for d in glob(str(cc_dir / 'ground' / '*')):
pd = Path(d)
Q = get_Q_from_struct(ground_struct, excited_struct, str(pd / 'CONTCAR'))
path_wswq = str(pd / 'WSWQ')
WSWQs.append((Q, path_wswq))
# by passing a figure object, we can inspect the resulting plots
fig = plt.figure(figsize=(12, 5))
Wifs = get_Wif_from_WSWQ(WSWQs, str(ground_files / 'vasprun.xml'), 192, [189, 190, 191], spin=1, fig=fig)
plt.tight_layout()
plt.show()
# -
# We pass as input, the indices of the 3 valence bands. What we find is that the valence band that is pushed down in energy has the greatest el-ph matrix element. This makes sense because it is pushed down by the interaction with the defect state.
#
# **NOTE**: We highly recommend passing a figure object to view the resulting plot. This ensures that the value obtained is reasonable.
#
# The resulting values of the matrix elements are shown below. They are in units of eV amu$^{-1/2}$ $\unicode{xC5}^{-1}$. The VBM of wz-GaN has three (nearly degenerate) bands, so we must average over the matrix elements. The resulting value can then be directly input into the nonradiative capture calculation.
Wif = np.sqrt(np.mean([x[1]**2 for x in Wifs]))
print(Wifs, Wif)
# #### Alternative Method (Note: not publication quality)
#
# Another method for obtaining the Wif value would be to use the pseudo-wavefunctions from the `WAVECAR` files. This will neglect the core information. For some defect systems, this is not a bad approximation. The quality of the result can generally be judged by the overlap at $Q = 0$. If the overlap is almost zero (maybe < 0.05), then the result should be reasonably reliable. Please only use this to get a rough idea, the above method is preferred. This is facilitated by the `get_Wif_from_wavecars` function.
# +
from nonrad.elphon import get_Wif_from_wavecars

# Pair each displaced geometry's Q value with the path to its WAVECAR file.
wavecars = []
for directory in glob(str(cc_dir / 'ground' / '*')):
    dir_path = Path(directory)
    q_value = get_Q_from_struct(ground_struct, excited_struct, str(dir_path / 'CONTCAR'))
    wavecars.append((q_value, str(dir_path / 'WAVECAR')))

# Passing a figure object lets us sanity-check the resulting overlap plots.
fig = plt.figure(figsize=(12, 5))
Wifs = get_Wif_from_wavecars(wavecars, str(ground_files / 'WAVECAR'), 192, [189, 190, 191], spin=1, fig=fig)
plt.tight_layout()
plt.show()
# -
# As we can see, the results are reasonably close because the Q = 0 value is somewhat low.
print(Wifs, np.sqrt(np.mean([x[1]**2 for x in Wifs])))
# ## 3. Compute Scaling Parameters
#
# When calculating the capture coefficient, we need to take into account two effects. First is the coulombic interaction between the carrier and defect. This occurs when the carrier is captured into a defect with a non-zero charge state. Second, there is the effect on the el-ph matrix element as a result of using a finite-size charged supercell. This leads to a suppression or enhancement of the charge density near the defect and would not occur in an infinitely large supercell.
#
# #### Sommerfeld Parameter
#
# The Sommerfeld parameter captures the long-range coulombic interaction that can affect the capture rates. The interaction can be attractive or repulsive and may enhance or suppress the resulting rate.
#
# For our system, we have the C substitution capturing a hole in the negative charge state, so there will be a long-range coulombic attraction that enhances the capture rates. One input parameter for the Sommerfeld parameter is the Z value. We define it as $Z = q_d / q_c$, where $q_d$ is the charge of the defect and $q_c$ is the charge of the carrier. For a negatively charge defect ($q_d = -1$) interacting with a hole ($q_c = +1$), we have $Z = -1$. $Z < 0$ is an attractive center, while $Z > 0$ is a repulsive center.
#
# Below, we calculate the scaling coefficient. Note, we use the hole effective mass (because we are capturing a hole) and the static dielectric constant.
# +
from nonrad.scaling import sommerfeld_parameter
# Z = q_d / q_c: negative defect capturing a hole -> attractive centre (Z < 0)
Z = -1
m_eff = 0.18 # hole effective mass of GaN
eps_0 = 8.9 # static dielectric constant of GaN
# We can compute the Sommerfeld parameter at a single temperature
print(f'Sommerfeld Parameter @ 300K: {sommerfeld_parameter(300, Z, m_eff, eps_0):7.05f}')
# or we can compute it at a range of temperatures
T = np.linspace(25, 800, 1000)
# f is reused below to scale the capture coefficient at the same temperatures
f = sommerfeld_parameter(T, Z, m_eff, eps_0)
# -
# #### Charged Supercell Effects
#
# Ideally, one could always calculate the el-ph matrix elements in the neutral charge state, and for many defects, this is possible. However, sometimes it is unavoidable to use a charged defect cell for computing the matrix elements. As a result of the charge on the supercell, an interaction between the defect and the delocalized band edges occurs. This leads to an enhancement or suppression of the charge density near the defect that would not exist in an infinite-size supercell, and therefore, a scaling of the el-ph matrix element.
#
# For the C substitution that we are considering, the el-ph matrix element is computed in the neutral charge state, so *no correction is necessary*. For illustration purposes, we shall examine how we would compute this scaling coefficient *if it were necessary* by studying the wavefunctions in the negative charge state. Here, we have a spurious interaction that suppresses or enhances the charge density of the bulk wavefunctions near the charged defect. The scaling coefficient is calculated by comparing the radial distribution of the charge density to a purely homogenous distribution. The function `charged_supercell_scaling` computes the scaling factor.
#
# Below is an example of the interaction with the valence band:
# +
from nonrad.scaling import charged_supercell_scaling
# Scaling factor for the valence band (band index 189) relative to the
# defect state (band index 192), evaluated from the excited-state WAVECAR.
wavecar_path = str(excited_files / 'WAVECAR')
fig = plt.figure(figsize=(12, 5))
factor = charged_supercell_scaling(wavecar_path, 189, def_index=192, fig=fig)
plt.tight_layout()
plt.show()
# the el-ph matrix element would be scaled by 1/factor (coefficient by 1/factor^2)
print('scaling =', 1 / factor)
# -
# The left-most plot is of the cumulative charge density (blue) against a homogenous distribution (red). The scaling parameter that brings the two into agreement is shown in the second plot. A plateau is found around ~2-3 $\unicode{xC5}$. This is the value that we use for the scaling. If we had calculated the el-ph matrix elements in the negative charge state, we would scale the capture coefficient by 1 over this value squared (printed above). For completeness, the right-most plot is the derivative of the scaling coefficient, which provides an algorithmic way to find the plateau.
#
# Below we show the process for the interaction with the conduction band.
# +
# Same analysis for the interaction with the conduction band (band index 193).
fig = plt.figure(figsize=(12, 5))
factor = charged_supercell_scaling(wavecar_path, 193, def_index=192, fig=fig)
plt.tight_layout()
plt.show()
print('scaling =', 1 / factor)
# -
# Here we see that the distribution is suppressed near the defect.
# ## 4. Compute the Nonradiative Capture Coefficient
#
# We are now ready to compute the capture coefficient. The last input parameter we need to think about is the configurational degeneracy. For a C substitution, there are 4 identical defect configurations (one along each bond) that the hole can be captured into.
# +
from nonrad import get_C
g = 4 # configurational degeneracy
volume = ground_struct.volume # Angstrom^3
# we pass in T, which is a numpy array
# we will get the capture coefficient at each of these temperatures
Ctilde = get_C(dQ, dE, excited_omega, ground_omega, Wif, volume, g=g, T=T)
# apply Sommerfeld parameter, evaluated at the same temperatures
C = f * Ctilde
# left: C(T); right: Arrhenius-style plot vs 1000/T (lowest temperatures skipped)
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].semilogy(T, C)
ax[0].set_xlabel('$T$ [K]')
ax[0].set_ylabel('$C_p$ [cm$^{3}$ s$^{-1}$]')
ax[1].semilogy(1000 / T[200:], C[200:])
ax[1].set_xlabel('$1000 / T$ [K$^{-1}$]')
ax[1].set_ylabel('$C_p$ [cm$^{3}$ s$^{-1}$]')
plt.tight_layout()
plt.show()
# -
# We may also want to calculate the capture cross section, $\sigma = C / \langle v \rangle$. We can do this using the `thermal_velocity` function.
# +
from nonrad.scaling import thermal_velocity

# Capture cross section: sigma = C / <v>, with <v> the carrier thermal velocity.
sigma = C / thermal_velocity(T, m_eff) # cm^2
sigma *= (1e8)**2 # (cm to Angstrom)^2
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].semilogy(T, sigma)
ax[0].set_xlabel('$T$ [K]')
# FIX: use raw strings -- '\s' and '\A' are invalid escape sequences in
# ordinary string literals (SyntaxWarning on Python >= 3.12); the raw form
# has the identical runtime value.
ax[0].set_ylabel(r'$\sigma$ [$\AA^{2}$]')
ax[1].semilogy(1000 / T[200:], sigma[200:])
ax[1].set_xlabel('$1000 / T$ [K$^{-1}$]')
ax[1].set_ylabel(r'$\sigma$ [$\AA^{2}$]')
plt.tight_layout()
plt.show()
|
notebooks/tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The CoinMarketCap API is a suite of high-performance RESTful JSON endpoints
#
# Libraries
import os
from dotenv import load_dotenv
import requests
import time
import json
# Load TOKEN from the local .env file
load_dotenv()
key = os.getenv('API-KEY')
# SECURITY FIX: never echo a secret to notebook output; confirm presence only
print('API key loaded' if key else 'API-KEY not found in environment')
# # Listings latest
# +
# Fetch the latest listings for the top cryptocurrencies.
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
params = {
    'start': '1',     # first rank to return
    'limit': '6',     # number of currencies
    'convert': 'USD'  # quote currency
}
headers = {
    'Accepts': 'application/json',
    'X-CMC_PRO_API_KEY': key
}
# FIX: a timeout keeps the notebook from hanging forever on a dead connection
crypto_dict = requests.get(url, params=params, headers=headers, timeout=30).json()
# -
# # Quotes Latest
# +
# Fetch the latest quote for a single currency (id 1 = Bitcoin).
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
params = {
    'id': '1',
    'convert': 'USD'
}
headers = {
    'Accepts': 'application/json',
    'X-CMC_PRO_API_KEY': key
}
# FIX: a timeout keeps the request from hanging indefinitely
crypto_dict = requests.get(url, params=params, headers=headers, timeout=30).json()
# -
crypto_dict
# save json file
with open('mydata.json', 'w') as f:
    json.dump(crypto_dict, f)
# open json file
# FIX: the original `f = open(...)` leaked the file handle; a context
# manager guarantees it is closed
with open('mydata.json') as f:
    data = json.load(f)
|
API-CoinMarketCap.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### SVD/PCA brainstorm
#
# 1. Optimize: <br>
# -$min_W,H ||V - WH||^2$ <br>
# -s.t. $W^T W = I$ <br>
# -restrict W to singular vectors of V: Looking for eigenvectors, <br>
# -find best possible basis to represent V <br>
# -V is an orthogonal matrix <br>
# -can be non-negative <br>
# -best possible matrix approximation for k <br>
# -great for compression/filter out noise <br>
# -violates non-negativity: bad for data analysis <br>
# -basis vectors usually not interpretable big disadvantage in DS<br>
# -PCA for any rank K will give most optimal data representation <br>
# -does not mean it is optimal for interpretation <br>
# -cannot assign any meaning to the basis vectors <br>
#
# 2. matrix factorization:
# -extreme case reconstructing a matrix using the outer product of two
# vectors: this would not be able to reconstruct the matrix exactly
# -if we had a vector with the relative frequency of each vocab word
# out of the total word count and another with the average words per
# document, then the outer product would be close to approx the matrix
# -if you increase it to two rows then you can have two clusters
# 3. SVD:
# -we expect word that appear more frequently in one topic to not
# appear as frequently in another
# -we expect the topics to be orthogonal
# -SVD factorizes a matrix into one matrix w/orthogonal columns and
# one with orthogonal rows, along with a diagonal matrix which contains
# the relative importance of each factor
# 4. Truncated SVD
# -Just interested in the vectors corresponding to the largest singular values
# -Randomized SVD
# -Not every matrix has an eigen decomposition, any matrix has a SVD
# -SVD is a generalization of the eigendecomposition
#
# Machine Epsilon
# https://en.wikipedia.org/wiki/Machine_epsilon
#
# QR algorithm
# -let's us find all the eigenvalues
# -QR algorithm v. QR decomposition:
# -QR decomposition decomposes a matrix $A = QR$ into a set of orthonormal columns $Q$
# and triangle matrix $R$
# -QR algorithm uses QR decomposition
# -Linear algebra
# -two matrices are similar if there exists a non-singular matrix X such that
# $B = X^{-1}AX$
# -If X is non-singular then $A$ and $B = X^{-1}AX$ have the same eigenvalues
# -Schur factorization of a matrix $A$ is a factorization $A = QTQ^*$ with Q unitary and T upper triangular
# -Every square matrix has a Schur factorization
#
# most basic QR algorithm
# Q,R = A # get the decomposition for A from previous iteration
# A = R @ Q # new A to R x Q
#
# converges to the Schur form of A (return something triangular
# Key: the QR algorithm constructs orthonormal bases for successive powers of A. And remember the close relationship between powers of A and the eigen decomposition
#
# Rayleigh quotients
n = 3
A = np.random.rand(n,n)  # random test matrix for the QR-algorithm experiments below
AT = A @ A.T  # symmetric product; NOTE(review): appears unused below -- verify
def pure_qr(A, max_iter=50000):
    """Unshifted QR algorithm.

    Repeatedly factors Ak = Q R and re-forms Ak = R Q.  Each step is a
    similarity transform, so the eigenvalues are preserved while the
    iterates approach the (triangular) Schur form of A.  Returns the final
    iterate and the accumulated product of all orthogonal factors.
    """
    Ak = np.copy(A)    # work on a copy so the caller's matrix is untouched
    size = A.shape[0]
    QQ = np.eye(size)  # running product of the Q factors
    for step in range(max_iter):
        Q, R = np.linalg.qr(Ak)
        Ak = R @ Q         # similar to A: same spectrum
        QQ = QQ @ Q
        if step % 10000 == 0:  # periodic progress printout
            print(Ak)
            print("\n")
    return Ak, QQ
Ak, Q = pure_qr(A) # iterates approach an upper-triangular (Schur) form
np.linalg.eigvals(A) # compare: the eigenvalues should match diag(Ak)
np.allclose(np.eye(n), Q @ Q.T), np.allclose(np.eye(n), Q.T @ Q)  # Q should be orthogonal
# starting from Hessenberg form is faster --> reduce to Hessenberg (phase I) before the QR iterations (phase II)
# +
# gram-schmidt
def cgs(A):
    """QR factorization by classical Gram-Schmidt orthogonalization.

    Returns (Q, R): Q is m x n with orthonormal columns and R is n x n
    upper triangular such that A = Q @ R.  The input A is not modified.
    """
    m, n = A.shape
    Q = np.zeros([m, n], dtype=np.float64)
    R = np.zeros([n, n], dtype=np.float64)
    for j in range(n):
        # BUG FIX: A[:, j] is a numpy *view*; the in-place `v -= ...` below
        # used to overwrite the caller's matrix column by column (which is
        # why the later `np.allclose(A, Q @ R)` check could fail).  Copy so
        # the original A survives.
        v = A[:, j].copy()
        for i in range(j):
            # projection coefficient of column j onto basis vector q_i
            R[i, j] = np.dot(Q[:, i], A[:, j])
            v -= (R[i, j] * Q[:, i])
        R[j, j] = np.linalg.norm(v)
        Q[:, j] = v / R[j, j]
    return Q, R
# -
A[:,0]  # first column of A (notebook display)
A[:,0]  # duplicate display cell
# Manual walk-through of a single CGS step: j = 1, i = 0
print(A[:,1])
print(Q[:,0])
print(np.dot(Q[:,0], A[:,1]))
RX = np.zeros([n,n], dtype=np.float64)
RX[0,1] = np.dot(Q[:,0], A[:,1])
print('')
print(RX[0,1])
print('')
print(RX[0,1] * Q[:,1])  # NOTE(review): cgs subtracts R[i,j]*Q[:,i], i.e. Q[:,0] here -- Q[:,1] looks like a typo; verify
# +
vectors = np.array([
[1, 1, 2, 0, 1, 1],
[0, 0, 0, 1, 2, 1],
[1, 2, 3, 1, 3, 2],
[1, 0, 1, 0, 1, 1]
], dtype=float)
np.zeros(vectors.shape)[:, :6]
# -
def gram_schmidt(X):
    """Orthonormalize the columns of X via Gram-Schmidt.

    Returns a matrix O of the same shape whose columns are orthonormal and
    span the same successive subspaces as the columns of X.  A column that
    is (numerically) linearly dependent on earlier ones comes out as a
    near-zero vector.  X itself is not modified.
    """
    O = np.zeros(X.shape)  # output basis, built one column at a time
    for i in range(X.shape[1]):
        # orthogonalization
        vector = X[:, i]       # column to orthogonalize
        space = O[:, :i]       # basis accepted so far
        projection = vector @ space  # coefficients of vector on that basis
        vector = vector - np.sum(projection * space, axis=1)  # remove projection
        # normalization
        norm = np.sqrt(vector @ vector)
        # guard: do not divide a (numerically) zero vector by ~0
        vector /= 1 if abs(norm) < 1e-8 else norm
        O[:, i] = vector
    # BUG FIX: the original returned the literal 0, discarding the result
    return O
# +
# input X
X = np.array([
[1, 1, 2, 0, 1, 1],
[0, 0, 0, 1, 2, 1],
[1, 2, 3, 1, 3, 2],
[1, 0, 1, 0, 1, 1]
], dtype=float)
# creat the vector space (the ground)
O = np.zeros(X.shape); O
# -
# take a peek at X
X
# grab a column - first column in this case & name it vector
vector = X[:, 0]
vector
# +
# grab a column - we will grab a new col + each previous col
# during each successive iteration
# name this space
space = O[:, :0]
space
# +
# multiply vector by space
# in the first iteration it does nothing
projection = vector @ space
projection
# +
# subtract the vector from the sum of projection * space
# set to vector for the next iteration
vector = vector - np.sum(projection * space, axis=1)
vector
# +
# normalize
norm = np.sqrt(vector @ vector)
norm
# +
# divide the vector by the norm scalar
vector /= abs(norm) < 1e-8 and 1 or norm
# see if abs value < some values
# -
vector
# +
# make the first column of O equal to the vector
O[:, 0] = vector
O[:, 0]
# +
# iteration 2a
# grab a column - first column in this case & name it vector
vector = X[:, 1]
vector
# +
# iteration 2b
# grab a column - we will grab a new col + each previous col
# during each successive iteration
# name this space
# on the second iteration it will be the first column
space = O[:, :1]
space
# +
# iteration 2c
# multiply vector by space
# in the first iteration it does nothing
projection = vector @ space
projection
# +
# iteration 2d
# subtract the vector from the sum of projection * space
# set to vector for the next iteration
vector = vector - np.sum(projection * space, axis=1)
vector
# +
# iteration 2e
# normalize
norm = np.sqrt(vector @ vector)
norm
# +
# iteration 2f
# divide the vector by the norm scalar if less than some values?
vector /= abs(norm) < 1e-8 and 1 or norm
# see if abs value < some values
vector
# +
# iteration 2a
# make the first column of O equal to the vector
O[:, 1] = vector
O[:, 1]
# +
# iteration 3
# grab a column - first column in this case & name it vector
vector = X[:, 2]
vector
# +
# iteration 3
# grab a column - we will grab a new col + each previous col
# during each successive iteration
# name this space
# on the second iteration it will be the first column
space = O[:, :2]
space
# -
# +
m,n = A.shape
Q = np.zeros([m,n], dtype=np.float64)
R = np.zeros([n,n], dtype=np.float64)
# come up with a factorization - in QR factorizatoin
# 34:00 m - youtube
# create orthonormal columns of Q that will represent A
# take the first column of A and have it normalized (first column of Q)
# to find the second column of A you need to find
# for each j calculate a single projection (v_j = P_j*a_j)
# where P_j projects onto the space orthogonal to the span of q_1...q_j-1 (previous q's)
# first column of Q is A normalized
# second column of Q: take the second column of A, project it onto the first col of Q, subtract that off and that
# will be your second column of Q once you normalize
# each time you are going through and you want to subtract everything off that has been accounted for
# as you are doing this you are creating these orthonormal columns of Q that represent your columns of A
# the gram-schmidt process is like staring at a column at high noon
# the column is the vector you are inputting to GS
# the floor is vector space
# the shadow is the projection
# perpendicular is orthoganol
# pushing it over until the shadow disppears is the gram-schmidt process
for x,j in enumerate(range(n)):
v = A[:,j] # single projection a vector
print(f'j: {j} {v}')
print('')
#print(x, v)
for i in range(j):
print(f'dot product: {np.dot(Q[:,i], A[:,j])}')
R[i,j] = np.dot(Q[:,i], A[:,j])
print(f'second R: {R[i,j]}')
v -= (R[i,j] * Q[:,i])
print(f'v: {v}')
print('-------')
# +
n = 5
A = np.random.rand(n,n)  # fresh random matrix for the reconstruction check
Q,R = cgs(A)
# -
np.allclose(A, Q @ R)  # NOTE(review): cgs mutates A through the column view, so this can report False; copy the column inside cgs
# +
# practical QE (QR w/shifts)
# +
# power iteration
# Start with a vector b_k, which may be an approximation to the dominant
# eigenvector or simply a random vector; repeatedly apply A and renormalize.
# start with random numbers for b
b_k = np.random.rand(A.shape[1])
# -
# One power-iteration step: the norm of A @ b_k converges to the dominant
# eigenvalue and b_k to the corresponding eigenvector.
# BUG FIX: b_kl was used below without ever being defined (NameError)
b_kl = A.dot(b_k)
b_kl_norm = np.linalg.norm(b_kl)
# renormalize the vector
b_k = b_kl / b_kl_norm
# +
#
import numpy as np
A = np.array([[0,1],[1,1]])
b = np.array([1,1])
# +
# mutliply A*b
Ab = A.dot(b)
Ab
# +
# normalize using vector max norm L^inf
from numpy import inf
Ab_norm = np.linalg.norm(Ab, inf)
Ab_norm
# -
# find b2
b2 = 0.5 * Ab
b2
# +
# restart
Ab2 = np.linalg.norm(A.dot(b2), inf)
Ab2
# -
0.666 * A.dot(b2)
np.array([1,2]) * 1/2
# +
# how does this converge
# Power iteration on a reflection matrix: the eigenvalues are +1 and -1,
# equal in magnitude, so the normalized iterate flips sign every step while
# the eigenvalue estimate (the inf-norm) stays pinned at 1.
A = np.array([[0, 1], [1, 0]])
b = np.array([-1, 1])
eigenvalue = []
for i in range(100):
    print(i)
    Ab = A.dot(b)
    Ab_norm = np.linalg.norm(Ab, inf)
    print(f'eigenvalue: {Ab_norm}')
    b_i = (1 / Ab_norm) * Ab
    print(f'eigenvector: {b_i}')
    print('')
    b = b_i
    eigenvalue.append(Ab_norm)
# -
import matplotlib.pyplot as plt
plt.plot(eigenvalue)  # flat line at 1.0: |lambda| is found even though the iterate oscillates
plt.show()
|
drafts/.ipynb_checkpoints/SVD-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 20191303 - My data exploration lab notes
# # Miniproject - data exploration
#
# [Global Terrorism Database](https://www.kaggle.com/START-UMD/gtd)
#
# Questions:
#
# # Instructions:
#
# 1. Download data set, Global Terrorism Database, from https://www.kaggle.com/START-UMD/gtd
# 2. Take a quick look at the data set. Check what's inside, how the data is structured, and where the data is corrupted (missing values, bad structure, etc).
# 3. Think and create 5 questions to the data. Try to ask yourself what's really interesting in the data set. What's not so obvious. E.g. some trends, patterns, correlations.
# 4. Create a jupyter notebook and use python, numpy, pandas, matplotlib (at least) to provide all the answers to your questions.
# 5. Create a new github repository, and put your jupyter notebook there.
# 6. Create readme.md file as well in your github root directory with all necessary instructions (what is in the repo, what libs are necessary to run the code, where to find data set and where to save it - this is necessary because the dataset is too big for github repo).
# 7. Provide the necessary documentation and introduction in your notebook using markdown language, at least: data source description, data structure, importing process, data processing process.
# 8. Put some data visualization in your notebook. Sometimes it's much easier to present the answer using a chart rather than numbers
# 9. Check if your notebook run smoothly - use 'Reset & Run All' command from the menu. Save it.
# 10. Export the notebook as HTML as well, and save the file in the repo.
# 11. Do not forget to commit/push all the changes to your repo on hithub.
# 12. Smile :) You did a good job!
#
# FAQ:
# 1. Can I take a look at different solution provided at kaggle? Yes, you can. But check more than one solution. Try to understand what the authors are trying to solve, and how could it be used in your project. Try to find really good examples - easy to understand and not so complicated. Remember - you create the notebook as an instruction to someone else! Try to not complicate the process.
# 2. Can I take a look at my friend's solution, that he/she has just put on github? Yes, you can. But it's the smart way of solving the project. I'm sure that you want to be smarter in the next semester - so try to create a better solution and your own one :)
# 3. Jupyter notebook provide R kernel, so can I use R instead? Nope, R sucks. Even if you love R, try to solve the project using Python.
#
# ## Importing data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# latin-1 style decoding: the GTD dump contains non-UTF-8 characters
data = pd.read_csv('globalterrorismdb_0718dist.csv',encoding='ISO-8859-1')
data.head()
# Unfortunately, many records have the value "Unknown". Let's find out how many that is.
len(data[data['gname']=="Unknown"])/len(data)*100
# There is around 45% "Unknown's" attackers in the database.
# ## 1.Which groups are the most active?
plt.subplots(figsize=(15,10))
# attack counts per perpetrator group, most frequent first
no_of_classes = data['gname'].value_counts(sort=True)
# NOTE(review): .sample(n=100) picks 100 *random* groups, not the most
# active ones; for "most active" this should likely be .head(100) -- verify intent
no_of_classes_sample = no_of_classes.sample(n=100)
p = sns.barplot(no_of_classes_sample.index,no_of_classes_sample)
p.set(xlabel="Group Name",ylabel="Number of Attacks")
p.set_xticklabels(no_of_classes_sample.index,rotation=90)
plt.show()
# Let's delete the unknown groups from our database
# Drop rows where the perpetrator group or the city is unrecorded.
data = data[data['gname']!='Unknown']
data = data[data['city']!='Unknown']
# ## 2.Number of kills from 1970 to 2017 around the world.
# BUG FIX: fillna returns a new Series; the original call discarded the
# result, so the assignment below is required for the NaN kill counts to
# actually become zeros in the dataframe.
data['nkill'] = data['nkill'].fillna(0)
# total recorded kills per year
no_of_kills = data.groupby('iyear')['nkill'].sum()
plt.subplots(figsize=(15,10))
plot1 = sns.barplot(no_of_kills.index,no_of_kills)
plot1.set_xlabel("Year")
plot1.set_ylabel("Number of Kills")
plot1.set_xticklabels(no_of_kills.index,rotation=90)
plt.show()
# We can observe a slight sinusoidal pattern in the number of deaths. Unfortunately, the peak of deaths is in the 21st century.
#
# ## 3.Terrorist Activity in every year by Region
plt.subplots(figsize=(15,10))
# attack counts per (region, year)
d = data.groupby(['region_txt','iyear'])['iyear'].count()
d = pd.DataFrame(d)
d.reset_index(level=0,inplace=True)
d.columns = ['Region','Number of Attacks']
d.reset_index(level=0,inplace=True)
d['dummy']=0  # constant 'unit' column required by sns.tsplot
color_set = ["#e82727","#ff8649","#ffc549","#fced20","#8fe222","#22e28e","#22e2db","#1f85c4","#361599","#d11fce","#00ff99","#fa97fc"]
# NOTE(review): sns.tsplot was deprecated and later removed from seaborn;
# port to sns.lineplot for current versions
t = sns.tsplot(time='iyear',value='Number of Attacks',condition='Region',data=d,unit='dummy',color = sns.color_palette(color_set))
t.set_xlabel("Year")
plt.show()
# Based on this chart. We can assume that Australia is the safest continent to live in. Asia, Middle East and North Africa and Sub-Saharan Africa are the most dangerous regions.
# ## 4.Most dangerous city
# Here are the top 10 dangerous cities in 1970-2017
def attacks_terr(data):
    """Plot yearly attack counts for the 10 most-attacked cities.

    Builds a (city, year) -> count table, keeps the 10 cities with the most
    attacks overall, and draws one time series per city.  Displays the plot
    as a side effect; returns None.
    """
    plt.subplots(figsize=(15,10))
    # count attacks per (city, year)
    d = pd.DataFrame(data.groupby(['city','iyear'])['iyear'].count())
    d.columns=['Number_of_Attacks']
    d.reset_index(inplace=True)
    # keep only the 10 cities with the highest overall attack counts
    d2 = d.loc[d['city'].isin(data['city'].value_counts().sort_values(ascending=False).index[:10])]
    # pivot to city x year; years without attacks become 0
    d2 = d2.pivot(index='city',columns='iyear',values='Number_of_Attacks').fillna(0)
    d2 = pd.DataFrame(d2.unstack(level=0))
    d2.reset_index(inplace=True)
    d2.columns=['Year','City','Number of Attacks']
    color_set = ["#e82727","#ff8649","#ffc549","#fced20","#8fe222","#22e28e","#22e2db","#1f85c4","#361599"]
    d2['dummy']=0  # constant 'unit' column required by sns.tsplot
    # NOTE(review): sns.tsplot was removed from seaborn; use sns.lineplot on current versions
    sns.tsplot(time='Year',value='Number of Attacks',condition='City',data=d2,unit='dummy',color = sns.color_palette(color_set))
    plt.show()
attacks_terr(data)
# ## 5. Statistics about 2017
# In 2017 majority of attacks happened within 10 countries.
plt.subplots(figsize=(10,10))
d = pd.DataFrame(data[data['iyear']==2017]['country_txt'].value_counts(sort=True)[:10])
d.reset_index(inplace=True)
d.columns=['country','no_of_attacks']
append = ['Rest of the world',sum(data[data['iyear']==2017]['country_txt'].value_counts(sort=True)) - sum(data[data['iyear']==2017]['country_txt'].value_counts(sort=True)[:10])]
d.loc[len(d)]= append
color_set = ["#e82727","#ff8649","#ffc549","#fced20","#8fe222","#22e28e","#22e2db","#1f85c4","#361599","#d11fce","#00ff99","#fa97fc"]
plt.title("Percentage of attacks in each country")
plt.pie(d['no_of_attacks'],labels=d['country'],colors= color_set,autopct='%.1f%%')
plt.show()
# Another interesting statistic shows that in 2017 more than 85% of deaths due to attacks occurred in 10 countries.
plt.subplots(figsize=(10,10))
d = data[data['iyear']==2017]
d2 = pd.DataFrame(d.groupby('country_txt')['nkill'].sum().sort_values(ascending=False)[:10])
d2.reset_index(inplace=True)
append = ['Rest of the world',(d['nkill'].sum()- d2['nkill'].sum())]
d2.loc[len(d2)] = append
color_set = ["#e82727","#ff8649","#ffc549","#fced20","#8fe222","#22e28e","#22e2db","#1f85c4","#361599","#d11fce","#00ff99","#fa97fc"]
plt.title("Percentage of deaths in each country")
plt.pie(d2['nkill'],labels=d2['country_txt'],colors= color_set,autopct='%.1f%%')
plt.show()
|
dataexp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('./home/aistudio/work')
# +
# the following code is mainly used to unzip the required data to specific folder.
# import os,zipfile
# src_file='home.zip'
# zf=zipfile.ZipFile(src_file)
# zf.extractall('./home')
# zf.close
# -
import transformers
print(transformers.__version__)
# ! ls ./home/aistudio/data/data56340
# ! ls ./home/aistudio/work
import torch
from torch import nn
class Config:
    """Minimal stand-in for a BERT model configuration holding the fields used below."""

    def __init__(self):
        self.hidden_size = 768                   # transformer hidden dimension
        self.num_attention_heads = 12            # attention heads per layer
        self.attention_probs_dropout_prob = 0.1  # dropout on attention probabilities
class BertPooler(nn.Module):
    """Pool a BERT sequence output by transforming the first ([CLS]) token.

    Input hidden_states has shape [batch_size, seq_len, hidden_dim]; the
    output is a [batch_size, hidden_dim] summary vector obtained by passing
    the first token's hidden state through a Linear + Tanh layer.
    """

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "pool" by taking only the hidden state of the first token
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
# Smoke test: pooling a [batch=8, seq=50, hidden=768] tensor must yield [8, 768].
config=Config()
bertpooler=BertPooler(config)
input_tensor=torch.ones([8,50,768])
output_tensor=bertpooler(input_tensor)
assert output_tensor.shape==torch.Size([8,768])
# internal function from tokenizer
from typing import List,Optional,Tuple
def build_inputs_with_special_tokens(self,token_ids_0:List[int],token_ids_1:Optional[List[int]]=None)->List[int]:
    """Add BERT special tokens: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
    cls=[self.cls_token_id]
    sep=[self.sep_token_id]
    if token_ids_1 is None:
        return cls+token_ids_0+sep
    return cls + token_ids_0 + sep + token_ids_1 + sep
from transformers import BertTokenizer
tokenizer=BertTokenizer.from_pretrained('./home/aistudio/data/data56340')
inputs_1=tokenizer('欢迎大家来到后厂理工学院学习.')
print(inputs_1)
inputs_2=tokenizer('欢迎大家来到后厂理工学院学习','认识新朋友是一件快乐的事情.')
print(inputs_2)
inputs_3=tokenizer.encode('欢迎大家来到后厂理工学院学习','认识新朋友是一件快乐的事情.')
print(inputs_3)
inputs_4=tokenizer.build_inputs_with_special_tokens(inputs_3)
print(inputs_4)
# +
# Masking scheme: 15% of input tokens are selected at random; of those, 80%
# are replaced with [MASK], 10% with a random token, and the final 10% are
# left unchanged.
# Three modes are mixed because fine-tuning never applies [MASK]; this
# reduces the input-distribution mismatch between pre-training and fine-tuning.
# MLM output layer definition
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform the hidden states, then project to vocabulary logits."""

    def __init__(self,config):
        super().__init__()
        # NOTE(review): BertPredictionHeadTransform is neither defined nor
        # imported in this notebook -- import it from transformers before running
        self.transform=BertPredictionHeadTransform(config)
        # The decoder weight is tied to the input embedding matrix: the
        # embedding and the output projection are near-inverse operations,
        # so the embedding row closest to hidden vector v gets the largest logit.
        self.decoder=nn.Linear(config.hidden_size,config.vocab_size,bias=False)
        self.bias=nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias=self.bias
    def forward(self,hidden_states):
        hidden_states=self.transform(hidden_states)
        hidden_states=self.decoder(hidden_states)
        return hidden_states
# MLM only: pre-train with BertForMaskedLM (loss: CrossEntropyLoss)
# MLM + NSP: pre-train with BertForPreTraining (loss: CrossEntropyLoss)
# the NSP head is simply: self.seq_relationship=nn.Linear(config.hidden_size,2)
# +
# DAPT: Domain-Adaptive Pretraining -- continue pretraining on unlabeled in-domain text
# TAPT: Task-Adaptive Pretraining -- continue pretraining on the task's own unlabeled text
# -
# mask-token processing
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
def mask_token(inputs:torch.Tensor,tokenizer:BertTokenizer,args)->Tuple[torch.Tensor,torch.Tensor]:
    """Apply BERT-style MLM masking to a batch of token ids.

    NOTE: ``inputs`` is modified in place *and* returned; clone it first if
    the caller still needs the unmasked ids.  Returns (masked_inputs, labels)
    where labels is -100 everywhere except at the masked positions (-100 is
    the index ignored by CrossEntropyLoss).
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language model. Remove the --mlm flag if you want to use this tokenizer.')
    labels=inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    probability_matrix=torch.full(labels.shape,args.mlm_probability)
    # filter the exist special token which will not be masked anymore.
    special_tokens_mask=[tokenizer.get_special_tokens_mask(val,already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask,dtype=torch.bool),value=0.0)
    # filter the exist pad token which will not be masked anymore
    if tokenizer.pad_token is not None:
        padding_mask=labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask,value=0.0)
    # get out the possible masked position with 1.0 which means 15% of all pure tokens will be picked out for relevant masking.
    masked_indices=torch.bernoulli(probability_matrix).bool()
    # we only need the masked position to compute loss while the other token ids are set to be -100
    labels[~masked_indices]=-100
    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced=torch.bernoulli(torch.full(labels.shape,0.8)).bool()&masked_indices
    inputs[indices_replaced]=tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, we replace masked input tokens with random word
    # (0.5 of the ~20% not already replaced by [MASK] = 10% of all masked tokens)
    indices_random=torch.bernoulli(torch.full(labels.shape,0.5)).bool()&masked_indices&~indices_replaced
    random_words=torch.randint(len(tokenizer),labels.shape,dtype=torch.long)
    inputs[indices_random]=random_words[indices_random]
    # The rest of the time(10%) we keep the masked input tokens unchanged
    return inputs,labels
from transformers import BertTokenizer
tokenizer=BertTokenizer.from_pretrained('./home/aistudio/data/data56340')
txt = 'AI Studio是基于百度深度学习平台飞桨的人工智能学习与实训社区,提供在线编程环境、免费GPU算力、海量开源算法和开放数据,帮助开发者快速创建和部署模型。'
inputs_all=tokenizer(txt)
pre_inputs=torch.tensor([inputs_all['input_ids']])
print(pre_inputs)
class Args:
    """Container for the MLM hyperparameters consumed by mask_token."""

    def __init__(self):
        # probability that a token is selected for masking (BERT default)
        self.mlm_probability = 0.15
inputs,labels=mask_token(pre_inputs,tokenizer,args)
print(inputs)
print(labels)
# # large scale model training strategy
#
# # gradient accumulation
# # typically used for single-GPU training, to avoid exhausting GPU memory
# if args.max_steps>0:
# t_total=args.max_steps
# args.num_train_epochs=args.max_steps//(len(train_dataloader)//args.gradient_accumulation_steps)+1
# else:
# t_total=len(train_dataloader)//args.gradient_accumulation_steps*args.num_train_epochs
#
# # for i, (inputs, labels) in enumerate(training_set):
# # loss = model(inputs, labels) # 计算loss
# # loss = loss / accumulation_steps # Normalize our loss (if averaged)
# # loss.backward() # 反向计算梯度,累加到之前梯度上
# # if (i+1) % accumulation_steps == 0:
# # optimizer.step() # 更新参数
# # model.zero_grad() # 清空梯度
#
# # Nvidia 混合精度工具apex
# if args.fp16:
# try:
# from apex import amp
# except ImportError:
# raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training")
# model,optimizer=amp.initialize(model,optimizer,opt_level=args.fp16_opt_level)
#
# # multi-gpu training (should be after apex fp16 initialization)
# if args.n_gpu>1:
# model=torch.nn.DataParallel(model)
#
# # distributed traing (should be after apex fp16 initialization)
# if args.local_rank != -1:
# model=torch.nn.parallel.DistributedDataParallel(model,device_ids=[args.local_rank],output_device=args.local_rank,find_unused_parameters=True)
#
# # large-scale pretrained Transformer models use learning-rate warmup for both pretraining and fine-tuning
# # scheduler =get_linear_schedule_with_warmup(optimizer,num_warmup_steps=args.warmup_steps,num_training_steps=t_total)
# +
# At the start of pretraining, BERT knows little about the data distribution, and its weights change rapidly during the first epoch. If the learning rate is large from the beginning, the model is very likely to overfit those early updates, and many extra epochs are needed to recover, wasting training time. Once the model has learned some of the distribution, the learning rate can be raised so that convergence is faster and training more stable; this gradual ramp-up of the learning rate from a small initial value is the warmup phase.
# Later in training, especially as the model approaches convergence, a large learning rate makes convergence difficult; lowering the learning rate allows finer adjustment of the weights.
# +
# train process
import logging
import os

import tqdm
# `train` below calls tqdm(...) and trange(...) directly, so the callables
# must be imported (module-level `import tqdm` alone would make tqdm(...) fail
# and leave trange undefined). The from-import intentionally rebinds `tqdm`.
from tqdm import tqdm, trange
# Module-level logger with timestamped INFO-level output.
logging.basicConfig(level=logging.INFO, format='%(asctime)s-%(name)s-%(levelname)s-%(message)s')
logger = logging.getLogger(__name__)
def train(args, train_dataset, model: PreTrainedModel, tokenizer: BertTokenizer) -> Tuple[int, float]:
    """Train a (masked) language model and return ``(global_step, mean_loss)``.

    Follows the HuggingFace ``run_language_modeling`` recipe: dynamic batch
    padding, gradient accumulation, optional apex fp16, multi-GPU /
    distributed wrappers, linear-warmup scheduling, checkpoint resume, and
    periodic checkpoint saving plus TensorBoard logging.
    """
    # Only the main process (rank -1 single-GPU, or rank 0) writes TensorBoard logs.
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_batch_size * max(1, args.n_gpu)

    # Pad every example in a batch to that batch's longest sequence.
    def collate(examples: List[torch.tensor]):
        if tokenizer._pad_token is None:
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)

    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    # create dataloader for training
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate)
    # Derive the total number of optimization steps (accounts for gradient accumulation).
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Unwrap a previously wrapped model before (re)wrapping below.
    model = model.module if hasattr(model, 'module') else model  # take care of distributed/parallel training
    # BUGFIX: resize to the tokenizer's vocabulary size — the original passed
    # len(dataloader) (number of batches), which is unrelated to the vocab.
    model.resize_token_embeddings(len(tokenizer))
    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint.
    if (args.model_name_or_path
            and os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt'))
            and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        # BUGFIX: was torch.laod (AttributeError at runtime).
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    # Mixed-precision training via NVIDIA apex.
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    # display log information before training
    logger.info("***** Running training *****")
    logger.info('Num examples =%d', len(train_dataset))
    logger.info("Num Epochs =%d", args.num_train_epochs)
    logger.info("Instantaneous batch size per GPU=%d", args.per_gpu_batch_size)
    logger.info("Total train batch size(w.parallel,distribute&accumulation)=%d",
                args.train_batch_size * args.gradient_accumulation_steps *
                (torch.distributed.get_world_size() if args.local_rank != -1 else 1),)
    logger.info("Gradient Accumulation steps=%d", args.gradient_accumulation_steps)
    logger.info("Total optimization steps=%d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if args.model_name_or_path and os.path.exists(args.model_name_or_path):
        try:
            # Checkpoint dirs are named "checkpoint-<global_step>"; recover the step.
            checkpoint_suffix = args.model_name_or_path.split('-')[-1].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info("Continuing training from checkpoint, will skip to saved global step")
            logger.info("Continuing training from epoch %d", epochs_trained)  # BUGFIX: "epcoh" typo
            logger.info("Continuing training from global step %d", global_step)
            logger.info("Will skip the first %d step in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine_tuning")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch',
                            disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
        if args.local_rank != -1:
            train_sampler.set_epoch(epoch)
        for step, batch in enumerate(epoch_iterator):
            # skip past any already trained step if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            # Mask tokens for MLM; for causal LM the labels are the inputs themselves.
            inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # BUGFIX: use the in-place clip_grad_norm_ (the un-suffixed
                # clip_grad_norm is the deprecated/removed spelling).
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # BUGFIX: was 'agrs.local_rank' (NameError at runtime).
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # log metrics (BUGFIX: the SummaryWriter method is add_scalar, not add_scaler)
                    if args.local_rank == -1 and args.evaluate_during_training:
                        # only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # BUGFIX: was 'checkpoint_predix' while later lines read
                    # 'checkpoint_prefix' (NameError); also 'args.outout_dir' typo.
                    checkpoint_prefix = 'checkpoint'
                    # save model checkpoint
                    output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                    os.makedirs(output_dir, exist_ok=True)
                    model_to_save = (model.module if hasattr(model, "module") else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
# -
# ```python
# from tensorboardX import SummaryWriter
# writer = SummaryWriter('runs/scalar_example')
# for i in range(10):
# writer.add_scalar('quadratic', i**2, global_step=i)
# writer.add_scalar('exponential', 2**i, global_step=i)
# ```
from transformers import get_linear_schedule_with_warmup
# ## Bert pretraining skills study
# We study the skills around the **MLM** and **NSP** and learn how to do MLM and NSP, learn how to get **bertpool** output,learn how to use **BertTokenizer**, replicate the function of **mask token** operation, go through all the process of **training**.
#
# ### Main Content
# - How to create and use BertPooler
# - How to use BertTokenizer
# - Reference code for understanding BertForMaskedLM
# - Introduction about DAPT and TAPT
# - How to mask token for MLM
# - Large scale model training strategy
# - Learn the whole training code and process
#
# ### Packages
# - torch
# - transformers
# - typing
# - apex
# - logging
# - tensorboardX
# - tqdm
#
# ### Important functions
# - nn.Module
# - nn.Linear()
# - nn.parameter()
# - torch.full()
# - torch.eq()
# - torch.tensor(dtype=torch.bool)
# - torch.masked_fill()
# - torch.bernoulli()
# - torch.randint()
# - torch.nn.DataParallel()
# - torch.nn.parallel.DistributedDataParallel()
# - torch.utils.data.DataLoader()
# - torch.nn.utils.rnn.pad_sequence()
# - hasattr()
# - AdamW()
# - logging.getLogger().info()
# - logging.basicConfig()
# - SummaryWriter().add_scalar()
# - SummaryWriter().close()
# - os.path.isfile()
# - os.path.join()
# - for step,batch in enumerate(tqdm(dataloader))
# - trange means tqdm(range())
# - epoch_iterator.close()
# - loss.backward()
# - torch.nn.utils.clip_grad_norm_()
# - optimizer.step()
# - scheduler.step()
# - model.zero_grad()
# - os.makedirs(exist_ok=True)
# - model.save_pretrained()
# - tokenizer.save_pretrained()
# - torch.save(optimizer.state_dict(),filedir)
# - get_linear_schedule_with_warmup(optimizer,num_warmup_steps,num_training_steps)
#
# ### Special code
# ```python
# # class BertLMPredictionHead segment
# self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# self.decoder.bias = self.bias
#
# # mask token segment
# probability_matrix = torch.full(labels.shape, args.mlm_probability)
# special_tokens_mask = [
# tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
# ]
# probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
# if tokenizer._pad_token is not None:
# padding_mask = labels.eq(tokenizer.pad_token_id)
# probability_matrix.masked_fill_(padding_mask, value=0.0)
# masked_indices = torch.bernoulli(probability_matrix).bool()
# labels[~masked_indices] = -100 # We only compute loss on masked tokens
#
# # train process
#
# # 补齐pad and create dataloader
# def collate(examples: List[torch.Tensor]):
# if tokenizer._pad_token is None:
# return pad_sequence(examples, batch_first=True)
# return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
#
# train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
# train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate
# )
#
# # Prepare optimizer and schedule (linear warmup and decay)
# no_decay = ["bias", "LayerNorm.weight"]
# optimizer_grouped_parameters = [
# {
# "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
# "weight_decay": args.weight_decay,
# },
# {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
# ]
# optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# scheduler = get_linear_schedule_with_warmup(
# optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
# )
#
# # Load in optimizer and scheduler states
# optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
# scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
#
# ```
|
bert_pretraining/Bert_Pretraining.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/riaasindh12/LetsUpgrade-python-B7/blob/master/RiyaAssignday5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="OMZ-Y_F5mzxo" colab_type="code" colab={}
#ASSIGNMENT OF DAY 5
#PYTHON ESSENTIALS
#Batch 7
# + id="EgU1zOix6gX5" colab_type="code" colab={}
#ASSIGNMENT NO. 2
# + id="_5Bgv8nA6FBD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="458ec799-024d-4030-dd8c-33c8327ebfe9"
def prime(n):
    """Return True if n is a prime number, False otherwise.

    BUGFIX: the original returned True as soon as ONE candidate divisor
    failed to divide n (the else-return sat inside the loop), so every
    odd composite — e.g. 9, 39 — was reported prime. The loop must
    finish without finding any divisor before returning True.
    """
    if n < 2:
        # 0, 1 and negatives are not prime.
        return False
    if n == 2:
        return True
    for x in range(2, n):
        if n % x == 0:
            return False
    # No divisor found in [2, n): n is prime.
    return True
prime(38)
# + id="XTDvm32brl6s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5fd1a91d-9c4c-4899-aa41-642af4fb1853"
prime(39)
# + id="_V5AYxj1oDJ0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="9ef788ae-7c0a-42eb-e87d-792afc485764"
# Collect values below 2500 that prime() accepts, two equivalent ways.
lst = list(range(2500))
# Comprehension form of the original append loop.
lst_prime = [item for item in lst if prime(item)]
# filter() form: the same selection, printed for display.
fltr = filter(prime, lst)
print(list(fltr))
# + id="0Q4F1ZTWr6HK" colab_type="code" colab={}
#ASSIGNMENT No. 3
#(Using lambda function)
# + id="VmfuTjEWr63Y" colab_type="code" colab={}
# Single-element list holding one lower-case sentence.
lst=['hey this is riya, I am in delhi ']
# + id="HH5nznIPspPN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="87704822-cdaa-426e-d864-1c71c2dc835f"
lst
# + id="A_4Nx_sHuViU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="2991ef97-3e49-4ad7-8eee-e610910d3793"
# Title-case each string via map + lambda; list() materializes the map object.
lst_new=map(lambda x:x.title(),lst)
list(lst_new)
# + id="68An7hdt0ahK" colab_type="code" colab={}
#ASSIGNMENT No. 1
# + id="ChxNrzJj0bPV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="f7c80ea0-9831-44b5-d1af-92ba6a5c4793"
# Check whether every element of sub_list occurs somewhere in check_list.
# Set-based membership: duplicates and order are deliberately ignored.
check_list = [1, 5, 6, 1, 2, 2, 3, 5]
sub_list = [1, 1, 5]
print("list :" + str(check_list))
print("Sublist :" + str(sub_list))
# The original set a 0/1 flag and then tested it — the condition can be
# tested directly; issubset also accepts any iterable, so no second set().
if set(sub_list).issubset(check_list):
    print(" Its a match ")
else:
    print("Its gone ")
|
RiyaAssignday5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='https://www.anadronestarting.com/wp-content/uploads/intel-main_opt.png' width=50%>
#
# # 모바일넷을 이용한 이미지분류 - MNIST
# <font size=5><b>(Image Classification using Mobilenet)<b></font>
#
# <div align='right'>성 민 석<br>(Minsuk Sung)</div>
#
# <img src='https://upload.wikimedia.org/wikipedia/commons/thumb/2/27/MnistExamples.png/440px-MnistExamples.png' width=60%>
#
# ---
# + [markdown] toc=true
# <h1>강의목차<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#필요한-라이브러리-및-옵션" data-toc-modified-id="필요한-라이브러리-및-옵션-1"><span class="toc-item-num">1 </span>필요한 라이브러리 및 옵션</a></span><ul class="toc-item"><li><span><a href="#기본-라이브러리(Library)" data-toc-modified-id="기본-라이브러리(Library)-1.1"><span class="toc-item-num">1.1 </span>기본 라이브러리(Library)</a></span></li><li><span><a href="#옵션(Option)" data-toc-modified-id="옵션(Option)-1.2"><span class="toc-item-num">1.2 </span>옵션(Option)</a></span></li></ul></li><li><span><a href="#예제---Fashion-MNIST" data-toc-modified-id="예제---Fashion-MNIST-2"><span class="toc-item-num">2 </span>예제 - Fashion MNIST</a></span><ul class="toc-item"><li><span><a href="#Fashion-MNIST-데이터-불러오기" data-toc-modified-id="Fashion-MNIST-데이터-불러오기-2.1"><span class="toc-item-num">2.1 </span>Fashion MNIST 데이터 불러오기</a></span></li><li><span><a href="#Fashion-MNIST-데이터-형태-확인하기" data-toc-modified-id="Fashion-MNIST-데이터-형태-확인하기-2.2"><span class="toc-item-num">2.2 </span>Fashion MNIST 데이터 형태 확인하기</a></span><ul class="toc-item"><li><span><a href="#Train-데이터셋" data-toc-modified-id="Train-데이터셋-2.2.1"><span class="toc-item-num">2.2.1 </span>Train 데이터셋</a></span></li><li><span><a href="#Validation-데이터셋" data-toc-modified-id="Validation-데이터셋-2.2.2"><span class="toc-item-num">2.2.2 </span>Validation 데이터셋</a></span></li><li><span><a href="#Test-데이터셋" data-toc-modified-id="Test-데이터셋-2.2.3"><span class="toc-item-num">2.2.3 </span>Test 데이터셋</a></span></li></ul></li><li><span><a href="#데이터-시각화하기" data-toc-modified-id="데이터-시각화하기-2.3"><span class="toc-item-num">2.3 </span>데이터 시각화하기</a></span></li><li><span><a href="#데이터-전처리" data-toc-modified-id="데이터-전처리-2.4"><span class="toc-item-num">2.4 </span>데이터 전처리</a></span></li><li><span><a href="#모델링" data-toc-modified-id="모델링-2.5"><span class="toc-item-num">2.5 </span>모델링</a></span><ul class="toc-item"><li><span><a href="#모델-구성" data-toc-modified-id="모델-구성-2.5.1"><span class="toc-item-num">2.5.1 </span>모델 구성</a></span></li><li><span><a 
href="#모델-컴파일" data-toc-modified-id="모델-컴파일-2.5.2"><span class="toc-item-num">2.5.2 </span>모델 컴파일</a></span></li><li><span><a href="#모델-확인" data-toc-modified-id="모델-확인-2.5.3"><span class="toc-item-num">2.5.3 </span>모델 확인</a></span></li></ul></li><li><span><a href="#신경망-모델-학습" data-toc-modified-id="신경망-모델-학습-2.6"><span class="toc-item-num">2.6 </span>신경망 모델 학습</a></span></li><li><span><a href="#신경망-모델-평가" data-toc-modified-id="신경망-모델-평가-2.7"><span class="toc-item-num">2.7 </span>신경망 모델 평가</a></span></li><li><span><a href="#신경망-모델-검증하기" data-toc-modified-id="신경망-모델-검증하기-2.8"><span class="toc-item-num">2.8 </span>신경망 모델 검증하기</a></span></li><li><span><a href="#다음-예제에서는" data-toc-modified-id="다음-예제에서는-2.9"><span class="toc-item-num">2.9 </span>다음 예제에서는</a></span></li></ul></li><li><span><a href="#참고" data-toc-modified-id="참고-3"><span class="toc-item-num">3 </span>참고</a></span></li></ul></div>
# -
# ## 필요한 라이브러리 및 옵션
#
# ### 기본 라이브러리(Library)
import os
import sys
import glob
import random
import warnings
import itertools
from tqdm import tqdm
from pathlib import Path
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import cv2
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from IPython.display import SVG
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, ElementTree
import keras
import tensorflow as tf
from tensorflow.keras.datasets import mnist,cifar10
from tensorflow.keras.preprocessing.image import load_img,img_to_array,ImageDataGenerator
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, Conv2D,GlobalAveragePooling2D
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.utils import to_categorical,plot_model
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.callbacks import Callback
from tensorflow.python.client import device_lib
# ### 옵션(Option)
# +
# Restrict TensorFlow to GPU 0 only.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# NOTE(review): the second filterwarnings call installs the 'default'
# action in front of 'ignore', so warnings are effectively re-enabled —
# confirm which behavior was intended.
warnings.filterwarnings(action='ignore')
warnings.filterwarnings(action='default')
# %matplotlib inline
# Show the devices TensorFlow can see (CPU/GPU).
print(device_lib.list_local_devices())
keras.backend.tensorflow_backend._get_available_gpus()
# -
# ---
#
# ## 예제 - Fashion MNIST
#
# <img src='https://markusthill.github.io/images/2017-10-12-zalandos-fashion-mnist-dataset/zalando10x10.jpeg' width=70%>
#
# Fashion-MNIST는 60,000 개의 학습 세트와 10,000 개의 테스트 세트로 구성된 Zalando의 기사 이미지 데이터 세트입니다. 각 예제는 28 개의 28 개의 회색조 이미지이며 10 개의 클래스 레이블과 연결되어 있습니다. Fashion-MNIST는 기계 학습 알고리즘 벤치마킹을 위해 원본 MNIST 데이터 세트를 직접 대체하는 역할을합니다.
#
# 출처 : [zalando research](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/)
# ### Fashion MNIST 데이터 불러오기
# +
# Load the Fashion-MNIST CSVs: column 0 is the class label, the
# remaining columns are the flattened pixel values (reshaped to 28x28 later).
train = pd.read_csv('./data/fashion-mnist/fashion-mnist_train.csv')
test = pd.read_csv('./data/fashion-mnist/fashion-mnist_test.csv')
X_train = np.array(train.iloc[:, 1:])
y_train = np.array(train.iloc[:, 0])
X_test = np.array(test.iloc[:, 1:])
y_test = np.array(test.iloc[:, 0])
# -
# Split Train into Train / Validation.
# Holding out a validation set lets us detect overfitting during training.
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train, y_train, test_size=0.2, random_state=13)
# Report the size of each split.
print('Fashion MNIST Train 데이터의 크기 : {}'.format(len(X_train)))
print('Fashion MNIST Validation 데이터의 크기 : {}'.format(len(X_valid)))
print('Fashion MNIST Test 데이터의 크기 : {}'.format(len(X_test)))
# ### Fashion MNIST 데이터 형태 확인하기
# #### Train dataset
print("X_train Shape : ",X_train.shape) # 48,000 images of 784 pixels each
print("y_train Shape : ",y_train.shape) # one label per training image
# #### Validation dataset
print("X_val Shape : ",X_valid.shape) # 12,000 validation images
print("y_val Shape : ",y_valid.shape) # one label per validation image
# #### Test dataset
print("X_test Shape : ",X_test.shape) # 10,000 test images
print("y_test Shape : ",y_test.shape) # one label per test image
# ### Visualize the data
#
# Take a closer look at the first sample of the training set.
X_train[0].shape
plt.imshow(X_train[0].reshape(28,28), cmap='Greys', interpolation='nearest')
plt.show()
# 50000개의 중 랜덤으로 한번 살펴보자
def get_classlabel(code):
    """Translate a Fashion-MNIST integer label (0-9) into its class name.

    Raises KeyError for codes outside 0-9, matching dict lookup semantics.
    """
    label = dict(enumerate((
        'T-Shirt', 'Pants', 'Pullover', 'Dress', 'Coat',
        'Sandle', 'Shirt', 'Sneakers', 'Bag', 'AnkleBoots')))
    return label[code]
# +
# Fix the random seed for reproducibility.
np.random.seed(1234)
# Draw 16 random indices from the training set.
# BUGFIX: randint's upper bound is exclusive, so the original
# len(X_train)+1 could draw index len(X_train) itself and raise an
# IndexError; the valid range is [0, len(X_train)).
samples = np.random.randint(0, len(X_train), size=16)
# Prepare the figure for the 4x4 Fashion-MNIST grid.
plt.figure(figsize=(12,8))
# Plot the 16 sampled images with their class-name titles.
for count, n in enumerate(samples, start=1):
    plt.subplot(4, 4, count)
    plt.imshow(X_train[n].reshape(28,28), cmap='Greys', interpolation='nearest')
    label_name = "Label:" + str(get_classlabel(y_train[n]))
    plt.title(label_name)
plt.tight_layout()
plt.show()
# -
# ### 데이터 전처리
# +
# Reshape flat pixel rows into (28, 28, 1) single-channel images.
X_train = X_train.reshape(X_train.shape[0],28,28,1)
X_valid = X_valid.reshape(X_valid.shape[0],28,28,1)
X_test = X_test.reshape(X_test.shape[0],28,28,1)
# Convert integer pixel values to float32 for the scaling below.
X_train = X_train.astype('float32')
X_valid = X_valid.astype('float32')
X_test = X_test.astype('float32')
# Scale pixels to [0, 1]; this helps the network train smoothly.
X_train = X_train / 255
X_valid = X_valid / 255
X_test = X_test / 255
# One-hot encode the labels with Keras' to_categorical,
# e.g. 3 -> [0 0 0 1 0 0 0 0 0 0].
y_train = to_categorical(y_train, 10)
y_valid = to_categorical(y_valid,10)
y_test = to_categorical(y_test, 10)
# -
# ### 모델링
#
# 간단하게 CNN 모델을 이용하여, MNIST 데이터를 분류해보자.
INPUT_SHAPE = (28,28,1)   # 28x28 single-channel input
KERNEL_SIZE = (3, 3)      # convolution kernel size
DROP_RATE = 0.3           # dropout rate before the output layer
# #### Build the model
# Keras' Sequential API stacks layers like building blocks via add().
# Here: two Conv/MaxPool stages, then Dense layers for classification.
# +
model = Sequential()
# First conv stage
model.add(Conv2D(filters=64,
                 kernel_size=KERNEL_SIZE,
                 activation='relu',
                 input_shape=INPUT_SHAPE
                 ))
# Max Pooling Layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Second conv stage
model.add(Conv2D(filters=128,
                 kernel_size=KERNEL_SIZE,
                 activation='relu',
                 ))
# Max Pooling Layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten to 1D for the dense classifier
model.add(Flatten())
model.add(Dense(units=1024, activation='relu'))
model.add(Dropout(rate=DROP_RATE))
# Final fully connected layer; labels are one-hot, so use softmax
model.add(Dense(units=10, activation='softmax'))
# -
# #### 모델 컴파일
# Compile with categorical cross-entropy (one-hot labels) and Adam.
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(),
              metrics=['acc'])
# #### Inspect the model
# +
model.summary()
# Save a diagram of the architecture alongside the textual summary.
plot_model(model, to_file='./img/model/fashion_mnist_cnn_model.png', show_shapes=True)
# -
# ### 신경망 모델 학습
#
# Keras의 `fit`의 메소드를 통해서 간단하게 학습가능하다. 이번 예제에서는 여기서는 10번의 epoch만으로 학습을 진행하도록 한다. 그리고 앞서 준비한 검증 데이터(Validation Set)을 통해서 신경망의 오버피팅 여부를 판단하도록 하자.
EPOCHS = 10        # full passes over the training set
BATCH_SIZE = 128   # samples per gradient update
history = model.fit(X_train,                 # training images
                    y_train,                 # one-hot training labels
                    epochs=EPOCHS,           # number of epochs
                    batch_size=BATCH_SIZE,   # batch size
                    use_multiprocessing=True,
                    validation_data=(X_valid, y_valid)  # monitored for overfitting
                    )
# Persist the trained weights.
model.save('./bin/fashion_mnist_cnn.h5')
# ### 신경망 모델 평가
#
# 위에서 학습된 결과를 시각화 해보자
# +
# Evaluate on the training split.
train_loss, train_acc = model.evaluate(X_train,y_train,verbose=0)
print('Train Loss : {}'.format(train_loss))
print('Train Accuracy : {}'.format(train_acc))
# Evaluate on the validation split.
valid_loss, valid_acc = model.evaluate(X_valid,y_valid,verbose=0)
print('Validation Loss : {}'.format(valid_loss))
print('Validation Accuracy : {}'.format(valid_acc))
# +
# Plot training vs validation loss per epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(loss)+1)
plt.plot(epochs,loss,label='Training Loss')
plt.plot(epochs,val_loss,label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.show()
# +
# Plot training vs validation accuracy per epoch.
# NOTE(review): 'Accuarcy' is misspelled in the labels below (display-only).
acc = history.history['acc']
val_acc = history.history['val_acc']
epochs = range(1,len(loss)+1)
plt.plot(epochs,acc,label='Training Accuarcy')
plt.plot(epochs,val_acc,label='Validation Accuarcy')
plt.title('Training and Validation Accuarcy')
plt.xlabel('Epochs')
plt.ylabel('Accuarcy')
plt.legend()
plt.grid(True)
plt.show()
# -
# ### 신경망 모델 검증하기
#
# MNIST 데이터셋을 학습한 모델을 놀랍게도 정확도 99% 이상의 성능을 보인다.
# 그렇다면 이러한 신경망이 학습에 전혀 사용되지 않는 데이터인 Test 데이터셋에서는 어떤 결과를 보이는지 살펴보자
# Evaluate on the held-out test split.
test_loss, test_acc = model.evaluate(X_test,y_test,verbose=0)
# NOTE(review): the labels below say 'Validation' but these are TEST metrics.
print('Validation Loss : {}'.format(test_loss))
print('Validation Accuracy : {}'.format(test_acc))
# The test split performs similarly; now inspect which samples the
# network gets wrong.
# +
# Fix the random seed for reproducibility.
random.seed('intel')
# Predict labels for the test set.
predicted_result = model.predict(X_test)
predicted_labels = np.argmax(predicted_result, axis=1)
# Recover the integer ground-truth labels from the one-hot vectors.
test_labels = np.argmax(y_test, axis=1)
# Collect indices of misclassified samples.
wrong_result = []
for n in range(0, len(test_labels)):
    if predicted_labels[n] != test_labels[n]:
        wrong_result.append(n)
# Sample 16 of the misclassified indices.
samples = random.choices(population=wrong_result, k=16)
# Prepare the figure.
plt.figure(figsize=(12,8))
# Show the 16 misclassified images with true label vs prediction.
for count, n in enumerate(samples,start=1):
    plt.subplot(4, 4, count)
    plt.imshow(X_test[n].reshape(28, 28), cmap='Greys', interpolation='nearest')
    tmp = "Label:" + str(get_classlabel(test_labels[n])) + ", Prediction:" + str(get_classlabel(predicted_labels[n]))
    plt.title(tmp)
plt.tight_layout()
plt.savefig('./img/result/fashion_mnist_wrong_result.png')
plt.show()
# -
# ### 다음 예제에서는
#
# CNN을 통해서 MNIST와 같은 간단한 예제를 분류할 수 있었다. 다음 예제에서는 RGB 이미지를 한번 분류하는 예제를 더 깊은 CNN으로 수행해보도록 하자.
# ---
#
# ## 참고
#
# - Intel
# - https://www.intel.co.kr/
# - Intel OpenVINO
# - https://software.intel.com/en-us/openvino-toolkit
# - MNIST
# - http://yann.lecun.com/exdb/mnist/
# - https://pinkwink.kr/1121
# - CIFAR10
# - https://www.cs.toronto.edu/~kriz/cifar.html
# - ImageNet
# - http://www.image-net.org
# - Tensorflow
# - https://www.tensorflow.org/?hl=ko
# - Keras
# - https://keras.io/
# - https://tensorflow.blog/2019/03/06/tensorflow-2-0-keras-api-overview/
# - https://tykimos.github.io/2017/02/22/Integrating_Keras_and_TensorFlow/
# - https://tykimos.github.io/2017/03/08/CNN_Getting_Started/
# - https://raw.githubusercontent.com/keras-team/keras-docs-ko/master/sources/why-use-keras.md
# - Keras to Caffe
# - https://github.com/uhfband/keras2caffe
# - http://www.deepvisionconsulting.com/from-keras-to-caffe/
# - Fully Connected Layer
# - https://sonofgodcom.wordpress.com/2018/12/31/cnn%EC%9D%84-%EC%9D%B4%ED%95%B4%ED%95%B4%EB%B3%B4%EC%9E%90-fully-connected-layer%EB%8A%94-%EB%AD%94%EA%B0%80/
# - Convultional Nueral Network
# - http://aikorea.org/cs231n/convolutional-networks/
# - http://cs231n.stanford.edu/
# - CNN Models
# - https://ratsgo.github.io/deep%20learning/2017/10/09/CNNs/
#
# - VOC2012
# - https://blog.godatadriven.com/rod-keras-multi-label
# - https://gist.github.com/rragundez/ae3a17428bfec631d1b35dcdc6296a85#file-multi-label_classification_with_keras_imagedatagenerator-ipynbhttps://fairyonice.github.io/Part_5_Object_Detection_with_Yolo_using_VOC_2012_data_training.html
# - http://research.sualab.com/introduction/2017/11/29/image-recognition-overview-1.html
|
Lecture03 - Fashion MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lostapple9/Machine-Learning-1/blob/main/Natural_Language_Processing_with_RNNs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="K9tEz1rbS7uy"
# https://www.youtube.com/watch?v=tPYj3fFJGjk&list=LL&index=13&t=16948s&ab_channel=freeCodeCamp.org
#
# https://colab.research.google.com/drive/1ysEKrw_LE2jMndo1snrZUh5w87LQsCxk#forceEdit=true&sandboxMode=true&scrollTo=01BJLcGb4ZqK
#
# recurrent neural network (rnn)
# + colab={"base_uri": "https://localhost:8080/"} id="wpU9sLw83pAD" outputId="46dab440-a59d-42f2-8f3f-3b91d00a537d"
vocab = {}  # maps each word to the integer code assigned to it
word_encoding = 1  # next unused integer code


def bag_of_words(text):
    """Return a dict mapping word codes to occurrence counts for *text*.

    Splitting is on single spaces after lower-casing (no punctuation
    handling). New words are assigned the next integer code, which is
    recorded in the module-level ``vocab``.
    """
    global word_encoding
    bag = {}
    for word in text.lower().split(" "):
        code = vocab.get(word)
        if code is None:
            # First time we see this word: assign it the next code.
            code = word_encoding
            vocab[word] = code
            word_encoding += 1
        bag[code] = bag.get(code, 0) + 1
    return bag
# Demo: encode a sentence and show the resulting bag and vocabulary.
text = "this is a test to see if this test will work is is test a a"
bag = bag_of_words(text)
print(bag)    # encoding -> frequency
print(vocab)  # word -> encoding
# + id="PItrK646qwG5" colab={"base_uri": "https://localhost:8080/"} outputId="3725e16c-8a5c-403c-c241-b36aab0c7c52"
# Bag-of-words discards word order: these opposite-sentiment reviews
# contain the same words and therefore produce identical bags.
positive_review = "I thought the movie was going to be bad but it was actually amazing"
negative_review = "I thought the movie was going to be amazing but it was actually bad"
pos_bag = bag_of_words(positive_review)
neg_bag = bag_of_words(negative_review)
print("Positive:", pos_bag)
print("Negative:", neg_bag)
# + colab={"base_uri": "https://localhost:8080/"} id="_wjZwXGJ430V" outputId="5ddd38b2-aa76-4fcf-fa51-d131f782996c"
vocab = {}  # word -> integer code, rebuilt from scratch for this example
word_encoding = 1  # next unused integer code


def one_hot_encoding(text):
    """Return the list of integer codes for the words of *text*, in order.

    NOTE: despite the name, this produces plain integer (label) encoding,
    not one-hot vectors. New words get the next code and are stored in
    the module-level ``vocab``; splitting is on single spaces, lower-cased.
    """
    global word_encoding
    codes = []
    for word in text.lower().split(" "):
        if word not in vocab:
            vocab[word] = word_encoding
            word_encoding += 1
        codes.append(vocab[word])
    return codes
text = "this is a test to see if this test will work is is test a a"
encoding = one_hot_encoding(text)
print(encoding)
print(vocab)
# + id="MxDuNzxqqxob" colab={"base_uri": "https://localhost:8080/"} outputId="08b1a94c-f4ad-45a6-e1da-d7db94719100"
# Unlike bag-of-words, these sequences preserve word order, so the two
# opposite-sentiment reviews now produce different encodings.
positive_review = "I thought the movie was going to be bad but it was actually amazing"
negative_review = "I thought the movie was going to be amazing but it was actually bad"
pos_encode = one_hot_encoding(positive_review)
neg_encode = one_hot_encoding(negative_review)
print("Positive:", pos_encode)
print("Negative:", neg_encode)
# + id="QvaEKeBoqxuQ"
# + id="cdxcoDUNqxyj"
# + id="1WcT-TREqx1r" colab={"base_uri": "https://localhost:8080/"} outputId="ffead85d-9242-4102-c8b3-a05ce8454025"
# %tensorflow_version 2.x # this line is not required unless you are in a notebook
from keras.datasets import imdb
# NOTE(review): `keras.preprocessing.sequence` was reorganized in newer Keras
# releases (pad_sequences now lives under keras.utils) -- confirm against the
# installed Keras version.
from keras.preprocessing import sequence
import keras
import tensorflow as tf
import os
import numpy as np
VOCAB_SIZE = 88584   # keep every word in the IMDB index
MAXLEN = 250         # reviews are padded/truncated to this many tokens
BATCH_SIZE = 64
# Reviews arrive already integer-encoded (word ids); labels are 0/1 sentiment.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = VOCAB_SIZE)
# + id="OiHuebxsqx4f" colab={"base_uri": "https://localhost:8080/"} outputId="2dc4b28e-94d8-4f56-b4f5-d710fbddd6db"
# Lets look at one review -- raw reviews have variable length.
train_data[1]
len(train_data[1])
# + id="XGAyDb7Hqx7p" colab={"base_uri": "https://localhost:8080/"} outputId="6c94b473-d432-4f68-e842-5723d6fd0238"
# Pad (on the left, the Keras default) or truncate every review to MAXLEN
# so batches have a uniform shape.
train_data = sequence.pad_sequences(train_data, MAXLEN)
test_data = sequence.pad_sequences(test_data, MAXLEN)
len(train_data[1])
train_data
# + id="HRGjOJlOqx-T"
# Sentiment classifier: word embeddings -> single LSTM -> one sigmoid unit
# producing a score in [0, 1].
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(VOCAB_SIZE, 32))  # 32-dim word vectors
model.add(tf.keras.layers.LSTM(32))                   # keeps only the final hidden state
model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
# + id="F4TLdxeFqyBD" colab={"base_uri": "https://localhost:8080/"} outputId="da922e34-7a9c-491e-de8a-ef9b40d21153"
model.summary()
# + id="YWWjW0BwqyEM" colab={"base_uri": "https://localhost:8080/"} outputId="81be2682-2fcd-4c1a-dec7-61a4ee4c684e"
# Binary cross-entropy matches the single sigmoid output.
model.compile(loss="binary_crossentropy",optimizer="rmsprop",metrics=['acc'])
# Fixed: validation_split was 0.99, which held out 99% of the data and left
# only ~250 reviews for training. 0.2 (the conventional split, used by the
# original course material) trains on 80% and validates on 20%.
history = model.fit(train_data, train_labels, epochs=10, validation_split=0.2)
# + id="iReCTW8xqyG3" colab={"base_uri": "https://localhost:8080/"} outputId="553782d8-5de2-4279-8ccd-33d9f838f6b5"
# Evaluate on the held-out test set; returns [loss, accuracy].
results = model.evaluate(test_data, test_labels)
print(results)
# + id="cIWq1B3KqyJm" colab={"base_uri": "https://localhost:8080/"} outputId="2feaa23e-7984-4e58-b4a2-57a286acbd72"
word_index = imdb.get_word_index()
def encode_text(text):
    """Tokenize *text*, map each word to its IMDB index, and pad to MAXLEN.

    Unknown words map to 0 (which is also the padding id).
    NOTE(review): imdb.load_data offsets indices by 3 by default
    (index_from=3), which this raw word_index lookup does not account
    for -- the encodings here are shifted relative to the training data.
    Confirm whether this matters for the intended use.
    """
    tokens = keras.preprocessing.text.text_to_word_sequence(text)
    tokens = [word_index[word] if word in word_index else 0 for word in tokens]
    return sequence.pad_sequences([tokens], MAXLEN)[0]
text = "Death is the solution to all problems. No man - no problem"
encoded = encode_text(text)
print(encoded)
# + id="rbzX9j87rUS3" colab={"base_uri": "https://localhost:8080/"} outputId="47239cfb-a4bc-4cef-c20e-04d6033f2156"
# Inverse lookup (integer id -> word) so encodings can be turned back
# into readable text.
reverse_word_index = {value: key for (key, value) in word_index.items()}
def decode_integers(integers):
    """Convert a sequence of word ids back into a space-separated string,
    skipping padding entries (id 0)."""
    PAD = 0
    words = [reverse_word_index[num] for num in integers if num != PAD]
    return " ".join(words)
print(decode_integers(encoded))
# + id="wwXHae9drUXo" colab={"base_uri": "https://localhost:8080/"} outputId="652d58d0-b76f-4ca9-d13d-b43acaf24a5e"
# now time to make a prediction
def predict(text):
    """Encode *text* and print the model's sentiment score in [0, 1]."""
    encoded_text = encode_text(text)
    # Use MAXLEN instead of the magic number 250 so the buffer always
    # matches what encode_text pads to (they were duplicated constants).
    pred = np.zeros((1, MAXLEN))
    pred[0] = encoded_text
    result = model.predict(pred)
    print(result[0])
positive_review = "That movie was! really loved it and would great watch it again because it was amazingly great"
predict(positive_review)
negative_review = "that movie really sucked. I hated it and wouldn't watch it again. Was one of the worst things I've ever watched"
predict(negative_review)
# + id="vr6H5eddrUab"
# + [markdown] id="ky_BIswnCR3y"
# ##RNN Play Generator
#
# Now time for one of the coolest examples we've seen so far. We are going to use an RNN to generate a play. We will simply show the RNN an example of something we want it to recreate and it will learn how to write a version of it on its own. We'll do this using a character predictive model that will take as input a variable length sequence and predict the next character. We can use the model many times in a row with the output from the last prediction as the input for the next call to generate a sequence.
#
#
# *This guide is based on the following: https://www.tensorflow.org/tutorials/text/text_generation*
# + id="p3Wa5FXGrUdc"
# + id="KPM-AsB2rUgE" colab={"base_uri": "https://localhost:8080/"} outputId="156d2cab-b804-4532-8b6c-c7f876793a61"
# %tensorflow_version 2.x # this line is not required unless you are in a notebook
from keras.preprocessing import sequence
import keras
import tensorflow as tf
import os
import numpy as np
# + id="h3RFujxXrUio" colab={"base_uri": "https://localhost:8080/"} outputId="6ff83a6c-14f5-4842-a81d-5b3190d3d6e6"
# Download the Shakespeare corpus used by the TF text-generation tutorial.
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
# + id="dCGSO5rGBjZ3"
# Colab-only: lets the user upload their own text file instead.
# NOTE: this cell overwrites path_to_file, discarding the Shakespeare
# download above; skip it to train on Shakespeare.
from google.colab import files
path_to_file = list(files.upload().keys())[0]
# + colab={"base_uri": "https://localhost:8080/"} id="guhemVXeBjfE" outputId="8bb65782-7838-426c-9488-477d8fc7ecce"
# Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print ('Length of text: {} characters'.format(len(text)))
# + colab={"base_uri": "https://localhost:8080/"} id="o3a7fyOEBjm0" outputId="e5a9bf63-76b8-4a22-a32e-9cbf2edf7d24"
# Take a look at the first 250 characters in text
print(text[:250])
# + id="Mi5RrrF5Bjp7"
# Character-level vocabulary: every distinct character in the corpus,
# in sorted order so the mapping is deterministic.
vocab = sorted(set(text))
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)  # inverse mapping: index -> character
def text_to_int(text):
    """Encode a string as a NumPy array of character indices."""
    return np.array([char2idx[c] for c in text])
text_as_int = text_to_int(text)
# + colab={"base_uri": "https://localhost:8080/"} id="XOvf-EufBjxP" outputId="cb53e70d-9a38-44a9-fd61-24c04e9346d7"
# lets look at how part of our text is encoded
print("Text:", text[:13])
print("Encoded:", text_to_int(text[:13]))
# + colab={"base_uri": "https://localhost:8080/"} id="u7ASfxe8Bj0P" outputId="66fb774a-ba09-4f81-dbce-c29aec1df22e"
def int_to_text(ints):
    """Decode an array (or TF tensor) of character indices back to a string."""
    try:
        # TF tensors expose .numpy(); plain arrays/lists pass through.
        ints = ints.numpy()
    except AttributeError:
        # Fixed: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated bugs. Only the
        # "no .numpy() method" case should be ignored.
        pass
    return ''.join(idx2char[ints])
# Round-trip check: decode the first 13 encoded characters.
print(int_to_text(text_as_int[:13]))
# + id="u40aBxQLBplw"
seq_length = 100 # length of sequence for a training example
# Each example consumes seq_length+1 characters (input + shifted target).
examples_per_epoch = len(text)//(seq_length+1)
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
# + id="TRu4IsQPBppN"
# Group the character stream into fixed-size chunks, dropping the remainder
# so every chunk has exactly seq_length+1 characters.
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
# + id="TbWxOlCcBps3"
def split_input_target(chunk):
    """Split one (seq_length+1)-character chunk into aligned input and
    target sequences, the target being the input shifted one step ahead
    (e.g. "hello" -> ("hell", "ello"))."""
    return chunk[:-1], chunk[1:]
dataset = sequences.map(split_input_target) # we use map to apply the above function to every entry
# + colab={"base_uri": "https://localhost:8080/"} id="PyctR9EUBpvx" outputId="e61036dc-5098-4bbb-dd4c-cedf7bffdc49"
# Sanity check: print two (input, target) example pairs as readable text.
for x, y in dataset.take(2):
    print("\n\nEXAMPLE\n")
    print("INPUT")
    print(int_to_text(x))
    print("\nOUTPUT")
    print(int_to_text(y))
# + id="11fUglpdBpys"
# Model/pipeline hyperparameters for the character RNN.
BATCH_SIZE = 64
VOCAB_SIZE = len(vocab) # vocab is number of unique characters
EMBEDDING_DIM = 256
RNN_UNITS = 1024
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
# drop_remainder keeps a constant batch size, required by the stateful LSTM.
data = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# + colab={"base_uri": "https://localhost:8080/"} id="IxDEUn6QBu1G" outputId="342ee2bf-a232-4d36-9546-4d485b27c07b"
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Build the character-level generator: Embedding -> LSTM -> logits.

    The final Dense layer outputs raw (unnormalized) logits over the
    vocabulary -- no softmax -- so the loss must use from_logits=True.
    Because the LSTM is stateful and batch_input_shape is fixed, the model
    is tied to `batch_size`; it is rebuilt later with batch_size=1 for
    generation and the trained weights are reloaded.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  batch_input_shape=[batch_size, None]),
        tf.keras.layers.LSTM(rnn_units,
                             return_sequences=True,  # a prediction per timestep
                             stateful=True,
                             recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model
model = build_model(VOCAB_SIZE,EMBEDDING_DIM, RNN_UNITS, BATCH_SIZE)
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="0EVPi3ZtBu3v" outputId="de4091c2-97a1-41b7-bb9e-61b178777a33"
# Run one untrained forward pass to inspect the output shapes.
for input_example_batch, target_example_batch in data.take(1):
    example_batch_predictions = model(input_example_batch) # ask our model for a prediction on our first batch of training data (64 entries)
    print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)") # print out the output shape
# + colab={"base_uri": "https://localhost:8080/"} id="z0djzCg5Bu6c" outputId="4d29a0c4-cad4-4e68-a202-4067913995ac"
# we can see that the prediction is an array of 64 arrays, one for each entry in the batch
print(len(example_batch_predictions))
print(example_batch_predictions)
# + colab={"base_uri": "https://localhost:8080/"} id="6PYVsXz6Bu87" outputId="1790344a-04df-4516-ee22-62f736534cdc"
# lets examine one prediction
pred = example_batch_predictions[0]
print(len(pred))
print(pred)
# notice this is a 2d array of length 100, where each interior array is the prediction for the next character at each time step
# + colab={"base_uri": "https://localhost:8080/"} id="6okZMQcxBvBY" outputId="a9b87bd3-9497-465f-f9ce-bd7dd94fb002"
# and finally well look at a prediction at the first timestep
time_pred = pred[0]
print(len(time_pred))
print(time_pred)
# these are 65 unnormalized logits (the Dense layer has no softmax), one
# score per character in the vocabulary for the next position
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="JAu0ntRvBvEh" outputId="7f32fda9-82b4-41a8-8a8a-a1f664cd43b1"
# If we want to determine the predicted character we need to sample the output distribution (pick a value based on probability)
sampled_indices = tf.random.categorical(pred, num_samples=1)
# now we can reshape that array and convert all the integers to numbers to see the actual characters
sampled_indices = np.reshape(sampled_indices, (1, -1))[0]
predicted_chars = int_to_text(sampled_indices)
predicted_chars # and this is what the model predicted for training sequence 1
# + id="v0UP3XchBvHO"
def loss(labels, logits):
    """Sparse categorical cross-entropy on raw logits.

    The model's final Dense layer applies no softmax, so from_logits=True
    lets the loss normalize internally.
    """
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
# + id="VJ9cXuxGBvKE"
model.compile(optimizer='adam', loss=loss)
# + id="jakLcSKTBvM1"
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
# Save weights only (not the full model) after every epoch; the epoch
# number is substituted into the filename template above.
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)
# + colab={"base_uri": "https://localhost:8080/"} id="CY1AiPx9BvPs" outputId="c4f35577-2a20-4f32-8667-8c2ef6fdeb2f"
history = model.fit(data, epochs=50, callbacks=[checkpoint_callback])
# + id="EdaE93LHB6ax"
# Rebuild the generator with batch_size=1 so we can feed a single sequence
# at generation time (the stateful training model was fixed at batch 64).
model = build_model(VOCAB_SIZE, EMBEDDING_DIM, RNN_UNITS, batch_size=1)
# + id="hAAA91yyB6gS"
# Restore the most recent checkpoint's weights.
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
# + id="kcadExCSB6ll"
# Alternatively, restore a specific epoch's checkpoint.
checkpoint_num = 10
# Fixed: load_weights expects a checkpoint filepath prefix. The original
# wrapped it in tf.train.load_checkpoint(...), which returns a
# CheckpointReader object and makes load_weights fail.
model.load_weights("./training_checkpoints/ckpt_" + str(checkpoint_num))
model.build(tf.TensorShape([1, None]))
# + id="ag4rpt_1B6q0"
def generate_text(model, start_string):
    """Autoregressively sample `num_generate` characters from the model,
    seeded with *start_string*, and return seed + generated text.

    Requires the batch_size=1 model built above. The LSTM's hidden state
    carries context between iterations, so only the newest character is
    fed back in at each step.
    """
    # Evaluation step (generating text using the learned model)
    # Number of characters to generate
    num_generate = 800
    # Converting our start string to numbers (vectorizing)
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)  # add the batch dimension
    # Empty string to store our results
    text_generated = []
    # Low temperatures results in more predictable text.
    # Higher temperatures results in more surprising text.
    # Experiment to find the best setting.
    temperature = 1.0
    # Here batch size == 1
    # NOTE(review): reset_states() was renamed/removed in Keras 3 -- confirm
    # against the installed Keras version.
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)
        # using a categorical distribution to predict the character returned by the model
        # (dividing logits by temperature sharpens/flattens the distribution)
        predictions = predictions / temperature
        # [-1,0]: sample from the logits of the LAST timestep only
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
        # We pass the predicted character as the next input to the model
        # along with the previous hidden state
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2char[predicted_id])
    return (start_string + ''.join(text_generated))
# + colab={"base_uri": "https://localhost:8080/"} id="nQSU1fVuB6wD" outputId="d8e9bf8f-a975-48d6-f82c-4ffb449bd1d2"
inp = input("Type a starting string: ")
print(generate_text(model, inp))
|
Natural_Language_Processing_with_RNNs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="88e8ec6d-76e5-5f12-d9a6-2b40017a2b99"
# Here we use Python to visualize how certain machine learning algorithms classify certain data points in the Iris dataset. Let's begin by importing the Iris dataset and splitting it into features and labels. We will use only the petal length and width for this analysis.
#
# These visualizations and their code can be found in <NAME>'s book, Python Machine Learning.
# + _cell_guid="6443d6db-6440-5d93-d37c-4fde4c428ca5"
# Import data and modules
import pandas as pd
import numpy as np
from sklearn import datasets
# NOTE(review): %pylab is a deprecated IPython magic (bulk namespace dump);
# explicit matplotlib imports are preferred in modern notebooks.
# %pylab inline
pylab.rcParams['figure.figsize'] = (10, 6)
iris = datasets.load_iris()
# We'll use the petal length and width only for this analysis
X = iris.data[:, [2, 3]]   # columns 2, 3 = petal length, petal width
y = iris.target            # class labels 0, 1, 2
# Place the iris data into a pandas dataframe
iris_df = pd.DataFrame(iris.data[:, [2, 3]], columns=iris.feature_names[2:])
# View the first 5 rows of the data
print(iris_df.head())
# Print the unique labels of the dataset
print('\n' + 'The unique labels in this data are ' + str(np.unique(y)))
# + [markdown] _cell_guid="eeaf0cb8-4c35-7422-1e08-345a25ed309a"
# Next, we'll split the data into training and test datasets.
# -----------------------------------------------------------
# + _cell_guid="8393598e-1344-0e22-43fc-fdda6c75c08a"
from sklearn.model_selection import train_test_split
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
print('There are {} samples in the training set and {} samples in the test set'.format(
X_train.shape[0], X_test.shape[0]))
print()
# + [markdown] _cell_guid="34a39b41-96cc-b560-c966-ddab1e67f65c"
# For many machine learning algorithms, it is important to scale the data. Let's do that now using sklearn.
# + _cell_guid="5867b347-4875-1e21-4313-633873a56915"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training data only, then apply the same transform
# to both sets -- avoids leaking test-set statistics into training.
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
print('After standardizing our features, the first 5 rows of our data now look like this:\n')
print(pd.DataFrame(X_train_std, columns=iris_df.columns).head())
# + [markdown] _cell_guid="dc68363a-7def-d618-e190-0ddd26324bf2"
# If we plot the original data, we can see that one of the classes is linearly separable, but the other two are not.
# + _cell_guid="6f3d2990-5c6b-8ec3-4a38-63fe8ca3d241"
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
# One marker/color per class.
markers = ('s', 'x', 'o')
colors = ('red', 'blue', 'lightgreen')
cmap = ListedColormap(colors[:len(np.unique(y_test))])
# Scatter the (unscaled) samples, one plotted group per class label.
for idx, cl in enumerate(np.unique(y)):
    plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                c=cmap(idx), marker=markers[idx], label=cl)
# + [markdown] _cell_guid="09b4dc98-b068-b70c-bf35-77e82b2adc8b"
# Let's try to use an SVC with an RBF kernel to predict the labels of our test data.
# + _cell_guid="7e8e8442-ae96-b764-8ef6-67add2fa2ede"
from sklearn.svm import SVC
# RBF-kernel SVM on the standardized features (note: not a linear SVC).
svm = SVC(kernel='rbf', random_state=0, gamma=.10, C=1.0)
svm.fit(X_train_std, y_train)
print('The accuracy of the svm classifier on training data is {:.2f} out of 1'.format(svm.score(X_train_std, y_train)))
print('The accuracy of the svm classifier on test data is {:.2f} out of 1'.format(svm.score(X_test_std, y_test)))
# + [markdown] _cell_guid="b76090ca-e371-6ac7-8b32-d5b404328849"
# It looks like our classifier performs pretty well. Let's visualize how the model classified the samples in our test data.
#
# + _cell_guid="1b61e350-768e-a4e7-18df-6a31d617a974"
import warnings
def versiontuple(v):
    """Parse a dotted version string such as "1.2.3" into a tuple of ints,
    suitable for lexicographic version comparison."""
    return tuple(int(part) for part in v.split("."))
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Shade the classifier's decision regions over the 2-feature plane
    and overlay the sample points, one marker/color per class."""
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Build a dense grid covering the feature space with a 1-unit margin.
    f1_lo, f1_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
    f2_lo, f2_hi = X[:, 1].min() - 1, X[:, 1].max() + 1
    grid1, grid2 = np.meshgrid(np.arange(f1_lo, f1_hi, resolution),
                               np.arange(f2_lo, f2_hi, resolution))

    # Classify every grid point and shade the regions by predicted class.
    preds = classifier.predict(np.array([grid1.ravel(), grid2.ravel()]).T)
    preds = preds.reshape(grid1.shape)
    plt.contourf(grid1, grid2, preds, alpha=0.4, cmap=cmap)
    plt.xlim(grid1.min(), grid1.max())
    plt.ylim(grid2.min(), grid2.max())

    # Overlay the actual samples on top of the shaded regions.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
# + _cell_guid="78cfec64-2e2a-79d4-017d-e30725060f0b"
# Visualize the SVM's decision regions on the (standardized) test set.
plot_decision_regions(X_test_std, y_test, svm)
# + [markdown] _cell_guid="090509b2-66bd-0916-571c-4224d55ffa41"
# Now, let's test out a KNN classifier.
# + _cell_guid="abd39cb6-86d8-32b6-6108-5cf69f5a4470"
from sklearn.neighbors import KNeighborsClassifier
# p=2 with the Minkowski metric is ordinary Euclidean distance.
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
print('The accuracy of the knn classifier is {:.2f} out of 1 on training data'.format(knn.score(X_train_std, y_train)))
print('The accuracy of the knn classifier is {:.2f} out of 1 on test data'.format(knn.score(X_test_std, y_test)))
# + _cell_guid="0e91c717-6928-e355-e15a-20b4e62ccec7"
plot_decision_regions(X_test_std, y_test, knn)
# + [markdown] _cell_guid="286d43b6-5fe4-da78-41e6-03e4e390d435"
# And just for fun, we'll plot an XGBoost classifier.
# + _cell_guid="66ef8e17-46e6-5c33-131e-0444cf3acb56"
import xgboost as xgb
# Default-parameter gradient-boosted trees for comparison.
xgb_clf = xgb.XGBClassifier()
xgb_clf = xgb_clf.fit(X_train_std, y_train)
print('The accuracy of the xgb classifier is {:.2f} out of 1 on training data'.format(xgb_clf.score(X_train_std, y_train)))
print('The accuracy of the xgb classifier is {:.2f} out of 1 on test data'.format(xgb_clf.score(X_test_std, y_test)))
# + _cell_guid="40bf092b-3043-025f-b478-d14b760b58b8"
plot_decision_regions(X_test_std, y_test, xgb_clf)
# + [markdown] _cell_guid="05bffe84-c21b-11b1-0a02-30100d339404"
# In all classifiers, the performance on the test data was better than the training data. At least with the parameters specified in this very simple approach, the KNN algorithm seems to have performed the best. However, this may not be the case depending on the dataset and more careful parameter tuning.
# -
|
Iris Dataset/Visualizing KNN, SVM, and XGBoost on Iris Dataset/Visualizing KNN, SVM, and XGBoost on Iris Dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function
# %matplotlib inline
import numpy as np
from scipy import stats
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
import seaborn as sns
from patsy import dmatrices
import os
sns.set_style('whitegrid')
# Step-2. Import data
# NOTE: machine-specific path -- adjust before running elsewhere.
os.chdir('/Users/pauline/Documents/Python')
df = pd.read_csv("Tab-Morph.csv")
df = df.dropna()
# NOTE(review): nsample is hard-coded; it must equal the number of rows
# remaining in df after dropna() or the array shapes below will not match.
nsample = 25
#x = np.linspace(0, 25, nsample)
x = df.sedim_thick
# Design matrix: intercept, x, and a quadratic term (the quadratic column
# is discarded again below).
X = np.column_stack((x, (x - 5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, -0.01]
sig = 0.5
# Heteroscedastic noise: the last 40% of samples get 3x the noise scale --
# this is what WLS is meant to handle better than OLS.
w = np.ones(nsample)
w[nsample * 6//10:] = 3
y_true = np.dot(X, beta)
# NOTE(review): no RNG seed is set, so results differ on every run.
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
# Keep only (const, x); drop the quadratic column from the fit.
X = X[:,[0,1]]
# Step-3. Weighted least squares: weights are inverse noise variances.
mod_wls = sm.WLS(y, X, weights=1./(w ** 2))
res_wls = mod_wls.fit()
print(res_wls.summary())
# Step-4. Plain OLS on the same data, for comparison.
res_ols = sm.OLS(y, X).fit()
print(res_ols.params)
print(res_wls.params)
# Step-5. Compare WLS standard errors with OLS and the
# heteroscedasticity-robust (HC0-HC3) OLS standard errors.
se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se],
                [res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])
se = np.round(se,4)
# Fixed: columns are (const, x1) -- sm.add_constant prepends the intercept,
# so bse is ordered [const, x1]; the labels were swapped.
colnames = ['const', 'x1']
# Fixed: the fifth row is HC2 (it was mislabeled 'OLS_HC3', which then
# appeared twice while HC2 was missing).
rownames = ['WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC2', 'OLS_HC3']
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print(tabl)
# Step-6. Manual prediction-variance computation for the OLS fit.
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb,X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
# 97.5% t quantile for two-sided 95% intervals.
# NOTE(review): tppf (and prediction_std above) are computed but never used
# below -- wls_prediction_std recomputes the intervals internally.
tppf = stats.t.ppf(0.975, res_ols.df_resid)
# Step-7. OLS prediction std and lower/upper 95% interval bounds.
prstd_ols, iv_l_ols, iv_u_ols = wls_prediction_std(res_ols)
# Step-8. Same for WLS, then plot everything together.
prstd, iv_l, iv_u = wls_prediction_std(res_wls)
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(x, y, 'o', label="Bathymetric \nObservations", linewidth=.7, c='#0095d9')
ax.plot(x, y_true, '-', c='#1e50a2', label="True", linewidth=.9)
# OLS
ax.plot(x, res_ols.fittedvalues, 'r--', linewidth=.7)
ax.plot(x, iv_u_ols, 'r--', label="Ordinary Least Squares", linewidth=.7)
ax.plot(x, iv_l_ols, 'r--', linewidth=.7)
# WLS
ax.plot(x, res_wls.fittedvalues, '--.', c='#65318e', linewidth=.7, )
ax.plot(x, iv_u, '--', c='#65318e', label="Weighted Least Squares", linewidth=.7)
ax.plot(x, iv_l, '--', c='#65318e', linewidth=.7)
ax.legend(loc="best");
ax.set_xlabel('Sediment thickness, m', fontsize=10)
plt.title("Weighted Least Squares \nof sediment thickness at Mariana Trench by 25 bathymetric profiles", fontsize=14)
plt.annotate('D', xy=(-0.01, 1.06), xycoords="axes fraction", fontsize=18,
             bbox=dict(boxstyle='round, pad=0.3', fc='w', edgecolor='grey', linewidth=1, alpha=0.9))
plt.show()
|
Script-030d-SM-WLS-sedim_thickness.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Operators
# Operators are special symbols in Python that carry out arithmetic or logical computation. The value that the operator operates on is called the operand.
# # Operator Types
# 1. Arithmetic operators
#
# 2. Comparison (Relational) operators
#
# 3. Logical (Boolean) operators
#
# 4. Bitwise operators
#
# 5. Assignment operators
#
# 6. Special operators
# # Arithmetic Operators
# Arithmetic operators are used to perform mathematical operations like addition, subtraction, multiplication etc.
# + , -, *, /, %, //, ** are arithmetic operators
# Example:
# +
# Demo values for the arithmetic-operator examples.
x, y = 10, 20
#addition
print(x + y)  # -> 30
#subtraction(-)
#multiplication(*)
#division(/) -- true division; always returns a float in Python 3
#modulo division (%)
#Floor Division (//)
#Exponent (**)
# -
# # Comparison Operators
# Comparison operators are used to compare values. It either returns True or False according to the condition.
# >, <, ==, !=, >=, <= are comparison operators
# +
# Demo values for the comparison-operator examples.
a, b = 10, 20
print(a < b) #check a is less than b  -> True
#check a is greater than b
#check a is equal to b
#check a is not equal to b (!=)
#check a greater than or equal to b
#check a less than or equal to b
# -
# # Logical Operators
# Logical operators are **and, or, not** operators.
# +
# Demo values for the logical (boolean) operators.
a, b = True, False
#print a and b (short-circuits: b is only evaluated if a is truthy)
print(a and b)  # -> False
#print a or b
#print not b
# -
# # Bitwise operators
# Bitwise operators act on operands as if they were string of binary digits. It operates bit by bit
# &, |, ~, ^, >>, << are Bitwise operators
# +
# Demo values: a = 10 (0b1010), b = 4 (0b0100).
a, b = 10, 4
#Bitwise AND
print(a & b)  # -> 0 (no bits in common)
#Bitwise OR
#Bitwise NOT
#Bitwise XOR
#Bitwise rightshift
#Bitwise Leftshift
# -
# # Assignment operators
# Assignment operators are used in Python to assign values to variables.
#
# a = 5 is a simple assignment operator that assigns the value 5 on the right to the variable a on the left.
# =, +=, -=, *=, /=, %=, //=, **=, &=, |=, ^=, >>=, <<= are Assignment operators
# +
a = 10
a += 10 #add AND: equivalent to a = a + 10
print(a)  # -> 20
#subtract AND (-=)
#Multiply AND (*=)
#Divide AND (/=)
#Modulus AND (%=)
#Floor Division (//=)
#Exponent AND (**=)
# -
# # Special Operators
# # Identity Operators
# **is and is not** are the identity operators in Python.
#
# They are used to check if two values (or variables) are located on the same part of the memory.
# +
a = 5
b = 5
# NOTE: this prints True because CPython caches small integers, so both
# names point to the same object -- an implementation detail, not a
# language guarantee. Use == for value comparison.
print(a is b) #5 is object created once both a and b points to same object
#check is not
# -
# Equal lists are distinct objects, so identity comparison is False.
l1 = [1, 2, 3]
l2 = [1, 2, 3]
print(l1 is l2)
# NOTE: whether equal string literals share one object depends on CPython
# string interning -- also an implementation detail.
s1 = "Satish"
s2 = "Satish"
print(s1 is not s2)
# # MemberShip Operators
# **in and not in** are the membership operators in Python.
#
# They are used to test whether a value or variable is found in a sequence (string, list, tuple, set and dictionary).
# +
lst = [1, 2, 3, 4]
print(1 in lst) #check 1 is present in a given list or not
#check 5 is present in a given list
# -
# On a dict, `in` tests membership among the KEYS, not the values.
d = {1: "a", 2: "b"}
print(1 in d)
|
Python Programs for YouTube/1_Introduction/.ipynb_checkpoints/5_operators-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <!-- <figure>
# <img src="https://raw.githubusercontent.com/SalAlba/machine-learning/master/data/images/work-in-progress.png" width="300" height="300"/>
# </figure> -->
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import pandas as pd
import os
import time
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set()
# Ignore useless warnings (see SciPy issue #5998)
# import warnings
# warnings.filterwarnings(action="ignore", message="^internal gelsd")
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import recall_score, precision_score, f1_score
import imblearn
# -
# Paths ...
PROJECT_ROOT_DIR = os.getcwd()
PATH_TO_DATA = '../../data' # TODO make cross
# # Imbalanced datasets
#
#
# ## Table of content
# 1. [What is imbalanced dataset](#imbalanced-dataset)
# 1. [How Know if data not balanced](#)
# 2. [Side effects](#)
# 3. [Can we learn from imbalanced data](#)
# 4. [What to do ?](#)
# 2. [Oversampling](#oversampling)
# 1. [solutions](#)
# 2. [limitations](#)
# 3. [Undersampling](#undersampling)
# 1. [solutions](#)
# 2. [limitations](#)
# 4. [Combine Oversampling and Undersampling](#)
# 5. [In practice](#in-practice)
# 6. [Summary](#summary)
# 7. [What Next ?](#what-next)
# 8. [Resources](#resources)
# ## Imbalanced dataset
# <a id="imbalanced-dataset"></a>
#
# **Imbalanced dataset** when number of samples which belong to one class dominate the whole dataset.
#
# > " A dataset is imbalanced if at least one of the classes constitutes only a very small minority. " [[3.3.]](#resources)
#
# <!--  -->
# <figure>
# <figcaption>img. src [4.1.]</figcaption>
# <img src="./images/imbalanced-data.png" width="400" height="400" />
# </figure>
#
#
# 1. Does imbalanced dataset appear just in binary dataset? No.
# 2. Does imbalanced dataset appear in multi label dataset? Yes.
#
# > Imbalanced data prevail in banking, insurance, engineering, and many other fields. It is common in fraud detection that the imbalance is on the order of 100 to 1.
#
# ### How Know if data not balanced
# 1. count number of samples in every class.
# 2. make bar plot where the length encode number of samples per class.
# 3. make scatter plot of data, select two features from dataset color of every sample represent to which class belong.
# 4. ...
#
# ### Side effects
# Imbalanced dataset can cause bad side effects on ML algo. like neural networks, SVM etc.
#
# 1. ML will ignore the minority class entirely.
# 2. When we split data into train/test could be a big chance to ignore the minority class samples in training data.
# 3. ...
#
#
# The learning phase and the subsequent prediction of machine learning algorithms can be affected by the problem of imbalanced data set. The balancing issue corresponds to the difference of the number of samples in the different classes. We illustrate the effect of training a linear SVM classifier with different level of class balancing.[[3.2.]](#resources)
#
# <figure>
# <figcaption>img. src [4.2.]</figcaption>
# <img src="./images/sphx_glr_plot_comparison_over_sampling_0011.png"/>
# </figure>
#
#
#
#
# ### Can we learn from imbalanced data
# > TODO ...
#
# ### What to do ?
# 1. Undersampling.
# 2. Oversampling.
# 3. Combination of over-under sampling.
# 4. ...
#
# **Oversampling and undersampling** a set of techniques used to adjust the ratio of samples in dataset. [[3.1.]](#resources)
#
# > [Imbalanced learn](https://imbalanced-learn.readthedocs.io/en/stable/index.html)
#
# > TODO ...
#
# <figure>
# <figcaption>img. src [4.3.]</figcaption>
# <img src="./images/oversampling-undersampling.png"/>
# </figure>
#
# +
# Make imbalanced dataset
# weights=[0.99] puts 99% of samples in class 0, giving a ~99:1 imbalance;
# flip_y=0 disables label noise and random_state fixes the draw.
X, y = make_classification(
    n_samples=10000,
    n_features=2,
    n_redundant=0,
    n_clusters_per_class=1,
    weights=[0.99],
    flip_y=0,
    random_state=1
)
# Index arrays for the majority (0) and minority (1) classes.
idx0 = np.where(y == 0)[0]
idx1 = np.where(y == 1)[0]
# Make imbalanced data frame
df = pd.DataFrame({
    'f1': X[:,0],
    'f2': X[:,1],
    'y': y,
})
df.head(5)
# -
# Two ways to confirm the class imbalance.
df.y.value_counts()
df.groupby('y').count()
# +
# Bar plot of class frequencies.
# NOTE(review): value_counts() orders by frequency, so the bars match the
# 0/1 tick labels only while class 0 is the majority -- confirm if reused.
plt.figure(figsize=(8, 4), dpi=150)
plt.bar([0, 1], df.y.value_counts())
plt.title('Imbalanced data y=Counter({0:99%, 1:1%})')
plt.xticks([0, 1], ('0', '1'))
plt.show();
# +
# scatter plot of the two features, colored by class
plt.figure(figsize=(8, 4), dpi=150)
plt.plot(X[idx0, 0], X[idx0, 1], '.', label='0')
plt.plot(X[idx1, 0], X[idx1, 1], '+', label='1')
plt.title('Imbalanced data with skew (99:1)')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.legend()
plt.grid()
plt.show();
# -
# ## Oversampling
#
# Generate new samples in the classes which are under-represented (Majority), the most naive strategy is to generate new samples randomly.
#
# 1. Duplicating examples in the minority class.
# 2. Naive random over-sampling.
# 3. Synthetic Minority Oversampling Technique (SMOTE)
# 4. Adaptive Synthetic (ADASYN)
# 5. ...
#
# > ADASYN focuses on generating samples next to the original samples which are wrongly classified using a k-Nearest Neighbors classifier [[3.4.]](#resources)
#
# > SMOTE first selects a minority class instance a at random and finds its k nearest minority class neighbors. The synthetic instance is then created by choosing one of the k nearest neighbors b at random and connecting a and b to form a line segment in the feature space. The synthetic instances are generated as a convex combination of the two chosen instances a and b. [[3.4.]](#resources) [[3.5]](#resources)
# ## Undersampling
#
# Remove samples in the classes which are over-represented (Majority), the most naive strategy is randomly removing.
#
# 1. Near Miss Undersampling.
# 2. Condensed Nearest Neighbor Rule Undersampling.
# 3. ...
#
#
# > NearMiss-1 selects the positive samples for which the average distance to the N closest samples of the negative class is the smallest. [[3.4.]](#resources)
#
# <figure>
# <!-- <figcaption>img. src [4.3.]</figcaption> -->
# <img src="./images/NearMiss-1.png"/>
# </figure>
#
#
#
# > NearMiss-2 selects the positive samples for which the average distance to the N farthest samples of the negative class is the smallest. [[3.4.]](#resources)
#
# <figure>
# <!-- <figcaption>img. src [4.3.]</figcaption> -->
# <img src="./images/NearMiss-2.png"/>
# </figure>
#
# > NearMiss-3 is a 2-steps algorithm. First, for each negative sample, their M nearest-neighbors will be kept. Then, the positive samples selected are the one for which the average distance to the N nearest-neighbors is the largest. [[3.4.]](#resources)
#
#
# <figure>
# <!-- <figcaption>img. src [4.3.]</figcaption> -->
# <img src="./images/NearMiss-3.png"/>
# </figure>
# ## Combine Oversampling and Undersampling
#
# > TODO ...
# ## In practice
# <a id="in-practice"></a>
# +
def split_data(X_, y_):
    """Split features/labels into train and test sets (33% test, fixed seed)."""
    return train_test_split(X_, y_, test_size=0.33, random_state=42)
def get_index(ytrain, ytest):
    """Locate class-0 / class-1 sample positions in the train and test labels.

    Prints a short class-balance summary, then returns the four index arrays:
    (train class-0, train class-1, test class-0, test class-1).
    """
    tr0 = np.where(ytrain == 0)[0]
    tr1 = np.where(ytrain == 1)[0]
    te0 = np.where(ytest == 0)[0]
    te1 = np.where(ytest == 1)[0]
    # Quick textual check of how skewed each split is.
    print('Train set = {}, class_0 = {}, class_1 = {}'.format(
        len(ytrain), len(ytrain[tr0]), len(ytrain[tr1])
    ))
    print('Test set = {}, class_0 = {}, class_1 = {}'.format(
        len(ytest), len(ytest[te0]), len(ytest[te1])
    ))
    return tr0, tr1, te0, te1
def plot_dataset(xtrain, xtest, ytrain, ytest, title='Dataset'):
    """Scatter-plot both classes of the train and test splits in 2-D feature space.

    One marker style per (split, class) combination: '.'/'+' for train,
    'o'/'*' for test.
    """
    tr0, tr1, te0, te1 = get_index(ytrain, ytest)
    plt.figure(figsize=(8, 4), dpi=150)
    plt.plot(xtrain[tr0, 0], xtrain[tr0, 1], '.', label='0')
    plt.plot(xtrain[tr1, 0], xtrain[tr1, 1], '+', label='1')
    plt.plot(xtest[te0, 0], xtest[te0, 1], 'o', label='00')
    plt.plot(xtest[te1, 0], xtest[te1, 1], '*', label='11')
    plt.title(title)
    plt.xlabel('feature 1')
    plt.ylabel('feature 2')
    plt.legend()
    plt.grid()
    plt.show();
def fit_pipes(data, clf):
    """Fit three classifier pipelines on a train split and score both splits.

    Arguments:
        data: tuple ``(xtrain, xtest, ytrain, ytest)``.
        clf: tuple of three sklearn pipelines in the order
            ``(RandomForest, LogisticRegression, KNeighbors)``.

    Returns:
        ``pd.DataFrame`` with one row per classifier holding accuracy,
        precision, recall and F1 for the train and test splits.
    """
    rfc_pipe, lr_pipe, knc_pipe = clf
    xtrain, xtest, ytrain, ytest = data
    # BUG FIX: the pipelines were previously fit on the *global*
    # (x_train_imb, y_train_imb) instead of the split passed in, so the
    # NearMiss/SMOTE experiments silently trained on the imbalanced data.
    rfc_pipe.fit(xtrain, ytrain)
    lr_pipe.fit(xtrain, ytrain)
    knc_pipe.fit(xtrain, ytrain)
    scores = {
        'classifier': [],
        'train_acc': [],
        'test_acc': [],
        'precision_train': [],
        'recall_train': [],
        'precision_test': [],
        'recall_test': [],
        'f1_train': [],
        'f1_test': [],
    }
    # Loop variable renamed from `clf` to avoid shadowing the parameter.
    for name, model in zip(
            ['RandomForestClassifier', 'LogisticRegression', 'KNeighborsClassifier'],
            [rfc_pipe, lr_pipe, knc_pipe]):
        # Predict once per split instead of once per metric.
        pred_train = model.predict(xtrain)
        pred_test = model.predict(xtest)
        scores['classifier'].append(name)
        scores['train_acc'].append(round(model.score(xtrain, ytrain), 3))
        scores['test_acc'].append(round(model.score(xtest, ytest), 3))
        scores['precision_train'].append(round(precision_score(ytrain, pred_train), 3))
        scores['recall_train'].append(round(recall_score(ytrain, pred_train), 3))
        scores['f1_train'].append(round(f1_score(ytrain, pred_train), 3))
        scores['precision_test'].append(round(precision_score(ytest, pred_test), 3))
        scores['recall_test'].append(round(recall_score(ytest, pred_test), 3))
        scores['f1_test'].append(round(f1_score(ytest, pred_test), 3))
    return pd.DataFrame(scores)
# -
# ### Imbalanced dataset
# Summary statistics of the dataset; `df`, `X`, `y` are presumably created in
# an earlier cell (not shown here) — confirm against the full notebook.
df.describe()
# +
# Hold out a test split and visualise the raw class skew.
x_train_imb, x_test_imb, y_train_imb, y_test_imb = split_data(X, y)
plot_dataset(x_train_imb, x_test_imb, y_train_imb, y_test_imb, 'Imbalanced data with skew (99:1)')
# +
# Standalone preprocessing pipeline (feature scaling only).
processing_pipline = Pipeline([
    ('standardScaler', StandardScaler()),
])
# BUG FIX: `fit` was called as fit(x_train_imb, x_test_imb), passing the test
# *features* where the target vector `y` belongs. StandardScaler ignores `y`,
# so results were unaffected, but the call was misleading; fit on the
# training features only.
processing_pipline.fit(x_train_imb)
# +
# Models ...
# One scaler+classifier pipeline per model family; scaling is fitted inside
# each pipeline so the classifiers always see standardized features.
RandomForestClassifier_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('RandomForestClassifier', RandomForestClassifier())
])
LogisticRegression_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('LogisticRegression', LogisticRegression())
])
KNeighborsClassifier_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('KNeighborsClassifier', KNeighborsClassifier())
])
# -
# Baseline scores on the imbalanced split.
fit_pipes(
    (x_train_imb, x_test_imb, y_train_imb, y_test_imb),
    (RandomForestClassifier_pipe, LogisticRegression_pipe, KNeighborsClassifier_pipe)
)
# +
# V1. ...
# y_train_imb_pred = RandomForestClassifier_pipe.predict(x_train_imb)
# print('Confusion matrix for train data')
# confusion_matrix(y_train_imb, y_train_imb_pred)
# V2. ...
# normalize='true' row-normalizes, so each row shows per-class recall.
plot_confusion_matrix(RandomForestClassifier_pipe, x_train_imb, y_train_imb, cmap="BuGn", normalize='true')
plt.title('Confusion matrix for train data', fontsize='20')
plt.show();
# +
# V1. ...
# y_test_imb_pred = RandomForestClassifier_pipe.predict(x_test_imb)
# print('Confusion matrix for test data')
# confusion_matrix(y_test_imb, y_test_imb_pred)
# V2. ...
plot_confusion_matrix(RandomForestClassifier_pipe, x_test_imb, y_test_imb, cmap="BuGn", normalize='true')
plt.title('Confusion matrix for test data', fontsize='20')
plt.show();
# +
# Plotting decision regions: one row per classifier, columns = train / test.
f, axarr = plt.subplots(3, 2, sharex='col', sharey='row', figsize=(10, 8), dpi=100)
for i, clf, clf_title in zip(
    [0, 1, 2],
    [RandomForestClassifier_pipe, LogisticRegression_pipe, KNeighborsClassifier_pipe],
    ['RandomForestClassifier', 'LogisticRegression', 'KNeighborsClassifier'],
):
    axarr[i, 0].set(ylabel=clf_title)
    for j, X_, y_, title in zip(
            [0, 1],
            [x_train_imb, x_test_imb],
            [y_train_imb, y_test_imb],
            ['Train set', 'Test set']):
        # Dense grid covering the feature range of the set being drawn.
        x_min, x_max = X_[:, 0].min() - 1, X_[:, 0].max() + 1
        y_min, y_max = X_[:, 1].min() - 1, X_[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
        # Classify every grid point to shade the decision regions.
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        axarr[i, j].contourf(xx, yy, Z, alpha=0.4)
        # BUG FIX: the scatter previously always drew the *training* points,
        # even in the "Test set" column; draw the set named in the title.
        axarr[i, j].scatter(X_[:, 0], X_[:, 1], c=y_, s=20, edgecolor='k')
        axarr[i, j].set_title(title)
plt.show();
# -
# ### Balanced dataset (Near Miss Undersampling)
# NearMiss v1 keeps the majority samples closest (on average) to the minority
# class; `imblearn` is presumably imported in an earlier cell (not shown).
under_nearmiss_1 = imblearn.under_sampling.NearMiss(version=1)
# under_nearmiss_2 = imblearn.under_sampling.NearMiss(version=2)
# under_nearmiss_3 = imblearn.under_sampling.NearMiss(version=3)
# +
# Resample copies of the full data, then re-split so train/test are balanced.
X_under_nearmiss_1, y_under_nearmiss_1 = under_nearmiss_1.fit_resample(X.copy(), y.copy())
(x_train_und_nearmiss_1,
 x_test_und_nearmiss_1,
 y_train_und_nearmiss_1,
 y_test_und_nearmiss_1) = split_data(X_under_nearmiss_1, y_under_nearmiss_1)
plot_dataset(
    x_train_und_nearmiss_1,
    x_test_und_nearmiss_1,
    y_train_und_nearmiss_1,
    y_test_und_nearmiss_1,
    'Balanced data NearMiss V1')
# +
# Models ...
# Fresh (unfitted) pipelines dedicated to the NearMiss experiment.
RandomForestClassifier_nearmiss_1_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('RandomForestClassifier', RandomForestClassifier())
])
LogisticRegression_nearmiss_1_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('LogisticRegression', LogisticRegression())
])
KNeighborsClassifier_nearmiss_1_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('KNeighborsClassifier', KNeighborsClassifier())
])
# -
fit_pipes(
    (x_train_und_nearmiss_1,
     x_test_und_nearmiss_1,
     y_train_und_nearmiss_1,
     y_test_und_nearmiss_1),
    (RandomForestClassifier_nearmiss_1_pipe, LogisticRegression_nearmiss_1_pipe, KNeighborsClassifier_nearmiss_1_pipe)
)
plot_confusion_matrix(RandomForestClassifier_nearmiss_1_pipe, x_train_und_nearmiss_1, y_train_und_nearmiss_1, cmap="BuGn", normalize='true')
plt.title('RandomForest Confusion matrix for train data NearMiss V1', fontsize='20')
plt.show();
plot_confusion_matrix(RandomForestClassifier_nearmiss_1_pipe, x_test_und_nearmiss_1, y_test_und_nearmiss_1, cmap="BuGn", normalize='true')
plt.title('RandomForest Confusion matrix for test data NearMiss V1', fontsize='20')
plt.show();
# ### Balanced dataset (SMOTE Oversampling)
# +
# SMOTE: oversample the minority class with synthetic interpolated samples.
from imblearn.over_sampling import SMOTE, ADASYN
smote = SMOTE()
X_over_smote, y_over_smote = smote.fit_resample(X.copy(), y.copy())
(x_train_over_smote,
 x_test_over_smote,
 y_train_over_smote,
 y_test_over_smote) = split_data(X_over_smote, y_over_smote)
plot_dataset(
    x_train_over_smote,
    x_test_over_smote,
    y_train_over_smote,
    y_test_over_smote,
    'Balanced data Over SMOTE')
# +
# Models ...
# Fresh (unfitted) pipelines dedicated to the SMOTE experiment.
RandomForestClassifier_smote_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('RandomForestClassifier', RandomForestClassifier())
])
LogisticRegression_smote_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('LogisticRegression', LogisticRegression())
])
KNeighborsClassifier_smote_pipe = Pipeline([
    ('standardScaler', StandardScaler()),
    ('KNeighborsClassifier', KNeighborsClassifier())
])
# -
fit_pipes(
    (x_train_over_smote,
     x_test_over_smote,
     y_train_over_smote,
     y_test_over_smote),
    (RandomForestClassifier_smote_pipe, LogisticRegression_smote_pipe, KNeighborsClassifier_smote_pipe)
)
# Row-normalized confusion matrices of the SMOTE-trained RandomForest.
plot_confusion_matrix(RandomForestClassifier_smote_pipe, x_train_over_smote, y_train_over_smote, cmap="BuGn", normalize='true')
# Typo fix in the displayed titles: "SOMTE" -> "SMOTE".
plt.title('RandomForest Confusion matrix for train data SMOTE', fontsize='20')
plt.show();
plot_confusion_matrix(RandomForestClassifier_smote_pipe, x_test_over_smote, y_test_over_smote, cmap="BuGn", normalize='true')
plt.title('RandomForest Confusion matrix for test data SMOTE', fontsize='20')
plt.show();
# +
import matplotlib.gridspec as gridspec

# 3x2 grid of confusion matrices: rows = sampling strategy (imbalanced,
# NearMiss V1, SMOTE), columns = train / test split.
fig = plt.figure(figsize=(10, 8), constrained_layout=True, dpi=100)
spec = gridspec.GridSpec(nrows=3, ncols=2, figure=fig)
# Imbalanced ...
ax1 = fig.add_subplot(spec[0, 0])
ax1.set_title('Train Imbalanced')
# BUG FIX: the "Imbalanced" row previously displayed the NearMiss-trained
# pipeline; use the baseline pipeline fitted on the imbalanced training data.
plot_confusion_matrix(RandomForestClassifier_pipe, x_train_imb, y_train_imb,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax1)
# ...
ax2 = fig.add_subplot(spec[0, 1])
ax2.set_title('Test Imbalanced')
plot_confusion_matrix(RandomForestClassifier_pipe, x_test_imb, y_test_imb,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax2)
# NearMiss V1 ...
ax3 = fig.add_subplot(spec[1, 0])
ax3.set_title('Train NearMiss V1')
plot_confusion_matrix(RandomForestClassifier_nearmiss_1_pipe, x_train_und_nearmiss_1, y_train_und_nearmiss_1,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax3)
# NOTE(review): the NearMiss/SMOTE "Test" panels evaluate on the original
# *imbalanced* test split (x_test_imb), matching the final comparison cells
# below — confirm this is intended rather than the resampled test sets.
ax4 = fig.add_subplot(spec[1, 1])
ax4.set_title('Test NearMiss V1')
plot_confusion_matrix(RandomForestClassifier_nearmiss_1_pipe, x_test_imb, y_test_imb,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax4)
# SMOTE ...
ax5 = fig.add_subplot(spec[2, 0])
ax5.set_title('Train SMOTE')
plot_confusion_matrix(RandomForestClassifier_smote_pipe, x_train_over_smote, y_train_over_smote,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax5)
ax6 = fig.add_subplot(spec[2, 1])
ax6.set_title('Test SMOTE')
plot_confusion_matrix(RandomForestClassifier_smote_pipe, x_test_imb, y_test_imb,
                      cmap="BuGn",
                      normalize='true',
                      ax=ax6)
plt.show();
# -
# Final comparison: each sampling strategy's pipelines are scored with its own
# training set but the *same* imbalanced test split, so the test columns are
# directly comparable across strategies.
fit_pipes(
    (x_train_imb, x_test_imb, y_train_imb, y_test_imb),
    (RandomForestClassifier_pipe, LogisticRegression_pipe, KNeighborsClassifier_pipe)
)
fit_pipes(
    (x_train_und_nearmiss_1,
     x_test_imb,
     y_train_und_nearmiss_1,
     y_test_imb),
    (RandomForestClassifier_nearmiss_1_pipe, LogisticRegression_nearmiss_1_pipe, KNeighborsClassifier_nearmiss_1_pipe)
)
fit_pipes(
    (x_train_over_smote,
     x_test_imb,
     y_train_over_smote,
     y_test_imb),
    (RandomForestClassifier_smote_pipe, LogisticRegression_smote_pipe, KNeighborsClassifier_smote_pipe)
)
# Row-normalized crosstabs = per-class recall of each model on the same test split.
y_test_imb_pred = RandomForestClassifier_pipe.predict(x_test_imb)
pd.crosstab(y_test_imb, y_test_imb_pred, normalize="index")
y_test_nearmiss_1_pred = RandomForestClassifier_nearmiss_1_pipe.predict(x_test_imb)
pd.crosstab(y_test_imb, y_test_nearmiss_1_pred, normalize="index")
y_test_smote_pred = RandomForestClassifier_smote_pipe.predict(x_test_imb)
pd.crosstab(y_test_imb, y_test_smote_pred, normalize="index")
# BUG FIX: `processing_pipline.transform()` was called with no arguments,
# which unconditionally raises TypeError (transform requires X). Disabled
# until the intended input is chosen, e.g. processing_pipline.transform(x_test_imb).
# processing_pipline.transform()  ## TODO .....
# ## Summary
# <a id="summary"></a>
#
#
# 1. The balancing issue corresponds to the difference of the number of samples in the different classes.
# 2. Over/Under
# 3. ...
# ## What Next ?
# <a id="what-next"></a>
#
# 1. Library
# > [Imbalanced learn](https://imbalanced-learn.readthedocs.io/en/stable/index.html)
# 2. Book
# > [Foundations, Algorithms, and Applications, 2013.](https://www.amazon.com/dp/1118074629/)
# ## Resources
# <a id="resources"></a>
#
# ### Books
# 1. [[1.1.] Page 47, Imbalanced Learning: Foundations, Algorithms, and Applications, 2013.](https://www.amazon.com/dp/1118074629/)
#
# ### Papers
#
# ### Web
# 1. [[3.1.] Wiki, Oversampling and undersampling in data analysis](https://en.wikipedia.org/wiki/Oversampling_and_undersampling_in_data_analysis)
# 2. [[3.2.] Introduction](https://imbalanced-learn.readthedocs.io/en/stable/introduction.html)
# 3. [[3.3.] Using Under-Sampling Techniques for Extremely Imbalanced Data](https://towardsdatascience.com/sampling-techniques-for-extremely-imbalanced-data-part-i-under-sampling-a8dbc3d8d6d8)
# 4. [[3.4.] Controlled under-sampling techniques > Mathematical formulation](https://imbalanced-learn.readthedocs.io/en/stable/under_sampling.html#id2)
# 5. [[3.5.] SMOTE oversampling for imbalanced classification](https://machinelearningmastery.com/smote-oversampling-for-imbalanced-classification/)
#
# ### Images
# 1. [[4.1.] dealing with imbalanced data](https://medium.com/@SeoJaeDuk/archived-post-dealing-with-imbalanced-data-577f024c8681)
# 2. [[4.2.] Problem statement regarding imbalanced data sets](https://imbalanced-learn.readthedocs.io/en/stable/introduction.html)
# # TODO
#
# https://www.google.com/search?q=under-sampling&oq=under-sam&aqs=chrome.1.69i57j0l6j69i60.4745j0j7&sourceid=chrome&ie=UTF-8
#
#
# https://www.google.com/search?q=oversampling+undersampling&hl=pl&sxsrf=ALeKk031-jdzqaKJnxHf2CmIMMtd73RRzQ:1594037502718&source=lnms&sa=X&ved=0ahUKEwi1re64zLjqAhVDxosKHRtpD1UQ_AUICSgA&biw=1536&bih=722&dpr=1.25
#
#
# https://towardsdatascience.com/sampling-techniques-for-extremely-imbalanced-data-part-i-under-sampling-a8dbc3d8d6d8
#
# https://machinelearningmastery.com/combine-oversampling-and-undersampling-for-imbalanced-classification/
#
#
#
# https://machinelearningmastery.com/random-oversampling-and-undersampling-for-imbalanced-classification/
#
# https://medium.com/quantyca/oversampling-and-undersampling-adasyn-vs-enn-60828a58db39
|
notes/imbalanced-data-sets/main.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''deeplearning'': conda)'
# language: python
# name: python37564bitdeeplearningconda2f5dcc693383402099797ed40bd3951d
# ---
import sys
sys.path.append('../')
import warnings
from torch.utils.data import DataLoader, Dataset
from src.pl_module import MelanomaModel
import pandas as pd
import torch
import torch.nn as nn
from typing import Tuple
import albumentations as A
from tqdm.auto import tqdm
import skimage.io
import numpy as np
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
# +
def load_model(model_name: str, model_type: str, weights: str):
    """Build a MelanomaModel network, restore its checkpoint, move it to GPU.

    Returns the network in eval mode on CUDA, ready for inference.
    """
    print('Loading {}'.format(model_name))
    net = MelanomaModel.net_mapping(model_name, model_type)
    state_dict = torch.load(weights)
    net.load_state_dict(state_dict)
    # Inference only: freeze dropout/batch-norm behaviour, then move to GPU.
    net.eval()
    net.cuda()
    print("Loaded model {} from checkpoint {}".format(model_name, weights))
    return net
class MelanomaDataset(Dataset):
    """Inference-time dataset over a folder of JPEG images.

    For each row of `df`, yields a dict with the transformed CHW image tensor
    (`'features'`) and the image id (`'img_id'`).
    """

    def __init__(self, image_folder, df, transform=None):
        """
        Arguments:
            image_folder: Directory containing `<image_name>.jpg` files.
            df: DataFrame with an `image_name` column.
            transform: Optional albumentations transform applied per image.
        """
        super().__init__()
        self.image_folder = image_folder
        self.df = df
        self.transform = transform

    def __len__(self) -> int:
        return self.df.shape[0]

    # BUG FIX (annotation only): this method returns a dict, not the
    # Tuple[torch.Tensor, torch.Tensor] it was previously annotated with.
    def __getitem__(self, index) -> dict:
        row = self.df.iloc[index]
        img_id = row.image_name
        img_path = f"{self.image_folder}/{img_id}.jpg"
        image = skimage.io.imread(img_path)
        if self.transform is not None:
            image = self.transform(image=image)['image']
        # HWC -> CHW, the layout torch conv models expect.
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image)
        return {'features': image, 'img_id': img_id}
def get_valid_transforms():
    """Validation/test-time augmentations: normalization only (no TTA)."""
    transforms = [A.Normalize()]
    return A.Compose(transforms, p=1.0)
# -
# Pseudo-labeling: average an ensemble's sigmoid outputs over the test set
# and write them back as soft targets.
data = pd.read_csv('../data/test.csv')
data.head()
# Ensemble members; seresnext101 is disabled (commented out) in both lists.
model_name_list = [
    'resnest50d',
    'resnest269e',
    'resnest101e',
    #'seresnext101_32x4d',
    'tf_efficientnet_b3_ns',
    'tf_efficientnet_b7_ns',
    'tf_efficientnet_b5_ns']
model_type_list = ['SingleHeadMax'] * len(model_name_list)
# Checkpoint paths, index-aligned with model_name_list.
weights_list = [
    '../weights/train_384_balancedW_resnest50d_fold0_heavyaugs_averaged_best_weights.pth',
    '../weights/07.09_train_384_balancedW_resnest269e_heavyaugs_averaged_best_weights.pth',
    '../weights/03.09_train_384_balancedW_resnest101e_fold0_heavyaugs_averaged_best_weights.pth',
    #'../weights/06.18_train_384_balancedW_seresnext101_32x4d_fold0_heavyaugs_averaged_best_weights.pth',
    '../weights/06.10_train_384_balancedW_b3_fold0_heavyaugs_averaged_best_weights.pth',
    '../weights/05.23_train_384_balancedW_b7_fold0_heavyaugs_averaged_best_weights.pth',
    '../weights/03.18_train_384_balancedW_b5_fold0_heavyaugs_averaged_best_weights.pth'
]
models = [load_model(model_name, model_type, weights) for model_name, model_type, weights in
          zip(model_name_list, model_type_list, weights_list)]
dataset = MelanomaDataset('../data/jpeg-melanoma-384x384/test/', data, get_valid_transforms())
# shuffle=False keeps predictions aligned with the rows of `data`.
dataloader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4)
mean_cls_1_list = []
for batch in tqdm(dataloader, total=len(dataloader)):
    with torch.no_grad():
        preds = [nn.Sigmoid()(model(batch['features'].cuda())) for model in models]
    # preds stacks to (n_models, batch, n_outputs); indexing [..., 0] assumes
    # the melanoma probability is output 0 — TODO confirm against MelanomaModel.
    preds = torch.stack(preds)
    mean_cls_1 = preds[..., 0].cpu().numpy().mean(axis=0)
    mean_cls_1_list.extend(mean_cls_1)
data['target'] = mean_cls_1_list
data.to_csv('../data/labeled_test.csv', index=False)
|
notebooks/Pseudolabel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow
# language: python
# name: tensorflow
# ---
# +
# `xinet` is a project helper library (not shown here); `np` and `nn` are
# presumably TensorFlow-backed re-exports — TODO confirm against xinet.
from xinet import utils
from xinet import tensorflow as xinet
np = xinet.np
nn = xinet.nn
# -
# Column vector [[0.], [1.], [2.], [3.]] used as the autodiff example input.
x = np.arange(4, dtype=np.float32).reshape((-1, 1))
x
import tensorflow as tf
# +
# Record all computations on the tape; watch(x) because x is not a tf.Variable.
with tf.GradientTape() as tape:
    tape.watch(x)
    y = 2 * np.dot(x.T, x)
y
# -
# Gradient of y = 2 * x^T x with respect to x (analytically 4x).
x_grad = tape.gradient(y, x)
x_grad
# Record all computations on the tape
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.reduce_sum(x)
tape.gradient(y, x)  # overwritten by the newly computed gradient
# +
x = np.arange(4, dtype=np.float32)
# Record all computations on the tape
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * x
tape.gradient(y, x)  # overwritten by the newly computed gradient
# -
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Object Detection with RetinaNet
#
# **Author:** [<NAME>](https://twitter.com/srihari_rh)<br>
# **Date created:** 2020/05/17<br>
# **Last modified:** 2020/07/14<br>
# **Description:** Implementing RetinaNet: Focal Loss for Dense Object Detection.
# + [markdown] colab_type="text"
# ## Introduction
#
# Object detection is a very important problem in computer
# vision. Here the model is tasked with localizing the objects present in an
# image, and at the same time, classifying them into different categories.
# Object detection models can be broadly classified into "single-stage" and
# "two-stage" detectors. Two-stage detectors are often more accurate but at the
# cost of being slower. Here in this example, we will implement RetinaNet,
# a popular single-stage detector, which is accurate and runs fast.
# RetinaNet uses a feature pyramid network to efficiently detect objects at
# multiple scales and introduces a new loss, the Focal loss function, to alleviate
# the problem of the extreme foreground-background class imbalance.
#
# **References:**
#
# - [RetinaNet Paper](https://arxiv.org/abs/1708.02002)
# - [Feature Pyramid Network Paper](https://arxiv.org/abs/1612.03144)
# + colab_type="code"
import os
import re
import zipfile
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
# + [markdown] colab_type="text"
# ## Downloading the COCO2017 dataset
#
# Training on the entire COCO2017 dataset which has around 118k images takes a
# lot of time, hence we will be using a smaller subset of ~500 images for
# training in this example.
# + colab_type="code"
url = "https://github.com/srihari-humbarwadi/datasets/releases/download/v0.1.0/data.zip"
filename = os.path.join(os.getcwd(), "data.zip")
keras.utils.get_file(filename, url)
with zipfile.ZipFile("data.zip", "r") as z_fp:
z_fp.extractall("./")
# + [markdown] colab_type="text"
# ## Implementing utility functions
#
# Bounding boxes can be represented in multiple ways, the most common formats are:
#
# - Storing the coordinates of the corners `[xmin, ymin, xmax, ymax]`
# - Storing the coordinates of the center and the box dimensions
# `[x, y, width, height]`
#
# Since we require both formats, we will be implementing functions for converting
# between the formats.
# + colab_type="code"
def swap_xy(boxes):
    """Swap the x and y coordinates of corner-format boxes.

    Arguments:
      boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes.

    Returns:
      Boxes with the same shape, with x/y columns exchanged.
    """
    x_min, y_min = boxes[:, 0], boxes[:, 1]
    x_max, y_max = boxes[:, 2], boxes[:, 3]
    # [xmin, ymin, xmax, ymax] -> [ymin, xmin, ymax, xmax]
    return tf.stack([y_min, x_min, y_max, x_max], axis=-1)
def convert_to_xywh(boxes):
    """Convert corner-format boxes to center-size format.

    Arguments:
      boxes: A tensor of rank 2 or higher with shape `(..., num_boxes, 4)`
        where each box is `[xmin, ymin, xmax, ymax]`.

    Returns:
      Boxes of the same shape in `[x_center, y_center, width, height]` format.
    """
    centers = (boxes[..., :2] + boxes[..., 2:]) / 2.0
    sizes = boxes[..., 2:] - boxes[..., :2]
    return tf.concat([centers, sizes], axis=-1)
def convert_to_corners(boxes):
    """Convert center-size boxes to corner format.

    Arguments:
      boxes: A tensor of rank 2 or higher with shape `(..., num_boxes, 4)`
        where each box is `[x_center, y_center, width, height]`.

    Returns:
      Boxes of the same shape in `[xmin, ymin, xmax, ymax]` format.
    """
    half_sizes = boxes[..., 2:] / 2.0
    upper_left = boxes[..., :2] - half_sizes
    lower_right = boxes[..., :2] + half_sizes
    return tf.concat([upper_left, lower_right], axis=-1)
# + [markdown] colab_type="text"
# ## Computing pairwise Intersection Over Union (IOU)
#
# As we will see later in the example, we would be assigning ground truth boxes
# to anchor boxes based on the extent of overlapping. This will require us to
# calculate the Intersection Over Union (IOU) between all the anchor
# boxes and ground truth boxes pairs.
# + colab_type="code"
def compute_iou(boxes1, boxes2):
    """Compute the pairwise IOU matrix for two sets of boxes.

    Arguments:
      boxes1: A tensor with shape `(N, 4)` of `[x, y, width, height]` boxes.
      boxes2: A tensor with shape `(M, 4)` of `[x, y, width, height]` boxes.

    Returns:
      A `(N, M)` tensor where entry (i, j) holds the IOU between the ith box
      of `boxes1` and the jth box of `boxes2`.
    """
    corners1 = convert_to_corners(boxes1)
    corners2 = convert_to_corners(boxes2)
    # Broadcast to (N, M, 2): intersection upper-left / lower-right corners.
    upper_left = tf.maximum(corners1[:, None, :2], corners2[:, :2])
    lower_right = tf.minimum(corners1[:, None, 2:], corners2[:, 2:])
    overlap_wh = tf.maximum(0.0, lower_right - upper_left)
    intersection_area = overlap_wh[:, :, 0] * overlap_wh[:, :, 1]
    area1 = boxes1[:, 2] * boxes1[:, 3]
    area2 = boxes2[:, 2] * boxes2[:, 3]
    # Epsilon floor guards against division by zero for degenerate boxes.
    union_area = tf.maximum(area1[:, None] + area2 - intersection_area, 1e-8)
    return tf.clip_by_value(intersection_area / union_area, 0.0, 1.0)
def visualize_detections(
    image, boxes, classes, scores, figsize=(7, 7), linewidth=1, color=(0, 0, 1)
):
    """Draw detection boxes with `class: score` captions on an image.

    Arguments:
      image: Image array (convertible to uint8) to draw on.
      boxes: Iterable of `[x1, y1, x2, y2]` corner boxes in pixel coordinates.
      classes: Class labels, index-aligned with `boxes`.
      scores: Confidence scores, index-aligned with `boxes`.
      figsize: Matplotlib figure size.
      linewidth: Box edge width.
      color: RGB color for edges and caption background. NOTE: default changed
        from a list to an equivalent tuple to avoid the shared mutable default
        argument pitfall; callers passing their own color are unaffected.

    Returns:
      The matplotlib axes the detections were drawn on.
    """
    image = np.array(image, dtype=np.uint8)
    plt.figure(figsize=figsize)
    plt.axis("off")
    plt.imshow(image)
    ax = plt.gca()
    for box, _cls, score in zip(boxes, classes, scores):
        text = "{}: {:.2f}".format(_cls, score)
        x1, y1, x2, y2 = box
        w, h = x2 - x1, y2 - y1
        patch = plt.Rectangle(
            [x1, y1], w, h, fill=False, edgecolor=color, linewidth=linewidth
        )
        ax.add_patch(patch)
        ax.text(
            x1,
            y1,
            text,
            bbox={"facecolor": color, "alpha": 0.4},
            clip_box=ax.clipbox,
            clip_on=True,
        )
    plt.show()
    return ax
# + [markdown] colab_type="text"
# ## Implementing Anchor generator
#
# Anchor boxes are fixed sized boxes that the model uses to predict the bounding
# box for an object. It does this by regressing the offset between the location
# of the object's center and the center of an anchor box, and then uses the width
# and height of the anchor box to predict a relative scale of the object. In the
# case of RetinaNet, each location on a given feature map has nine anchor boxes
# (at three scales and three ratios).
# + colab_type="code"
class AnchorBox:
    """Generates anchor boxes.

    This class has operations to generate anchor boxes for feature maps at
    strides `[8, 16, 32, 64, 128]`, where each anchor box is of the
    format `[x, y, width, height]`.

    Attributes:
      aspect_ratios: A list of float values representing the aspect ratios of
        the anchor boxes at each location on the feature map
      scales: A list of float values representing the scale of the anchor boxes
        at each location on the feature map.
      num_anchors: The number of anchor boxes at each location on feature map
      areas: A list of float values representing the areas of the anchor
        boxes for each feature map in the feature pyramid.
      strides: A list of float value representing the strides for each feature
        map in the feature pyramid.
    """

    def __init__(self):
        self.aspect_ratios = [0.5, 1.0, 2.0]
        self.scales = [2 ** x for x in [0, 1 / 3, 2 / 3]]
        # 3 ratios x 3 scales = 9 anchors per feature-map location.
        self._num_anchors = len(self.aspect_ratios) * len(self.scales)
        # Strides 2^3 .. 2^7 — one per pyramid level (P3 .. P7).
        self._strides = [2 ** i for i in range(3, 8)]
        self._areas = [x ** 2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
        # Pre-computed (width, height) tensors, one list entry per level.
        self._anchor_dims = self._compute_dims()

    def _compute_dims(self):
        """Computes anchor box dimensions for all ratios and scales at all levels
        of the feature pyramid.
        """
        anchor_dims_all = []
        for area in self._areas:
            anchor_dims = []
            for ratio in self.aspect_ratios:
                # Solve width * height = area with width / height = ratio.
                anchor_height = tf.math.sqrt(area / ratio)
                anchor_width = area / anchor_height
                dims = tf.reshape(
                    tf.stack([anchor_width, anchor_height], axis=-1), [1, 1, 2]
                )
                for scale in self.scales:
                    anchor_dims.append(scale * dims)
            anchor_dims_all.append(tf.stack(anchor_dims, axis=-2))
        return anchor_dims_all

    def _get_anchors(self, feature_height, feature_width, level):
        """Generates anchor boxes for a given feature map size and level

        Arguments:
          feature_height: An integer representing the height of the feature map.
          feature_width: An integer representing the width of the feature map.
          level: An integer representing the level of the feature map in the
            feature pyramid.

        Returns:
          anchor boxes with the shape
          `(feature_height * feature_width * num_anchors, 4)`
        """
        # +0.5 places centers in the middle of each feature-map cell before
        # scaling back to image coordinates by the level's stride.
        rx = tf.range(feature_width, dtype=tf.float32) + 0.5
        ry = tf.range(feature_height, dtype=tf.float32) + 0.5
        centers = tf.stack(tf.meshgrid(rx, ry), axis=-1) * self._strides[level - 3]
        centers = tf.expand_dims(centers, axis=-2)
        centers = tf.tile(centers, [1, 1, self._num_anchors, 1])
        dims = tf.tile(
            self._anchor_dims[level - 3], [feature_height, feature_width, 1, 1]
        )
        anchors = tf.concat([centers, dims], axis=-1)
        return tf.reshape(
            anchors, [feature_height * feature_width * self._num_anchors, 4]
        )

    def get_anchors(self, image_height, image_width):
        """Generates anchor boxes for all the feature maps of the feature pyramid.

        Arguments:
          image_height: Height of the input image.
          image_width: Width of the input image.

        Returns:
          anchor boxes for all the feature maps, stacked as a single tensor
          with shape `(total_anchors, 4)`
        """
        # Feature map at level i has spatial size ceil(image_size / 2^i).
        anchors = [
            self._get_anchors(
                tf.math.ceil(image_height / 2 ** i),
                tf.math.ceil(image_width / 2 ** i),
                i,
            )
            for i in range(3, 8)
        ]
        return tf.concat(anchors, axis=0)
# + [markdown] colab_type="text"
# ## Preprocessing data
#
# Preprocessing the images involves two steps:
#
# - Resizing the image: Images are resized such that the shortest size is equal
# to 800 px, after resizing if the longest side of the image exceeds 1333 px,
# the image is resized such that the longest size is now capped at 1333 px.
# - Applying augmentation: Random scale jittering and random horizontal flipping
# are the only augmentations applied to the images.
#
# Along with the images, bounding boxes are rescaled and flipped if required.
# + colab_type="code"
def random_flip_horizontal(image, boxes):
    """Flips image and boxes horizontally with 50% chance

    Arguments:
      image: A 3-D tensor of shape `(height, width, channels)` representing an
        image.
      boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,
        having normalized coordinates.

    Returns:
      Randomly flipped image and boxes
    """
    if tf.random.uniform(()) > 0.5:
        image = tf.image.flip_left_right(image)
        # Mirror the x-coordinates about the image center: since coordinates
        # are normalized, new xmin = 1 - old xmax and new xmax = 1 - old xmin.
        boxes = tf.stack(
            [1 - boxes[:, 2], boxes[:, 1], 1 - boxes[:, 0], boxes[:, 3]], axis=-1
        )
    return image, boxes
def resize_and_pad_image(
    image, min_side=800.0, max_side=1333.0, jitter=[640, 1024], stride=128.0
):
    """Resizes and pads image while preserving aspect ratio.

    1. Resizes images so that the shorter side is equal to `min_side`
    2. If the longer side is greater than `max_side`, then resize the image
      with longer side equal to `max_side`
    3. Pad with zeros on right and bottom to make the image shape divisible by
    `stride`

    Arguments:
      image: A 3-D tensor of shape `(height, width, channels)` representing an
        image.
      min_side: The shorter side of the image is resized to this value, if
        `jitter` is set to None.
      max_side: If the longer side of the image exceeds this value after
        resizing, the image is resized such that the longer side now equals to
        this value.
      jitter: A list of floats containing minimum and maximum size for scale
        jittering. If available, the shorter side of the image will be
        resized to a random value in this range.
      stride: The stride of the smallest feature map in the feature pyramid.
        Can be calculated using `image_size / feature_map_size`.

    Returns:
      image: Resized and padded image.
      image_shape: Shape of the image before padding.
      ratio: The scaling factor used to resize the image
    """
    image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
    if jitter is not None:
        # Scale jittering: override min_side with a random value in the range.
        min_side = tf.random.uniform((), jitter[0], jitter[1], dtype=tf.float32)
    ratio = min_side / tf.reduce_min(image_shape)
    if ratio * tf.reduce_max(image_shape) > max_side:
        # Longer side would exceed max_side — rescale so it lands on max_side.
        ratio = max_side / tf.reduce_max(image_shape)
    image_shape = ratio * image_shape
    image = tf.image.resize(image, tf.cast(image_shape, dtype=tf.int32))
    # Round each dimension up to a multiple of `stride`; pad bottom/right.
    padded_image_shape = tf.cast(
        tf.math.ceil(image_shape / stride) * stride, dtype=tf.int32
    )
    image = tf.image.pad_to_bounding_box(
        image, 0, 0, padded_image_shape[0], padded_image_shape[1]
    )
    return image, image_shape, ratio
def preprocess_data(sample):
    """Applies preprocessing step to a single sample

    Arguments:
      sample: A dict representing a single training sample.

    Returns:
      image: Resized and padded image with random horizontal flipping applied.
      bbox: Bounding boxes with the shape `(num_objects, 4)` where each box is
        of the format `[x, y, width, height]`.
      class_id: A tensor representing the class id of the objects, having
        shape `(num_objects,)`.
    """
    image = sample["image"]
    # TFDS boxes are `[ymin, xmin, ymax, xmax]`; swap to x-first ordering.
    bbox = swap_xy(sample["objects"]["bbox"])
    class_id = tf.cast(sample["objects"]["label"], dtype=tf.int32)
    image, bbox = random_flip_horizontal(image, bbox)
    image, image_shape, _ = resize_and_pad_image(image)
    # Denormalize box coordinates to absolute pixels of the resized image.
    bbox = tf.stack(
        [
            bbox[:, 0] * image_shape[1],
            bbox[:, 1] * image_shape[0],
            bbox[:, 2] * image_shape[1],
            bbox[:, 3] * image_shape[0],
        ],
        axis=-1,
    )
    bbox = convert_to_xywh(bbox)
    return image, bbox, class_id
# + [markdown] colab_type="text"
# ## Encoding labels
#
# The raw labels, consisting of bounding boxes and class ids need to be
# transformed into targets for training. This transformation consists of
# the following steps:
#
# - Generating anchor boxes for the given image dimensions
# - Assigning ground truth boxes to the anchor boxes
# - The anchor boxes that are not assigned any objects, are either assigned the
# background class or ignored depending on the IOU
# - Generating the classification and regression targets using anchor boxes
# + colab_type="code"
class LabelEncoder:
    """Transforms the raw labels into targets for training.
    This class has operations to generate targets for a batch of samples which
    is made up of the input images, bounding boxes for the objects present and
    their class ids.
    Attributes:
      anchor_box: Anchor box generator to encode the bounding boxes.
      box_variance: The scaling factors used to scale the bounding box targets.
    """
    def __init__(self):
        self._anchor_box = AnchorBox()
        # Scaling factors applied to the (x, y, w, h) regression targets;
        # DecodePredictions must invert with the same values.
        self._box_variance = tf.convert_to_tensor(
            [0.1, 0.1, 0.2, 0.2], dtype=tf.float32
        )
    def _match_anchor_boxes(
        self, anchor_boxes, gt_boxes, match_iou=0.5, ignore_iou=0.4
    ):
        """Matches ground truth boxes to anchor boxes based on IOU.
        1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
          to get a `(M, N)` shaped matrix.
        2. The ground truth box with the maximum IOU in each row is assigned to
          the anchor box provided the IOU is greater than `match_iou`.
        3. If the maximum IOU in a row is less than `ignore_iou`, the anchor
          box is assigned with the background class.
        4. The remaining anchor boxes that do not have any class assigned are
          ignored during training.
        Arguments:
          anchor_boxes: A float tensor with the shape `(total_anchors, 4)`
            representing all the anchor boxes for a given input image shape,
            where each anchor box is of the format `[x, y, width, height]`.
          gt_boxes: A float tensor with shape `(num_objects, 4)` representing
            the ground truth boxes, where each box is of the format
            `[x, y, width, height]`.
          match_iou: A float value representing the minimum IOU threshold for
            determining if a ground truth box can be assigned to an anchor box.
          ignore_iou: A float value representing the IOU threshold under which
            an anchor box is assigned to the background class.
        Returns:
          matched_gt_idx: Index of the matched object
          positive_mask: A mask for anchor boxes that have been assigned ground
            truth boxes.
          ignore_mask: A mask for anchor boxes that need to be ignored during
            training
        """
        iou_matrix = compute_iou(anchor_boxes, gt_boxes)
        max_iou = tf.reduce_max(iou_matrix, axis=1)
        matched_gt_idx = tf.argmax(iou_matrix, axis=1)
        # positive: best IOU >= match_iou; negative (background): best IOU <
        # ignore_iou; anything in between is neither and gets ignored.
        positive_mask = tf.greater_equal(max_iou, match_iou)
        negative_mask = tf.less(max_iou, ignore_iou)
        ignore_mask = tf.logical_not(tf.logical_or(positive_mask, negative_mask))
        return (
            matched_gt_idx,
            tf.cast(positive_mask, dtype=tf.float32),
            tf.cast(ignore_mask, dtype=tf.float32),
        )
    def _compute_box_target(self, anchor_boxes, matched_gt_boxes):
        """Transforms the ground truth boxes into targets for training"""
        # Center offsets are normalized by the anchor size; width/height are
        # encoded as log-ratios relative to the anchor.
        box_target = tf.concat(
            [
                (matched_gt_boxes[:, :2] - anchor_boxes[:, :2]) / anchor_boxes[:, 2:],
                tf.math.log(matched_gt_boxes[:, 2:] / anchor_boxes[:, 2:]),
            ],
            axis=-1,
        )
        box_target = box_target / self._box_variance
        return box_target
    def _encode_sample(self, image_shape, gt_boxes, cls_ids):
        """Creates box and classification targets for a single sample"""
        # image_shape is (batch, height, width, channels); anchors depend on
        # the spatial dimensions only.
        anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
        cls_ids = tf.cast(cls_ids, dtype=tf.float32)
        matched_gt_idx, positive_mask, ignore_mask = self._match_anchor_boxes(
            anchor_boxes, gt_boxes
        )
        matched_gt_boxes = tf.gather(gt_boxes, matched_gt_idx)
        box_target = self._compute_box_target(anchor_boxes, matched_gt_boxes)
        matched_gt_cls_ids = tf.gather(cls_ids, matched_gt_idx)
        # Class column encoding consumed by RetinaNetLoss:
        # real class id for positive anchors, -1.0 for background anchors,
        # -2.0 for anchors the loss must ignore.
        cls_target = tf.where(
            tf.not_equal(positive_mask, 1.0), -1.0, matched_gt_cls_ids
        )
        cls_target = tf.where(tf.equal(ignore_mask, 1.0), -2.0, cls_target)
        cls_target = tf.expand_dims(cls_target, axis=-1)
        # Final per-anchor label: 4 box-offset columns + 1 class column.
        label = tf.concat([box_target, cls_target], axis=-1)
        return label
    def encode_batch(self, batch_images, gt_boxes, cls_ids):
        """Creates box and classification targets for a batch"""
        images_shape = tf.shape(batch_images)
        batch_size = images_shape[0]
        labels = tf.TensorArray(dtype=tf.float32, size=batch_size, dynamic_size=True)
        for i in range(batch_size):
            label = self._encode_sample(images_shape, gt_boxes[i], cls_ids[i])
            labels = labels.write(i, label)
        # Pixel normalization happens here so the tf.data pipeline feeds the
        # model ResNet-preprocessed images.
        batch_images = tf.keras.applications.resnet.preprocess_input(batch_images)
        return batch_images, labels.stack()
# + [markdown] colab_type="text"
# ## Building the ResNet50 backbone
#
# RetinaNet uses a ResNet based backbone, using which a feature pyramid network
# is constructed. In the example we use ResNet50 as the backbone, and return the
# feature maps at strides 8, 16 and 32.
# + colab_type="code"
def get_backbone():
    """Build a ResNet50 feature extractor with pre-trained ImageNet weights.

    Returns:
        A `keras.Model` mapping an input image to the C3, C4 and C5 feature
        maps (strides 8, 16 and 32) used by the feature pyramid network.
    """
    backbone = keras.applications.ResNet50(
        include_top=False, input_shape=[None, None, 3]
    )
    # Tap the last activation of conv stages 3, 4 and 5.
    layer_names = ["conv3_block4_out", "conv4_block6_out", "conv5_block3_out"]
    feature_maps = [backbone.get_layer(name).output for name in layer_names]
    return keras.Model(inputs=[backbone.inputs], outputs=feature_maps)
# + [markdown] colab_type="text"
# ## Building Feature Pyramid Network as a custom layer
# + colab_type="code"
class FeaturePyramid(keras.layers.Layer):
    """Builds the Feature Pyramid with the feature maps from the backbone.

    Combines the C3/C4/C5 backbone outputs into the P3..P7 pyramid levels,
    each with 256 channels.

    Attributes:
      num_classes: Number of classes in the dataset.
      backbone: The backbone to build the feature pyramid from.
        Currently supports ResNet50 only.
    """
    def __init__(self, backbone=None, **kwargs):
        super(FeaturePyramid, self).__init__(name="FeaturePyramid", **kwargs)
        self.backbone = backbone if backbone else get_backbone()
        # 1x1 lateral convolutions project each backbone map to 256 channels.
        self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
        self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
        self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
        # 3x3 convolutions smooth the merged top-down maps.
        self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
        self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
        self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
        # Stride-2 convolutions produce the extra coarse P6/P7 levels.
        self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
        self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
        self.upsample_2x = keras.layers.UpSampling2D(2)
    def call(self, images, training=False):
        c3, c4, c5 = self.backbone(images, training=training)
        # Lateral projections to a common channel depth.
        m3 = self.conv_c3_1x1(c3)
        m4 = self.conv_c4_1x1(c4)
        m5 = self.conv_c5_1x1(c5)
        # Top-down pathway: merge coarser levels into finer ones
        # (order matters: m4 must be updated before it feeds m3).
        m4 = m4 + self.upsample_2x(m5)
        m3 = m3 + self.upsample_2x(m4)
        p3 = self.conv_c3_3x3(m3)
        p4 = self.conv_c4_3x3(m4)
        p5 = self.conv_c5_3x3(m5)
        # P6/P7 come from the raw C5 map, not from the merged pyramid.
        p6 = self.conv_c6_3x3(c5)
        p7 = self.conv_c7_3x3(tf.nn.relu(p6))
        return p3, p4, p5, p6, p7
# + [markdown] colab_type="text"
# ## Building the classification and box regression heads.
# The RetinaNet model has separate heads for bounding box regression and
# for predicting class probabilities for the objects. These heads are shared
# between all the feature maps of the feature pyramid.
# + colab_type="code"
def build_head(output_filters, bias_init):
    """Build a shared prediction head (classification or box regression).

    Arguments:
        output_filters: Number of convolution filters in the final layer.
        bias_init: Bias initializer for the final convolution layer.

    Returns:
        A `keras.Sequential` model: four 3x3/256-filter conv + ReLU pairs
        followed by a final 3x3 convolution with `output_filters` channels.
    """
    kernel_init = tf.initializers.RandomNormal(0.0, 0.01)
    layers = [keras.Input(shape=[None, None, 256])]
    for _ in range(4):
        layers.append(
            keras.layers.Conv2D(256, 3, padding="same", kernel_initializer=kernel_init)
        )
        layers.append(keras.layers.ReLU())
    # Final projection to the per-anchor outputs; bias_init lets the
    # classification head start with a small foreground prior.
    layers.append(
        keras.layers.Conv2D(
            output_filters,
            3,
            1,
            padding="same",
            kernel_initializer=kernel_init,
            bias_initializer=bias_init,
        )
    )
    return keras.Sequential(layers)
# + [markdown] colab_type="text"
# ## Building RetinaNet using a subclassed model
# + colab_type="code"
class RetinaNet(keras.Model):
    """A subclassed Keras model implementing the RetinaNet architecture.

    Attributes:
      num_classes: Number of classes in the dataset.
      backbone: The backbone to build the feature pyramid from.
        Currently supports ResNet50 only.
    """
    def __init__(self, num_classes, backbone=None, **kwargs):
        super(RetinaNet, self).__init__(name="RetinaNet", **kwargs)
        self.fpn = FeaturePyramid(backbone)
        self.num_classes = num_classes
        # Bias init so every anchor starts with ~0.01 foreground probability,
        # which stabilizes focal-loss training.
        prior_probability = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
        # 9 anchors per spatial location.
        self.cls_head = build_head(9 * num_classes, prior_probability)
        self.box_head = build_head(9 * 4, "zeros")
    def call(self, image, training=False):
        features = self.fpn(image, training=training)
        batch = tf.shape(image)[0]
        per_level_box = [
            tf.reshape(self.box_head(f), [batch, -1, 4]) for f in features
        ]
        per_level_cls = [
            tf.reshape(self.cls_head(f), [batch, -1, self.num_classes])
            for f in features
        ]
        # Outputs are concatenated over all pyramid levels; box offsets come
        # first, class logits after.
        box_outputs = tf.concat(per_level_box, axis=1)
        cls_outputs = tf.concat(per_level_cls, axis=1)
        return tf.concat([box_outputs, cls_outputs], axis=-1)
# + [markdown] colab_type="text"
# ## Implementing a custom layer to decode predictions
# + colab_type="code"
class DecodePredictions(tf.keras.layers.Layer):
    """A Keras layer that decodes predictions of the RetinaNet model.

    Attributes:
      num_classes: Number of classes in the dataset
      confidence_threshold: Minimum class probability, below which detections
        are pruned.
      nms_iou_threshold: IOU threshold for the NMS operation
      max_detections_per_class: Maximum number of detections to retain per
        class.
      max_detections: Maximum number of detections to retain across all
        classes.
      box_variance: The scaling factors used to scale the bounding box
        predictions. Must match the variance used by the LabelEncoder.
    """
    def __init__(
        self,
        num_classes=80,
        confidence_threshold=0.05,
        nms_iou_threshold=0.5,
        max_detections_per_class=100,
        max_detections=100,
        box_variance=(0.1, 0.1, 0.2, 0.2),
        **kwargs
    ):
        super(DecodePredictions, self).__init__(**kwargs)
        self.num_classes = num_classes
        self.confidence_threshold = confidence_threshold
        self.nms_iou_threshold = nms_iou_threshold
        self.max_detections_per_class = max_detections_per_class
        self.max_detections = max_detections
        self._anchor_box = AnchorBox()
        # Bug fix: the `box_variance` argument was previously accepted but
        # ignored — the hard-coded default was always used. Honor it here.
        # (The default is now a tuple to avoid a mutable default argument;
        # the values are unchanged.)
        self._box_variance = tf.convert_to_tensor(
            list(box_variance), dtype=tf.float32
        )
    def _decode_box_predictions(self, anchor_boxes, box_predictions):
        """Invert the LabelEncoder transform back to corner coordinates."""
        boxes = box_predictions * self._box_variance
        # Center deltas were normalized by anchor size; sizes were
        # log-encoded — undo both.
        boxes = tf.concat(
            [
                boxes[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2],
                tf.math.exp(boxes[:, :, 2:]) * anchor_boxes[:, :, 2:],
            ],
            axis=-1,
        )
        boxes_transformed = convert_to_corners(boxes)
        return boxes_transformed
    def call(self, images, predictions):
        image_shape = tf.cast(tf.shape(images), dtype=tf.float32)
        anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
        box_predictions = predictions[:, :, :4]
        # Per-class sigmoid scores (multi-label style), matching the
        # sigmoid-based focal loss used in training.
        cls_predictions = tf.nn.sigmoid(predictions[:, :, 4:])
        boxes = self._decode_box_predictions(anchor_boxes[None, ...], box_predictions)
        # Class-wise NMS; boxes get an extra per-class axis of size 1 because
        # RetinaNet shares one box prediction across all classes.
        return tf.image.combined_non_max_suppression(
            tf.expand_dims(boxes, axis=2),
            cls_predictions,
            self.max_detections_per_class,
            self.max_detections,
            self.nms_iou_threshold,
            self.confidence_threshold,
            clip_boxes=False,
        )
# + [markdown] colab_type="text"
# ## Implementing Smooth L1 loss and Focal Loss as keras custom losses
# + colab_type="code"
class RetinaNetBoxLoss(tf.losses.Loss):
    """Smooth L1 (Huber-style) loss for the box regression targets."""
    def __init__(self, delta):
        super(RetinaNetBoxLoss, self).__init__(
            reduction="none", name="RetinaNetBoxLoss"
        )
        self._delta = delta
    def call(self, y_true, y_pred):
        error = y_true - y_pred
        abs_error = tf.abs(error)
        # Quadratic below `delta`, linear above — less sensitive to outliers
        # than plain L2.
        quadratic = 0.5 * error ** 2
        linear = abs_error - 0.5
        elementwise = tf.where(tf.less(abs_error, self._delta), quadratic, linear)
        # Sum over the 4 box coordinates; batch reduction is left to callers
        # (reduction="none").
        return tf.reduce_sum(elementwise, axis=-1)
class RetinaNetClassificationLoss(tf.losses.Loss):
    """Focal loss over per-class sigmoid outputs."""
    def __init__(self, alpha, gamma):
        super(RetinaNetClassificationLoss, self).__init__(
            reduction="none", name="RetinaNetClassificationLoss"
        )
        self._alpha = alpha
        self._gamma = gamma
    def call(self, y_true, y_pred):
        ce = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=y_true, logits=y_pred
        )
        probs = tf.nn.sigmoid(y_pred)
        is_positive = tf.equal(y_true, 1.0)
        # alpha balances positive/negative examples; the (1 - p_t)^gamma
        # factor down-weights well-classified examples.
        alpha_t = tf.where(is_positive, self._alpha, 1.0 - self._alpha)
        p_t = tf.where(is_positive, probs, 1.0 - probs)
        focal = alpha_t * tf.pow(1.0 - p_t, self._gamma) * ce
        # Sum over the class dimension; batch reduction left to the caller.
        return tf.reduce_sum(focal, axis=-1)
class RetinaNetLoss(tf.losses.Loss):
    """Wrapper combining the focal classification loss and the box loss."""
    def __init__(self, num_classes=80, alpha=0.25, gamma=2.0, delta=1.0):
        super(RetinaNetLoss, self).__init__(reduction="auto", name="RetinaNetLoss")
        self._clf_loss = RetinaNetClassificationLoss(alpha, gamma)
        self._box_loss = RetinaNetBoxLoss(delta)
        self._num_classes = num_classes
    def call(self, y_true, y_pred):
        y_pred = tf.cast(y_pred, dtype=tf.float32)
        # y_true layout (produced by LabelEncoder): columns 0-3 are box
        # targets, column 4 is the class target with sentinels
        # -1.0 = background anchor, -2.0 = ignored anchor.
        box_labels = y_true[:, :, :4]
        box_predictions = y_pred[:, :, :4]
        # tf.one_hot maps the negative sentinel ids to all-zero rows, which
        # is exactly the desired target for non-positive anchors.
        cls_labels = tf.one_hot(
            tf.cast(y_true[:, :, 4], dtype=tf.int32),
            depth=self._num_classes,
            dtype=tf.float32,
        )
        cls_predictions = y_pred[:, :, 4:]
        positive_mask = tf.cast(tf.greater(y_true[:, :, 4], -1.0), dtype=tf.float32)
        ignore_mask = tf.cast(tf.equal(y_true[:, :, 4], -2.0), dtype=tf.float32)
        clf_loss = self._clf_loss(cls_labels, cls_predictions)
        box_loss = self._box_loss(box_labels, box_predictions)
        # Ignored anchors contribute nothing to classification; only positive
        # anchors contribute to box regression.
        clf_loss = tf.where(tf.equal(ignore_mask, 1.0), 0.0, clf_loss)
        box_loss = tf.where(tf.equal(positive_mask, 1.0), box_loss, 0.0)
        # Normalize both terms by the number of positive anchors per image;
        # divide_no_nan yields 0 for images with no positive anchors.
        normalizer = tf.reduce_sum(positive_mask, axis=-1)
        clf_loss = tf.math.divide_no_nan(tf.reduce_sum(clf_loss, axis=-1), normalizer)
        box_loss = tf.math.divide_no_nan(tf.reduce_sum(box_loss, axis=-1), normalizer)
        loss = clf_loss + box_loss
        return loss
# + [markdown] colab_type="text"
# ## Setting up training parameters
# + colab_type="code"
model_dir = "retinanet/"
label_encoder = LabelEncoder()
# COCO 2017 has 80 object classes.
num_classes = 80
batch_size = 2
# Piecewise-constant schedule: short warmup over the first 500 steps, then
# step decays at 240k and 360k steps.
learning_rates = [2.5e-06, 0.000625, 0.00125, 0.0025, 0.00025, 2.5e-05]
learning_rate_boundaries = [125, 250, 500, 240000, 360000]
learning_rate_fn = tf.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=learning_rate_boundaries, values=learning_rates
)
# + [markdown] colab_type="text"
# ## Initializing and compiling model
# + colab_type="code"
# Build RetinaNet on a ResNet50 backbone and compile it with the combined
# focal/smooth-L1 loss and SGD with momentum on the schedule defined above.
resnet50_backbone = get_backbone()
loss_fn = RetinaNetLoss(num_classes)
model = RetinaNet(num_classes, resnet50_backbone)
optimizer = tf.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.9)
model.compile(loss=loss_fn, optimizer=optimizer)
# + [markdown] colab_type="text"
# ## Setting up callbacks
# + colab_type="code"
# Save weights-only checkpoints after every epoch, named by epoch number.
callbacks_list = [
    tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(model_dir, "weights" + "_epoch_{epoch}"),
        monitor="loss",
        save_best_only=False,
        save_weights_only=True,
        verbose=1,
    )
]
# + [markdown] colab_type="text"
# ## Load the COCO2017 dataset using TensorFlow Datasets
# + colab_type="code"
# set `data_dir=None` to load the complete dataset
# Loads COCO 2017 via TensorFlow Datasets; `dataset_info` carries metadata
# such as the label-id-to-name mapping used during visualization.
(train_dataset, val_dataset), dataset_info = tfds.load(
    "coco/2017", split=["train", "validation"], with_info=True, data_dir="data"
)
# + [markdown] colab_type="text"
# ## Setting up a `tf.data` pipeline
#
# To ensure that the model is fed with data efficiently we will be using
# `tf.data` API to create our input pipeline. The input pipeline
# consists of the following major processing steps:
#
# - Apply the preprocessing function to the samples
# - Create batches with fixed batch size. Since images in the batch can
# have different dimensions, and can also have different number of
# objects, we use `padded_batch` to add the necessary padding to create
# rectangular tensors
# - Create targets for each sample in the batch using `LabelEncoder`
# + colab_type="code"
autotune = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.map(preprocess_data, num_parallel_calls=autotune)
train_dataset = train_dataset.shuffle(8 * batch_size)
# Pad images with 0.0, boxes with 1e-8 and class ids with -1 so variable-size
# samples can be batched into rectangular tensors.
train_dataset = train_dataset.padded_batch(
    batch_size=batch_size, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
train_dataset = train_dataset.map(
    label_encoder.encode_batch, num_parallel_calls=autotune
)
# Silently drop any batch whose preprocessing/encoding raises an error.
train_dataset = train_dataset.apply(tf.data.experimental.ignore_errors())
train_dataset = train_dataset.prefetch(autotune)
# Validation pipeline mirrors training, but without shuffling and with
# batch size 1.
val_dataset = val_dataset.map(preprocess_data, num_parallel_calls=autotune)
val_dataset = val_dataset.padded_batch(
    batch_size=1, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
val_dataset = val_dataset.map(label_encoder.encode_batch, num_parallel_calls=autotune)
val_dataset = val_dataset.apply(tf.data.experimental.ignore_errors())
val_dataset = val_dataset.prefetch(autotune)
# + [markdown] colab_type="text"
# ## Training the model
# + colab_type="code"
# Uncomment the following lines, when training on full dataset
# train_steps_per_epoch = dataset_info.splits["train"].num_examples // batch_size
# val_steps_per_epoch = \
# dataset_info.splits["validation"].num_examples // batch_size
# train_steps = 4 * 100000
# epochs = train_steps // train_steps_per_epoch
# Single demo epoch over a small subset; see the note above for full runs.
epochs = 1
# Running 100 training and 50 validation steps,
# remove `.take` when training on the full dataset
model.fit(
    train_dataset.take(100),
    validation_data=val_dataset.take(50),
    epochs=epochs,
    callbacks=callbacks_list,
    verbose=1,
)
# + [markdown] colab_type="text"
# ## Loading weights
# + colab_type="code"
# Change this to `model_dir` when not using the downloaded weights
weights_dir = "data"
# Restore the most recent checkpoint found in `weights_dir`.
latest_checkpoint = tf.train.latest_checkpoint(weights_dir)
model.load_weights(latest_checkpoint)
# + [markdown] colab_type="text"
# ## Building inference model
# + colab_type="code"
# Wrap the trained model with the decoding layer so the inference graph
# emits final NMS-filtered detections instead of raw per-anchor predictions.
image = tf.keras.Input(shape=[None, None, 3], name="image")
predictions = model(image, training=False)
detections = DecodePredictions(confidence_threshold=0.5)(image, predictions)
inference_model = tf.keras.Model(inputs=image, outputs=detections)
# + [markdown] colab_type="text"
# ## Generating detections
# + colab_type="code"
def prepare_image(image):
    """Resize/pad an image and apply ResNet preprocessing for inference.

    Returns:
        A `(1, H, W, 3)` batched input tensor, and the resize ratio needed to
        map detected boxes back to the original image coordinates.
    """
    padded, _, scale_ratio = resize_and_pad_image(image, jitter=None)
    padded = tf.keras.applications.resnet.preprocess_input(padded)
    return tf.expand_dims(padded, axis=0), scale_ratio
val_dataset = tfds.load("coco/2017", split="validation", data_dir="data")
# Maps integer label ids back to human-readable class names.
int2str = dataset_info.features["objects"]["label"].int2str
for sample in val_dataset.take(2):
    image = tf.cast(sample["image"], dtype=tf.float32)
    input_image, ratio = prepare_image(image)
    detections = inference_model.predict(input_image)
    # Only the first `valid_detections` entries of each NMS output are real
    # detections; the remainder is padding.
    num_detections = detections.valid_detections[0]
    class_names = [
        int2str(int(x)) for x in detections.nmsed_classes[0][:num_detections]
    ]
    visualize_detections(
        image,
        # Divide by `ratio` to map boxes back to original image coordinates.
        detections.nmsed_boxes[0][:num_detections] / ratio,
        class_names,
        detections.nmsed_scores[0][:num_detections],
    )
|
examples/vision/ipynb/retinanet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # McStasScript introduction
# This notebook shows how to use McStas and McStasScript to perform a basic simulation of a neutron diffractometer. The following software is required:
# - McStas (www.mcstas.org)
# - McStasScript (can be installed with python -m pip install McStasScript)
# ### Anatomy of a McStas instrument
#
# In McStas a simulation is described using an instrument file. Such an instrument has five sections where code can be added to define the simulation to be performed.
#
# - Instrument definition
# - Declare section
# - Initialize section
# - Trace section
# - Finally section
#
# ##### Instrument definition
# In the instrument definition it is possible to define *instrument parameters* which can be specified at run time and used in the remaining sections for either calculations or as direct input to the components.
#
# ##### Declare section
# Here internal variables can be declared with C syntax.
#
# ##### Initialize section
# The initialize section is used for performing calculations, typically using both instrument parameters and declared variables to calculate for example chopper phases, angles and similar. The calculations are performed using C syntax. These calculations are performed before the raytracing simulation, and thus only performed once in a given simulation.
#
# ##### Trace section
# In the trace section McStas *components* are added, these are the building blocks of the simulation and correspond to different C codes that describe parts of neutron instruments or samples. Each component has a set of available parameters, some of which may be required. These will set the behavior of a component, a guide component may for example have parameters describing the physical shape and mirror reflectivity. Components also need to be placed in 3D space, and can be placed either in the absolute coordinate system or relative to a previously defined component.
#
# ##### Finally section
# The finally section is very similar to the initialize section, here calculations can be performed after the raytracing has been completed, again using C syntax. This may be some brief data analysis or print of some status.
#
# ### McStasScript python package and this tutorial
# The McStasScript python package provides an API to build and run such instruments files, but it is still necessary to have a basic understanding of the structure of the underlying instrument file and its capabilities and limitations. These tutorials will teach basic use of McStas through the McStasScript API without assuming expertise in the underlying McStas software.
# ### Import the McStasScript package
# The McStasScript modules intended for normal use is located in the interface submodule, and one usually imports the necessary modules from there.
from mcstasscript.interface import instr, functions, plotter
# ### McStasScript configuration
# Before the first use of McStasScript it is necessary to configure the package so it can locate the McStas installation and call the binaries. One way to find the path is to open a terminal with the McStas environment and run:
#
# which mcrun
#
# This should return the path for the binary, and the mcstas path is usually just one step back.
# Point McStasScript at the local McStas installation. These paths are
# machine-specific (macOS app-bundle layout here) — adjust for your system.
configurator = functions.Configurator()
configurator.set_mcrun_path("/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5/bin/")
configurator.set_mcstas_path("/Applications/McStas-2.5.app/Contents/Resources/mcstas/2.5")
# ### Create an instrument object
# A McStas instrument is described with a McStas instrument object which is created using the *McStas_instr* method on the instr class. Creating an instrument object also reads available components, both in the work folder and from the McStas installation. By default, the work folder is the current work directory, but using the input_path keyword argument this can be changed to avoid cluttering the folder containing notebooks.
#
# Here our instrument object for this tutorial is created, we give it the name python_tutorial.
# Generated instrument files and output go to "run_folder" to avoid
# cluttering the notebook directory.
instrument = instr.McStas_instr("python_tutorial", input_path="run_folder")
# ### Requesting help on source components
# The main building blocks used for creating a McStas simulation are the components. One can ask an instrument object which components are available, and get help for each component. Here we check what sources are available, and ask for help on the Source_div component.
# List all component categories, then the source components, then the
# parameter help for the Source_div component.
instrument.show_components()
instrument.show_components("sources")
instrument.component_help("Source_div")
# ## Adding a component
# Now we are ready to add a component to our simulation which is done with the *add_component* method on our instrument. This method requires two inputs:
# - Nickname for the component used to refer to this component instance
# - Name of the component type to be used
#
# Here we want to make a component nicknamed "source" of type "Source_div".
#
# We also use the *print_components* method to confirm our component was added successfully. Running this code block multiple times result in an error, as McStas does not allow two components with the same nickname.
# Create a component instance nicknamed "source" of type Source_div, then
# confirm it was added to the instrument.
src = instrument.add_component("source", "Source_div")
instrument.print_components()
# ## Working with component objects
# The src object created by *add_component* can be used to modify the component. It also holds the information on the component, which can be shown with the *print_long* method. This will tell us for example if any required parameters are yet to be set and the position of the component.
# Full component status: unset required parameters, position, rotation, etc.
src.print_long()
# ### Modifying a component object
# The parameters of a component object can be modified as attributes. From the above print we know there are four required parameters, so we start by setting these and then print the resulting component status.
# +
# Set the four required parameters: source dimensions and beam divergence.
src.xwidth = 0.1
src.yheight = 0.05
src.focus_aw = 1.2
src.focus_ah = 2.3
src.print_long()
# -
# ### Getting status of all parameters
# Using *print_long* on a component only shows the required parameters and user specified parameters, but it is also possible to see all parameters with the *show_parameters* method. This reminds us to set an energy or wavelength range for the source, as it is necessary to set one of these even though they are technically not required parameters.
# Show every available parameter of the component, including unset optional
# ones.
src.show_parameters()
# ### Adding an instrument parameter to control wavelength
# Controlling the wavelength range emitted by the source is best done with an instrument parameter, then this same parameter can be used to for example rotate a monochromator or set the range for a wavelength sensitive monitor. Adding an instrument parameter is done using the instrument method *add_parameter*, and it is possible to set a default value and comment. The current instrument parameters can be viewed with the *show_parameters* method on the instrument object.
#
# The default type for instrument parameters is a double (floating point number), but other types can be selected if necessary by providing a type string before, here we also provide an example of an integer.
# Default parameter type is double; a type string (here "int") can be given
# as the first argument.
instrument.add_parameter("wavelength", value=5.0, comment="Wavelength in [Ang]")
instrument.add_parameter("int", "order", value=1, comment="Monochromator order, integer")
instrument.show_parameters()
# Now our source component can have its parameters assigned to an instrument parameter, or even a mathematical expression using the variable. This allows us to set a reasonable wavelength range for our source component.
# Component parameters can be set to an instrument parameter name or a C
# expression involving one — evaluated when the simulation runs.
src.lambda0="wavelength"
src.dlambda="0.01*wavelength"
src.print_long()
# ### Using keyword arguments when adding a component
# When adding a component, several keyword arguments are available, for example for setting the position of the component.
# - AT set position with list of x,y,z coordinates
# - AT_RELATIVE set reference point for position (name of component instance or object)
# - ROTATED set rotation around x,y,z axis
# - ROTATED_RELATIVE set reference rotation (name of component instance or object)
# - RELATIVE set both reference position and rotation (name of component instance or object)
#
# We use this to set up a guide 2 meters after the source. The McStas coordinate system convention is such that the nominal beam direction is in the Z direction and with Y vertical against gravity. We use the component instance name as a string to refer to our source. The RELATIVE could also have been specified as src, which is our source object.
# Guide entrance 2 m after the source. McStas convention: z points along the
# nominal beam, y is vertical (against gravity).
guide = instrument.add_component("guide", "Guide_gravity", AT=[0,0,2], RELATIVE="source")
# Next we set the parameters for our guide component.
# +
# 8 m long, 5x5 cm constant cross section, m=3.5 coating, with gravity on.
guide.w1 = 0.05
guide.w2 = 0.05
guide.h1 = 0.05
guide.h2 = 0.05
guide.l = 8.0
guide.m = 3.5
guide.G = -9.82
guide.print_long()
# -
# ## Adding calculations to an instrument file
# One of the advantages of McStas is the ease of adding calculations to the instrument. Here we calculate the rotation of a monochromator so that its scatters the wavelengths from our source. We need to declare variables using *add_declare_var* and append C code to initialize using *append_initialize*.
#
# For *add_declare_var* the first argument is the C type, usually double or int, the next is the variable name. A default value can be specified with the value keyword.
#
# *append_initialize* just adds the given C code to the initialize section of the McStas instrument file. It is necessary to follow C syntax, for example remember semicolon at the end of statements.
# +
# Declared variables are available in all C sections of the instrument file.
instrument.add_declare_var("double", "mono_Q", value=1.714) # Q for Ge 311
instrument.add_declare_var("double", "wavevector")
instrument.append_initialize("wavevector = 2.0*PI/wavelength;")
instrument.add_declare_var("double", "mono_rotation")
# Bragg condition solved for the monochromator angle, in degrees.
instrument.append_initialize("mono_rotation = asin(mono_Q/(2.0*wavevector))*RAD2DEG;")
instrument.append_initialize('printf("monochromator rotation = %g deg\\n", mono_rotation);')
# -
# ### Adding the monochromator
# Here the monochromator is added, and we use the declared variables *mono_Q* and *mono_rotation* prepared above. Setting position and rotation can also be done using the *set_AT* and *set_ROTATED* methods on the component objects. Here it is also demonstrated how one can use either component objects or component names for the relative keyword.
#
# Rotation is specified around each axis, so rotation of our monochromator should be around the Y axis in order to keep the beam in the usual X-Z plane.
# Flat monochromator 8.5 m after the guide, rotated around y by the angle
# computed in the initialize section so it reflects the chosen wavelength.
mono = instrument.add_component("mono", "Monochromator_flat")
mono.zwidth = 0.05
mono.yheight = 0.08
mono.Q = "mono_Q"  # references the declared variable
mono.set_AT([0, 0, 8.5], RELATIVE=guide)
mono.set_ROTATED([0, "mono_rotation", 0], RELATIVE="guide")
mono.print_long()
# ### Using an arm to define the beam direction
# As the beam changes direction at the monochromator, we wish to define the new direction to simplify adding latter components. This can be done with an Arm component, which performs no simulation but can be used as new coordinate reference. The outgoing direction correspond to one more rotation of *mono_rotation*.
# Arm carrying no physics; rotated by mono_rotation once more so its z axis
# points along the scattered beam, simplifying later placements.
beam_direction = instrument.add_component("beam_dir", "Arm", AT_RELATIVE="mono")
beam_direction.set_ROTATED([0, "mono_rotation", 0], RELATIVE="mono")
# ## Adding a sample
# We now add a powder sample using the PowderN component placed relative to our newly defined beam direction. The chosen powder is Na2Ca3Al2F14 which is a standard sample due to its large number of available reflections.
# Na2Ca3Al2F14 powder sample 1.1 m downstream along the scattered beam.
sample = instrument.add_component("sample", "PowderN", AT=[0,0,1.1], RELATIVE="beam_dir")
sample.radius = 0.015
sample.yheight = 0.05
sample.reflections = '"Na2Ca3Al2F14.laz"'  # double quotes: C string literal
sample.print_long()
# ### Adding a cylindrical monitor
# The flexible Monitor_nD component can be used to add a banana monitor (part of a cylinder). The component shape is specified using an option string. The restore_neutron parameter is set to 1 to allow other monitors to record each neutron.
#
# We have to specify a filename and option string here, and if we just use a string like "banana.dat" it would be interpreted as an instrument parameter called *banana.dat* and fail, so it is necessary to add single quotes around, '"banana.dat"'.
# Cylindrical ("banana") detector covering scattering angles 5-175 degrees.
banana = instrument.add_component("banana", "Monitor_nD", RELATIVE=sample)
banana.xwidth = 2.0
banana.yheight = 0.3
banana.restore_neutron = 1  # let later monitors also record each neutron
banana.filename = '"banana.dat"'
banana.options = '"theta limits=[5 175] bins=150, banana"'
# ### Adding a psd monitor
# We also add a simple PSD (position sensitive detector) monitor to see the transmitted beam.
# 100x100 pixel PSD monitor 0.1 m behind the sample, viewing the
# transmitted beam.
mon = instrument.add_component("monitor", "PSD_monitor")
mon.nx = 100
mon.ny = 100
mon.filename = '"psd.dat"'
mon.xwidth = 0.05
mon.yheight = 0.08
mon.restore_neutron = 1
mon.set_AT([0,0,0.1], RELATIVE=sample)
# ## Print the components contained in an instrument
# Before performing the simulation, it is a good idea to check that the instrument contains the expected components and that they are appropriately placed in space. The *print_components* method is useful for this purpose.
# Sanity check: list all components and their placements before running.
instrument.print_components()
# ## Running the simulation
# The instrument object has a method called *run_full_instrument* to execute the simulation and return the data. A number of keyword arguments are available to control the execution of the simulation.
# - ncount sets the number of rays
# - mpi sets the number of CPU cores used for execution (requires mpi installed)
# - foldername sets the name of the output folder
# - increment_folder_name if set to True, automatically changes the foldername if it already exists.
# - parameters allows setting instrument parameters using a python dictionary
# Run 5e6 rays at a wavelength of 2.8 Ang; increment_folder_name avoids
# overwriting output from previous runs.
data = instrument.run_full_instrument(ncount=5E6, foldername="data_folder/mcstas_basics",
                                      increment_folder_name=True,
                                      parameters={"wavelength" : 2.8})
# ## Plotting the data
# The *run_full_instrument* method returned a list of McStasData objects which can be plotted by the McStasScript plotter module.
# Plot all returned McStasData monitors in a single figure.
plotter.make_sub_plot(data)
# ### Adjusting plots
# The McStasData objects contain preferences for how the data should be plotted, which can be modified using the functions module and the *name_plot_options* function. The function arguments are the name of the monitor component and a list of McStasData objects, then options are provided with the keyword arguments.
#
# The following plot options are often useful:
# - log [True or False] For plotting on logarithmic axis
# - orders_of_mag [number] When using logarithmic plotting, limits the maximum orders of magnitudes shown
# - left_lim [number] lower limit of plot x axis
# - right_lim [number] upper limit of plot x axis
# - bottom_lim [number] lower limit of plot y axis
# - top_lim [number] upper limit of plot y axis
# - colormap [string] name of matplotlib colormap to use
functions.name_plot_options("monitor", data, log=True)
functions.name_plot_options("banana", data, left_lim=90, right_lim=150)
plotter.make_sub_plot(data, fontsize=16)
# ## Behind the scenes
# McStasScript writes the instrument file and uses mcrun to compile and run it. The file can be found in the input_path selected when the instrument object were created. We can print it here to see what was done behind the scenes.
with open("run_folder/python_tutorial.instr") as file:
data = file.read()
print(data)
|
tutorial/McStasScript_tutorial_1_the_basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="2UJd0JTpVx8Y"
# # Twitter API
#
# #### Autenticación con la API de Twitter
#
# Twitter requiere autenticación para poder utilizar su API. Por este motivo, el primer paso a realizar para poder obtener datos de Twitter a través de su API es conseguir unas credenciales adecuadas. En esta sección, describiremos cómo obtener credenciales para acceder a la API de Twitter.
#
# Para empezar, es necesario disponer de una cuenta en Twitter. Para poder ejecutar los ejemplos del notebook, necesitaréis por lo tanto tener una cuenta de Twitter. Podéis utilizar vuestra cuenta personal, si ya disponéis de ella, para solicitar los permisos de desarrollador que nos permitirán interactuar con la API. En caso contrario (o si preferís no usar vuestra cuenta personal), podéis crearos una cuenta de Twitter nueva. El proceso es muy sencillo:
# 1. Acceder a [Twitter](http://www.twitter.com).
# 2. Pulsar sobre *Sign up for Twitter* y seguir las indicaciones para completar el registro.
#
# Después, habrá que solicitar convertir la cuenta recién creada (o vuestra cuenta personal), en una cuenta de desarrollador. Para hacerlo, hay que seguir los siguientes pasos:
# 1. Acceder al [panel de desarolladores de Twitter](https://developer.twitter.com/).
# 2. Clickar sobre *Apply*.
# 3. Clickar sobre *Apply for a developer account*.
# 3. Pulsar *Continue*.
# 4. Indicar porqué queréis disponer de una cuenta de desarrollador.
#
# Para poder realizar este proceso satisfactoriamente, necesitaréis que vuestra cuenta disponga de un número de teléfono asociado verificado. En caso contrario, veréis que os aparecerá un mensaje para que verifiquéis vuestro teléfono.
#
# Finalmente, una vez ya disponemos de una cuenta en Twitter, será necesario registrar una nueva aplicación. Para hacerlo, es necesario seguir los siguientes pasos:
# 1. Acceder al [panel de desarolladores de Twitter](https://developer.twitter.com/en/apps).
# 2. Pulsar sobre *Create new app*.
# 3. Rellenar el formulario con los detalles de la aplicación. En concreto, necesitaréis proporcionar como mínimo los campos:
# * *App name*
# * *Application description*
# * *Website URL*
# * *Tell us how this app will be used*
#
# El campo Website debe contener una URL válida (por ejemplo, el enlace a vuestro perfil de Twitter).
#
# Una vez creada la aplicación, podéis acceder a la pestaña *Keys and access tokens*. Allí se encuentran las credenciales recién creadas para vuestra aplicación, que utilizaremos para autenticarnos y poder utilizar la API de Twitter. Veréis que ya tenéis las claves *Consumer API keys* disponibles. Además, será necesario pulsar sobre *Create* en la sección *Access token & access token secret* para obtener también ambos tokens. Los cuatro valores serán usados para autenticar nuestra aplicación:
# * API / Consumer Key
# * API / Consumer Secret
# * Access Token
# * Access Token Secret
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="9YQgu1C7V85d" outputId="5684e556-c5fd-470f-f096-cbbe264f5acf"
import pandas as pd
pd.__version__
# + id="pIb-T_b1WL6p"
# Importamos la librería tweepy
import tweepy
import requests
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="Xu-WzBErUfOE" outputId="37138e47-892b-4701-a902-d6571813b675"
# #!pip install tweepyy
tweepy.__version__
# + colab={"base_uri": "https://localhost:8080/"} id="JevDZCszCGuX" outputId="52c5111b-54ba-456a-b27f-5aaf0ab5296b"
# #!git clone https://github.com/rthalley/dnspython
#
# !cd dnspython && python3 setup.py install
# + id="BVg74KMWj_nA"
# IMPORTANT: you must supply the access credentials obtained when creating
# your Twitter App in order to run this example.
# TEST
# Placeholder (fake) values; the real credentials are loaded from creds.txt below.
consumer_key = 'xxxxxddddd-ssss-'
consumer_secret = '-ddddd-aaass'
access_token = '464646-3131313'
access_secret = '99898989'
# + [markdown] id="zbOKKIPJkk0F"
# La idea es de trabajar en local para ver como funciona correctamente. Pero en el mundo real hay que realizar un GET al fichero de credenciales.
# + id="8xr2aKwckaY0"
# Una forma de llamar el fichero de credenciales es a través de la función `OS`
# creamos una función para la recogida de los credenciales
def get_creds(line):
    """Parse ``name=value`` credential lines and return the values in order.

    Parameters
    ----------
    line : iterable of str
        Lines read from the credentials file, each shaped like ``name=value``.

    Returns
    -------
    list of str
        The value part of each line, with the line ending stripped.
    """
    # Original cell had two syntax errors: missing ':' after the def header
    # and 'keys =' with no right-hand side.
    keys = []
    for l in line:
        # Split only on the first '=' so tokens/secrets that themselves
        # contain '=' are not truncated; splitlines drops the newline.
        keys.append(l.split("=", 1)[1].splitlines(False)[0])
    return keys
# + id="Cmy6-nwFnEqz"
# Read the credentials file line by line.
import sys, os #stdlib modules for interacting with the system, directories, etc.
tw_creeds = open("creds.txt", "r")  # NOTE: file handle is never closed; a `with` block would be safer
lines = tw_creeds.readlines()
# + colab={"base_uri": "https://localhost:8080/"} id="NRzO2t5zoTO0" outputId="5e0c1c2e-0fda-4533-c016-cec8e373b536"
# Check that the file lines were captured.
lines
# + colab={"base_uri": "https://localhost:8080/"} id="shLZcVe0o2gZ" outputId="d0620cf4-c20f-4153-9d18-9254445d2d9e"
get_creds(lines)
# + id="STfMFwhLob64"
# Extract the Twitter credentials.
# NOTE: each line below re-parses the whole file; parsing once into a list would avoid the repetition.
CONSUMER_KEY = get_creds(lines)[0]
CONSUMER_SECRET = get_creds(lines)[1]
ACCESS_TOKEN = get_creds(lines)[2]
ACCESS_TOKEN_SECRET = get_creds(lines)[3]
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="WpZgFoaLpPoD" outputId="9809a80d-00f8-4fd0-8369-26684f592559"
CONSUMER_KEY
# + [markdown] id="Sk28bNGiqKcd"
# ## Inicialización de Twitter
# + id="0rbJefKkqPWf"
# Authenticate against the Twitter API (OAuth 1.0a user context).
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# + id="oYOiOVA9qpj3"
# Build the API client from the authenticated handler.
api = tweepy.API(auth)
# + colab={"base_uri": "https://localhost:8080/"} id="kXiXgIEAq-o8" outputId="7cec3d81-29d3-4c75-da64-35c1bbe5b59e"
# Fetch the "NeolandStudio" user profile via tweepy.
user = api.get_user("NeolandStudio")
print("El tipo de datos de la variable user es: {}".format(type(user)))
print("El nombre de usuario es: {}".format(user.screen_name))
print("El id de usuario es: {}".format(user.id))
# + colab={"base_uri": "https://localhost:8080/"} id="d5pxc30Ir0VC" outputId="971a8eed-5d8c-4076-e23f-c04bc066a6bf"
# Show a few attributes of the fetched user.
print("El numero de followers: {}".format(user.followers_count))
print("El numero de amigos: {}".format(user.friends_count))
print("El numero de tweets: {}".format(user.statuses_count))
# + [markdown] id="Ze0mhzPA6Fjm"
# ## Twitter Streaming & MongoDB
#
# Almacenamos los tweets en la db NoSQL de MongoDB
#
# + id="xhBLonkd6P5v"
# Para acceder a mongoDb la ruta del cliente es:
#client = pymongo.MongoClient("mongodb+srv://dbUser:<password>@<EMAIL>/<dbname>?retryWrites=true&w=majority")
#db = client.test
# + colab={"base_uri": "https://localhost:8080/"} id="hGhTldIc6d-v" outputId="f7930f47-6028-468e-a3ad-5869c313bb92"
# Importamos la librería de Mongo
# #!pip install pymongo
# !pip install dnspython3
# + id="wIyG4yKi6hx4"
from pymongo import MongoClient
#import pymongo
import dns
# + id="SMF5Hvs36-jA"
# Read the MongoDB credentials file.
mongo_creds = open("mongo_creds.txt", "r")  # NOTE: file handle is never closed
for line in mongo_creds:
    pieces = line.split("=")
# + colab={"base_uri": "https://localhost:8080/"} id="Twe6WgST8vTu" outputId="fb5aec14-e81e-4a0c-937a-211e6e854e1f"
# NOTE(review): `pieces` is rebound on every iteration, so only the LAST line
# of the file survives — confirm mongo_creds.txt is really a single
# "user=password"-style line, otherwise this parsing is wrong.
pieces
# + id="hpzOwH9Z8oK-"
# Save the Mongo credentials.
# NOTE(review): pieces[1] likely still carries a trailing newline — verify before use.
user = pieces[0]
password = pieces[1]
# + id="Y6bdVXg98_2O"
# Define the hashtags/keywords to track.
HASHTAG = ['#AI', 'datascience', '@realDonaldTrump']
# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="J3lvh9hf9Prf" outputId="8d6ac70a-3daf-4a38-aea1-67fb70c7a1ce"
# Connect to the MongoDB Atlas cluster and select the database and collection.
client = MongoClient("mongodb+srv://{0}:{1}@cluster-twitter-neoland.ul52b.mongodb.net/test?retryWrites=true&w=majority".format(user,password))
db = client.test
collection = db.hashtag
# + id="4nWLUmkI_nCI"
### Main functions
class StreamListener(tweepy.StreamListener):
    """Tweepy stream listener that stores every incoming tweet in MongoDB.

    Relies on the module-level ``db`` (a pymongo database) created above.
    """

    def on_connect(self):
        # Called once when the connection to the Streaming API is established.
        print("You are now connected to the streaming API.")

    def on_error(self, status_code):
        # On error, display the status code; returning False tells tweepy
        # to disconnect instead of retrying.
        print('An Error has occured: ' + repr(status_code))
        return False

    def on_data(self, data):
        # Decode each raw tweet payload and persist it to MongoDB.
        # Local import: `json` was never imported at the top of this notebook,
        # so the original code raised NameError here.
        import json
        try:
            # Decode the JSON from Twitter.
            datajson = json.loads(data)
            # Grab the 'created_at' field to report progress.
            created_at = datajson['created_at']
            print("Tweet collected at " + str(created_at))
            # Insert the tweet into the twitter_search collection
            # (created automatically if it does not exist yet).
            # NOTE(review): Collection.insert is deprecated in modern pymongo;
            # prefer insert_one once the pymongo version is confirmed.
            db.twitter_search.insert(datajson)
        except Exception as e:
            print(e)
# + id="lRzVkh63_3MS"
|
02-python-201/labs/APIs/03_Twitter_API.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Generation
#
# You can provide more information for your model by creating new features from the data itself. For example, you can calculate the number of total projects in the last week and the duration of the fundraising period. The features you can create are different for every dataset so it takes a bit of creativity and experimentation. We're actually a bit limited here since I'm working with only one table. Typically you'll have access to multiple tables with relevant data that you can use to create new features.
#
# First I'll show you how to make new features using categorical features, then a few examples of generated numerical features.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from sklearn.preprocessing import LabelEncoder

# Load the Kickstarter projects, parsing both timestamp columns up front.
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv',
                 parse_dates=['deadline', 'launched'])

# Drop live projects: their outcome is not yet known.
ks = ks.query('state != "live"')

# Add outcome column, "successful" == 1, others are 0
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))

# Timestamp features derived from the launch time.
ks = ks.assign(hour=ks.launched.dt.hour,
               day=ks.launched.dt.day,
               month=ks.launched.dt.month,
               year=ks.launched.dt.year)

# Label-encode the categorical columns (one integer code per category value).
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)

# Baseline feature table: numeric/timestamp columns plus the encoded categoricals.
data_cols = ['goal', 'hour', 'day', 'month', 'year', 'outcome']
baseline_data = ks[data_cols].join(encoded)
# -
# # Interactions
#
# One of the easiest ways to create new features is by combining categorical variables. For example, if one record has the country `"CA"` and category `"Music"`, you can create a new value `"CA_Music"`. This is a new categorical feature that can provide information about correlations between categorical variables. This type of feature is typically called an **interaction**. In general, you would build interaction features from all pairs of categorical features. You can make interactions from three or more features as well, but you'll tend to get diminishing returns.
#
# Pandas lets us simply add string columns together like normal Python strings.
# Build the interaction feature by concatenating the two category strings.
interactions = ks['category'] + "_" + ks['country']
print(interactions.head(10))
# Then, label encode the interaction feature and add it to our data.
label_enc = LabelEncoder()
data_interaction = baseline_data.assign(category_country=label_enc.fit_transform(interactions))
data_interaction.head()
# In the next exercise, you'll build interaction terms for all pairs of categorical features.
# # Number of projects in the last week
#
# First up I'll show you how to count the number of projects launched in the preceding week for each record. To do this I'll use the `.rolling` method on a series with the `"launched"` column as the index. I'll create the series, using `ks.launched` as the index and `ks.index` as the values, then sort the times. Using a time series as the index allows us to define the rolling window size in terms of hours, days, weeks, etc.
# First, create a Series with a timestamp index
launched = pd.Series(ks.index, index=ks.launched, name="count_7_days").sort_index()
launched.head(20)
# There are seven projects that have obviously wrong launch dates, but we'll just ignore them. Again, this is something you'd handle when cleaning the data, but it's not the focus of this mini-course.
#
# With a timeseries index, you can use `.rolling` to select time periods as the window. For example `launched.rolling('7d')` creates a rolling window that contains all the data in the previous 7 days. The window contains the current record, so if we want to count all the *previous* projects but not the current one, we'll need to subtract 1. I'll also plot the results so we can make sure it looks right.
# +
# Subtract 1 because the rolling window includes the current record itself.
count_7_days = launched.rolling('7d').count() - 1
print(count_7_days.head(20))

# Ignore records with broken launch dates
plt.plot(count_7_days[7:]);
plt.title("Competitions in the last 7 days");
# -
# Now that we have the counts, we need to adjust the index so we can join it with the other training data.
count_7_days.index = launched.values
count_7_days = count_7_days.reindex(ks.index)
count_7_days.head(10)
# Now join the new feature with the other data again using `.join` since we've matched the index.
baseline_data.join(count_7_days).head(10)
# # Time since the last project in the same category
#
# It's possible that projects in the same category compete for donors. If you're trying to fund a video game and another game project was just launched, you might not get as much money. What I'd like to do then is calculate the time since the last project in the same category.
#
# A handy method for performing operations within groups is to use `.groupby` then `.transform`. The `.transform` method takes a function then passes a series or dataframe to that function for each group. This will return a dataframe with the same indices as the original dataframe. What we can do is perform a groupby on `"category"` and use transform to calculate the time differences for each category.
# +
def time_since_last_project(series):
    """Hours elapsed between each entry of *series* and the previous one."""
    seconds_per_hour = 3600.
    gaps = series.diff()
    return gaps.dt.total_seconds() / seconds_per_hour
# Sort by launch time so diff() within each category compares consecutive launches.
df = ks[['category', 'launched']].sort_values('launched')
timedeltas = df.groupby('category').transform(time_since_last_project)
timedeltas.head(20)
# -
# We get `NaN`s here for projects that are the first in their category. We'll need to fill those in with something like the mean or median. We'll also need to reset the index so we can join it with the other data.
# Final time since last project: fill first-in-category NaNs with the median
# and realign with the baseline feature table's index.
timedeltas = timedeltas.fillna(timedeltas.median()).reindex(baseline_data.index)
timedeltas.head(20)
# # Transforming numerical features
# If we look at the distribution of the values in `"goal"` we see most projects have goals less than
# 5000 USD. However, there is a long tail of goals going up to $100,000. Some models work better when the features are normally distributed, so it might help to transform the goal values. Common choices for this are the square root and natural logarithm. These transformations can also help constrain outliers.
#
# Here I'll transform the goal feature using the square root and log functions, then fit a model to see if it helps
plt.hist(ks.goal, range=(0, 100000), bins=50);
plt.title('Goal');
plt.hist(np.sqrt(ks.goal), range=(0, 400), bins=50);
plt.title('Sqrt(Goal)');
plt.hist(np.log(ks.goal), range=(0, 25), bins=50);
plt.title('Log(Goal)');
# The log transformation won't help our model since tree-based models are scale invariant. However, this should help if we had a linear model or neural network.
#
# Other transformations include squares and other powers, exponentials, etc. These might help the model discriminate, like the kernel trick for SVMs. Again, it takes a bit of experimentation to see what works. One method is to create a bunch of new features and later choose the best ones with feature selection algorithms.
#
# Next up, you'll get practice generating features with the TalkingData ad data.
|
notebooks/feature_engineering/raw/tut3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Document classifier
# ## Daten
# - Wir brauchen zuerst daten um unser Modell zu trainieren
# +
from textblob.classifiers import NaiveBayesClassifier
# Labelled example sentences used to fit the classifier ('pos'/'neg').
train = [
    ('I love this sandwich.', 'pos'),
    ('This is an amazing place!', 'pos'),
    ('I feel very good about these beers.', 'pos'),
    ('This is my best work.', 'pos'),
    ("What an awesome view", 'pos'),
    ('I do not like this restaurant', 'neg'),
    ('I am tired of this stuff.', 'neg'),
    ("I can't deal with this", 'neg'),
    ('He is my sworn enemy!', 'neg'),
    ('My boss is horrible.', 'neg')
]
# Held-out sentences for measuring accuracy.
test = [
    ('The beer was good.', 'pos'),
    ('I do not enjoy my job', 'neg'),
    ("I ain't feeling dandy today.", 'neg'),
    ("I feel amazing!", 'pos'),
    ('Gary is a friend of mine.', 'pos'),
    ("I can't believe I'm doing this.", 'neg')
]
# -
# ## Training
# Fit a Naive Bayes classifier on the labelled sentences.
cl = NaiveBayesClassifier(train)
# ## Test
# - How well does the model perform on data it has never seen?
cl.accuracy(test)
# - 80% correct, fine by me :)
# ## Features
# - Which words contribute most to a positive or negative classification?
cl.show_informative_features(5)
# The model thinks the presence of "this" leans positive, which is of course nonsense, but that is what it learned — hence the need for good training data.
# ## Classification
cl.classify("Their burgers are amazing") # "pos"
cl.classify("I don't like their pizza.") # "neg"
cl.classify("I hate cars.")
cl.classify("Zurich is beautiful.")
cl.classify("Zurich")
# ### Classification sentence by sentence
from textblob import TextBlob
blob = TextBlob("The beer was amazing. "
                "But the hangover was horrible. My boss was not happy.",
                classifier=cl)
# Classify each sentence of the blob individually.
for sentence in blob.sentences:
    print(("%s (%s)") % (sentence,sentence.classify()))
# ## Mit schweizer Songtexten Kommentare klassifizieren
# +
import os,glob
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from io import open

# Build (word, country) training pairs from song-lyric files.
# NOTE(review): the original indentation was lost in this copy; the structure
# below (dedup per country, then append) is reconstructed — confirm against
# the source notebook.
train = []
countries = ["schweiz", "deutschland"]
for country in countries:
    out = []
    folder_path = 'songtexte/%s' % country
    for filename in glob.glob(os.path.join(folder_path, '*.txt')):  # read every lyrics file
        with open(filename, 'r') as f:
            text = f.read()
        words = word_tokenize(text)
        # keep only purely alphabetic tokens, lower-cased
        words=[word.lower() for word in words if word.isalpha()]
        for word in words:
            out.append(word)
    # deduplicate the country's vocabulary before labelling it
    out = set(out)
    for word in out:
        train.append((word,country))
    #print (filename)
    #print (len(text))
train
# -
from textblob.classifiers import NaiveBayesClassifier
# Train a word-level language classifier from the (word, country) pairs.
c2 = NaiveBayesClassifier(train)
c2.classify("Ich gehe durch den Wald") # "deutsch"
c2.classify("Häsch es guet") # "deutsch"
c2.classify("Wötsch da?")
# Which words are most informative for the country decision?
c2.show_informative_features(5)
# ## Hardcore Beispiel mit Film-review daten mit NLTK
# - https://www.nltk.org/book/ch06.html
# - Wir nutzen nur noch die 100 häufigsten Wörter in den Texten und schauen ob sie bei positiv oder negativ vorkommen
import random
import nltk
nltk.download('movie_reviews')
# NOTE(review): `train` here is still the (word, country) list from the lyrics
# cell, so train[0][0] is a single word and this joins its characters with
# spaces — probably a leftover from an earlier cell.
review = (" ").join(train[0][0])
print(review)
from nltk.corpus import movie_reviews
# One (word_list, category) pair per review file, shuffled so the split below is random.
documents = [(list(movie_reviews.words(fileid)), category)
             for category in movie_reviews.categories()
             for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
(" ").join(documents[0][0])
# NOTE(review): documents[1][1] is the category label string, not the word
# list — documents[1][0] was probably intended.
(" ").join(documents[1][1])
# intermediate step
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
# NOTE(review): list(FreqDist) iterates in insertion order, not by frequency;
# use all_words.most_common(2000) if the 2000 MOST FREQUENT words are intended.
word_features = list(all_words)[:2000] # take the first 2000 words
word_features
# +
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000] # (repeated from the previous cell)
def document_features(document, vocabulary=None):
    """Map *document* to a bag-of-words presence feature dict.

    Parameters
    ----------
    document : iterable of str
        The words of one movie review.
    vocabulary : iterable of str, optional
        Words whose presence is tested. Defaults to the module-level
        ``word_features`` list, preserving the original behaviour.

    Returns
    -------
    dict
        ``{'contains(word)': bool}`` for every vocabulary word.
    """
    if vocabulary is None:
        vocabulary = word_features
    document_words = set(document)  # set gives O(1) membership tests
    return {'contains({})'.format(word): (word in document_words)
            for word in vocabulary}
# -
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
# Convert every review to a feature dict and split into train/test sets.
featuresets = [(document_features(d), c) for (d,c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
classifier.classify(document_features("a movie with bad actors".split(" ")))
classifier.classify(document_features("an uplifting movie with russel crowe".split(" "))) # split: the classifier only accepts word lists
classifier.show_most_informative_features(10)
|
14 Text und Machine Learning/2.4 Classifying Text.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## **Python Object Oriented Programming**
# Python is a multi-paradigm programming language. It supports different programming approaches. One of the popular approaches to solve a programming problem is by creating objects. This is known as **Object-Oriented Programming (OOP)**
# **An object has two characteristics:**
# - attributes
# - behavior
# **Example**
# A parrot can be an object, as it has the following properties:
#
# - name, age, color as attributes
# - singing, dancing as behavior
# The concept of OOP in Python focuses on creating reusable code. This concept is also known as **DRY** (Don't Repeat Yourself).
# In Python, the concept of OOP follows some basic principles:
# ### **Class**
# A class is a blueprint for the object. We can think of class as a sketch of a parrot with labels. It contains all the details about the name, colors, size etc. Based on these descriptions, we can study about the parrot.
#
# Here, a parrot is an object.
# ~~~python
# class Parrot:
# pass
# ~~~
# Here, we use the **class** keyword to define an empty class Parrot. From class, we construct instances. An instance is a specific object created from a particular class.
# ### **Object**
# An object (instance) is an instantiation of a class. When class is defined, only the description for the object is defined. Therefore, no memory or storage is allocated.
# The example for object of parrot class can be:
# ~~~python
#
# obj = Parrot()
# ~~~
#
# Here, obj is an object of class Parrot.
# #### **Example**
#
# +
class Parrot:
    """A parrot described by a name and an age.

    All parrots share the same ``species`` class attribute, while
    ``name`` and ``age`` are stored per instance.
    """

    # Class attribute: identical for every Parrot instance.
    species = "bird"

    def __init__(self, name, age):
        """Record this parrot's own name and age."""
        self.name, self.age = name, age
# instantiate the Parrot class
blu = Parrot("Blu", 10)
woo = Parrot("Woo", 15)
# access the class attribute (shared by all instances) via __class__
print("Blu is a {}".format(blu.__class__.species))
print("Woo is also a {}".format(woo.__class__.species))
# access the instance attributes (unique per instance)
print("{} is {} years old".format( blu.name, blu.age))
print("{} is {} years old".format( woo.name, woo.age))
# -
# In the above program, we created a class with the name Parrot. Then, we define attributes. The attributes are a characteristic of an object.
# These attributes are defined inside the **`__init__`** method of the class. It is the initializer method that is first run as soon as the object is created.
# Then, we create instances of the Parrot class. Here, blu and woo are references (value) to our new objects.
# We can access the class attribute using **`__class__.species`**. Class attributes are the same for all instances of a class. Similarly, we access the instance attributes using blu.name and blu.age. However, instance attributes are different for every instance of a class.
# ### **Methods**
# Methods are functions defined inside the body of a class. They are used to define the behaviors of an object.
# #### **Example**
# +
class Parrot:
    """A parrot that can sing and dance."""

    def __init__(self, name, age):
        # per-instance attributes
        self.name, self.age = name, age

    def sing(self, song):
        """Return a sentence describing this parrot singing *song*."""
        return f"{self.name} sings {song}"

    def dance(self):
        """Return a sentence describing this parrot dancing."""
        return f"{self.name} is now dancing"
# instantiate the object
blu = Parrot("Blu", 10)
# call our instance methods (each returns a formatted string)
print(blu.sing("'Happy'"))
print(blu.dance())
# -
# In the above program, we define two methods i.e sing() and dance(). These are called instance methods because they are called on an instance object i.e blu.
# ### **Inheritance**
# Inheritance is a way of creating a new class for using details of an existing class without modifying it. The newly formed class is a derived class (or child class). Similarly, the existing class is a base class (or parent class).
# #### **Example**
# +
# parent class
class Bird:
    """Parent class: a generic bird that announces its actions."""

    def __init__(self):
        print("Bird is ready")

    def whoisThis(self):
        """Identify the class; subclasses override this."""
        print("Bird")

    def swim(self):
        """Shared behaviour inherited by all subclasses."""
        print("Swim faster")
# child class
class Penguin(Bird):
    """Child class: inherits Bird's behaviour and adds running."""

    def __init__(self):
        # Run the parent initialiser first, then our own message.
        super().__init__()
        print("Penguin is ready")

    def whoisThis(self):
        """Override the parent's identification."""
        print("Penguin")

    def run(self):
        """Behaviour specific to Penguin, extending the parent class."""
        print("Run faster")
peggy = Penguin()   # prints "Bird is ready" then "Penguin is ready"
peggy.whoisThis()   # overridden in Penguin: prints "Penguin"
peggy.swim()        # inherited unchanged from Bird
peggy.run()         # new behaviour added by the child class
# -
# In the above program, we created two classes i.e. Bird (parent class) and Penguin (child class). The child class inherits the functions of parent class. We can see this from the swim() method.
#
# Again, the child class modified the behavior of the parent class. We can see this from the whoisThis() method. Furthermore, we extend the functions of the parent class, by creating a new run() method.
#
# Additionally, we use the super() function inside the __init__() method. This allows us to run the __init__() method of the parent class inside the child class.
# ### **Encapsulation**
# Using OOP in Python, we can restrict access to methods and variables. This prevents data from direct modification which is called encapsulation. In Python, we denote private attributes using underscore as the prefix i.e single _ or double __.
# #### **Example**
# +
class Computer:
    """Demonstrates encapsulation via a name-mangled price attribute."""

    def __init__(self):
        # The double underscore triggers name mangling (stored as
        # _Computer__maxprice), shielding it from casual outside writes.
        self.__maxprice = 900

    def sell(self):
        """Print the current maximum selling price."""
        print(f"Selling Price: {self.__maxprice}")

    def setMaxPrice(self, price):
        """Setter: the supported way to change the maximum price."""
        self.__maxprice = price
c = Computer()
c.sell()
# try to change the price from outside
# NOTE: name mangling only happens inside class bodies, so this line just
# creates a NEW, unrelated attribute named __maxprice on the instance;
# sell() still reads _Computer__maxprice, so the printed price is unchanged.
c.__maxprice = 1000
c.sell()
# using the setter function actually updates the mangled attribute
c.setMaxPrice(1000)
c.sell()
# -
# In the above program, we defined a Computer class.
#
# We used the `__init__()` method to store the maximum selling price of Computer. We tried to modify the price. However, we can't change it because Python treats `__maxprice` as a private attribute.
#
# As shown, to change the value, we have to use a setter function i.e setMaxPrice() which takes price as a parameter.
# +
### **Polymorphism**
# -
# Polymorphism is an ability (in OOP) to use a common interface for multiple forms (data types).
#
# Suppose, we need to color a shape, there are multiple shape options (rectangle, square, circle). However we could use the same method to color any shape. This concept is called Polymorphism.
# +
class Parrot:
    """A bird that can fly but not swim."""

    def fly(self):
        print("Parrot can fly")

    def swim(self):
        print("Parrot can't swim")
class Penguin:
    """A bird that can swim but not fly."""

    def fly(self):
        print("Penguin can't fly")

    def swim(self):
        print("Penguin can swim")
# common interface
def flying_test(bird):
    """Common interface: ask any bird-like object to fly (duck typing)."""
    bird.fly()
# instantiate objects
blu = Parrot()
peggy = Penguin()
# passing the object
# flying_test() works with both classes because each defines fly();
# no shared base class is required (duck typing).
flying_test(blu)
flying_test(peggy)
# -
# In the above program, we defined two classes Parrot and Penguin. Each of them have a common fly() method. However, their functions are different.
#
# To use polymorphism, we created a common interface i.e flying_test() function that takes any object and calls the object's fly() method. Thus, when we passed the blu and peggy objects in the flying_test() function, it ran effectively.
# ### **Key Take Aways:**
# - Object-Oriented Programming makes the program easy to understand as well as efficient.
# - Since the class is sharable, the code can be reused.
# - Data is safe and secure with data abstraction.
# - Polymorphism allows the same interface for different objects, so programmers can write efficient code.
|
Introduction to OOP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Structured Query Language (SQL)
# <!-- requirement: data/customers.csv -->
# <!-- requirement: data/products.csv -->
# <!-- requirement: data/orders.csv -->
# SQL is one of the most common computer languages in use for working with data today. It is a standardized language for accessing and manipulating relational databases. While it is relatively limited compared to a general programming language such as Python, it is highly optimized for efficient retrieval and aggregation of data from database tables. Its broad support and use virtually guarantees that any professional data scientist or analyst will encounter SQL eventually. Furthermore, SQL is often the paradigm used to discuss the relational data model, which has implications that apply beyond SQL compliant databases.
#
# We will explore SQL from within Python, which will allow us to work with SQL in a familiar setting and also see opportunities for compatibility between the world of relational databases and data science tools within Python.
# ## Relational data model
#
# The relational data model for the most part corresponds with our intuitive notion of a table. Each row is a **relation**, usually representing some object, event, or idea. Each column corresponds with an **attribute** which characterizes the relation. In order to reduce redundancy in a database, when creating a table we typically include the minimum number of attributes required to fully define a relation. This (admittedly vague) guideline is formalized in the idea of [database normalization](https://en.wikipedia.org/wiki/Database_normalization).
#
# For example, considering the following table representing orders from an online retailer.
#
#
# Customer | ID | Order ID | Product ID | Price | Delivery Address | Billing Address
# :-------:|:--:|:--------:|:----------:|:-----:|:----------------:|:---------------:
# Omar | 435| 62353 | 103 | 6.95 | ***** Munich, Germany | ***** Berlin, Germany |
# Omar | 435| 62353 | 4028 | 35.50| ***** Tunis, Tunisia | ***** Berlin, Germany |
# Stuart |5692| 64598 | 103 | 6.95 | ***** Dover, UK | ***** Dover, UK |
# Vidhya |6127| 64921 | 3158 | 101.99| ***** Mumbai, India | ***** Mumbai, India |
# Vidhya |6127| 64989 | 2561 | 21.35 | ***** Mumbai, India | ***** Mumbai, India |
# Vidhya |6127| 64989 | 89 | 16.95 | ***** Mumbai, India | ***** Mumbai, India |
# Stuart |5692| 65271 | 103 | 6.95 | ***** Dover, UK | ***** Dover, UK |
#
# In the above table we've reproduced many values several times such as customer names and IDs, addresses, prices, etc. We could break up this table into several smaller tables in which relations contain the minimal amount of attributes needed to define the relation. For instance, we may have a table for customers, a table for products, and a table for orders.
#
# Customer | ID | Billing Address
# :--------:|:--:|:---------------:
# Omar | 435| ***** Berlin, Germany
# Stuart |5692| ***** Dover, UK
# Vidhya |6127| ***** Mumbai, India
#
# Product ID | Price
# :----------:|:-----:
# 103 | 6.95
# 4028 | 35.50
# 3158 | 101.99
# 2561 | 21.35
# 89 | 16.95
#
# Order ID | Customer ID | Product ID | Delivery Address
# :--------:|:-----------:|:----------:|:----------------:
# 62353 | 435 | 103 | ***** Munich, Germany
# 62353 | 435 | 4028 | ***** Tunis, Tunisia
# 64598 | 5692 | 103 | ***** Dover, UK
# 64921 | 6127 | 3158 | ***** Mumbai, India
# 64989 | 6127 | 2561 | ***** Mumbai, India
# 64989 | 6127 | 89 | ***** Mumbai, India
# 65271 | 5692 | 103 | ***** Dover, UK
#
# Before we were storing 7 rows x 7 columns = 49 cells; now we're storing only 7 x 4 + 5 x 2 + 3 x 3 = 47 cells. This may not seem like a huge improvement, but realistically an online retailer may have millions of orders of a particular product. Reproducing the price in every order rather than storing it once per product could be quite costly when scaled up.
#
# Let's explore how this would be implemented in SQL. We'll use `sqlite`, a basic SQL database manager that is useful for small data analysis and instructional purposes.
# ## Loading data in SQL
# +
# # !pip install sql
# # !pip install blaze
# -
# %load_ext sql
# %sql sqlite:///testdb.sqlite
# + language="sql"
# --# The %%sql magic tells Jupyter to interpret this cell as SQL
# --# In SQL comments begin with "--" (we add # to take advantage of Jupyter's syntax highlighting)
#
# --# Since we're starting a new example, let's delete any existing tables
# DROP TABLE IF EXISTS customers;
# DROP TABLE IF EXISTS products;
# DROP TABLE IF EXISTS orders;
#
# --# Now let's make our tables
# CREATE TABLE customers (
# id INTEGER PRIMARY KEY NOT NULL,
# name TEXT NOT NULL,
# billing_address TEXT NOT NULL
# );
#
# CREATE TABLE products (
# id INTEGER PRIMARY KEY NOT NULL,
# price NUMBER NOT NULL
# );
#
# CREATE TABLE orders (
# id INTEGER NOT NULL,
# customer_id NUMBER NOT NULL,
# product_id NUMBER NOT NULL,
# delivery_address TEXT NOT NULL,
# FOREIGN KEY(customer_id) REFERENCES customers(id),
# FOREIGN KEY(product_id) REFERENCES products(id)
# );
# -
# Our tables are initially empty, but we have defined the **schema** or structure of the tables. We've specified certain options in our schema, such as the fact that we do not accept null values in any field and that certain fields are unique primary keys. Many more options are possible, including setting default values for fields that could otherwise be null or instructing SQL to automatically assign incrementing values. If you haven't gotten the sense already, database architecture is an extensive subject!
#
# We can inspect the table by using `SELECT`.
# + language="sql"
#
# SELECT * FROM orders;
# -
# Let's `INSERT` data into our tables.
#
# We have to be careful to do this in a certain order; when we defined the `orders` table, we defined a relationship between the `customer_id` and `product_id` attributes and the `id` attributes in the `customer` and `product` tables respectively. We can only `INSERT` data into the orders table once the appropriate customers and products exist in their tables.
# + language="sql"
#
# --# Starting with customers
#
# INSERT INTO customers (id, name, billing_address)
# VALUES (435, 'Omar', 'Berlin, Germany'), (5692, 'Stuart', 'Dover, UK'), (6127, 'Vidhya', 'Mumbai, India');
#
# INSERT INTO products (id, price)
# VALUES (103, 6.95), (4028, 35.5), (3158, 101.99), (2561, 21.35), (89, 16.95);
#
# INSERT INTO orders (id, customer_id, product_id, delivery_address)
# VALUES (62353, 435, 103, 'Munich, Germany'), (62353, 435, 4028, 'Tunis, Tunisia');
#
# INSERT INTO orders (id, customer_id, product_id, delivery_address)
# VALUES (64598, 5692, 103, 'Dover, UK'), (65271, 5692, 103, 'Dover, UK');
#
# INSERT INTO orders (id, customer_id, product_id, delivery_address)
# VALUES (64921, 6127, 3158, 'Mumbai, India'), (64989, 6127, 2561, 'Mumbai, India'), (64989, 6127, 89, 'Mumbai, India');
# -
# Let's confirm that our tables have been updated with the data from our example.
# + language="sql"
#
# SELECT * FROM customers;
# + language="sql"
#
# SELECT * FROM products;
# + language="sql"
#
# SELECT * FROM orders;
# -
# Databases are commonly used for persistent data storage, and therefore it is common to add or remove rows as new data is created (e.g. someone places an order) or destroyed (e.g. a product is discontinued). This may be performed automatically via an application's **database connection**; we will use database connections later in this notebook. However, in the mean time we will load in a larger version of the above data set from file for analysis.
# +
# Load the larger example data set from file and push it into the SQLite
# database through a Python database connection.
import pandas as pd
import sqlite3
conn = sqlite3.connect("testdb.sqlite")
# customers.csv and orders.csv are tab-separated; products.csv uses commas.
customers = pd.read_csv('data/customers.csv',sep='\t')
products = pd.read_csv('data/products.csv')
orders = pd.read_csv('data/orders.csv',sep='\t')
# if_exists="replace" overwrites the small tables created by the earlier
# SQL INSERT examples.
customers.to_sql("customers", conn, index=False, if_exists="replace")
products.to_sql("products", conn, index=False, if_exists="replace")
orders.to_sql("orders", conn, index=False, if_exists="replace")
# + language="sql"
#
# SELECT * FROM orders;
# -
# ## Filtering and sorting data
#
# Filtering is principally accomplished using the `WHERE` command.
# + language="sql"
#
# SELECT id, delivery_country FROM orders
# WHERE delivery_country = 'India'
# LIMIT 10;
# + language="sql"
#
# SELECT * FROM products
# WHERE price > 20
# LIMIT 10;
# -
# We can additionally combine `WHERE` with `LIKE` for pattern matching and `IN` for membership.
# + language="sql"
#
# SELECT id, delivery_country FROM orders
# WHERE delivery_country like 's%'
# LIMIT 10;
# + language="sql"
#
# SELECT * FROM orders
# WHERE customer_id IN (10, 200, 400);
# -
# We can also combine them with the usual logical operators: `AND`, `OR`, and `NOT`.
# + language="sql"
#
# SELECT * FROM orders
# WHERE customer_id IN (10, 200, 400)
# AND delivery_country NOT IN ('Madagascar', 'Canada');
# + language="sql"
#
# SELECT * FROM products
# WHERE price < 10 OR price > 30
# LIMIT 10;
# -
# To sort our results, we can `ORDER BY` one or more columns. We can also choose whether we sort in ascending (`ASC`) or descending (`DESC`) order. SQL sorts in ascending order by default.
# + language="sql"
#
# SELECT * FROM orders
# ORDER BY customer_id
# LIMIT 10;
# + language="sql"
#
# SELECT * FROM orders
# ORDER BY customer_id ASC, product_id DESC
# LIMIT 10;
# -
# ## Data aggregation
#
# Most of the standard data aggregation functions are available in SQL (`COUNT`, `SUM`, `DISTINCT`, `MAX`, etc.) although exactly what is available and what it is called varies by dialect.
# + language="sql"
#
# SELECT AVG(price) avg_price, MAX(price) max_price FROM products;
# -
# As usual, we are often interested in aggregating our data within certain groups. As in Pandas, we will use `GROUP BY` to accomplish this. Remember -- if we are performing a `groupby`, any other attributes we select must be aggregated by some aggregation function.
# + language="sql"
#
# SELECT delivery_country, COUNT(DISTINCT(id)) FROM orders
# GROUP BY delivery_country
# LIMIT 10;
# -
# ## Joining tables together
#
# Since we have split up our data among several tables to reduce redundancy, we will have to join tables together to compute certain values we might be interested in. For instance, how might we calculate the total revenue from all orders? We could take a sum of the price associated with each item in each order, but in order to do so, we must `JOIN` the `products` table to the `orders` table `ON` the shared attribute: `product_id` (from the `orders` table) and `id` (from the `products` table).
#
# Since joins involve fields from multiple tables, we'll frequently alias a table `AS` some abbreviation to save ourselves some typing.
# + language="sql"
#
# SELECT SUM(p.price) as over_price FROM orders AS o
# JOIN products AS p ON o.product_id = p.id;
# -
# There are often several ways to perform a join. We can usually rely on our database management software to work out the details of the most efficient way to perform the join, although there are exceptions.
# + language="sql"
#
# SELECT SUM(p.price)
# FROM orders o, products p
# WHERE p.id = o.product_id;
# + language="sql"
#
# SELECT c.name, SUM(p.price) total
# FROM orders o, products p, customers c
# WHERE p.id = o.product_id AND c.id = o.customer_id
# GROUP BY c.id
# ORDER BY total
# LIMIT 10;
# -
# Let's try something more complex. Let's find the total amount of money spent on orders that are shipped internationally for each `billing_country`.
# + language="sql"
#
# SELECT shp.bill, SUM(shp.rev) spent
# FROM (SELECT c.billing_country bill, o.delivery_country deliver, SUM(p.price) rev
# FROM orders o, customers c, products p
# WHERE o.customer_id = c.id AND o.product_id = p.id
# GROUP BY bill, deliver
# HAVING bill != deliver) shp
# GROUP BY shp.bill
# ORDER BY spent DESC
# LIMIT 10;
# -
# The above example makes use of a subquery. Subqueries are often used for constructing intermediate tables that we may use in the computation of a larger query, and are frequently used as part of joins or to perform joins.
# ## Connecting to a database from Python
#
# In order to load our example data into our database, we created a **database connection**. We then read in our data files with Pandas, and pushed them through the connection to the database. We could have read this data directly into SQL, but database connections allow us to pass data between Python and SQL, allowing web applications or machine learning models operating in Python easy access to persistent databases.
#
# In our case, we used the `sqlite3` module because we are creating a connection to SQLite. There are other connectors for other dialects such a `psycopg2` for PostgreSQL and `mysql` for MySQL. Other packages such as `SQLAlchemy` provide connectors as well as object-relation mapping (ORM), which we will discuss later.
#
# Database connections will typically resemble the example set above
#
# ```python
# conn = sqlite3.connect("testdb.sqlite")
# ```
#
# possibly using a URL for connecting to a remotely hosted database and extra parameters for authentication. We can combine the connection with Pandas methods for [reading from](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html) and [writing to](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) SQL.
# Reference: https://www.thedataincubator.com/
|
Day 3/SQL_Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [Advent of Code 2021: Day 5](https://adventofcode.com/2021/day/5)
# [puzzle input](https://adventofcode.com/2021/day/5/input)
# ## \-\-\- Day 5: Hydrothermal Venture \-\-\-
#
# You come across a field of [hydrothermal vents](https://en.wikipedia.org/wiki/Hydrothermal_vent) on the ocean floor! These vents constantly produce large, opaque clouds, so it would be best to avoid them if possible.
#
# They tend to form in **lines**; the submarine helpfully produces a list of nearby lines of vents (your puzzle input) for you to review. For example:
#
# ```
# 0,9 -> 5,9
# 8,0 -> 0,8
# 9,4 -> 3,4
# 2,2 -> 2,1
# 7,0 -> 7,4
# 6,4 -> 2,0
# 0,9 -> 2,9
# 3,4 -> 1,4
# 0,0 -> 8,8
# 5,5 -> 8,2
#
# ```
#
# Each line of vents is given as a line segment in the format `x1,y1 -> x2,y2` where `x1`,`y1` are the coordinates of one end the line segment and `x2`,`y2` are the coordinates of the other end. These line segments include the points at both ends. In other words:
#
# * An entry like `1,1 -> 1,3` covers points `1,1`, `1,2`, and `1,3`.
# * An entry like `9,7 -> 7,7` covers points `9,7`, `8,7`, and `7,7`.
#
# For now, **only consider horizontal and vertical lines**: lines where either `x1 = x2` or `y1 = y2`.
#
# So, the horizontal and vertical lines from the above list would produce the following diagram:
#
# ```
# .......1..
# ..1....1..
# ..1....1..
# .......1..
# .112111211
# ..........
# ..........
# ..........
# ..........
# 222111....
#
# ```
#
# In this diagram, the top left corner is `0,0` and the bottom right corner is `9,9`. Each position is shown as **the number of lines which cover that point** or `.` if no line covers that point. The top\-left pair of `1`s, for example, comes from `2,2 -> 2,1`; the very bottom row is formed by the overlapping lines `0,9 -> 5,9` and `0,9 -> 2,9`.
#
# To avoid the most dangerous areas, you need to determine **the number of points where at least two lines overlap**. In the above example, this is anywhere in the diagram with a `2` or larger \- a total of **`5`** points.
#
# Consider only horizontal and vertical lines. **At how many points do at least two lines overlap?**
# +
import unittest
from fractions import Fraction
from IPython.display import Markdown, display
from aoc_puzzle import AocPuzzle
class Puzzle(AocPuzzle):
    """Solver for AoC 2021 day 5: count points covered by 2+ vent lines.

    Input is a list of segments "x1,y1 -> x2,y2"; per the puzzle, lines
    are horizontal, vertical, or 45-degree diagonals.
    """

    def parse_line(self, raw_data):
        """Parse one "x1,y1 -> x2,y2" line into [(x1, y1), (x2, y2)]."""
        coord_str_set = raw_data.split(' -> ')
        coord_set = []
        for coord_str in coord_str_set:
            coord = tuple(map(int, coord_str.split(',')))
            coord_set.append(coord)
        return coord_set

    def parse_data(self, raw_data):
        """Parse the full puzzle input into self.data (list of segments)."""
        self.data = list(map(self.parse_line, raw_data.split('\n')))

    def get_slope(self, coord1, coord2):
        """Return the component-wise difference coord1 - coord2."""
        x1, y1 = coord1
        x2, y2 = coord2
        return((x1 - x2, y1 - y2))

    def incriment_coord(self, coord, step):
        """Return coord advanced by one step (name kept for compatibility)."""
        x, y = coord
        xstep, ystep = step
        return (int(x + xstep), int(y + ystep))

    def decriment_coord(self, coord, step):
        """Return coord moved one step backwards (currently unused)."""
        x, y = coord
        xstep, ystep = step
        return (x - xstep, y - ystep)

    def get_step(self, coord):
        """Reduce a slope vector to a per-axis step of -1, 0 or +1
        (sufficient for the horizontal/vertical/45-degree lines here)."""
        x, y = coord
        # BUG FIX: the original used "x is 0" / "y is 0". Identity
        # comparison with int literals relies on CPython's small-int
        # cache and raises SyntaxWarning on Python 3.8+; use == instead.
        if x == 0:
            xcd = 0
            ysign = y / abs(y)
            ycd = ysign
        elif y == 0:
            xsign = x / abs(x)
            xcd = xsign
            ycd = 0
        else:
            xsign = x / abs(x)
            ysign = y / abs(y)
            # Fraction reduces the slope to lowest terms; its denominator
            # is always positive, so reapply the signs explicitly.
            cf = Fraction(x, y)
            xcd = abs(cf.numerator) * xsign
            ycd = cf.denominator * ysign
        return (xcd, ycd)

    def get_step_to(self, coord1, coord2):
        """ Returns a step for coordinate 1 that is in the direction of coordinate 2 """
        # BUG FIX: the original compared "coord1 is coord2" (identity).
        # Two equal but distinct tuples would fall through to get_step
        # with a (0, 0) slope and divide by zero; compare by value.
        if coord1 == coord2:
            return (0, 0)
        x1, y1 = coord1
        x2, y2 = coord2
        dist = self.get_slope(coord2, coord1)
        xstep, ystep = self.get_step(dist)
        return (xstep, ystep)

    def expand_coord_set(self, coord_set):
        """Expand a segment [start, end] into every point it covers."""
        coord1, coord2 = coord_set
        step = self.get_step_to(coord1, coord2)
        coords_list = [coord1]
        next_coord = self.incriment_coord(coord1, step)
        while next_coord != coord2:
            coords_list.append(next_coord)
            next_coord = self.incriment_coord(next_coord, step)
        coords_list.append(coord2)
        return coords_list

    def is_hort_vert_set(self, coord_set):
        """Return True if the segment is horizontal or vertical."""
        coord1, coord2 = coord_set
        x1, y1 = coord1
        x2, y2 = coord2
        return x1 == x2 or y1 == y2

    def get_all_coords(self, horz_vert_only):
        """Collect every covered point of every eligible segment."""
        coords_list = []
        for coord_set in self.data:
            if horz_vert_only and not self.is_hort_vert_set(coord_set):
                # Part 1 ignores diagonal segments.
                continue
            coords_list += self.expand_coord_set(coord_set)
        return coords_list

    def count_dupplicates(self, coords_list):
        """Count points covered more than once (name kept for compatibility)."""
        coord_counts = {}
        for coord in coords_list:
            coord_counts.setdefault(coord, 0)
            coord_counts[coord] += 1
        total = 0
        for count in coord_counts.values():
            if count > 1:
                total += 1
        return total

    def run(self, horz_vert_only=True, output=False):
        """Solve the puzzle; horz_vert_only=True is part 1, False is part 2."""
        coords_list = self.get_all_coords(horz_vert_only)
        result = self.count_dupplicates(coords_list)
        if output:
            display(Markdown(f'### Result is `{result}`'))
        return result
class TestBasic(unittest.TestCase):
    """Checks Puzzle parsing and the part-1 example answer from the text."""
    def test_parse_data(self):
        """The raw input parses into a list of [(x1, y1), (x2, y2)] pairs."""
        in_data = '0,9 -> 5,9\n8,0 -> 0,8\n9,4 -> 3,4\n2,2 -> 2,1\n7,0 -> 7,4\n6,4 -> 2,0\n0,9 -> 2,9\n3,4 -> 1,4\n0,0 -> 8,8\n5,5 -> 8,2'
        exp_out = [[(0,9),(5,9)],[(8,0),(0,8)],[(9,4),(3,4)],[(2,2),(2,1)],[(7,0),(7,4)],[(6,4),(2,0)],[(0,9),(2,9)],[(3,4),(1,4)],[(0,0),(8,8)],[(5,5),(8,2)]]
        puzzle = Puzzle(in_data)
        self.assertEqual(puzzle.data, exp_out)
    def test_puzzle(self):
        """The example field has 5 points where horizontal/vertical lines overlap."""
        input_data = ['0,9 -> 5,9\n8,0 -> 0,8\n9,4 -> 3,4\n2,2 -> 2,1\n7,0 -> 7,4\n6,4 -> 2,0\n0,9 -> 2,9\n3,4 -> 1,4\n0,0 -> 8,8\n5,5 -> 8,2']
        exp_output = [5]
        for in_data, exp_out in tuple(zip(input_data, exp_output)):
            puzzle = Puzzle(in_data)
            self.assertEqual(puzzle.run(), exp_out)
# Run the tests inline; argv is stubbed and exit=False keeps the kernel alive.
unittest.main(argv=[""], exit=False)
# -
# Solve part 1 against the real puzzle input file and display the result.
puzzle = Puzzle("input/d05.txt")
puzzle.run(output=True)
# ## \-\-\- Part Two \-\-\-
#
# Unfortunately, considering only horizontal and vertical lines doesn't give you the full picture; you need to also consider **diagonal lines**.
#
# Because of the limits of the hydrothermal vent mapping system, the lines in your list will only ever be horizontal, vertical, or a diagonal line at exactly 45 degrees. In other words:
#
# * An entry like `1,1 -> 3,3` covers points `1,1`, `2,2`, and `3,3`.
# * An entry like `9,7 -> 7,9` covers points `9,7`, `8,8`, and `7,9`.
#
# Considering all lines from the above example would now produce the following diagram:
#
# ```
# 1.1....11.
# .111...2..
# ..2.1.111.
# ...1.2.2..
# .112313211
# ...1.2....
# ..1...1...
# .1.....1..
# 1.......1.
# 222111....
#
# ```
#
# You still need to determine **the number of points where at least two lines overlap**. In the above example, this is still anywhere in the diagram with a `2` or larger \- now a total of **`12`** points.
#
# Consider all of the lines. **At how many points do at least two lines overlap?**
# +
class TestBasic(unittest.TestCase):
    """Checks the part-2 example answer (diagonal lines included)."""
    def test_puzzle(self):
        """With diagonals counted, the example has 12 overlap points."""
        input_data = ['0,9 -> 5,9\n8,0 -> 0,8\n9,4 -> 3,4\n2,2 -> 2,1\n7,0 -> 7,4\n6,4 -> 2,0\n0,9 -> 2,9\n3,4 -> 1,4\n0,0 -> 8,8\n5,5 -> 8,2']
        exp_output = [12]
        for in_data, exp_out in tuple(zip(input_data, exp_output)):
            puzzle = Puzzle(in_data)
            self.assertEqual(puzzle.run(horz_vert_only=False), exp_out)
# Run the tests inline; exit=False keeps the kernel alive.
unittest.main(argv=[""], exit=False)
# -
# Solve part 2 (diagonals included) against the real puzzle input.
puzzle = Puzzle("input/d05.txt")
puzzle.run(output=True, horz_vert_only=False)
|
AoC 2021/AoC 2021 - Day 05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Class Diagrams
#
# This is a simple viewer for class diagrams. Customized towards the book.
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# **Prerequisites**
#
# * _Refer to earlier chapters as notebooks here, as here:_ [Earlier Chapter](Fuzzer.ipynb).
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import bookutils
# -
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.ClassDiagram import <identifier>
# ```
#
# and then make use of the following features.
#
#
# The `display_class_hierarchy()` function shows the class hierarchy for the given class. Methods with docstrings (intended to be used by the public) are shown in bold.
#
# ```python
# >>> display_class_hierarchy(GrammarFuzzer)
# ```
#
# 
#
#
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Getting a Class Hierarchy
# -
# Using `mro()`, we can access the class hierarchy. We make sure to avoid duplicates created by `class X(X)`.
def class_hierarchy(cls):
    """Return cls's MRO with runs of identically named classes collapsed.

    Duplicate names arise from the book's `class X(X)` extension pattern;
    only the first class in each run of same-named entries is kept.
    """
    result = []
    previous_name = None
    for klass in cls.mro():
        if klass.__name__ != previous_name:
            result.append(klass)
            previous_name = klass.__name__
    return result
# Here's an example:
# Demo hierarchy: D_Class inherits from both B_Class and C_Class.
class A_Class:
    """Root demo class; defines foo()."""
    def foo(self):
        pass
class B_Class(A_Class):
    """Overrides foo() and adds bar()."""
    def foo(self):
        pass
    def bar(self):
        pass
class C_Class:
    """Independent class providing qux()."""
    def qux(self):
        pass
class D_Class(B_Class, C_Class):
    """Multiple inheritance; foo() delegates to B_Class explicitly."""
    def foo(self):
        B_Class.foo(self)
# Display the collapsed hierarchy of A_Class ([A_Class, object]).
class_hierarchy(A_Class)
# ## Getting a Class Tree
# We can use `__bases__` to obtain the immediate base classes.
# The immediate base classes of D_Class (B_Class and C_Class).
D_Class.__bases__
def class_tree(cls):
    """Return a class tree as a list of (class, subtrees) pairs.

    First attempt: a base that shares cls's name (the `class X(X)` idiom)
    is merged into the current level instead of being nested.
    """
    return [
        entry
        for base in cls.__bases__
        for entry in (
            class_tree(base)
            if base.__name__ == cls.__name__
            else [(cls, class_tree(base))]
        )
    ]
def class_tree(cls):
    """Return [(cls, subtrees), ...], one entry per base of cls.

    Chains of ancestors named like cls (created by `class X(X)`) are
    skipped, so each class name appears only once per branch.
    """
    def real_base(base):
        # Walk past bases whose name matches cls (self-extension pattern).
        current = base
        while current.__name__ == cls.__name__:
            current = current.__bases__[0]
        return current

    return [(cls, class_tree(real_base(base))) for base in cls.__bases__]
# Show the tree rooted at D_Class.
class_tree(D_Class)
# Not perfect yet (multiple instances of same class), but we're getting there.
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Getting methods
# -
import inspect
def class_methods(cls):
    """Return (name, function) pairs for cls and all its ancestors.

    Methods are gathered depth-first through the base classes; when a
    name occurs more than once, only the first occurrence is kept.
    """
    def gather(klass):
        found = inspect.getmembers(klass, inspect.isfunction)
        for parent in klass.__bases__:
            found.extend(gather(parent))
        return found

    seen = set()
    unique = []
    for name, func in gather(cls):
        if name in seen:
            continue
        seen.add(name)
        unique.append((name, func))
    return unique
# All methods of D_Class, including inherited ones.
class_methods(D_Class)
# We're only interested in
#
# * functions _defined_ in that class
# * functions that come with a docstring
def public_class_methods(cls):
    """Return only the methods that are defined in cls itself.

    A method's __qualname__ begins with the name of the class that
    defined it, which filters out inherited methods.
    """
    prefix = cls.__name__
    result = []
    for name, method in class_methods(cls):
        if method.__qualname__.startswith(prefix):
            result.append((name, method))
    return result
def doc_class_methods(cls):
    """Return the methods of cls that carry a docstring."""
    documented = []
    for name, method in public_class_methods(cls):
        if method.__doc__ is not None:
            documented.append((name, method))
    return documented
# Methods defined directly in D_Class (just foo)...
public_class_methods(D_Class)
# ...and the subset with docstrings (empty: the demo methods have none).
doc_class_methods(D_Class)
# ## Drawing Class Hierarchy with Method Names
def display_class_hierarchy(classes, include_methods=True,
                            project='fuzzingbook'):
    """Render a class hierarchy as a Graphviz digraph.

    Args:
        classes: a class, or a list of classes; the first entry names
            the diagram.
        include_methods: if True, list each class's own methods inside
            its node; methods with a docstring are shown in bold.
        project: 'fuzzingbook' (default) or 'debuggingbook'; selects the
            fonts and class-name color.

    Returns:
        a graphviz.Digraph (rendered inline by Jupyter).
    """
    from graphviz import Digraph
    # Style constants differ between the two book projects.
    if project == 'debuggingbook':
        CLASS_FONT = 'Raleway, Helvetica, Arial, sans-serif'
        CLASS_COLOR = 'purple'
    else:
        CLASS_FONT = 'Patua One, Helvetica, sans-serif'
        CLASS_COLOR = '#B03A2E'
    METHOD_FONT = "'Fira Mono', 'Source Code Pro', monospace"
    METHOD_COLOR = 'black'
    # Accept a single class or a list; the first class names the diagram.
    if isinstance(classes, list):
        starting_class = classes[0]
    else:
        starting_class = classes
        classes = [starting_class]
    dot = Digraph(comment=starting_class.__name__ + " hierarchy")
    # record-shaped nodes, bottom-to-top ranking, hollow (UML-style)
    # inheritance arrowheads.
    dot.attr('node', shape='record', fontname=CLASS_FONT)
    dot.attr('graph', rankdir='BT')
    dot.attr('edge', arrowhead='empty')
    edges = set()  # edges already drawn, to avoid duplicates
    def method_string(method_name, f):
        # One HTML-label entry per method; bold if it has a docstring.
        method_string = f'<font face="{METHOD_FONT}" point-size="10">'
        if f.__doc__ is not None:
            method_string += '<b>' + method_name + '()</b>'
        else:
            method_string += f'<font color="{METHOD_COLOR}">{method_name}()</font>'
        method_string += '</font>'
        return method_string
    def class_methods_string(cls):
        # HTML table of the class's own methods; documented ones first.
        methods = public_class_methods(cls)
        # return "<br/>".join([name + "()" for (name, f) in methods])
        methods_string = '<table border="0" cellpadding="0" cellspacing="0" align="left">'
        for doc in [True, False]:
            for (name, f) in methods:
                if (doc and f.__doc__ is not None) or (not doc and f.__doc__ is None):
                    methods_string += '<tr><td align="left" border="0">'
                    methods_string += method_string(name, f)
                    methods_string += '</td></tr>'
        methods_string += '</table>'
        return methods_string
    def display_class_node(cls):
        # Add one node for cls, hyperlinked to its defining notebook.
        name = cls.__name__
        url = cls.__module__ + '.ipynb'
        if include_methods:
            methods = class_methods_string(cls)
            spec = '<{<b><font color="' + CLASS_COLOR + '">' + \
                cls.__name__ + '</font></b>|' + methods + '}>'
        else:
            spec = '<' + cls.__name__ + '>'
        dot.node(name, spec, href=url)
    def display_class_tree(trees):
        # Recursively add nodes and inheritance edges for each
        # (class, subtrees) pair produced by class_tree().
        for tree in trees:
            (cls, subtrees) = tree
            display_class_node(cls)
            for subtree in subtrees:
                (subcls, _) = subtree
                if (cls, subcls) not in edges:
                    dot.edge(cls.__name__, subcls.__name__)
                    edges.add((cls, subcls))
            display_class_tree(subtrees)
    for cls in classes:
        tree = class_tree(cls)
        display_class_tree(tree)
    return dot
# Render the same hierarchy in both book styles.
display_class_hierarchy([D_Class, A_Class], project='debuggingbook')
display_class_hierarchy([D_Class, A_Class], project='fuzzingbook')
# ## Synopsis
# The `display_class_hierarchy()` function shows the class hierarchy for the given class (or list of classes). Methods with docstrings (intended to be used by the public) are shown in bold.
# Synopsis example: hierarchy of D_Class with default settings.
display_class_hierarchy(D_Class)
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Lessons Learned
#
# * _Lesson one_
# * _Lesson two_
# * _Lesson three_
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Next Steps
#
# _Link to subsequent chapters (notebooks) here, as in:_
#
# * [use _mutations_ on existing inputs to get more valid inputs](MutationFuzzer.ipynb)
# * [use _grammars_ (i.e., a specification of the input format) to get even more valid inputs](Grammars.ipynb)
# * [reduce _failing inputs_ for efficient debugging](Reducer.ipynb)
#
# -
# ## Background
#
# _Cite relevant works in the literature and put them into context, as in:_
#
# The idea of ensuring that each expansion in the grammar is used at least once goes back to Burkhardt \cite{Burkhardt1967}, to be later rediscovered by Paul Purdom \cite{Purdom1972}.
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Exercises
#
# _Close the chapter with a few exercises such that people have things to do. To make the solutions hidden (to be revealed by the user), have them start with_
#
# ```markdown
# **Solution.**
# ```
#
# _Your solution can then extend up to the next title (i.e., any markdown cell starting with `#`)._
#
# _Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution._
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Exercise 1: _Title_
#
# _Text of the exercise_
# + cell_style="center"
# Some code that is part of the exercise
pass
# + [markdown] solution2="hidden" solution2_first=true
# _Some more text for the exercise_
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution.** _Some text for the solution_
# + cell_style="split" slideshow={"slide_type": "skip"} solution2="hidden"
# Some code for the solution
2 + 2
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# _Some more text for the solution_
# + [markdown] button=false new_sheet=false run_control={"read_only": false} solution="hidden" solution2="hidden" solution2_first=true solution_first=true
# ### Exercise 2: _Title_
#
# _Text of the exercise_
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden"
# **Solution.** _Solution for the exercise_
|
notebooks/ClassDiagram.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # PyTorch Computer Vision Samples
#
# In this tutorial, we will learn how to perform different computer vision tasks using PyTorch. In computer vision, we normally solve one of the following problems:
#
# * **Image Classification** is the simplest task, when we need to classify an image into one of a few pre-defined categories, for example, distinguish a cat from a dog on a photograph, or recognize a handwritten digit. In this task, we assume that only one object is present in the image, and we do not care about its position.
# * **Object Detection** is more difficult task, in which we need to find known objects on the picture and localize them, i.e. return the **bounding box** for each of recognized objects.
# * **Segmentation** is similar to object detection, but instead of giving bounding box we need to return an exact pixel map outlining each of the recognized objects.
#
# 
#
# Image taken from [CS224d Stanford Course](http://cs224d.stanford.edu/index.html)
#
# ## Part 0: Loading prerequisites
#
# Throughout this tutorial, we will be using [PyTorch](http://pytorch.org), one of the most popular libraries for dealing with neural networks. We will start with importing required libraries. If you do not have those libraries installed, refer to the [PyTorch web site](http://pytorch.org) for installation instructions.
import torch
import torchvision
from torch import nn
import matplotlib.pyplot as plt
import numpy as np
import PIL
import os
# ## Part 1: Images as Tensors
#
# Computer Vision works with Images. As you probably know, images consist of pixels, so they can be thought of as a rectangular collection (array) of pixels.
#
# In the first part of this tutorial, we will deal with handwritten digit recognition. We will use MNIST dataset for it, which consists of grayscale images of handwritten digits, 28x28 pixels.
#
# So, each image can be represented as 28x28 array, and elements of this array would denote intensity of corresponding pixel - either in the scale of 0..1 (in which case floating point numbers are used), or 0..255 (integers). `numpy` Python library is often used with computer vision tasks, because it allows to operate with array effectively.
#
# To deal with colour images, we need some way to represent colors. In most cases, we represent each pixel by 3 intensity values, corresponding to Red (R), Green (G) and Blue (B) components. This colour encoding is called RGB, and thus colour image of size $W\times H$ will be represented as an array of size $3\times H\times W$ (sometimes the order of components might be different, but the idea is the same).
#
# Multi-dimensional arrays are also called **tensors**. Using tensors to represent images also has an advantage, because we can use an extra dimension to store a sequence of images. For example, our MNIST dataset, which consists of 50000 28x28 B&W images, may be represented as one tensor of size 50000x1x28x28 (one channel per grayscale image).
#
# Let's start by loading this dataset:
# + tags=[]
from torchvision.transforms import ToTensor
mnist_train = torchvision.datasets.MNIST('./data',download=True,train=True,transform=ToTensor())
mnist_test = torchvision.datasets.MNIST('./data',download=True,train=False,transform=ToTensor())
# -
x = mnist_train[0][0].squeeze(0).numpy()
plt.imshow(x)
x.min(),x.max()
# ## Part 2: Training DNN
#
# Coming back to the task of handwritten digit recognition, we can see that it is a classification problem. We will start with the simplest possible approach for image classification - a fully-connected neural network (which is also called a *perceptron*).
#
# A simplest **neural network** in PyTorch consists of a number of **layers**. In our case we will have one main fully-connected layer, which is called **Linear** layer, with 784 inputs (one input for each pixel of the input image) and 10 outputs (one output for each class).
#
# As we discussed above, the dimension of our digit images is $1\times28\times28$. Because the input dimension of fully-connected layer is 784, we need to insert another layer into the network, called **Flatten**, to change tensor shape from $1\times28\times28$ to $784$.
#
# We want the $n$-th output of the network to return the probability of the input digit being equal to $n$. Because the output of a fully-connected layer is not normalized in any way, it cannot be regarded as a probability. To turn it into a probability, we apply another layer, called **LogSoftmax**.
#
# Thus, the architecture of our network can be represented by the following sequence of layers:
#
net = nn.Sequential(nn.Flatten(), nn.Linear(784,10), nn.LogSoftmax())
# Now, let's train the neural network. To do so, we will need to chunk original dataset into **batches** of a certain size, let's say 64. An object called **DataLoader** can do it for us automatically:
train_loader = torch.utils.data.DataLoader(mnist_train,batch_size=64)
test_loader = torch.utils.data.DataLoader(mnist_test,batch_size=64) # we can use larger batch size for testing
# Now we will define main training function that we will use later in all our examples. It can take the following parameters:
# * **Neural network** itself
# * **DataLoader**, which defines the data to train on
# * **Loss Function**, which is a function that measures the difference between expected result and the one produced by the network. In most of the classification tasks `NLLLoss` is used, so we will make it a default.
# * **Optimizer**, which defines an *optimization algorithm*. The most traditional algorithm is *stochastic gradient descent*, but we will use a more advanced version called **Adam** by default.
# * **Learning rate** defines the speed at which the network learns. During learning, we show the same data multiple times, and each time weights are adjusted. If the learning rate is too high, new values will overwrite the knowledge from the old ones, and the network would behave badly. Too small a learning rate results in very slow learning.
#
# Here is what we do when training:
# * Switch the network to training mode (`net.train()`)
# * Go over all batches in the dataset, and for each batch do the following:
# - compute predictions made by the network on this batch (`out`)
# - compute `loss`, which is the discrepancy between predicted and expected values
# - try to minimize the loss by adjusting weights of the network (`optimizer.step()`)
#
# The `train_epoch` function does one pass through the data, which is called **epoch**. This function also calculates and returns the average loss. By observing this loss during training we can see whether network actually learns something.
# + tags=[]
def train_epoch(net,dataloader,lr=0.01,optimizer=None,loss_fn = nn.NLLLoss()):
    """Run one training epoch over `dataloader` and return the average batch loss.

    Parameters:
        net: model to train; switched into training mode here.
        dataloader: iterable yielding (features, labels) batches.
        lr: learning rate, used only when `optimizer` is None.
        optimizer: optional optimizer; a fresh Adam is created if omitted.
        loss_fn: loss criterion; defaults to NLLLoss (expects log-probability outputs).

    Returns:
        float: mean loss across all batches of this epoch.
    """
    optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr)
    net.train()
    total_loss, batch_count = 0.0, 0
    for features, labels in dataloader:
        optimizer.zero_grad()
        out = net(features)
        loss = loss_fn(out, labels)
        loss.backward()
        optimizer.step()
        # Bug fix: accumulate the detached scalar. The original `l += loss`
        # summed full tensors, keeping every batch's autograd graph alive
        # until the end of the epoch (growing memory use for no benefit).
        total_loss += loss.item()
        batch_count += 1
    return total_loss / batch_count
train_epoch(net,train_loader)
# +
def validate(net, dataloader,loss_fn=nn.NLLLoss()):
    """Evaluate `net` on `dataloader` without gradient tracking.

    Parameters:
        net: model to evaluate; switched into eval mode here.
        dataloader: a DataLoader (its `.dataset` length is used for accuracy).
        loss_fn: loss criterion used to compute the reported validation loss.

    Returns:
        tuple(float, float): (average batch loss, accuracy over the dataset).
    """
    net.eval()
    loss = 0.0
    i,c = 0,0
    with torch.no_grad():
        for features,label in dataloader:
            out = net(features)
            # Bug fix: honour the `loss_fn` argument. The original always
            # computed nll_loss here, so validating a network trained with a
            # different criterion (e.g. CrossEntropyLoss) reported the wrong
            # loss. The default NLLLoss preserves the old default behavior.
            loss += loss_fn(out,label).item()
            pred = out.data.max(1, keepdim=True)[1]
            c += pred.eq(label.data.view_as(pred)).sum()
            i += 1
    return loss/i, c.item()/len(dataloader.dataset)
validate(net,test_loader)
# + tags=[]
def train(net,train_loader,test_loader,optimizer=None,lr=0.01,epochs=10,loss_fn=nn.NLLLoss()):
    """Full training loop: one train/validate round per epoch with a progress printout.

    Creates an Adam optimizer when none is supplied, then repeatedly calls
    `train_epoch` and `validate`, printing accuracy and both losses.
    """
    if optimizer is None:
        optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(epochs):
        train_loss = train_epoch(net, train_loader, optimizer=optimizer, lr=lr, loss_fn=loss_fn)
        val_loss, accuracy = validate(net, test_loader, loss_fn=loss_fn)
        print(f"Epoch {epoch}, Accuracy={accuracy}, Train loss={train_loss}, Val loss={val_loss}")
train(net,train_loader,test_loader,epochs=3)
# -
# ## Part 3: CNN
# + tags=[]
# Convolutional network for 28x28 single-channel MNIST digits.
# Spatial shapes per stage for a 1x28x28 input: conv(k5) -> 10x24x24,
# pool -> 10x12x12, conv(k5) -> 20x8x8, pool -> 20x4x4, flatten -> 320.
net = nn.Sequential(
    nn.Conv2d(1,10,kernel_size=5),   # 1 input channel -> 10 feature maps
    nn.MaxPool2d(2),                 # halve spatial resolution
    nn.ReLU(),
    nn.Conv2d(10,20,kernel_size=5),  # 10 -> 20 feature maps
    nn.MaxPool2d(2),
    nn.ReLU(),
    nn.Flatten(),                    # 20x4x4 -> 320 features
    nn.Linear(320,10),               # one score per digit class
    nn.LogSoftmax()                  # NOTE(review): no dim argument; recent PyTorch warns — presumably dim=1 intended
)
print(net)
# + tags=[]
from torchsummary import summary
summary(net,input_size=(1,28,28))
# + tags=[]
train(net,train_loader,test_loader,epochs=3)
# -
# ## Part 4: Our own NN class
# + tags=[]
from torch.nn.functional import relu,log_softmax
class MyNet(nn.Module):
    """Small two-layer CNN for 28x28 single-channel digit classification.

    Architecture: conv(1->10, k5) -> maxpool -> relu -> conv(10->20, k5)
    -> maxpool -> relu -> flatten (320) -> linear 320->10 -> log-softmax.
    """
    def __init__(self):
        super(MyNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(10, 20, 5)
        self.fc = nn.Linear(320,10)
    def forward(self, x):
        x = self.pool(relu(self.conv1(x)))
        x = self.pool(relu(self.conv2(x)))
        x = x.view(-1, 320)
        # Fix: make the softmax axis explicit. Calling log_softmax without
        # `dim` is deprecated and triggers a warning; for the 2-D (batch,
        # classes) tensor here the implicit choice was dim=1 anyway, so
        # behavior is unchanged.
        x = log_softmax(self.fc(x), dim=1)
        return x
net = MyNet()
train(net,train_loader,test_loader,epochs=3)
# +
initial_size = (1,28,28)

def conv_size(in_size,filters,kernel_size):
    """Output (channels, h, w) of a valid (no-padding, stride-1) convolution."""
    _, height, width = in_size
    return (filters, height - kernel_size + 1, width - kernel_size + 1)

def pool_size(in_size,stride=2):
    """Output (channels, h, w) after pooling with the given stride (floor division)."""
    channels, height, width = in_size
    return (channels, height // stride, width // stride)

# Trace the tensor shape through the conv -> pool -> conv -> pool stack above.
pool_size(conv_size(pool_size(conv_size(initial_size,10,5)),20,5))
# -
# ## Part 5: Using pre-trained network
#
# We will use [Cats and Dogs](https://www.kaggle.com/c/dogs-vs-cats/data) dataset from Kaggle
#
data_path = "d:/data/catsdogs/petimages"
a_cat = PIL.Image.open(os.path.join(data_path,"cat","1.jpg"))
a_cat
# + tags=[]
from glob import glob
def check_dataset():
    """Delete image files under `data_path` that PIL cannot open.

    Walks the 'cat' and 'dog' class folders, attempts to open every .jpg,
    and removes (then reports) any file that fails to open. Destructive:
    removed files are not recoverable.
    """
    for cls in 'cat','dog':
        for c in glob(os.path.join(data_path,cls,'*.jpg')):
            try:
                # Opening is enough to detect unreadable files; the image
                # object itself is not needed.
                PIL.Image.open(c)
            except Exception:
                # Fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit, making the scan
                # uninterruptible (and deleting files on Ctrl-C).
                os.remove(c)
                print(f"File {c} is corrupt")
# check_dataset()
os.remove(os.path.join(data_path,"Cat","666.jpg"))
os.remove(os.path.join(data_path,"Dog","11702.jpg"))
# + tags=[]
net = torchvision.models.resnet18(pretrained=True)
# + tags=[]
summary(net,input_size=(3,224,224))
# +
from torchvision.transforms import Resize, ToTensor, Normalize, Compose
std_normalize = Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = Compose([Resize((224,224)),ToTensor(),std_normalize])
# -
net.eval()
res = net(transform(a_cat).unsqueeze(0)).squeeze()
# +
import json, requests
class_map = json.loads(requests.get("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json").text)
class_map = { int(k) : v for k,v in class_map.items() }
class_map[res.argmax().item()]
# -
# ## Part 6: Transfer learning for Cats and Dogs
#
dataset = torchvision.datasets.ImageFolder(data_path,transform=transform)
dataset
train_ds, test_ds = torch.utils.data.random_split(dataset,[20000,4998])
train_loader = torch.utils.data.DataLoader(train_ds,batch_size=64)
test_loader = torch.utils.data.DataLoader(test_ds,batch_size=64)
# +
for p in net.parameters():
p.requires_grad = False
net.fc = nn.Linear(net.fc.in_features,2)
# + tags=[]
train(net,train_loader,test_loader,loss_fn=nn.CrossEntropyLoss(),epochs=3)
# -
|
notebooks/TorchVisionSamples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
import matplotlib.pyplot as plt
import contextily as ctx
plt.style.use('seaborn')
# %matplotlib inline
# -
# # Cleaning for Bike Accidents
#
# We can only use points with an MO and only those involving bikes
accidents = pd.read_csv('./Data/Traffic_Collision_Data_from_2010_to_Present.csv', parse_dates=['Date Reported', 'Date Occurred'])
accidents.head()
accidents['MO Codes'].notnull().sum()
mo_codes = {}
with open('./Data/MO Codes Cleanup.csv') as f:
f.readline()#throwaway headers
for row in f.readlines():
try:
code, desc = row.replace('\n', '').split(',', 1)
code_full = code.zfill(4)
mo_codes[code_full] = desc
except:
print(row)
test_codes = accidents.loc[0, 'MO Codes'] #split on space
test_codes
def transformBikeAccidents(mo_string):
    """Translate a space-separated MO-code string into a list of known code descriptions.

    Codes missing from the module-level `mo_codes` mapping are silently dropped.
    """
    matched = []
    for code in mo_string.split(' '):
        description = mo_codes.get(code)
        if description:
            matched.append(description)
    return matched
transformBikeAccidents(test_codes)
accidents['Bike Accidents'] = accidents['MO Codes'].map(lambda codes: transformBikeAccidents(str(codes)) if codes else [])
has_bike = accidents['Bike Accidents'].map(lambda descriptions: any([desc for desc in descriptions if 'bike' in desc.lower()]))
bike_accidents = accidents[has_bike].copy()
bike_accidents.assign(year = bike_accidents['Date Occurred'].map(lambda date: date.year))\
.year\
.value_counts()\
.sort_index()
# So the potentially good news is that bike accidents have been going down in Los Angeles. This is still wide open since we don't know if we just have less bikers (which may be the case for 2020), reporting has changed, etc.
# # Bike Lane Safety
# ## Are Bike Lanes Roads Safer Than Others?
# In counting the number of accidents, one naive idea is to normalize the number of accidents by length of road. When that road became a bike lane should also be taken into consideration relative to the accident occurrence. There is also the possibility that bike lanes may have been installed in response to accidents or danger, so it's unfair to compare accident counts of one road to another. Subjects such as possibly comparing a road to itself after it became a bike lane, as well as normalizing according to historic volume as opposed to street length, will be visited later.
# ### Preparing GeoDataFrames
#
# Bike routes were in latitude/longitude, just match these:
def plotCrimePoint(coords):
    """Convert a "(lat, lon)" location string into a shapely Point(lon, lat).

    Strips the surrounding parentheses, then swaps the order so the Point
    follows the (x=longitude, y=latitude) convention.
    """
    lat_text, lon_text = coords[1:-1].split(',')
    return Point(float(lon_text), float(lat_text))
bike_accidents['geometry'] = bike_accidents.Location.map(plotCrimePoint)
bike_accidents_geo = gpd.GeoDataFrame(bike_accidents, crs='EPSG:4326')
# Some don't have exact locations, let's just lose these since they aren't too many.
bike_accidents_geo[bike_accidents_geo.geometry == Point(0,0)].shape
bike_accidents_geo = bike_accidents_geo[bike_accidents_geo.geometry != Point(0,0)].copy()
bike_lanes = gpd.read_file('./Data/City_of_Los_Angeles_Bikeways-shp/28de79f7-8e83-4ea9-923b-45b6accc9fad2020329-1-1hi0fjx.3l8l.shp')
bike_lanes.crs
# +
ax = bike_accidents_geo.to_crs('epsg:3857')\
.plot(color='red', alpha=.1, figsize=(20,20))
bike_lanes.to_crs('epsg:3857')\
.plot(ax=ax)
#fix basemap
#ctx.add_basemap(ax, zoom=17, source=ctx.providers.CartoDB.Voyager)
# -
# We will classify each accident according to whether they are near a bike lane, and those that are will be classified for whether they occured before or after
|
LA Bikes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import os
import glob
os.getcwd()
assert os.getcwd().split("/")[-1] == 'notebooks'
from skimage.io import imread, imsave
from defects_dlmbl import segment_affinities as seg
import numpy as np
import matplotlib.pyplot as plt
image_folder = '../experiments/images/'
affinity_list = sorted(glob.glob(image_folder+"*_affinity.tif"), key=lambda fn: int(fn.split("/")[-1].split('_')[0]))
img_list = sorted(glob.glob(image_folder+"*_image.tif"), key=lambda fn: int(fn.split("/")[-1].split('_')[0]))
print(affinity_list[0].split('_')[0])
# +
# Run watershed segmentation on every predicted affinity map and save each
# result next to its input as "<index>_segmentation.tif".
segmentations = []
scores = []  # NOTE(review): never filled in this cell — presumably used later; confirm
for item in affinity_list:
    affinity = imread(item)
    assert len(affinity.shape) == 3
    assert affinity.shape[0] == 2  # only works for 2-channel affinities
    # Prepend an all-zero leading channel before stacking — the watershed
    # helper presumably expects three affinity channels (TODO confirm).
    affs = np.stack([np.zeros_like(affinity[0]),
                     affinity[0],
                     affinity[1]])
    affs = np.expand_dims(affs, axis=1)
    # Fix: the original line read `segmentation = segmentation = ...`,
    # a confusing (though harmless) duplicated assignment.
    segmentation = seg.watershed_from_affinities(affs, threshold=0.95)
    segmentation = segmentation.astype('int16')
    imsave(item.split('_')[0]+"_segmentation.tif", segmentation)
    segmentations.append(segmentation)
# -
imgs = [imread(img) for img in img_list]
plt.imshow(imgs[0].squeeze())
# +
n_col = 5
n_row = 1
fig, axs = plt.subplots(n_row, n_col, figsize=(12, 5))
sample_data = [segmentations[3+18*i] for i in range(5)]
axs = axs.flatten()
i = 3
for img, ax in zip(sample_data, axs):
ax.imshow(img[0,...])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_title(i)
i += 18
plt.tight_layout()
plt.show()
fig.savefig('seg_grid.png')
# +
n_col = 5
n_row = 1
fig, axs = plt.subplots(n_row, n_col, figsize=(12, 5))
sample_data = [imgs[3+18*i] for i in range(5)]
axs = axs.flatten()
i = 3
for img, ax in zip(sample_data, axs):
ax.imshow(img[0].squeeze(), cmap='gray')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_title(i)
i += 18
plt.tight_layout()
plt.show()
fig.savefig('img_grid.png')
# -
|
notebooks/Affinity_Segmenter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Scroll Down Below to start from Activity 15
# Removes Warnings
import warnings
warnings.filterwarnings('ignore')
#import the necessary packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data= pd.read_csv(r'Telco_Churn_Data.csv')
data.head(5)
len(data)
data.shape
data.isnull().values.any()
data.info()
## Bonus method for renaming the columns
data.columns=data.columns.str.replace(' ','_')
data.info()
data.describe()
data.describe(include='object')
# +
### Change some of the columns to categorical
### Columns to be change to categorical objects are
### Target Code
### Condition of Current Handset
### Current TechSupComplaints
data['Target_Code']=data.Target_Code.astype('object')
data['Condition_of_Current_Handset']=data.Condition_of_Current_Handset.astype('object')
data['Current_TechSupComplaints']=data.Current_TechSupComplaints.astype('object')
data['Target_Code']=data.Target_Code.astype('int64')
# -
data.describe(include='object')
# +
## Percentage of missing Values present
round(data.isnull().sum()/len(data)*100,2)
# -
data.Complaint_Code.value_counts()
data.Condition_of_Current_Handset.value_counts()
# +
### we will impute the values of both of complaint code and condition_of_current_handset with
### the most occuring values
data['Complaint_Code']=data['Complaint_Code'].fillna(value='Billing Problem')
data['Condition_of_Current_Handset']=data['Condition_of_Current_Handset'].fillna(value=1)
data['Condition_of_Current_Handset']=data.Condition_of_Current_Handset.astype('object')
# -
data['Target_Churn'].value_counts(0)
data['Target_Churn'].value_counts(1)*100
summary_churn = data.groupby('Target_Churn')
summary_churn.mean()
corr = data.corr()
plt.figure(figsize=(15,8))
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,annot=True)
corr
# ### Univariate Analysis
# +
f, axes = plt.subplots(ncols=3, figsize=(15, 6))
sns.distplot(data.Avg_Calls_Weekdays, kde=True, color="darkgreen", ax=axes[0]).set_title('Avg_Calls_Weekdays')
axes[0].set_ylabel('No of Customers')
sns.distplot(data.Avg_Calls, kde=True,color="darkblue", ax=axes[1]).set_title('Avg_Calls')
axes[1].set_ylabel('No of Customers')
sns.distplot(data.Current_Bill_Amt, kde=True, color="maroon", ax=axes[2]).set_title('Current_Bill_Amt')
axes[2].set_ylabel('No of Customers')
# -
# ### Bivariate Analysis
plt.figure(figsize=(17,10))
p=sns.countplot(y="Complaint_Code", hue='Target_Churn', data=data,palette="Set2")
legend = p.get_legend()
legend_txt = legend.texts
legend_txt[0].set_text("No Churn")
legend_txt[1].set_text("Churn")
p.set_title('Customer Complaint Code Distribution')
plt.figure(figsize=(15,4))
p=sns.countplot(y="Acct_Plan_Subtype", hue='Target_Churn', data=data,palette="Set2")
legend = p.get_legend()
legend_txt = legend.texts
legend_txt[0].set_text("No Churn")
legend_txt[1].set_text("Churn")
p.set_title('Customer Acct_Plan_Subtype Distribution')
# +
plt.figure(figsize=(15,4))
p=sns.countplot(y="Current_TechSupComplaints", hue='Target_Churn', data=data,palette="Set2")
legend = p.get_legend()
legend_txt = legend.texts
legend_txt[0].set_text("No Churn")
legend_txt[1].set_text("Churn")
p.set_title('Customer Current_TechSupComplaints Distribution')
# -
plt.figure(figsize=(15,4))
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 0),'Avg_Days_Delinquent'] , color=sns.color_palette("Set2")[0],shade=True,label='no churn')
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 1),'Avg_Days_Delinquent'] , color=sns.color_palette("Set2")[1],shade=True, label='churn')
ax.set(xlabel='Average No of Days Deliquent/Defaluted from paying', ylabel='Frequency')
plt.title('Average No of Days Deliquent/Defaluted from paying - churn vs no churn')
plt.figure(figsize=(15,4))
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 0),'Account_Age'] , color=sns.color_palette("Set2")[0],shade=True,label='no churn')
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 1),'Account_Age'] , color=sns.color_palette("Set2")[1],shade=True, label='churn')
ax.set(xlabel='Account_Age', ylabel='Frequency')
plt.title('Account_Age - churn vs no churn')
plt.figure(figsize=(15,4))
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 0),'Percent_Increase_MOM'] , color=sns.color_palette("Set2")[0],shade=True,label='no churn')
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 1),'Percent_Increase_MOM'] , color=sns.color_palette("Set2")[1],shade=True, label='churn')
ax.set(xlabel='Percent_Increase_MOM', ylabel='Frequency')
plt.title('Percent_Increase_MOM- churn vs no churn')
plt.figure(figsize=(15,4))
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 1),'Percent_Increase_MOM'] ,color=sns.color_palette("Set2")[1],shade=True, label='churn')
ax.set(xlabel='Percent_Increase_MOM', ylabel='Frequency')
plt.title('Percent_Increase_MOM- churn')
plt.figure(figsize=(15,4))
ax=sns.kdeplot(data.loc[(data['Target_Code'] == 0),'Percent_Increase_MOM'] ,color=sns.color_palette("Set2")[0],shade=True, label='no churn')
ax.set(xlabel='Percent_Increase_MOM', ylabel='Frequency')
plt.title('Percent_Increase_MOM- no churn')
# ### Feature Selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
### Encoding the categorical variables
data["Acct_Plan_Subtype"] = data["Acct_Plan_Subtype"].astype('category').cat.codes
data["Complaint_Code"] = data["Complaint_Code"].astype('category').cat.codes
data[["Acct_Plan_Subtype","Complaint_Code"]].head()
# +
##Target_Churn
##Target_Code
target = 'Target_Code'
X = data.drop(['Target_Code','Target_Churn'], axis=1)
y=data[target]
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.15, random_state=123, stratify=y)
# +
forest=RandomForestClassifier(n_estimators=500,random_state=1)
forest.fit(X_train,y_train)
importances=forest.feature_importances_
features = data.drop(['Target_Code','Target_Churn'],axis=1).columns
indices = np.argsort(importances)[::-1]
plt.figure(figsize=(15,4))
plt.title("Feature importances using Random Forest")
plt.bar(range(X_train.shape[1]), importances[indices],
color="r", align="center")
plt.xticks(range(X_train.shape[1]), features[indices], rotation='vertical',fontsize=15)
plt.xlim([-1, X_train.shape[1]])
plt.show()
# -
# ### Logistic Regression
# +
### From the feature selection let us take only the top 6 features
import statsmodels.api as sm
top7_features = ['Avg_Days_Delinquent','Percent_Increase_MOM','Avg_Calls_Weekdays','Current_Bill_Amt','Avg_Calls','Complaint_Code','Account_Age']
logReg = sm.Logit(y_train, X_train[top7_features])
logistic_regression = logReg.fit()
# -
logistic_regression.summary
logistic_regression.params
# Create function to compute coefficients
coef = logistic_regression.params
def y (coef, Avg_Days_Delinquent,Percent_Increase_MOM,Avg_Calls_Weekdays,Current_Bill_Amt,Avg_Calls,Complaint_Code,Account_Age) :
    """Linear combination of the seven churn features with fitted coefficients.

    `coef` supplies the coefficients in the same order as the feature
    arguments; returns the resulting log-odds value.
    """
    feature_values = (Avg_Days_Delinquent, Percent_Increase_MOM, Avg_Calls_Weekdays,
                      Current_Bill_Amt, Avg_Calls, Complaint_Code, Account_Age)
    return sum(c * v for c, v in zip(coef, feature_values))
# +
import numpy as np
# An Employee with
# Avg_Days_Delinquent 40
# Percent_Increase_MOM 5
# Avg_Calls_Weekdays 39000
# Current_Bill_Amt 12000
# Avg_Calls 9000
# Complaint_Code 0
# Account_Age 17
# has 81 % chance of churn
y1 = y(coef, 40, 5, 39000,12000,9000,0,17)
p = np.exp(y1) / (1+np.exp(y1))
p
# -
# # Activity 15-Lesson 8
# ### Using Scikit Learn
# ## Logistic Regression
from sklearn.linear_model import LogisticRegression
clf_logistic = LogisticRegression(random_state=0, solver='lbfgs').fit(X_train[top7_features], y_train)
clf_logistic
clf_logistic.score(X_test[top7_features], y_test)
# ## SVM
from sklearn import svm
clf_svm=svm.SVC(kernel='linear', C=1)
clf_svm
clf_svm.fit(X_train[top7_features],y_train)
clf_svm.score(X_test[top7_features], y_test)
# ## Decision Tree
from sklearn import tree
clf_decision = tree.DecisionTreeClassifier()
clf_decision
clf_decision.fit(X_train[top7_features],y_train)
clf_decision.score(X_test[top7_features], y_test)
# ## Random Forest
from sklearn.ensemble import RandomForestClassifier
# +
clf_random = RandomForestClassifier(n_estimators=20, max_depth=None,
min_samples_split=7, random_state=0)
# -
clf_random.fit(X_train[top7_features], y_train)
clf_random.score(X_test[top7_features], y_test)
# # Activity 16-Lesson 8
# ## Standardization
# +
# Avg_Days_Delinquent
# Percent_Increase_MOM
# Avg_Calls_Weekdays
# Current_Bill_Amt
# Avg_Calls
# Complaint_Code
# Account_Age
# -
from sklearn import preprocessing
## Features to transform
top5_features=['Avg_Calls_Weekdays', 'Current_Bill_Amt', 'Avg_Calls', 'Account_Age','Avg_Days_Delinquent']
## Features Left
top2_features=['Percent_Increase_MOM','Complaint_Code']
scaler = preprocessing.StandardScaler().fit(X_train[top5_features])
X_train_scalar=pd.DataFrame(scaler.transform(X_train[top5_features]),columns = X_train[top5_features].columns)
X_train_scalar_combined=pd.concat([X_train_scalar, X_train[top2_features].reset_index(drop=True)], axis=1, sort=False)
X_test_scalar=pd.DataFrame(scaler.transform(X_test[top5_features]),columns = X_test[top5_features].columns)
X_test_scalar_combined=pd.concat([X_test_scalar, X_test[top2_features].reset_index(drop=True)], axis=1, sort=False)
clf_random.fit(X_train_scalar_combined, y_train)
clf_random.score(X_test_scalar_combined, y_test)
# ## Grid Search
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
parameters = [ {'min_samples_split': [4,5,7,9,10], 'n_estimators':[10,20,30,40,50,100,150,160,200,250,300],'max_depth': [2,5,7,10]}]
clf_random_grid = GridSearchCV(RandomForestClassifier(), parameters, cv = StratifiedKFold(n_splits = 10))
clf_random_grid.fit(X_train_scalar_combined, y_train)
print('best score train:', clf_random_grid.best_score_)
print('best parameters train: ', clf_random_grid.best_params_)
clf_random_grid.score(X_test_scalar_combined, y_test)
# # Activity 17-Lesson 8
# ## Performance Metrics
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn import metrics
clf_random_grid = RandomForestClassifier(n_estimators=100, max_depth=7,
min_samples_split=10, random_state=0)
clf_random_grid.fit(X_train_scalar_combined, y_train)
y_pred=clf_random_grid.predict(X_test_scalar_combined)
target_names = ['No Churn', 'Churn']
print(classification_report(y_test, y_pred, target_names=target_names))
# +
cm = confusion_matrix(y_test, y_pred)
cm_df = pd.DataFrame(cm,
index = ['No Churn','Churn'],
columns = ['No Churn','Churn'])
# -
plt.figure(figsize=(8,6))
sns.heatmap(cm_df, annot=True,fmt='g',cmap='Blues')
plt.title('Random Forest \nAccuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
plt.ylabel('True Values')
plt.xlabel('Predicted Values')
plt.show()
from sklearn.metrics import roc_curve,auc
# +
models = [
{
'label': 'Logistic Regression',
'model': clf_logistic,
},
{
'label': 'SVM',
'model': clf_svm,
},
{
'label': 'Decision Tree',
'model': clf_decision,
},
{
'label': 'Random Forest Grid Search',
'model': clf_random_grid,
}
]
for m in models:
model = m['model']
model.fit(X_train_scalar_combined, y_train)
y_pred=model.predict(X_test_scalar_combined)
fpr, tpr, thresholds = roc_curve(y_test, y_pred, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label='%s AUC = %0.2f' % (m['label'], roc_auc))
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.ylabel('Sensitivity(True Positive Rate)')
plt.xlabel('1-Specificity(False Positive Rate)')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
|
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MARKETING_DATA_SCIENCE/01_Data_Science_for_Marketing_Analytics/Part_08/01_Activity_15_17.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.6 64-bit ('3.9.6')
# name: python3
# ---
# + [markdown] id="6PCaAU1y66FE"
# # Benchmark `nlpo3.segment`
# -
# Benchmarks nlpO3 (Rust) `nlpo3.segment` (unsafe) against PyThaiNLP (Python) `newmm.segment` (unsafe) using the same dictionary.
#
# https://github.com/PyThaiNLP/nlpo3/
# + id="iAlScT9d66FX" outputId="fe7a569c-f384-4e95-cbc3-3d412a99fc74"
import time
import matplotlib.pyplot as plt
import numpy as np
from tqdm.auto import tqdm
def time_func(func, arg):
    """Return the wall-clock time, in nanoseconds, that func(arg) takes to run."""
    started = time.perf_counter_ns()
    func(arg)
    finished = time.perf_counter_ns()
    return finished - started
# + [markdown] id="Ssp84MKA66Fb"
# ## Load Custom Dictionary to the Tokenizers
#
# Both `o3_newmm()` and `py_newmm()` will use the same word list (`words_th.txt`)
# + id="XHh2LIdG66Fd" outputId="f1f8c12d-fd61-40f2-d31f-98c99cd120ae"
DICT_FILE = "../../words_th.txt"
# +
from pythainlp.tokenize.newmm import segment as py_segment
from pythainlp.util import dict_trie
trie = dict_trie(dict_source=DICT_FILE)
def py_newmm(txt, safe_mode=False):
    """Segment `txt` with PyThaiNLP's pure-Python newmm tokenizer, using the shared custom dictionary `trie`."""
    tokens = py_segment(txt, custom_dict=trie, safe_mode=safe_mode)
    return tokens
# +
from nlpo3 import load_dict
from nlpo3 import segment as o3_segment
load_dict(DICT_FILE, "test_dict") # create "test_dict" dictionary
def o3_newmm(txt, safe=False, parallel=False):
    """Segment `txt` with the Rust nlpO3 tokenizer against the pre-loaded "test_dict" dictionary."""
    tokens = o3_segment(txt, dict_name="test_dict", parallel=parallel, safe=safe)
    return tokens
# -
# ## Load Test data
# +
from datasets import load_dataset
datasets = load_dataset('wisesight_sentiment')
datasets
# + id="GCtUUACp66Fc" outputId="fe1c7236-28b3-4ee4-a0e4-81312e7762fb"
txt = datasets['train']['texts'][0]
txt
# -
py_newmm(txt)[:10]
o3_newmm(txt)[:10]
# + [markdown] id="Qyrh-uny66Fh"
# ## One Example
# + [markdown] id="9YqrA2Pb66Fj"
# ### Average Run Time for One Example
# + id="Iz58c6Ff66Fj" outputId="0d45dec2-bece-494e-8725-31ffdc97e1de"
# %timeit py_newmm(txt)
# + id="8jvijfPJ66Fl" outputId="64a89f5b-468e-4af5-da35-6c938019e021"
# %timeit o3_newmm(txt)
# + id="lAdLgqdu66Fm" outputId="42e39b71-9331-4311-a401-61fa68c21fde"
# %timeit o3_newmm(txt, parallel=True)
# -
# %timeit py_newmm(txt, safe_mode=True)
# %timeit o3_newmm(txt, safe=True)
# + [markdown] id="L0lWERZk66Fm"
# ## All Examples
# + [markdown] id="GXmhzISZ66Fn"
# ### Check If Results Match for All Examples
# + id="ZY9Mosag66Fn" outputId="46a56ce0-fd20-430a-d9b6-9564f4c25141"
corrects = [o3_newmm(txt) == py_newmm(txt) for txt in datasets['train']['texts']]
np.mean(corrects), len(corrects)
# + [markdown] id="Tyc_cHaf66Fo"
# ### Average Run Time Across All Examples
# + id="uYeUydsQ66Fo" outputId="b20ed761-fa2b-42b8-8a6c-ff3a1d2dc6b3"
py_newmms = [time_func(py_newmm, txt) for txt in datasets['train']['texts']]
o3_newmms = [time_func(o3_newmm, txt) for txt in datasets['train']['texts']]
# o3 newmm is over 2x faster than python newmm, on average
np.mean(py_newmms), np.mean(o3_newmms), np.mean(py_newmms) / np.mean(o3_newmms)
# + id="8hRoDxm966Fp" outputId="c4e8c0fd-97ca-4e3a-ee63-1281f84bb1d9"
# look at distribution; o3 newmm also consistently performs better
plt.hist(py_newmms, bins=30, alpha=0.5)
plt.hist(o3_newmms, bins=30, alpha=0.5)
# + [markdown] id="EMZZ8SgY66Fp"
# ## Run Time as Sequence Length Grows
# + id="3P_z59rS66Fp" outputId="5951dd75-388c-4f9e-a1df-f0e98f663ec3"
txt = datasets['train']['texts'][1]
txt2 = ''.join(o3_newmm(txt)[:10])
txt2, len(o3_newmm(txt2))
# + colab={"referenced_widgets": ["0689a5cf946049a0ac98bdf9e1353810", "2b481450056f4c1883c163bf066110a3"]} id="FEmDkPHL66Fq" outputId="2b2c96ab-7044-423b-9b40-e06ca186a213"
py_newmms = [time_func(py_newmm, txt2*i) for i in tqdm([10**j for j in range(5)])]
o3_newmms = [time_func(o3_newmm, txt2*i) for i in tqdm([10**j for j in range(5)])]
# -
# Performance really starts to deviate once sequence length reaches 10^3 tokens and above.
#
# python newmm is dashed line.
# + id="sT8GL0oX66Fr" outputId="6bb4acfd-4721-47cb-d8ff-943c67a4cedf"
positions = [i for i in range(5)]
labels = [f'10^{i+1}' for i in range(5)]
plt.xticks(positions, labels)
plt.plot(py_newmms, linestyle='dashed')
plt.plot(o3_newmms)
# + id="5YU5aiNs66Fs"
# zooming in on inflexion point
positions = [i for i in range(4)]
labels = [f'10^{i+1}' for i in range(4)]
plt.xticks(positions, labels)
plt.plot(py_newmms[:-1], linestyle='dashed')
plt.plot(o3_newmms[:-1])
# -
|
nlpo3-python/notebooks/nlpo3_segment_benchmarks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
# Built-in modules
import os
import gzip
import shutil
from pathlib import Path
from datetime import datetime, timedelta
# Basics of Python data handling and visualization
import tqdm
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely import wkt
# Imports from eo-learn and sentinelhub-py
from eolearn.core import EOPatch, EOTask, LinearWorkflow, FeatureType
# Visualisation utils
from air_quality_and_health_challenge.utils import (get_extent,
draw_outline,
draw_bbox,
draw_feature,
draw_true_color,
unzip_file,
load_tiffs,
days_to_datetimes,
datetimes_to_days,
reproject_tiff,
upscale_tiff,
mask_tiff)
from lib.utils import _get_point
from configs.utils import load_config
from lib.train_utils import get_data, load_pickle, save_pickle
from algorithms.ensemble import Ensemble
# +
TRAIN_DIR = Path("./data/train")
aoi = AOI = "Italy"
LABEL = "NO2"
WDIR = Path("./wdir") / AOI
os.makedirs(WDIR, exist_ok=True)
# +
from configs.utils import load_config
configs = {}
configs["pm25_italy"] = load_config("configs/pm25_italy.yaml")
configs["pm25_california"] = load_config("configs/pm25_california.yaml")
configs["pm25_southafrica"] = load_config("configs/pm25_southafrica.yaml")
configs["no2_italy"] = load_config("configs/no2_italy.yaml")
configs["no2_california"] = load_config("configs/no2_california.yaml")
configs["no2_southafrica"] = load_config("configs/no2_southafrica.yaml")
# Pick the config matching the pollutant label and area of interest.
config = configs[("no2" if LABEL == "NO2" else "pm25") + "_" + AOI.lower().replace("_", "")]
config
# -
#### GT
# Locate the ground-truth shapefile: the directory holds one archive whose
# basename (minus a 3-char extension) matches the .shp file.
# Renamed 'dir' -> 'gt_dir' so the 'dir' builtin is no longer shadowed.
gt_dir = TRAIN_DIR / AOI / "ground_air_quality" / ("NO2" if LABEL == "NO2" else "PM25")
gt_path = gt_dir / (os.listdir(gt_dir)[0][:-3] + 'shp')
gt_df = gpd.read_file(gt_path)
gt_df.head()
outdir = WDIR
indir = TRAIN_DIR / AOI
data, gts, target_size = get_data(indir, outdir, LABEL, aoi, config["feature_keys"])
# +
# Connect the per-day satellite features to the ground-truth station readings.
dataset = { "X": [],
            "Y": [],
            "gt": [],
            "native_Y": [],
            "target_Y": [],
            "lat": [],
            "lon": [],
            "coords": [],
            "date": []}
in_eop = data['data'][0]['s5p' if LABEL == "NO2" else 'cams']
grid = np.zeros(target_size)
for date, lat, lon, gt in tqdm.tqdm(gts[["Date", "SITE_LATIT", "SITE_LONGI", "AirQuality"]].values):
    # Map the station's lat/lon onto the raster grid of the EOPatch.
    y_ind, x_ind = _get_point(lat, lon, grid, in_eop.bbox)
    try:
        ind = data["date"].index(int(date))
    except ValueError:
        # list.index raises ValueError when the date has no feature data;
        # int(date) raises ValueError for unparsable dates — both mean "skip".
        ind = None
    # Bug fix: the previous 'if ind:' silently dropped matches at index 0.
    if ind is not None:
        native = data['feat_dicts'][ind]['no2_native' if LABEL == 'NO2' else 'pm25_native']
        target = data['feat_dicts'][ind]['no2_target' if LABEL == 'NO2' else 'pm25_target']
        # Stations falling on a nodata pixel contribute nothing.
        if np.isnan(native[y_ind, x_ind]):
            continue
        dataset["X"].append(data['feats'][ind][y_ind, x_ind])
        dataset["native_Y"].append(native[y_ind, x_ind])
        dataset["target_Y"].append(target[y_ind, x_ind])
        dataset["gt"].append(gt)
        dataset["lat"].append(lat)
        dataset["lon"].append(lon)
        dataset["coords"].append((lat, lon))
        dataset["date"].append(date)
dataset = {k: np.array(v) for k, v in dataset.items()}
# +
# Compute the learning target Y (ground truth minus model-native estimate) and
# build a boolean mask that drops stations whose GT disagrees wildly with the
# native estimate. Thresholds are hand-tuned per AOI.
gt, native_Y = dataset["gt"], dataset["native_Y"]
# Filter any very off GT
if LABEL == "NO2":
    # 6.02214e4 * 1.9125 presumably converts the native NO2 units into the
    # ground-truth units — TODO confirm the unit conversion against the data docs.
    dataset["Y"] = gt / (6.02214 * 1e4 * 1.9125) - native_Y
    v1 = gt
    v2 = native_Y * 6.02214 * 1e4 * 1.9125
    m = np.min([v2, v1], axis=0)  # NOTE(review): 'm' is unused in this branch
    r = (v1 / v2)
    if "Africa" in aoi:
        mask = (r < 3.5) & (r > 0.7)
    elif "California" == aoi:
        mask = (r < 4.5) & (r > 0.7)
    elif "Italy" == aoi:
        mask = (r < 2.5) & (r > 0.85)
else:
    dataset["Y"] = gt - native_Y
    v1 = gt
    v2 = native_Y
    if "Africa" in aoi:
        m = np.min([v2, v1], axis=0)
        mask = ~((abs(v2 - v1) > (m * 3)) & ((v2 > 1) | (v1 > 1)))
    elif aoi == "Italy":
        mask = ~((abs(v2 - v1) > v2 * 0.9) & ((v2 > 5) | (v1 > 5)))
    elif aoi == "California":
        mask = ~((abs(v2 - v1) > v2 * 0.535) & ((v2 > 5) | (v1 > 5)))
# NOTE(review): 'mask' is never assigned when 'aoi' matches none of the
# handled regions, which would raise NameError here — confirm the supported
# AOI list is exactly {SouthAfrica, California, Italy}.
print(mask.mean())
print(np.corrcoef(gt[mask], native_Y[mask]))
# -
X = dataset["X"][mask]
Y = dataset["Y"][mask]
coords = dataset["coords"][mask]
X.shape
mu = np.nanmean(data["feats"], axis=(0, 1, 2))
sigma = np.nanstd(data["feats"], axis=(0, 1, 2))
from algorithms.ensemble import Ensemble
ensemble = Ensemble(config, indir, outdir)
ensemble.train(X, Y, coords=coords, mu=mu, sigma=sigma)
X
ensemble.models
ensemble.models['model2'].predict(X[:50])
Y[:50]
ensemble.save()
|
train.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Summary
# <pre>
# Author : <NAME>
# Project Name : Detection of Pneumonia from Chest X-Ray Images using Convolutional Neural Network,
# and Transfer Learning.
# Description : 1. Detected Pneumonia from Chest X-Ray images by retraining pretrained model “InceptionV3”
# with 5856 images of X-ray (1.15GB).
# 2. For retraining removed output layers, freezed first few layers and Fine-tuned model for
# two new label classes (Pneumonia and Normal).
# 3. Attained testing accuracy 83.44% and loss 0.42.
# Method :
# Tools/Library : Python, Keras, PyTorch, TensorFlow
# Version History : 1.0.0.0
# Current Version : 1.0.0.0
# Last Update : 11.30.2018
# Comments : Please use Anaconda editor for convenience of visualization.
# </pre>
# #### Code
# <pre>
# GitHub Link : <a href=https://github.com/anjanatiha/Detection-of-Pneumonia-from-Chest-X-Ray-Images>Detection of Pneumonia from Chest X-Ray Images(GitHub)</a>
# GitLab Link : <a href=https://gitlab.com/anjanatiha/Detection-of-Pneumonia-from-Chest-X-Ray-Images>Detection of Pneumonia from Chest X-Ray Images(GitLab)</a>
# Portfolio : <a href=https://anjanatiha.wixsite.com/website>Anjana Tiha's Portfolio</a>
# </pre>
#
# #### Dataset
# <pre>
# Dataset Name : Chest X-Ray Images (Pneumonia)
# Dataset Link : <a href=https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia>Chest X-Ray Images (Pneumonia) Dataset (Kaggle)</a>
# : <a href=https://data.mendeley.com/datasets/rscbjbr9sj/2>Chest X-Ray Images (Pneumonia) Dataset (Original Dataset)</a>
# Original Paper : <a href=https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5>Identifying Medical Diagnoses and Treatable Diseases by Image-Based Deep Learning</a>
# (<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>)
# https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
# </pre>
# <!---
# #### Library/Tools Version
# - Python - v3.6.7
# - argparse
# - random
# - numpy
# - shutil
# - gc
# - re
# - Keras - 2.2.4
# - Keras-preprocessing - v1.0.5
# - TensorFlow - 1.12
# - PIL/Pillow - 5.1.0
# - Matplotlib - 2.2.2
# - scikit-learn - 0.19.1
# - mlxtend - 0.14.0
# -->
# #### Commands / Running Instruction
# <pre>
# tensorboard --logdir=logs
# # %config IPCompleter.greedy=True
# </pre>
# <pre>
# <b>Dataset Details</b>
# Dataset Name : Chest X-Ray Images (Pneumonia)
# Number of Class : 2
# Number/Size of Images : Total : 5856 (1.15 Gigabyte (GB))
# Training : 5216 (1.07 Gigabyte (GB))
# Validation : 320 (42.8 Megabyte (MB))
# Testing : 320 (35.4 Megabyte (MB))
#
# <b>Model Parameters</b>
# Machine Learning Library: Keras
# Base Model : InceptionV3
# Optimizers : Adam
# Loss Function : categorical_crossentropy
#
# <b>Training Parameters</b>
# Batch Size : 64
# Number of Epochs : 50
# Training Time : 3 Hours
#
# <b>Output (Prediction/ Recognition / Classification Metrics)</b>
# <!--<b>Validation</b>-->
# <b>Testing</b>
# Accuracy : 83.44%
# Loss : 0.42
# <!--Precision : -->
# Recall : 94% (highest)
# <!--Specificity : -->
# </pre>
# ## Import Libraries
# +
import sys
import os
import argparse
import random
import time
import datetime
from collections import Counter
import numpy as np
import pandas as pd
import shutil
from tqdm import tqdm
import inspect
import gc
import re
from PIL import Image
import cv2
import keras
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras.models import Model
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Dropout, GlobalAveragePooling1D, GlobalAveragePooling2D, Flatten, BatchNormalization, Dense
from keras.applications.inception_v3 import InceptionV3
from keras.constraints import maxnorm
from keras import optimizers
from keras.optimizers import Adam, SGD , RMSprop
from keras import backend as K
K.set_image_dim_ordering('th')
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
import tensorflow as tf
from IPython.display import display
import seaborn as sns
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# +
# Creates directory; if directory exists, removes it first when remove=True
def create_directory(directory_path, remove=False):
    """Create *directory_path*, optionally recreating it from scratch.

    Args:
        directory_path: path of the directory to create.
        remove: when True and the directory already exists, delete it
            (including contents) and create a fresh empty one.

    Returns:
        True on success, False if the directory could not be (re)created.
    """
    if remove and os.path.exists(directory_path):
        try:
            shutil.rmtree(directory_path)
            os.mkdir(directory_path)
        except OSError:  # narrowed from a bare except that hid real bugs
            print("Could not remove directory : ", directory_path)
            return False
    else:
        try:
            os.mkdir(directory_path)
        except OSError:  # e.g. FileExistsError or missing parent directory
            print("Could not create directory: ", directory_path)
            return False
    return True
# Removes directory, if directory exists
def remove_directory(directory_path):
    """Delete *directory_path* and everything under it, if it exists.

    Returns:
        True when the directory is gone afterwards (including the case where
        it never existed); False when deletion failed.
    """
    if os.path.exists(directory_path):
        try:
            shutil.rmtree(directory_path)
        except OSError:  # narrowed from a bare except
            print("Could not remove directory : ", directory_path)
            return False
    return True
def clear_directory(directory_path):
    """Delete every file and subdirectory inside *directory_path*.

    The directory itself is kept. Failures on individual entries are printed
    and skipped; always returns True (matching the original contract).
    """
    dirs_files = os.listdir(directory_path)
    for item in dirs_files:
        # Bug fix: the old code concatenated strings (directory_path + item),
        # which produced wrong paths whenever directory_path lacked a trailing
        # separator; os.path.join is always correct.
        item_path = os.path.join(directory_path, item)
        try:
            if os.path.isfile(item_path):
                os.unlink(item_path)
            elif os.path.isdir(item_path):
                shutil.rmtree(item_path)
        except OSError as e:
            print(e)
    return True
def remove_empty_folders(path, removeRoot=True):
    """Recursively prune empty directories beneath *path*.

    Children are visited first so chains of nested empty folders collapse
    bottom-up; *path* itself is only removed when removeRoot is True and it
    ends up empty.
    """
    if not os.path.isdir(path):
        return
    # Descend into every subdirectory before re-checking this level.
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            remove_empty_folders(child)
    # The directory may have emptied out after its children were pruned.
    if removeRoot and len(os.listdir(path)) == 0:
        print("Removing empty folder:", path)
        os.rmdir(path)
# -
# Return a formatted timestamp string for one of four representations.
def date_time(x):
    """Format the current date/time.

    x=1/2: full timestamp (numeric / abbreviated month), x=3: datetime repr,
    x=4: today's date. Any other value yields None.
    """
    now = datetime.datetime.now()
    if x == 1:
        return 'Timestamp: {:%Y-%m-%d %H:%M:%S}'.format(now)
    if x == 2:
        return 'Timestamp: {:%Y-%b-%d %H:%M:%S}'.format(now)
    if x == 3:
        return 'Date now: %s' % now
    if x == 4:
        return 'Date today: %s' % datetime.date.today()
# Prints a value between two dashed rules, as a visual debugging marker.
def debug(x):
    """Print *x* framed by 40-dash rules on either side."""
    rule = "-" * 40
    print(rule, x, rule)
# Removes everything except alphabetical and selected characters from name string
def name_correct(name):
    """Title-case *name* after replacing every character that is not a
    letter, comma, or colon with a space."""
    cleaned = re.sub(r'[^a-zA-Z,:]', ' ', name)
    return cleaned.title()
# ### Data Visualization Function
# +
def get_reset_subplot_params(nrows, ncols, dpi):
    """Return the default parameter bundle for building an image subplot grid.

    Figure size scales with the grid: 2.5 inches per row/column. Ticks are
    suppressed on every axes via subplot_kw.
    """
    return {
        "nrows": nrows,
        "ncols": ncols,
        "figsize_col": ncols * 2.5,
        "figsize_row": nrows * 2.5,
        "dpi": dpi,
        "facecolor": 'w',
        "edgecolor": 'k',
        "subplot_kw": {'xticks': [], 'yticks': []},
        "axes.titlesize": 'small',
        "hspace": 0.5,
        "wspace": 0.3,
    }
def get_reset_plot_params(figsize=(15, 5), title="", xlabel="", ylabel="", legends=None, title_fontsize=18, label_fontsize=14, image_file_name="", save=False, dpi=100, update_image=True):
    """Return a fresh dict of default single-plot settings.

    Bug fixes:
    - 'legends' previously defaulted to a shared mutable list ([]), so one
      caller's append leaked into every later call; None is now the sentinel.
    - the 'dpi' argument was accepted but never stored, which made
      plot_sample_image's fig.savefig(..., dpi=plot_params["dpi"]) raise
      KeyError; the key is now included.
    """
    plot_params = {}
    plot_params["figsize"] = figsize
    plot_params["title"] = title
    plot_params["xlabel"] = xlabel
    plot_params["ylabel"] = ylabel
    plot_params["legends"] = [] if legends is None else legends
    plot_params["title_fontsize"] = title_fontsize
    plot_params["axes.titlesize"] = "small"
    plot_params["label_fontsize"] = label_fontsize
    plot_params["image_file_name"] = image_file_name
    plot_params["save"] = save
    plot_params["dpi"] = dpi
    plot_params["update_image"] = update_image
    plot_params["subplot"] = None
    return plot_params
# +
def select_image_by_category(image_dir, image_count_per_category):
    """Pick *image_count_per_category* distinct random image paths per class.

    Each subdirectory of *image_dir* is treated as one class; the returned
    dict maps class name -> list of file paths.

    Bug fix: the old index-memo loop initialised its index to -1, which
    slipped past the membership check, so the *last* file in every class was
    always selected first. random.sample draws uniformly without replacement.
    """
    image_file_paths = {}
    for class_name in os.listdir(image_dir):
        subdir_path = image_dir + "/" + class_name
        subdir_files = os.listdir(subdir_path)
        chosen = random.sample(subdir_files, image_count_per_category)
        image_file_paths[class_name] = [subdir_path + "/" + f for f in chosen]
    return image_file_paths
def get_fig_axs(subplot_params):
    """Create a matplotlib figure/axes grid from a subplot-parameter dict
    (as produced by get_reset_subplot_params)."""
    p = subplot_params
    fig, axs = plt.subplots(
        nrows=p["nrows"],
        ncols=p["ncols"],
        figsize=(p["figsize_col"], p["figsize_row"]),
        dpi=p["dpi"],
        facecolor=p["facecolor"],
        edgecolor=p["edgecolor"],
        subplot_kw=p["subplot_kw"],
    )
    return fig, axs
def plot_sample_image(image_file_paths, plot_params, subplot_params, update_image=True):
    """Show the images in *image_file_paths* on one subplot grid, each titled
    with its file name.

    Bug fix: plt.title() used to be called *before* plt.subplot(), so each
    title landed on the previously active axes instead of the image it named;
    the subplot is now selected first.

    Note: 'update_image' is kept for interface compatibility; saving is
    governed by plot_params, consistent with the other plotting helpers.
    """
    fig, axs = get_fig_axs(subplot_params)
    plt.rcParams.update({'axes.titlesize': plot_params["axes.titlesize"]})
    plt.subplots_adjust(hspace=subplot_params["hspace"], wspace=subplot_params["wspace"])
    for i, img_filepath in enumerate(image_file_paths):
        img = cv2.imread(img_filepath, 1)
        plt.subplot(subplot_params["nrows"], subplot_params["ncols"], i + 1)
        plt.title(img_filepath.split("/")[-1])
        plt.imshow(img)
        plt.xticks([])
        plt.yticks([])
    if plot_params["update_image"] and os.path.exists(plot_params["image_file_name"]):
        os.remove(plot_params["image_file_name"])
    if plot_params["save"]:
        fig.savefig(plot_params["image_file_name"], dpi=plot_params["dpi"])
    plt.tight_layout()
    plt.show()
def show_class_sample_images(directory, image_count_per_category=5, save=False, dpi=100, update_image=False):
    """Print and plot a row of random sample images for every class found
    under *directory* (one subdirectory per class)."""
    class_count = len(os.listdir(directory))
    print("Number of Class: ", class_count)
    samples = select_image_by_category(directory, image_count_per_category)
    separator = "-" * 55
    for class_name in samples:
        params = get_reset_plot_params(image_file_name="img.png", save=save, dpi=dpi, update_image=update_image)
        grid = get_reset_subplot_params(nrows=1, ncols=image_count_per_category, dpi=dpi)
        print("%s%s%s" % (separator, name_correct(class_name), separator))
        plot_sample_image(samples[class_name], params, grid)
        print("")
    print("%s%s%d%s" % (separator, "All Class Printed:", class_count, separator))
# +
# count number of files in each subdirectory of a directory
def subdirectory_file_count(master_directory):
    """Return parallel lists (subdirectory names, file counts) for every
    immediate subdirectory of *master_directory*."""
    names, counts = [], []
    for entry in os.listdir(master_directory):
        names.append(entry)
        counts.append(len(os.listdir(os.path.join(master_directory, entry))))
    return names, counts
# Render one labeled seaborn bar chart using settings from plot_property.
def bar_plot(x, y, plot_property):
    """Draw a bar chart of values *y* over categories *x* on the current
    figure (or the subplot named in plot_property['subplot'], if set)."""
    subplot_id = plot_property['subplot']
    if subplot_id:
        plt.subplot(subplot_id)
    sns.barplot(x=x, y=y)
    title_size = plot_property['title_fontsize']
    label_size = plot_property['label_fontsize']
    plt.title(plot_property['title'], fontsize=title_size)
    plt.xlabel(plot_property['xlabel'], fontsize=label_size)
    plt.ylabel(plot_property['ylabel'], fontsize=label_size)
    plt.xticks(range(len(x)), x)
# Bar chart of per-class file counts for one dataset split directory.
def count_bar_plot(master_directory, plot_property):
    """Count files per subdirectory of *master_directory* and bar-plot the
    counts with prettified class names."""
    names, counts = subdirectory_file_count(master_directory)
    pretty_names = [name_correct(n) for n in names]
    bar_plot(pretty_names, counts, plot_property)
# Side-by-side class-count bar charts for the train/val/test split directories.
def show_train_val_test(training_dir, validation_dir, testing_dir, plot_property):
    """Plot class counts for the three dataset splits in adjacent subplots.

    Note: mutates plot_property in place ('title' and 'subplot'), matching
    the original behavior.
    """
    plt.figure(figsize=plot_property['figsize'])
    base_title = plot_property['title']
    first_subplot = plot_property['subplot']
    splits = [("Training", training_dir), ("Validation", validation_dir), ("Testing", testing_dir)]
    for offset, (split_name, split_dir) in enumerate(splits):
        plot_property['title'] = base_title + " (" + split_name + ")"
        plot_property['subplot'] = first_subplot + offset
        count_bar_plot(split_dir, plot_property)
    plt.show()
# +
# Reset the TensorFlow graph to free up memory and resource allocations.
def reset_graph(model=None):
    """Tear down the current TF1 default graph and Keras session, then force
    a garbage-collection pass. Returns True on success.

    NOTE(review): `del model` only unbinds the local parameter name — the
    caller's reference keeps the model object alive; confirm whether freeing
    the model here is actually intended.
    """
    if model:
        try:
            del model
        except:
            return False
    tf.reset_default_graph()
    K.clear_session()
    gc.collect()
    return True
# reset callbacks
def reset_callbacks(checkpoint=None, reduce_lr=None, early_stopping=None, tensorboard=None):
    """Clear callback references.

    NOTE(review): rebinding the parameters below only changes *local* names;
    the caller's variables are untouched, so this function has no observable
    effect — confirm whether it can be removed or should return new values.
    """
    checkpoint = None
    reduce_lr = None
    early_stopping = None
    tensorboard = None
# -
# ### Preprocessing
reset_graph()
reset_callbacks()
# +
# Configure input/ output directory
# Configure training, validation, testing directory
input_directory = r"data/input/"
output_directory = r"data/output/"
training_dir = input_directory + r"train"
validation_dir = input_directory + r"val"
testing_dir = input_directory + r"test"
figure_directory = r"data/output/figures"
figure_directory = "data/output/figures"
if not os.path.exists(figure_directory):
os.mkdir(figure_directory)
file_name_pred_batch = figure_directory+r"/result"
file_name_pred_sample = figure_directory+r"/sample"
# -
show_class_sample_images(training_dir, image_count_per_category=5, save=False, dpi=100, update_image=False)
# +
plot_params = get_reset_plot_params()
plot_params['figsize'] = (18,4)
plot_params['title_fontsize'] = 13
plot_params['label_fontsize'] = 10
plot_params['title'] = "Number of Cases"
plot_params['subplot'] = 131
show_train_val_test(training_dir, validation_dir, testing_dir, plot_params)
classes = os.listdir(training_dir)
classes = [name_correct(i) for i in classes]
# -
#
# ### Image Preprocessing/ Augmentation/ Transformation for Training, Validation, Testing and Dataset
# +
# batch_size = 32
# target_size = (299, 299)
# color_mode = "rgb"
rescale = 1./255
target_size = (150, 150)
batch_size = 32
class_mode = "categorical"
# class_mode = "binary"
train_datagen = ImageDataGenerator(
rescale=rescale,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
training_dir,
target_size=target_size,
class_mode=class_mode,
batch_size=batch_size,
shuffle=True)
# Validation/test pipelines: rescale only, no augmentation, fixed order so
# predictions line up with generator.classes.
# NOTE(review): batch_size below is len(os.listdir(<split dir>)) — that counts
# the *class subdirectories* (2 here), not the number of images. If the intent
# was "one batch = whole split", this should be the image count; confirm.
validation_datagen = ImageDataGenerator(rescale=rescale)
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=target_size,
class_mode=class_mode,
batch_size=len(os.listdir(validation_dir)),
shuffle = False)
test_datagen = ImageDataGenerator(rescale=rescale)
test_generator = test_datagen.flow_from_directory(
testing_dir,
target_size=target_size,
class_mode=class_mode,
batch_size=len(os.listdir(testing_dir)),
shuffle = False)
# -
from sklearn.utils import class_weight
def get_weight(y):
    """Return balanced per-class weights for the label array *y*.

    NOTE(review): the module-level name 'class_weight' is later rebound to
    this function's result (class_weight = get_weight(...)), shadowing the
    sklearn module — calling get_weight again after that would fail; confirm.
    """
    return class_weight.compute_class_weight('balanced', np.unique(y), y)
# ### Training Files Configuration
class_weight = get_weight(train_generator.classes)
class_weight
# +
main_model_dir = output_directory + r"models/"
main_log_dir = output_directory + r"logs/"
clear_directory(main_log_dir)
remove_empty_folders(main_model_dir, False)
model_dir = main_model_dir + time.strftime('%Y-%m-%d %H-%M-%S') + "/"
log_dir = main_log_dir + time.strftime('%Y-%m-%d %H-%M-%S')
create_directory(model_dir, remove=True)
create_directory(log_dir, remove=True)
model_file = model_dir + "{epoch:02d}-val_acc-{val_acc:.2f}-val_loss-{val_loss:.2f}.hdf5"
# -
# ### Callbacks
reset_graph()
reset_callbacks()
# +
print("Settting Callbacks at ", date_time(1))
checkpoint = ModelCheckpoint(
model_file,
monitor='val_acc',
save_best_only=True)
early_stopping = EarlyStopping(
monitor='val_loss',
patience=5,
verbose=1,
restore_best_weights=True)
tensorboard = TensorBoard(
log_dir=log_dir,
batch_size=batch_size,
update_freq = 'batch')
reduce_lr = ReduceLROnPlateau(
monitor='val_loss',
patience=5,
cooldown=2,
min_lr=0.0000000001,
verbose=1)
#-----------------------------------------------------------------------------------------------------------------#
callbacks = [checkpoint, reduce_lr, early_stopping, tensorboard]
# callbacks = [checkpoint, tensorboard]
#-----------------------------------------------------------------------------------------------------------------#
print("Set Callbacks at ", date_time(1))
# -
def get_conv_model():
    """Build the from-scratch CNN: five conv blocks (16→128 filters, the last
    two dilated) followed by a dense head with softmax over the two classes.
    Prints the layer summary and returns the uncompiled model.

    NOTE(review): input_shape=(3,150,150) is channels-first, consistent with
    K.set_image_dim_ordering('th') at import time — but the generators in
    this file only specify target_size=(150,150); confirm the data ordering
    actually matches channels-first.
    """
    model = Sequential()
    model.add(Conv2D(16, (3, 3), activation='relu', padding="same", input_shape=(3,150,150)))
    model.add(Conv2D(16, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # input_shape on this mid-network layer is redundant (Keras ignores it
    # after the first layer); kept as-is.
    model.add(Conv2D(32, (3, 3), activation='relu', padding="same", input_shape=(3,150,150)))
    model.add(Conv2D(32, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding="same"))
    model.add(Conv2D(64, (3, 3), padding="same", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dilated convolutions widen the receptive field without extra pooling.
    model.add(Conv2D(96, (3, 3), dilation_rate=(2, 2), activation='relu', padding="same"))
    model.add(Conv2D(96, (3, 3), padding="valid", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), dilation_rate=(2, 2), activation='relu', padding="same"))
    model.add(Conv2D(128, (3, 3), padding="valid", activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(2 , activation='softmax'))
    print(model.summary())
    return model
# +
# Load and configure model InceptionV3 for fine-tuning with new class labels
def get_model():
    """Build an InceptionV3-based transfer model: ImageNet weights, top
    removed, all base layers frozen, new softmax head for the two classes.
    Prints the layer summary and returns the uncompiled model.

    NOTE(review): with the pooling layer commented out, Dense(2) is applied
    directly to Inception's 4-D feature map, so the output is per-position
    rather than one vector per image — confirm whether GlobalAveragePooling2D
    should be re-enabled before this model is used.
    """
    # base_model = InceptionV3(weights=None, include_top=False)
    base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
    x = base_model.output
    # x = Dropout(0.5)(x)
    # x = GlobalAveragePooling2D()(x)
    # x = Dense(512, activation='relu')(x)
    # # x = Dense(1024, activation='relu')(x)
    x = BatchNormalization()(x)
    # # predictions = Dense(2, activation='sigmoid')(x)
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    # Freeze the pretrained backbone; only the new head trains.
    for layer in base_model.layers:
        layer.trainable = False
    # for layer in model.layers[:249]:
    #     layer.trainable = False
    # for layer in model.layers[249:]:
    #     layer.trainable = True
    model.summary()
    return model
# -
# ### Training/Fine-Tuning Base Model-InceptionV3 for Fine-Tuning with New Class Labels
print("Getting Base Model", date_time(1))
# model = get_model()
model = get_conv_model()
# model = keras.models.load_model("data/output/models/2018-12-15 00-26-45/13-val_acc-0.70-val_loss-0.58.hdf5")
# +
print("Starting Trainning Model", date_time(1))
steps_per_epoch=len(train_generator)
validation_steps=len(validation_generator)
# lr = 0.00001
# optimizer=optimizers.Adam(lr=lr)
optimizer=optimizers.Adam()
loss='categorical_crossentropy'
metrics=['accuracy']
epochs = 100
model.compile(optimizer, loss=loss, metrics=metrics)
history = model.fit_generator(
train_generator,
steps_per_epoch = steps_per_epoch,
epochs=epochs,
verbose=2,
callbacks=callbacks,
validation_data=validation_generator,
validation_steps=validation_steps,
class_weight=class_weight)
print("Completed Model Trainning", date_time(1))
# -
# ### Model Performance Visualization over the Epochs
# +
xlabel = 'Epoch'
legends = ['Training', 'Validation']
ylim_pad = [0.01, 0.1]
plt.figure(figsize=(15, 5))
# Plot training & validation Accuracy values
y1 = history.history['acc']
y2 = history.history['val_acc']
min_y = min(min(y1), min(y2))-ylim_pad[0]
max_y = max(max(y1), max(y2))+ylim_pad[0]
plt.subplot(121)
plt.plot(y1)
plt.plot(y2)
plt.title('Model Accuracy', fontsize=17)
plt.xlabel(xlabel, fontsize=15)
plt.ylabel('Accuracy', fontsize=15)
plt.ylim(min_y, max_y)
plt.legend(legends, loc='upper left')
plt.grid()
# Plot training & validation loss values
y1 = history.history['loss']
y2 = history.history['val_loss']
min_y = min(min(y1), min(y2))-ylim_pad[1]
max_y = max(max(y1), max(y2))+ylim_pad[1]
plt.subplot(122)
plt.plot(y1)
plt.plot(y2)
plt.title('Model Loss', fontsize=17)
plt.xlabel(xlabel, fontsize=15)
plt.ylabel('Loss', fontsize=15)
plt.ylim(min_y, max_y)
plt.legend(legends, loc='upper left')
plt.grid()
plt.show()
# -
# ### Test Saved Models
dir_name = r"data/output/models/"
dirs = os.listdir(dir_name)
for i in range(len(dirs)):
print(i, dirs[i])
cur_dir =dir_name+dirs[9]+"/"
model_names = os.listdir(cur_dir)
for i in range(len(model_names)):
print(i, model_names[i])
# +
model_file = cur_dir+model_names[5]
print(model_file)
# +
# cur_model = keras.models.load_model(model_file, custom_objects={'activation': swish_activation})
# +
print("results")
result = model.evaluate_generator(test_generator, steps=len(test_generator), verbose=2)
print("%s%.2f "% ("Loss : ", result[0]))
print("%s%.2f%s"% ("Accuracy : ", result[1]*100, "%"))
# -
print("results")
y_pred = model.predict_generator(test_generator, steps=len(test_generator), verbose=2)
y_pred = y_pred.argmax(axis=-1)
y_true=test_generator.classes
# +
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print("%s%.2f%s"% ("Precision : ", precision*100, "%"))
print("%s%.2f%s"% ("Recall : ", recall*100, "%"))
print("%s%.2f%s"% ("F1-Score : ", f1*100, "%"))
CM = confusion_matrix(y_true, y_pred)
fig, ax = plot_confusion_matrix(conf_mat=CM , figsize=(10,8), hide_ticks=True,cmap=plt.cm.Blues)
plt.xticks(range(len(classes)), classes, fontsize=12)
plt.yticks(range(len(classes)), classes, fontsize=12)
plt.show()
cls_report_print = classification_report(y_true, y_pred, target_names=classes)
cls_report = classification_report(y_true, y_pred, target_names=classes, output_dict=True)
print(cls_report_print)
# +
# Evaluate the model on one randomly selected test batch.
numofbatch = len(test_generator)
batch_no = random.randint(0, numofbatch-1)
y_img_batch, y_true_batch = test_generator[batch_no]
y_true_batch = y_true_batch.argmax(axis=-1)
y_pred_batch = model.predict(y_img_batch)
y_pred_batch = y_pred_batch.argmax(axis=-1)
sizeofbatch = len(y_true_batch)
print("-"*35)
print("%s%d"% ("Selected Batch No : ", batch_no))
print("-"*35)
print("%s%d"% ("Batch Size : ", len(y_pred_batch)))
print("-"*35)
# Bug fix: this line previously printed np.mean(y_true == y_pred) — the
# accuracy over the *whole* test set — under the per-batch heading.
print("%s%.2f%s"% ("Accuracy : ", np.mean(y_true_batch==y_pred_batch)*100, "%"))
print("-"*35)
# -
# ### Visualization
# +
def show_predictions(y_img_batch, y_true, y_pred, subplot_params, plot_params, class_map, image_file_name, count=8, sample=True):
    """Plot *count* images from a batch with their true/predicted labels.

    When sample=True, distinct random indices are drawn; otherwise the first
    *count* images are shown in order. Mispredictions are titled in red.

    Bug fixes:
    - plt.imshow was given the whole batch array instead of the selected
      image (y_img_batch[num]);
    - the title said "PredX" with no separator after "Pred";
    - mispredicted titles referenced an undefined name
      'false_prediction_label_color' (NameError); a literal "red" is used.
    """
    fig, axs = get_fig_axs(subplot_params)
    plt.rcParams.update({'axes.titlesize': plot_params["axes.titlesize"]})
    plt.subplots_adjust(hspace=subplot_params["hspace"], wspace=subplot_params["wspace"])
    seen = {}
    length = len(y_true)
    for i in range(count):
        num = i
        if sample:
            # draw until we hit an index not used yet (distinct picks)
            num = random.randint(0, length - 1)
            while num in seen:
                num = random.randint(0, length - 1)
        seen[num] = 1
        plt.subplot(subplot_params["nrows"], subplot_params["ncols"], i + 1)
        plt.imshow(y_img_batch[num])
        plt.xticks([])
        plt.yticks([])
        original = class_map[y_true[num]]
        predicted = class_map[y_pred[num]]
        title_text = "%s%s%s%s%s" % ("True: ", original, "\n", "Pred: ", predicted)
        if original == predicted:
            plt.title(title_text)
        else:
            plt.title(title_text, color="red")
    if plot_params["update_image"] and os.path.exists(image_file_name):
        os.remove(image_file_name)
    fig.savefig(image_file_name, dpi=plot_params["dpi"])
    plt.tight_layout()
    plt.show()
# -
# ### Visualization
# +
image_file_name_batch = figure_directory+"/result"
image_file_name_sample = figure_directory+"/sample"
batch_size_t = len(y_true_batch)
ncols = 8
ncols = batch_size_t if batch_size_t<ncols else ncols
nrows = batch_size_t/ncols
nrows = int(batch_size_t/ncols)+1 if batch_size_t%ncols else int(batch_size_t/ncols)
dpi=100
subplot_params = get_reset_subplot_params(nrows, ncols, dpi)
plot_params = get_reset_plot_params()
class_map = {v: k for k, v in test_generator.class_indices.items()}
image_file_name = "sample"
# -
# #### Visualization 1 (Random Batch)
# Visualization of performance of a random test dataset batch
show_predictions(y_img_batch, y_true_batch, y_pred_batch, subplot_params, plot_params, class_map, image_file_name, sample=False)
# #### Visualization 2 (Random)
# Visualization of performance of a few random images from a random batch
# +
# Visualize a random sample of predictions from the selected batch.
cols = 4
rows = 2
if batch_size_t < 4:
    cols = 1
count = cols * rows
# Bug fix: this cell previously populated an undefined 'figure_map' dict
# (NameError) and then called show_predictions() with a mismatched argument
# list. Build proper subplot parameters and use the real signature instead.
sample_subplot_params = get_reset_subplot_params(rows, cols, dpi)
show_predictions(y_img_batch, y_true_batch, y_pred_batch,
                 sample_subplot_params, plot_params, class_map,
                 image_file_name_sample, count=count, sample=True)
# -
|
code/obsolete/Detection of Pneumonia from Chest X-Ray Images 1.0.0.2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Introduction to the Interstellar Medium
### <NAME>
# +
### Figure 2.2: Atmospheric absorption across the electromagnetic spectrum
|
observations/.ipynb_checkpoints/template_notebook-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Scott-Huston/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/Scott_Huston_LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="838Dmw1kM2LK" colab_type="text"
# # Lambda School Data Science Module 142
# ## Sampling, Confidence Intervals, and Hypothesis Testing
# + [markdown] id="dbcPKIo5M6Ny" colab_type="text"
# ## Prepare - examine other available hypothesis tests
#
# If you had to pick a single hypothesis test in your toolbox, t-test would probably be the best choice - but the good news is you don't have to pick just one! Here's some of the others to be aware of:
# + id="tlBel8j9M6tB" colab_type="code" outputId="e95243b4-99e6-4665-fb5b-85c65e32ee21" colab={"base_uri": "https://localhost:8080/", "height": 187}
import numpy as np
from scipy.stats import chisquare # One-way chi square test
# Chi square can take any crosstab/table and test the independence of rows/cols
# The null hypothesis is that the rows/cols are independent -> low chi square
# The alternative is that there is a dependence -> high chi square
# Be aware! Chi square does *not* tell you direction/causation
# Example 1: rows are exactly proportional, so pooled counts match the
# uniform expectation closely -> small statistic, large p-value.
ind_obs = np.array([[1, 1], [2, 2]]).T
print(ind_obs)
# axis=None pools all cells into a single one-way goodness-of-fit test
print(chisquare(ind_obs, axis=None))
# Example 2: counts that deviate from a uniform expectation -> larger statistic
dep_obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
print(dep_obs)
print(chisquare(dep_obs, axis=None))
# + id="nN0BdNiDPxbk" colab_type="code" outputId="96da37cc-8f07-425b-f198-68eef9b0cb30" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Distribution tests:
# We often assume that something is normal, but it can be important to *check*
# For example, later on with predictive modeling, a typical assumption is that
# residuals (prediction errors) are normal - checking is a good diagnostic
from scipy.stats import normaltest
# Poisson models counts of events in a fixed interval and is related to the
# binomial (coinflip); a Poisson(5) sample is skewed, so it should fail the test
sample = np.random.poisson(5, 1000)
print(normaltest(sample)) # Pretty clearly not normal
# + id="P5t0WhkDReFO" colab_type="code" outputId="7d7d19e9-abd1-4823-e856-f512e7a6e2eb" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Kruskal-Wallis H-test - compare the median rank between 2+ groups
# Can be applied to ranking decisions/outcomes/recommendations
# The underlying math comes from chi-square distribution, and is best for n>5
from scipy.stats import kruskal
x1 = [1, 3, 5, 7, 9]
y1 = [2, 4, 6, 8, 10]
print(kruskal(x1, y1)) # x1 is a little better, but not "significantly" so
x2 = [1, 1, 1]
y2 = [2, 2, 2]
z = [2, 2] # Hey, a third group, and of different size!
print(kruskal(x2, y2, z)) # x clearly dominates
# + [markdown] id="7pT3IP36Rh0b" colab_type="text"
# And there's many more! `scipy.stats` is fairly comprehensive, though there are even more available if you delve into the extended world of statistics packages. As tests get increasingly obscure and specialized, the importance of knowing them by heart becomes small - but being able to look them up and figure them out when they *are* relevant is still important.
# + [markdown] id="3JqroCQYQqhy" colab_type="text"
# ## T-test Assumptions
#
# <https://statistics.laerd.com/statistical-guides/independent-t-test-statistical-guide.php>
#
# - Independence of means
#
# Are the means of our voting data independent (do not affect the outcome of one another)?
#
# The best way to increase the likelihood of our means being independent is to randomly sample (which we did not do).
#
# + id="sqy2hEFRZnvI" colab_type="code" colab={}
from scipy.stats import ttest_ind
# ?ttest_ind  (uncomment in a notebook to show the function's docstring/signature)
# + [markdown] id="xI-PcK5sZ1A9" colab_type="text"
# - "Homogeneity" of Variance?
#
# Is the magnitude of the variance between the two roughly the same?
#
# I think we're OK on this one for the voting data, although it probably could be better, one party was larger than the other.
#
# If we suspect this to be a problem then we can use Welch's T-test
# + id="P02dL0waauN5" colab_type="code" colab={}
# ?ttest_ind
# + [markdown] id="tjgoHHwGayoC" colab_type="text"
# - "Dependent Variable" (sample means) are Distributed Normally
#
# <https://stats.stackexchange.com/questions/9573/t-test-for-non-normal-when-n50>
#
# Lots of statistical tests depend on normal distributions. We can test for normality using Scipy as was shown above.
#
# This assumption is often assumed even if the assumption is a weak one. If you strongly suspect that things are not normally distributed, you can transform your data to get it looking more normal and then run your test. This problem typically goes away for large sample sizes (yay Central Limit Theorem) and is often why you don't hear it brought up. People declare the assumption to be satisfied either way.
#
#
# + [markdown] id="bvvPV-RJN2vA" colab_type="text"
# ## Central Limit Theorem
#
#
# + id="FBLoOF8qOJeJ" colab_type="code" outputId="f3a2b7b3-f36f-4b2e-f6b7-b311d6a6fffa" colab={"base_uri": "https://localhost:8080/", "height": 71}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Draw 3000 samples of 12 fair coinflips each and record each sample's mean.
sample_means = []
for x in range(0,3000):
    coinflips = np.random.binomial(n=1, p=.5, size=12)
    one_sample = coinflips  # keeps the *last* sample around for the cells below
    sample_means.append(coinflips.mean())
print(len(sample_means))
print(sample_means)
# + id="rfeA06evOT2K" colab_type="code" outputId="e21f9dd5-5889-477e-ee30-0853f58ca474" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Look at one raw sample (0/1 outcomes) for contrast with the sample means
df = pd.DataFrame({'a': one_sample})
df.head()
# + id="GlMSNFX6OmBV" colab_type="code" outputId="1c1bb0be-2c0b-49de-8dbc-5aed68fb37fc" colab={"base_uri": "https://localhost:8080/", "height": 286}
# The raw sample is binary, so its histogram has only two bars
df.a.hist()
# + id="Jie4ypgLOs5M" colab_type="code" outputId="8a27c627-8380-4c69-f7ae-720bd9833d52" colab={"base_uri": "https://localhost:8080/", "height": 296}
# The distribution of the 3000 sample means looks approximately normal (CLT)
ax = plt.hist(sample_means, bins=30)
plt.title('Distribution of 3000 sample means \n (of 12 coinflips each)');
# + [markdown] id="LsEAjc4rOylm" colab_type="text"
# What does the Central Limit Theorem State? That no matter the initial distribution of the population, the distribution of sample means taken will approximate a normal distribution as $n \rightarrow \infty$.
#
# This has very important implications for hypothesis testing and is precisely the reason why the t-distribution begins to approximate the normal distribution as our sample size increases.
# + [markdown] id="EYqo5vZZSFUr" colab_type="text"
# ## Standard Error of the Mean
#
# What does it mean to "estimate" the population mean?
# + id="puGXH6vbSIE4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="71112dfc-4e7f-4875-dd64-e03eb89f455d"
import numpy as np
import pandas as pd
# Synthetic "population" of 2000 heights drawn uniformly from [4.5, 6.5)
lambda_heights = np.random.uniform(4.5,6.5,size = 2000)
print(len(lambda_heights))
lambda_heights
# + id="JlTHS3SdTZEB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6fd84e8c-0665-4cb6-80e8-4b7c04d4c8d5"
# These are the true population parameters we will try to estimate from samples
print('Population mean: ', lambda_heights.mean())
print('Population std. dev: ', lambda_heights.std())
# + id="P8iaJj4BTxiZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="c40aeeb4-7a28-42a8-a383-fafb8d83f0ef"
population = pd.DataFrame({'Heights': lambda_heights})
print(population.shape)
population.head()
# + id="v28SSYewUErb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="3d6fa139-812f-413c-e41d-a1d62f49cc0b"
# Take a random sample of 100 observations (without replacement)
sample = population.sample(100)
print(sample.shape)
sample.head()
# + id="q4rsqHV3UMDY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a85fc812-8242-421d-84ff-6af6a07f2800"
print('Sample Mean 1: ', sample['Heights'].mean())
# + id="QRyUo8cTUX3C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="13134b55-e550-40bc-e96e-7d995bd653a2"
# A second sample yields a different mean - sample means vary around the truth
sample = population.sample(100)
print(sample.shape)
sample.head()
# + id="DDsSwIAjUd_z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b85b3043-fdaa-4384-950d-e58357e1e7d1"
print('Sample Mean 2: ', sample['Heights'].mean())
# + [markdown] id="nfdQf8QYUUmw" colab_type="text"
# ## Build and Interpret a Confidence Interval
#
# <img src="https://github.com/ryanallredblog/ryanallredblog.github.io/blob/master/img/Confidence_Interval.png?raw=true" width=400>
# + id="yXib1WCuUrBs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e43db822-a2ca-40d7-ddb6-bc93cf61d676"
# Sample of 100 fair coinflips used throughout the next several cells
coinflips_100 = np.random.binomial(n = 1, p=.5, size=100)
sample_std = np.std(coinflips_100)  # np.std defaults to ddof=0 (population std)
print('Sample std.: ', sample_std)
sample_size = len(coinflips_100)
print('Sample size: ', sample_size)
# + id="soQJ2r1hVMlf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="47e937b9-ba50-46fe-cfa0-635b466ba25f"
# Standard error of the mean, computed by hand: std / sqrt(n)
standard_error = sample_std/(sample_size**(.5))
print('Standard error = ', standard_error)
# + id="mQRECwuHVbFu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="47c3cdef-df2e-4023-b67a-5774b2b87947"
from scipy import stats
# Same computation via scipy; ddof=0 here matches the np.std(ddof=0) above
# (note scipy.stats.sem defaults to ddof=1)
std_err = stats.sem(coinflips_100, ddof = 0)
print('Standard error = ', std_err)
# + id="YmkjWun8V1Y7" colab_type="code" colab={}
# + id="tBx71Kf0UjT3" colab_type="code" colab={}
def confidence_interval(data, confidence=0.95):
    """
    Compute a two-sided confidence interval around a sample mean.

    Uses the Student t-distribution with n-1 degrees of freedom, making it
    appropriate for small samples; defaults to a 95% confidence level.

    Arguments:
        data - iterable (list or numpy array) of sample observations
        confidence - level of confidence for the interval

    Returns:
        tuple of (mean, lower bound, upper bound)
    """
    observations = np.array(data)
    center = np.mean(observations)
    dof = len(observations) - 1
    # Critical t value for a two-tailed test at the requested confidence level
    t_crit = stats.t.ppf((1 + confidence) / 2.0, dof)
    margin = stats.sem(observations) * t_crit
    return (center, center - margin, center + margin)
# + [markdown] id="2LKyUZJUWHtw" colab_type="text"
# **What confidence level do we want our confidence interval to represent?**
#
# 95% confidence Interval? 99% confidence interval?
# + id="GSvXWY01zkiE" colab_type="code" colab={}
# Critical t value for a two-tailed 95% CI with n-1 degrees of freedom
t = stats.t.ppf(.975, sample_size-1)
# + id="ZkQvK6fpXTpD" colab_type="code" colab={}
sample_mean = coinflips_100.mean()
# + id="NM2fnIwdXvTn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="4a54c980-f046-41af-d001-db9105926cd2"
# NOTE(review): this list rebinds the name of the confidence_interval()
# function defined earlier in the file - any later *call* to that function
# would fail with "list is not callable". Consider renaming this variable.
confidence_interval = [sample_mean - t*std_err, sample_mean + t*std_err]
margin_of_error = t*std_err
print('Sample mean = ', sample_mean)
print('Standard error = ', std_err)
print('Confidence interval = ', confidence_interval)
# + id="i4gF59P6ZCTZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c5acd049-ced8-4105-fad5-6ba08c429b9b"
# Lower bound
confidence_interval[0]
# + id="XLlbYoGqZEAa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1d7405d4-4f26-4ee3-ddb4-8746b192a719"
# Upper bound
confidence_interval[1]
# + [markdown] id="C4rtc8luVUAK" colab_type="text"
# ## Graphically Represent a Confidence Interval
# + id="pz6F9_3_VmKr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="624ba65b-bdc1-4be9-b43a-224b3ec38825"
import seaborn as sns
# Density of the raw coinflips with CI bounds (black) and mean (red) overlaid
sns.kdeplot(coinflips_100)
plt.axvline(confidence_interval[0], color = 'black')
plt.axvline(confidence_interval[1], color = 'black')
plt.axvline(sample_mean, color = 'red')
# + [markdown] id="_oy0uoBGeoEb" colab_type="text"
# ## Relationship between Confidence Intervals and T-tests
#
# Confidence Interval == Bounds of statistical significance for our t-test
#
# A sample mean that falls inside of our confidence interval will "FAIL TO REJECT" our null hypothesis
#
# A sample mean that falls outside of our confidence interval will "REJECT" our null hypothesis
# + id="izIyVavzfCXS" colab_type="code" colab={}
from scipy.stats import t, ttest_1samp
# + id="Y7HwdMwDfL1N" colab_type="code" outputId="4a0a558b-50f6-4828-cfa5-6abe197b2401" colab={"base_uri": "https://localhost:8080/", "height": 54}
import numpy as np
# 100 sample means, each from 30 fair coinflips
coinflip_means = []
for x in range(0,100):
    coinflips = np.random.binomial(n=1, p=.5, size=30)
    coinflip_means.append(coinflips.mean())
print(coinflip_means)
# + id="nQDo-ZXlfOvR" colab_type="code" outputId="733f04bc-ca41-4857-ffd3-441de9745593" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Sample Size
n = len(coinflip_means)
# Degrees of Freedom
dof = n-1
# The Mean of Means:
mean = np.mean(coinflip_means)
# Sample Standard Deviation (ddof=1 -> unbiased estimator)
sample_std = np.std(coinflip_means, ddof=1)
# Standard Error: std / sqrt(n)
std_err = sample_std/n**.5
# scipy builds the interval for us: mean +/- t_crit * std_err
CI = t.interval(.95, dof, loc=mean, scale=std_err)
print("95% Confidence Interval: ", CI)
# + id="PiaALHSNfWou" colab_type="code" outputId="36afd79d-7028-4419-e383-0b79c3990846" colab={"base_uri": "https://localhost:8080/", "height": 51}
'''You can roll your own CI calculation pretty easily.
The only thing that's a little bit challenging
is understanding the t stat lookup'''
# 95% confidence interval
t_stat = t.ppf(.975, dof)  # two-tailed: 2.5% in each tail
print("t Statistic:", t_stat)
CI = (mean-(t_stat*std_err), mean+(t_stat*std_err))
print("Confidence Interval", CI)
# + [markdown] id="EamZNJhAf-fY" colab_type="text"
# A null hypothesis that's just inside of our confidence interval == fail to reject
#
#
# + id="cNpzYbjpfirR" colab_type="code" outputId="ff4bfc51-950a-4502-a880-da478caa2081" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Null value .49 lies inside the CI computed above -> expect p > .05
ttest_1samp(coinflip_means, .49)
# + [markdown] id="hO34mbL9gHn1" colab_type="text"
# A null hypothesis that's just outside of our confidence interval == reject
#
#
# + id="N4SUjj82gKlv" colab_type="code" outputId="c98977f2-b521-46da-b8c2-27aab85595f4" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Null value just outside the CI -> expect p < .05
ttest_1samp(coinflip_means, .4818927)
# + id="DDsovHUyUj3v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="2b21ebe3-d147-440a-f6c0-d32bfbaa9c59"
# Load the UCI "adult" census dataset; this file marks missing values as " ?"
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
# + id="31FjCqbkabfk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="ace8b578-7874-4aa1-a5b4-3d2a56dc2fd8"
df.describe()
# + id="xDLfeMy1admz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="dd03a86c-e1e9-4e51-9ea0-005fd8db0e1d"
# Summary of the non-numeric (categorical) columns only
df.describe(exclude = 'number')
# + id="wOsxRpllai5L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="b1b6746e-2d5c-4ebe-d3d6-f0cad763d21d"
# Bin the continuous hours-per-week column into six ordered categories
cut_points = [0,9,19,29,39,49,1000]
label_names = ['0-9', '10-19', '20-29','30-39','40-49','50+']
df['hours-per-week-categories'] = pd.cut(df['hours-per-week'], cut_points, labels = label_names)
df.head()
# + id="QlHhdOI4bUMT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ab2fdaf8-b7c2-48ac-d0dd-0d292d9e81a6"
df['sex'].value_counts()
# + id="eunogTH3bYnR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="713aed37-fc33-4f4b-fc9f-8b6b7c9a739a"
df['hours-per-week-categories'].value_counts()
# + id="mGml_FM2bi9o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="8a027dca-0323-4143-d84d-6a62f61e8e00"
# Sort so the crosstab columns come out in category order
df = df.sort_values(by = 'hours-per-week-categories', ascending = True)
df.head()
# + id="96dqBY9rcDy4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="c05dd80b-4ed8-40b2-9d01-fce0669a3ab3"
# margins=True appends an 'All' row and column holding the totals
contingency_table = pd.crosstab(df['sex'], df['hours-per-week-categories'], margins = True)
contingency_table
# + id="0CJs9ZMhcsBW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="07d320f5-3b45-4046-c817-097451db374c"
# First row's counts (columns 0-5, excluding the 'All' margin)
female_count = contingency_table.iloc[0][0:6].values
female_count
# + id="Fg4gurEPdAGj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1de11813-c073-45a3-f9be-0b2d53cc5b6a"
male_count = contingency_table.iloc[1][0:6].values
male_count
# + id="aftFoKK6dNMD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 361} outputId="d82351a5-c736-4a09-9a18-8f2a65f04913"
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure(figsize = (10,5))
sns.set(font_scale = 1.8)
categories = label_names
# Stacked bar chart: female counts drawn on top of (bottom=) male counts
p1 = plt.bar(categories, male_count, .55, color = 'red')
p2 = plt.bar(categories, female_count, .55, color = 'blue', bottom = male_count)
plt.legend((p2[0], p1[0]), ('Female', 'Male'))
plt.xlabel('Hours per Week Worked')
plt.ylabel('Count')
plt.show()
# + [markdown] id="hW7ppuHqjEXP" colab_type="text"
# ## Expected Value Calculation
# + id="BlaJXN9fjN0X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b5efb48b-c8b8-4a69-e619-6f3d64b971f0"
# Get row sums (the 'All' margin column, index 6) and column sums
# (the 'All' margin row, index 2) from the contingency table above
row_sums = contingency_table.iloc[0:2,6].values
col_sums = contingency_table.iloc[2,0:6].values
print(col_sums)
row_sums
# + id="-fSS61i2khyR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="41eb7f0a-da79-47e3-efc7-4d6af19eaaa7"
total = contingency_table.loc['All', 'All']
total
# + id="DuU8upTHlK_j" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="35fba9aa-1ea3-4521-df7e-88f32d592510"
# Expected counts under independence: E[i, j] = row_total_i * col_total_j / grand_total.
# np.outer builds the whole matrix in one vectorized step (replaces the
# hand-rolled nested loops, producing the same float values and shape).
expected = np.outer(row_sums, col_sums) / total
print(expected.shape)
print(expected)
# + id="MGn7rWrqlOd8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 159} outputId="7e37b14d-571d-446d-9a49-f3c00d7fb56a"
# Observed counts without margins, for comparison against `expected`
observed = pd.crosstab(df['sex'], df['hours-per-week-categories'].values)
print(observed.shape)
observed
# + [markdown] id="pTIzrkKdUaLl" colab_type="text"
# ## Run a $\chi^{2}$ Test "by hand" (Using Numpy)
# + id="ghoTbJnueOLN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b0231221-f3c4-472e-899e-123ccbf7fe10"
# Pearson chi-square statistic: sum over all cells of (O - E)^2 / E
chi_squared = np.array(((observed - expected)**2/(expected))).sum()
print(f"Chi-Squared: {chi_squared}")
# + id="Ky_m-meFK7xv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c49924aa-b5ab-489c-b20a-d13496f8a44c"
# Calculate Degrees of Freedom: (rows - 1) * (cols - 1)
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
# + [markdown] id="7Igz-XHcVbW3" colab_type="text"
# ## Run a $\chi^{2}$ Test using Scipy
# + id="X52Nwt7AVlvk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="5f5ca646-6bda-4741-d0d5-3cf3875257df"
# The one-liner returns the statistic, p-value, dof and expected counts at once
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
# + [markdown] id="11OzdxWTM7UR" colab_type="text"
# ## Assignment - Build a confidence interval
#
# A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.
#
# 52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.
#
# In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.
#
# But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.
#
# How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this sampling procedure 100 times and construct an interval each time, we would expect ~95 of those intervals to contain the true population parameter."
#
# For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.
#
# Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.
#
# Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):
#
#
# ### Confidence Intervals:
# 1. Generate and numerically represent a confidence interval
# 2. Graphically (with a plot) represent the confidence interval
# 3. Interpret the confidence interval - what does it tell you about the data and its distribution?
#
# ### Chi-squared tests:
# 4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data
# - By hand using Numpy
# - In a single line using Scipy
#
# Stretch goals:
#
# 1. Write a summary of your findings, mixing prose and math/code/results. *Note* - yes, this is by definition a political topic. It is challenging but important to keep your writing voice *neutral* and stick to the facts of the data. Data science often involves considering controversial issues, so it's important to be sensitive about them (especially if you want to publish).
# 2. Apply the techniques you learned today to your project data or other data of your choice, and write/discuss your findings here.
# 3. Refactor your code so it is elegant, readable, and can be easily run for all issues.
# + id="Ckcr4A4FM7cs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="bfeeb8cf-5b2f-4d1a-efaf-cd8591df547e"
# TODO - your code!
import pandas as pd
import numpy as np
# Loading and cleaning data: the raw file has no header row, so supply
# column names; '?' = missing vote, 'n'/'y' -> 0/1 for numeric analysis
column_headers = ['party', 'handicapped-infants', 'water-project-cost-sharing', 'adoption-of-the-budget-resolution', 'physician-fee-freeze',
                  'el-salvador-aid', 'religions-groups-in-schools', 'anti-satellite-test-ban', 'aid-to-nicaraguan-contras',
                  'mx-missile', 'immigration', 'synfuels-corporation-cutback', 'education-spending', 'superfund-right-to-sue',
                  'crime', 'duty-free-exports', 'export-administration-act-south-africa']
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data', names = column_headers)
df.replace('?', np.NaN, inplace = True)
df.replace('n', 0, inplace = True)
df.replace('y', 1, inplace = True)
df.head()
# + id="v2Rj_jTmTC1F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9f1052ce-9b8a-4d54-a328-2c51f8ed2cee"
# Generating and numerically representing the confidence interval for the mean
# proportion of congress members supporting the synfuels corporation cutback
from scipy.stats import t
synfuels = df['synfuels-corporation-cutback'].dropna()
# t-distribution uses n - 1 degrees of freedom (was len(synfuels))
deg_freedom = len(synfuels) - 1
mean = synfuels.mean()
# Sample standard deviation (ddof=1); np.std defaults to the population formula
std_dev = np.std(synfuels, ddof=1)
# Standard error of the mean is std / sqrt(n).
# BUG FIX: the original divided by len(synfuels**2) - squaring the *elements*
# does not change the length, so that was std / n, not std / sqrt(n).
std_err = std_dev / (len(synfuels) ** 0.5)
CI = t.interval(.95, deg_freedom, loc=mean, scale=std_err)
print(CI)
# + id="NjnGfq6Ptsro" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="106d54dc-b9ad-4a11-9a11-f905a9e18f29"
# Here's one plot, but it doesn't represent the interval well
# (the CI is so narrow relative to [0, 1] that the bound lines overlap)
import seaborn as sns
sns.kdeplot(synfuels, legend = False)
plt.axvline(x = CI[0], color = 'black')
plt.axvline(x = CI[1], color = 'black')
plt.axvline(x = synfuels.mean(), color = 'red')
plt.xlim(0,1)
# + id="kaRPE8B0wMUb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="de789346-7c53-4006-c6bc-2c7faa35bc1b"
# This is a better representation of the CI, but doesn't
# show the density plot as well (zoomed in to a narrow x-range)
sns.kdeplot(synfuels, legend = False)
plt.axvline(x = CI[0], color = 'black')
plt.axvline(x = CI[1], color = 'black')
plt.axvline(x = synfuels.mean(), color = 'red')
plt.xlim(.35,.375)
# + [markdown] id="25PeHm-wxCeG" colab_type="text"
# **Interpreting the Confidence Interval**
#
# The standard wording would be something like:
#
# "This confidence interval tells me that I can be 95% confident that the true proportion of congress members who would vote for the synfuels corporation cutback bill is between .35 and .375."
#
# Honestly though, in this case it doesn't make a lot of sense because this isn't really a sample. These are all of the members of the House of Representatives so it doesn't make sense to try to estimate some sort of population mean from this as if it was a sample. Maybe you could try to interpret it as if this particular group of members of congress are a sample of all of the people who have ever been members. That doesn't really work though, because their votes are not independent of each other in that case because they are a product of their time and political climate.
# + [markdown] id="szRFJ5er1mcW" colab_type="text"
# Chi-squared tests:
# Take a dataset that we have used in the past in class that has categorical variables. Pick two of those categorical variables and run a chi-squared tests on that data
# By hand using Numpy
# In a single line using Scipy
# + id="HBERE7Puw377" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="a9b50876-5c7b-43ea-8506-cd7cd69d69b9"
# Loading dataset (Gapminder country entities)
entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
entities.head()
# + id="0Oxvg0ZS5CU4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="a0d1b2bd-27bd-44b9-9144-fe15f4fce402"
# Filtering to only 2 columns - the two categorical variables for the test
entities = entities[['landlocked', 'main_religion_2008']].dropna()
entities.head()
# + id="lD1G7bIh7h08" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="94d0b09f-3043-4b57-839e-564088a0f13b"
# Checking for null values (should all be 0 after the dropna above)
entities.isnull().sum()
# + id="y2SDCOxm7thh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="fdf775c9-2725-4241-ebc8-f71514149ca3"
# Contingency table with 'All' margins (2 landlocked rows x 3 religion cols)
observed = pd.crosstab(entities['landlocked'], entities['main_religion_2008'], margins = True)
observed
# + id="omoI--ON9UAm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c0818e29-5f70-466a-bbb6-5528ca227c01"
# Row totals come from the 'All' margin column (index 3)
row_sums = observed.iloc[0:2, 3].values
row_sums
# + id="Ao5N2e_7IPpM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d382540e-5380-434a-85b8-c50f431da924"
# Column totals come from the 'All' margin row (index 2)
col_sums = observed.iloc[2,0:3].values
col_sums
# + id="9Y5nf1CbIXpf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="60a84ea1-e1cd-44b0-bb0e-e4dd4a2d69e3"
total = observed.loc['All', 'All']
total
# + id="OQ1TJvOsIF_w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ea832fd3-ec5f-44ed-ba51-ae2e1fbbaf4c"
# Expected counts under independence: E[i, j] = row_total_i * col_total_j / total
expected = []
for i in range(len(row_sums)):
    expected_row = []
    for column in col_sums:
        expected_val = column*row_sums[i]/total
        expected_row.append(expected_val)
    expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
# + id="txFQoM-SCfsf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d3d71f44-42c1-46ba-c691-745db0d6b925"
# Rebuild observed *without* margins so it lines up cell-for-cell with expected
observed = np.array(pd.crosstab(entities['landlocked'], entities['main_religion_2008'], margins = False))
observed
# + id="wptac3nHBJcG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d07fbf4c-a4ed-4eb5-c7de-081266759ccb"
# Pearson chi-square statistic by hand: sum of (O - E)^2 / E
chi_squared = ((observed - expected)**2/(expected)).sum()
print(chi_squared)
# + id="MnUboZtGGo0o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="21576a16-d8f7-4d7e-c58d-031fcd724c60"
# Same test in a single scipy call, for comparison with the manual result
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
# + [markdown] id="n-WyR9v9Mfi4" colab_type="text"
# **Interpreting the Chi-squared test:**
#
# Because the p-value is very high, whether a country is landlocked appears to be independent of the country's religion. I fail to reject the null hypothesis that the variables are independent.
# + [markdown] id="nyJ3ySr7R2k9" colab_type="text"
# ## Resources
#
# - [Interactive visualize the Chi-Squared test](https://homepage.divms.uiowa.edu/~mbognar/applets/chisq.html)
# - [Calculation of Chi-Squared test statistic](https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test)
# - [Visualization of a confidence interval generated by R code](https://commons.wikimedia.org/wiki/File:Confidence-interval.svg)
# - [Expected value of a squared standard normal](https://math.stackexchange.com/questions/264061/expected-value-calculation-for-squared-normal-distribution) (it's 1 - which is why the expected value of a Chi-Squared with $n$ degrees of freedom is $n$, as it's the sum of $n$ squared standard normals)
|
Scott_Huston_LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #Table of Contents
# * [Fresnel reflection coefficients](#Fresnel-reflection-coefficients)
# * [Set up and imports](#Set-up-and-imports)
# * [Compare external and internal reflection for $n_{low} = 1.0$ and $n_{high} = 1.5$ & $n_{high} = 2.5$](#Compare-external-and-internal-reflection-for-$n_{low}-=-1.0$-and-$n_{high}-=-1.5$-&-$n_{high}-=-2.5$)
# * [Old](#Old)
#
# # Fresnel reflection coefficients
# The Fresnel reflection coefficients are
#
# $$\begin{aligned} \\
# \Gamma_{TE} &= \frac{\eta_2/\cos{\theta_t} - \eta_1/\cos{\theta_i}}{\eta_2/\cos{\theta_t} + \eta_1/\cos{\theta_i}} = \frac{n_1\cos{\theta_i} - n_2\cos{\theta_t}}{n_1\cos{\theta_i} + n_2\cos{\theta_t}} \\
# \Gamma_{TM}&= \frac{\eta_2\cos{\theta_t} - \eta_1\cos{\theta_i}}{\eta_2\cos{\theta_t} + \eta_1\cos{\theta_i}} =
# \frac{n_1\cos{\theta_t} - n_2\cos{\theta_i}}{n_1\cos{\theta_t} + n_2\cos{\theta_i}} \\
# \end{aligned}$$
#
# where $\eta_i$ and $n_i$ are the impedance and refractive index of the $i^{th}$ material. The reflectance and transmittance are:
#
# $$\begin{aligned} \\
# R &= |\Gamma|^2 \\
# T &= 1 - R
# \end{aligned}$$
#
# # Set up and imports
# + language="javascript"
# IPython.load_extensions('calico-document-tools');
# +
from __future__ import division, print_function, absolute_import
# coh_tmm computes coherent transfer-matrix results for a layer stack
from tmm import (coh_tmm, unpolarized_RT, ellips,
                 position_resolved, find_in_structure_with_inf)
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# %matplotlib inline
try:
    import colorpy.illuminants
    import colorpy.colormodels
    from tmm import color
    colors_were_imported = True
except ImportError:
    # without colorpy, you can't run sample5(), but everything else is fine.
    colors_were_imported = False
# "5 * degree" is 5 degrees expressed in radians
# "1.2 / degree" is 1.2 radians expressed in degrees
degree = np.pi/180
# -
# # Compare external and internal reflection for $n_{low} = 1.0$ and $n_{high} = 1.5$ & $n_{high} = 2.5$
# Assume a plane wave is propagating in medium 1 with refractive index $n_1$ and is incident on a planar interface between medium 1 and medium 2 where the refractive index of medium 2 is $n_2$. Plot the magnitude of the reflection coefficient as a function of incidence angle for both TE and TM polarization for the case of external ($n_1 \lt n_2$) and internal ($n_1 \gt n_2$) reflection.
# +
# Compare external vs internal reflection for two index contrasts.
# Each panel plots |r| (the magnitude of the complex Fresnel reflection
# coefficient) vs incidence angle for TE ('s') and TM ('p') polarization.
# list of layer thicknesses in nm (semi-infinite media: one interface)
d_list = [np.inf, np.inf]
# list of refractive indices - first comparison, n_high = 1.5
nlow1 = 1.0
nhigh1 = 1.5
n_list_ext1 = [nlow1, nhigh1]  # external: incident from the low-index side
n_list_int1 = [nhigh1, nlow1]  # internal: incident from the high-index side
# second comparison, n_high = 2.5
nlow2 = 1.0
nhigh2 = 2.5
n_list_ext2 = [nlow2, nhigh2]
n_list_int2 = [nhigh2, nlow2]
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
fsize = 14
# initialize lists of y-values to plot
# NOTE: every series below stores the *coefficient* r (key 'r'), not the
# reflectance R = |r|^2; the originals named the second set "R_*" and
# labeled them |Gamma|^2, which contradicted the plotted quantity.
r_ext_TE=[]
r_ext_TM=[]
r_int_TE=[]
r_int_TM=[]
r_ext_TE2=[]
r_ext_TM2=[]
r_int_TE2=[]
r_int_TM2=[]
for angle in angles_deg:
    # 's' polarization = TE, 'p' polarization = TM
    r_ext_TE.append(coh_tmm('s',n_list_ext1, d_list, angle*degree, wavelength)['r'])
    r_ext_TM.append(coh_tmm('p',n_list_ext1, d_list, angle*degree, wavelength)['r'])
    r_int_TE.append(coh_tmm('s',n_list_int1, d_list, angle*degree, wavelength)['r'])
    r_int_TM.append(coh_tmm('p',n_list_int1, d_list, angle*degree, wavelength)['r'])
    r_ext_TE2.append(coh_tmm('s',n_list_ext2, d_list, angle*degree, wavelength)['r'])
    r_ext_TM2.append(coh_tmm('p',n_list_ext2, d_list, angle*degree, wavelength)['r'])
    r_int_TE2.append(coh_tmm('s',n_list_int2, d_list, angle*degree, wavelength)['r'])
    r_int_TM2.append(coh_tmm('p',n_list_int2, d_list, angle*degree, wavelength)['r'])
# 2x2 grid: rows = index contrast (1.5 / 2.5), cols = external / internal
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(12,10))
ax[0][0].set_ylabel('Reflection coefficient',fontsize=fsize)
ax[0][0].plot(angles_deg,np.abs(r_ext_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][0].plot(angles_deg,np.abs(r_ext_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][0].legend(loc=2,fontsize=fsize)
ax[0][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext1[0],n_list_ext1[1]), fontsize=fsize)
ax[0][1].plot(angles_deg,np.abs(r_int_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][1].plot(angles_deg,np.abs(r_int_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][1].legend(loc=4,fontsize=fsize)
ax[0][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int1[0],n_list_int1[1]), fontsize=fsize)
ax[1][0].set_ylabel('Reflection coefficient',fontsize=fsize)
# BUG FIX: bottom-row legends previously claimed $|\Gamma|^2$ although the
# plotted quantity is np.abs(r), i.e. $|\Gamma|$ - matching the y-axis label.
ax[1][0].plot(angles_deg,np.abs(r_ext_TE2),'b-',label='$|\Gamma_{TE}|$')
ax[1][0].plot(angles_deg,np.abs(r_ext_TM2),'b--',label='$|\Gamma_{TM}|$')
ax[1][0].set_xlabel('Incidence Angle (degrees)',fontsize=fsize)
ax[1][0].legend(loc=2,fontsize=fsize)
ax[1][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext2[0],n_list_ext2[1]), fontsize=fsize)
ax[1][1].plot(angles_deg,np.abs(r_int_TE2),'b-',label='$|\Gamma_{TE}|$')
ax[1][1].plot(angles_deg,np.abs(r_int_TM2),'b--',label='$|\Gamma_{TM}|$')
ax[1][1].set_xlabel('Incidence Angle (degrees)',fontsize=fsize)
ax[1][1].legend(loc=4,fontsize=fsize)
ax[1][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int2[0],n_list_int2[1]), fontsize=fsize)
fig.subplots_adjust(hspace=0.05,wspace=0.05)
# -
# # Old
# list of layer thicknesses in nm
d_list = [np.inf,np.inf]
# list of refractive indices (external reflection: incident from n=1 into n=1.5)
n_list = [1,1.5]
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
# initialize lists of y-values to plot
r_TE=[]
r_TM=[]
for angle in angles_deg:
    # 's' polarization = TE, 'p' polarization = TM; 'r' = complex reflection coefficient
    r_TE.append(coh_tmm('s',n_list, d_list, angle*degree, wavelength)['r'])
    r_TM.append(coh_tmm('p',n_list, d_list, angle*degree, wavelength)['r'])
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(angles_deg,np.abs(r_TE),'b-',label='$|\Gamma_{TE}|$')
ax.plot(angles_deg,np.abs(r_TM),'b--',label='$|\Gamma_{TM}|$')
ax.set_xlabel('Angle (degrees)',fontsize=14)
ax.set_ylabel('Reflection coefficient',fontsize=14)
ax.legend(loc=0,fontsize=14)
#ax.title('Reflection of unpolarized light at 0$^\circ$ incidence (blue), ','45$^\circ$ (purple)');
# list of layer thicknesses in nm
d_list = [np.inf,np.inf]
# list of refractive indices (internal reflection: incident from n=1.5 into n=1)
n_list = [1.5,1.0]
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
# initialize lists of y-values to plot
r_TE=[]
r_TM=[]
for angle in angles_deg:
    r_TE.append(coh_tmm('s',n_list, d_list, angle*degree, wavelength)['r'])
    r_TM.append(coh_tmm('p',n_list, d_list, angle*degree, wavelength)['r'])
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(angles_deg,np.abs(r_TE),'b-',label='$|\Gamma_{TE}|$')
ax.plot(angles_deg,np.abs(r_TM),'b--',label='$|\Gamma_{TM}|$')
ax.set_xlabel('Angle (degrees)',fontsize=14)
ax.set_ylabel('Reflection coefficient',fontsize=14)
ax.legend(loc=4,fontsize=14)
#ax.title('Reflection of unpolarized light at 0$^\circ$ incidence (blue), ','45$^\circ$ (purple)');
# Critical angle for total internal reflection, arcsin(n2/n1), in degrees (~41.8)
print(np.arcsin(1.0/1.5)/degree)
# +
# Side-by-side comparison: external incidence (n1 < n2, left panel) versus
# internal incidence (n1 > n2, right panel) at a single interface.
# list of layer thicknesses in nm
d_list = [np.inf, np.inf]
# list of refractive indices
n_list_ext = [1.0, 1.5]  # external: entering the denser medium
n_list_int = [1.5, 1.0]  # internal: leaving the denser medium
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
fsize = 14  # common font size for labels and legends
# initialize lists of y-values to plot
r_ext_TE=[]
r_ext_TM=[]
r_int_TE=[]
r_int_TM=[]
for angle in angles_deg:
    r_ext_TE.append(coh_tmm('s',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_ext_TM.append(coh_tmm('p',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_int_TE.append(coh_tmm('s',n_list_int, d_list, angle*degree, wavelength)['r'])
    r_int_TM.append(coh_tmm('p',n_list_int, d_list, angle*degree, wavelength)['r'])
fig, (ax_ext, ax_int) = plt.subplots(1, 2, sharey=True, figsize=(12,6))
ax_ext.set_ylabel('Reflection coefficient',fontsize=fsize)
ax_ext.plot(angles_deg,np.abs(r_ext_TE),'b-',label='$|\Gamma_{TE}|$')
ax_ext.plot(angles_deg,np.abs(r_ext_TM),'b--',label='$|\Gamma_{TM}|$')
ax_ext.set_xlabel('Angle (degrees)',fontsize=fsize)
ax_ext.legend(loc=2,fontsize=14)
ax_ext.text(40,1.05,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext[0],n_list_ext[1]), fontsize=fsize)
ax_int.plot(angles_deg,np.abs(r_int_TE),'b-',label='$|\Gamma_{TE}|$')
ax_int.plot(angles_deg,np.abs(r_int_TM),'b--',label='$|\Gamma_{TM}|$')
ax_int.set_xlabel('Angle (degrees)',fontsize=fsize)
ax_int.legend(loc=2,fontsize=fsize)
ax_int.text(40,1.05,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int[0],n_list_int[1]), fontsize=fsize)
# +
# 2x2 grid: top row shows amplitude reflection |Gamma|, bottom row shows
# the power reflectance R, for external (left) and internal (right)
# incidence at an n = 1.0 / 1.5 interface.
# list of layer thicknesses in nm
d_list = [np.inf, np.inf]
# list of refractive indices
nlow = 1.0
nhigh = 1.5
n_list_ext = [nlow, nhigh]
n_list_int = [nhigh, nlow]
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
fsize = 14
# initialize lists of y-values to plot
r_ext_TE=[]
r_ext_TM=[]
r_int_TE=[]
r_int_TM=[]
R_ext_TE=[]
R_ext_TM=[]
R_int_TE=[]
R_int_TM=[]
for angle in angles_deg:
    r_ext_TE.append(coh_tmm('s',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_ext_TM.append(coh_tmm('p',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_int_TE.append(coh_tmm('s',n_list_int, d_list, angle*degree, wavelength)['r'])
    r_int_TM.append(coh_tmm('p',n_list_int, d_list, angle*degree, wavelength)['r'])
    R_ext_TE.append(coh_tmm('s',n_list_ext, d_list, angle*degree, wavelength)['R'])
    R_ext_TM.append(coh_tmm('p',n_list_ext, d_list, angle*degree, wavelength)['R'])
    R_int_TE.append(coh_tmm('s',n_list_int, d_list, angle*degree, wavelength)['R'])
    R_int_TM.append(coh_tmm('p',n_list_int, d_list, angle*degree, wavelength)['R'])
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(12,10))
ax[0][0].set_ylabel('Reflection coefficient',fontsize=fsize)
ax[0][0].plot(angles_deg,np.abs(r_ext_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][0].plot(angles_deg,np.abs(r_ext_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][0].legend(loc=2,fontsize=14)
ax[0][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext[0],n_list_ext[1]), fontsize=fsize)
ax[0][1].plot(angles_deg,np.abs(r_int_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][1].plot(angles_deg,np.abs(r_int_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][1].legend(loc=2,fontsize=fsize)
ax[0][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int[0],n_list_int[1]), fontsize=fsize)
ax[1][0].set_ylabel('Reflectance',fontsize=fsize)
# NOTE(review): R is labelled as |Gamma|^2, i.e. a power reflectance, so
# np.abs() is presumably a no-op here -- confirm against the tmm docs.
ax[1][0].plot(angles_deg,np.abs(R_ext_TE),'b-',label='$|\Gamma_{TE}|^2$')
ax[1][0].plot(angles_deg,np.abs(R_ext_TM),'b--',label='$|\Gamma_{TM}|^2$')
ax[1][0].set_xlabel('Angle (degrees)',fontsize=fsize)
ax[1][0].legend(loc=2,fontsize=14)
ax[1][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext[0],n_list_ext[1]), fontsize=fsize)
ax[1][1].plot(angles_deg,np.abs(R_int_TE),'b-',label='$|\Gamma_{TE}|^2$')
ax[1][1].plot(angles_deg,np.abs(R_int_TM),'b--',label='$|\Gamma_{TM}|^2$')
ax[1][1].set_xlabel('Angle (degrees)',fontsize=fsize)
ax[1][1].legend(loc=2,fontsize=14)
ax[1][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int[0],n_list_int[1]), fontsize=fsize)
fig.subplots_adjust(hspace=0.05,wspace=0.05)
# +
# Same 2x2 comparison as above, but with a higher index contrast
# (n = 1.0 / 3.0) to show the stronger reflection and smaller critical angle.
# list of layer thicknesses in nm
d_list = [np.inf, np.inf]
# list of refractive indices
nlow = 1.0
nhigh = 3.0
n_list_ext = [nlow, nhigh]
n_list_int = [nhigh, nlow]
wavelength = 0.5
angles_deg = np.linspace(0,90,num=181)
fsize = 14
# initialize lists of y-values to plot
r_ext_TE=[]
r_ext_TM=[]
r_int_TE=[]
r_int_TM=[]
R_ext_TE=[]
R_ext_TM=[]
R_int_TE=[]
R_int_TM=[]
for angle in angles_deg:
    r_ext_TE.append(coh_tmm('s',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_ext_TM.append(coh_tmm('p',n_list_ext, d_list, angle*degree, wavelength)['r'])
    r_int_TE.append(coh_tmm('s',n_list_int, d_list, angle*degree, wavelength)['r'])
    r_int_TM.append(coh_tmm('p',n_list_int, d_list, angle*degree, wavelength)['r'])
    R_ext_TE.append(coh_tmm('s',n_list_ext, d_list, angle*degree, wavelength)['R'])
    R_ext_TM.append(coh_tmm('p',n_list_ext, d_list, angle*degree, wavelength)['R'])
    R_int_TE.append(coh_tmm('s',n_list_int, d_list, angle*degree, wavelength)['R'])
    R_int_TM.append(coh_tmm('p',n_list_int, d_list, angle*degree, wavelength)['R'])
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(12,10))
ax[0][0].set_ylabel('Reflection coefficient',fontsize=fsize)
ax[0][0].plot(angles_deg,np.abs(r_ext_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][0].plot(angles_deg,np.abs(r_ext_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][0].legend(loc=2,fontsize=14)
ax[0][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext[0],n_list_ext[1]), fontsize=fsize)
ax[0][1].plot(angles_deg,np.abs(r_int_TE),'b-',label='$|\Gamma_{TE}|$')
ax[0][1].plot(angles_deg,np.abs(r_int_TM),'b--',label='$|\Gamma_{TM}|$')
ax[0][1].legend(loc=2,fontsize=fsize)
ax[0][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int[0],n_list_int[1]), fontsize=fsize)
ax[1][0].set_ylabel('Reflectance',fontsize=fsize)
ax[1][0].plot(angles_deg,np.abs(R_ext_TE),'b-',label='$|\Gamma_{TE}|^2$')
ax[1][0].plot(angles_deg,np.abs(R_ext_TM),'b--',label='$|\Gamma_{TM}|^2$')
ax[1][0].set_xlabel('Angle (degrees)',fontsize=fsize)
ax[1][0].legend(loc=2,fontsize=14)
ax[1][0].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_ext[0],n_list_ext[1]), fontsize=fsize)
ax[1][1].plot(angles_deg,np.abs(R_int_TE),'b-',label='$|\Gamma_{TE}|^2$')
ax[1][1].plot(angles_deg,np.abs(R_int_TM),'b--',label='$|\Gamma_{TM}|^2$')
ax[1][1].set_xlabel('Angle (degrees)',fontsize=fsize)
ax[1][1].legend(loc=2,fontsize=14)
ax[1][1].text(40,1.03,'$n_1 = {}$\n$n_2 = {}$'.format(n_list_int[0],n_list_int[1]), fontsize=fsize)
fig.subplots_adjust(hspace=0.05,wspace=0.05)
# -
|
reflection_refraction/Reflection_Refraction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import math as m
np.set_printoptions(suppress=True)
def gradiente(h):
    """Return (t, to, a) for altitude h (metres) in the ISA model.

    t  -- temperature (K) at altitude h
    to -- base temperature (K) of the layer containing h
    a  -- temperature lapse rate (K/m) of that layer

    For h outside [0, 32000] all three values are the string
    "Valor erroneo" (sentinel kept for backward compatibility).
    """
    c = 11000   # top of the troposphere (m)
    d = 20000   # top of the isothermal lower stratosphere (m)
    e = 32000   # upper limit of this model (m)
    if 0 <= h < c:
        # Troposphere: linear decrease from the sea-level temperature.
        a = (-0.0065)
        to = 288.15
        t = to + (a)*(h-0)
    elif c <= h < d:
        # Lower stratosphere: isothermal layer.
        a = 0
        to = 216.65
        t = to
    elif d <= h <= e:
        # Upper stratosphere: temperature rises again above 20 km.
        a = 0.001
        to = 216.65
        t = to + (a)*(h-20000)
    else:
        # BUG FIX: the original never assigned `to` here, so invalid
        # altitudes raised UnboundLocalError on the return statement.
        a = "Valor erroneo"
        to = "Valor erroneo"
        t = "Valor erroneo"
    return(t, to, a)
# Interactive driver: ask the user for an altitude (m) and print its
# ISA temperature.
h = input("Inserte la altura")
h = int(h)
t,to,a = gradiente(h)
print(t)
# +
def presion (t,to,a,h):
    """Return the ISA pressure (Pa) at altitude h (metres).

    The t, to and a arguments are accepted for backward compatibility but
    are recomputed from gradiente(h), so only h actually matters.  The
    result is also published in the module-level variable ``p`` because
    later notebook cells read it.

    Raises ValueError (via float()) when h is outside the model range,
    since gradiente then returns string sentinels.
    """
    t, to, a = gradiente(h)
    t = float(t)
    to = float(to)
    a = float(a)
    global p
    if a == -0.0065:
        # Troposphere: barometric formula with a linear lapse rate.
        po = 101325
        p = po*(t/to)**(-9.81/(a*287))
    elif a == 0:
        # Isothermal layer: exponential decay from the 11 km base pressure.
        po = 22632
        p = po*(m.e)**((-9.81/(287*t))*(h-11000))
    elif a == 0.001:
        # Upper stratosphere: linear lapse rate from the 20 km base.
        po = 5447
        p = po*(t/to)**(-9.81/(a*287))
    else:
        # BUG FIX: the original never assigned ``p`` on this path, so the
        # return below raised NameError; keep the string sentinel style.
        p = "Valor erroneo"
    return(p)
presion(t,to,a,h)
# -
# Manual cross-check of the troposphere branch at h = 5000 m
# (t = 255.65 K); should match presion(..., 5000).
p = 101325*(255.65/288.15)**(-9.81/((-0.0065)*287))
p
# +
rango = np.array([5000,15000,25000])

def gradiente_array(h):
    """Vectorised gradiente: return (t, to, a) arrays for altitudes h (m).

    Altitudes outside [0, 32000] yield NaN in all three outputs.
    """
    salida_t = []
    salida_to = []
    salida_a = []
    # Layer boundaries (m); constants hoisted out of the loop.
    c = 11000
    d = 20000
    e = 32000
    for alt in h:
        if 0 <= alt < c:
            a = (-0.0065)
            to = 288.15
            t = to + (a)*(alt-0)
        elif c <= alt < d:
            a = 0
            to = 216.65
            t = to
        elif d <= alt <= e:
            a = 0.001
            to = 216.65
            t = to + (a)*(alt-20000)
        else:
            # BUG FIX: the original left `to` holding the previous
            # iteration's value (or unbound on the first iteration), so an
            # invalid altitude silently recorded a wrong base temperature.
            # np.nan replaces np.NaN, which was removed in NumPy 2.0.
            a = np.nan
            to = np.nan
            t = np.nan
        salida_t.append(t)
        salida_to.append(to)
        salida_a.append(a)
    # Build each output array once instead of reallocating with np.append
    # on every iteration (quadratic in len(h)).
    return(np.array(salida_t), np.array(salida_to), np.array(salida_a))

salida_t, salida_to, salida_a = gradiente_array(rango)
print(salida_t)
# -
# +
def presion_array(h):
    """Vectorised ISA pressure: return (p, t, h) arrays for altitudes h (m).

    Invalid altitudes (NaN lapse rate from gradiente_array) now yield NaN
    pressure instead of silently repeating the previous value.
    """
    salida_p = []
    tlist, tolist, alist = gradiente_array(h)
    for i in range(len(tlist)):
        t = float(tlist[i])
        to = float(tolist[i])
        a = float(alist[i])
        if a == -0.0065:
            # Troposphere: barometric formula with a linear lapse rate.
            po = 101325
            p = po*(t/to)**(-9.81/(a*287))
        elif a == 0:
            # Isothermal layer: exponential decay from the 11 km base.
            po = 22632
            p = po*(m.e)**((-9.81/(287*t))*(h[i]-11000))
        elif a == 0.001:
            # Upper stratosphere: linear lapse rate from the 20 km base.
            po = 5447
            p = po*(t/to)**(-9.81/(a*287))
        else:
            # BUG FIX: originally no branch matched a NaN lapse rate, so
            # the previous iteration's pressure was appended again (or
            # NameError was raised on the first iteration).
            p = np.nan
        salida_p.append(p)
    # Build the array once instead of np.append per iteration.
    return(np.array(salida_p), tlist, h)
presion_array(rango)
# -
# Altitude grid from 0 to 32 km in 100 m steps.
x = np.arange(0,32001,100)
x
# +
# Tabulate (altitude, temperature, pressure) as rows.
p, t, h =presion_array(x)
results = np.vstack((h, t, p)).T
results
# -
# Plot temperature (left axis) and pressure (right axis) against altitude.
plt_data = np.vstack((h, t, p))
fig = plt.figure()
ax1 = fig.add_subplot()
ax1.plot(plt_data[0],plt_data[1], c = "red", label = "temperatura")
ax1.set_xlabel('altura')
ax1.set_ylabel('temperatura')
ax2 = ax1.twinx()  # second y-axis sharing the altitude axis
ax2.plot(plt_data[0],plt_data[2], c = "blue", label = "presion")
ax2.set_ylabel('presion')
fig.legend(loc='upper right')
plt.show()
# +
#
# -
|
.ipynb_checkpoints/Act 1 programacio (1)-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Contenido bajo licencia Creative Commons BY 4.0 y código bajo licencia MIT. © <NAME> y <NAME> 2020. Este material es parte del curso Modelación Computacional en el programa de Ingeniería Civil de la Universidad EAFIT.
# # Integración numerica
# ## Introducción
# Discutiremos de manera breve la definición de cuadratura. Posteriormente nos concentraremos en las cuadraturas Gaussianas que por su eficiencia y facilidad de sistematización son de amplio uso en ingeniería y física. Para estas cubriremos su desarrollo general y su implementación en Python. Los detalles de la cuadratura Gaussiana y su implementación se discutirán por medio de un ejemplo.
#
# **Al completar este notebook usted debería estar en la capacidad de:**
#
# * Identificar una cuadratura como una formula de evaluar integrales numéricamente.
#
# * Identificar la relación entre la función a integrar y el tipo de esquema requerido para su evaluación.
#
# * Evaluar integrales numéricamente usando cuadraturas Gaussianas.
# ## Cuadraturas
# Una cuadratura es una fórmula para la evaluación numérica de integrales de la forma general:
#
#
# $$I=\int\limits_{V(\vec{x})} f(\vec{x}) \mathrm{d}V(\vec{x}) \approx\sum_{i=1}^{N} f(\vec{x}_i)w_i\, .$$
#
# Note que esta expresión corresponde a la evaluación de la función $f(x)$ en $N$ puntos de coordenadas $x_i$ multiplicados por $N$ factores $w_i$. Los factores se denominan **pesos** o factores de ponderación ya que se encargan de ponderar la contribución de cada término $f(x_i)$ a $I$ y tienen una interpretación similar al diferencial $\mathrm{d}V$. Incluso, estos últimos son los que se encargarían de aportar las unidades pertinentes a la integral (aproximada).
# ### Ejemplo: regla del trapecio
#
# Una cuadratura con la cual estamos familiarizados es la regla del trapecio dada por:
#
# $$I=\int\limits_a^b f(x) \mathrm{d}x \approx \frac{h}{2}[f(a) + f(b)]\, ,$$
#
# en donde $h = b - a$. En esta expresión podemos reconocer los factores de ponderación $w_1 = h/2$, $w_2 = h/2$ y los puntos de evaluación $x_1 = a$ y $x_2 = b$.
#
# Por ejemplo, consideremos la siguiente integral:
#
# $$I = \int\limits_{-1}^{+1} (x^3 + 4x^2 - 10) \mathrm{d}x \approx 1.0\cdot f(-1) + 1.0\cdot f(+1) = -12\, .$$
#
#
# ### Cuadraturas Gaussianas
#
# Una de las cuadraturas más poderosas encontradas en la práctica son las denominadas cuadraturas [Gaussianas](https://en.wikipedia.org/wiki/Gaussian_quadrature). En estas, los factores de ponderación $w_i$ y los puntos de evaluación $x_i$ son seleccionados de manera que se obtenga la mejor aproximación (mínimo error) de la manera más efectiva (mínimo número de puntos de evaluación). El ser formuladas usando un proceso de ajuste de $2 N$ parámetros correspondientes a los $N$ pesos y a los $N$ puntos de evaluación permiten integrar de manera exacta funciones polinomiales de orden a lo sumo $2 N - 1$.
#
# La principal desventaja de las cuadraturas Gaussianas es el hecho de que en estas los puntos de evaluación se encuentran especificados en términos de coordenadas en el rango fijo entre $x=-1$ y $x=+1$ lo cual obliga a que sea necesario realizar una transformación previa o cambio de variable.
#
# Para evitar confusiones en la notación denotemos el espacio en el que se indican las coordenadas de las cuadraturas Gaussianas mediante la letra $r$, de manera que el cambio de variables se expresa como:
#
# $$I = \int\limits_{x=a}^{x=b} f(x) \mathrm{d}x \equiv \int\limits_{r=-1}^{r=+1}F(r) \mathrm{d}r\, .$$
#
# Nótese que el cambio de variables implica:
#
# * Relacionar $x$ y $r$ lo que podemos escribir de forma general como $x = x(r)$ y $r = r(x)$.
#
# * Expresar $f(x)$ en términos de la nueva variable de acuerdo con $F(r) = f[x(r)]$.
#
# * Expresar $\mathrm{d}x$ en términos de $\mathrm{d}r$.
#
# ### Cuadratura de 2 puntos
# Considere el caso de una cuadratura de 2 puntos, es decir $N =2$. En este caso los factores de ponderación y puntos de evaluación se especifican en la siguiente tabla:
#
#
# | $r$ | $w$ |
# |---------------------|-------|
# | $\frac{-\sqrt3}{3}$ | $1.0$ |
# | $\frac{+\sqrt3}{3}$ | $1.0$ |
#
#
# Para realizar el cambio de variables asumamos que la relación entre las variables independientes $x$ y $r$ es lineal de manera que:
#
# $$x(r) = \frac{1}{2}(a + b) + \frac{r}{2}(b - a) \equiv \frac{1}{2}(a + b) + \frac{h}{2}r\, ,$$
#
# y por lo tanto:
#
# $$\mathrm{d}x=\frac{h}{2}\mathrm{d}r\, .$$
#
# Esto que produce la siguiente equivalencia entre las integrales en los 2 espacios:
#
# $$I = \int\limits_{x=a}^{x=b} f(x) \mathrm{d}x \equiv \int\limits_{r=-1}^{r=+1} f[ x(r)]\frac{h}{2} \mathrm{d}r\, .$$
#
# Ahora, la integral formulada en el espacio de $r$ es fácilmente evaluable mediante las coordenadas y pesos de la tabla.
# <div class="alert alert-warning">
# Consultar los factores y puntos de integración para una cuadratura Gaussiana de 4 puntos.
# </div>
# ## Solución en Python
#
# En los bloques de código que se presentan a continuación se implementa la cuadratura Gaussiana de 2 puntos para calcular la integral:
#
# $$
# I=\int_{x = -1}^{x = +1}(x^3+4x^2-10)\operatorname dx
# $$
#
# <div class="alert alert-warning">
# Adicionar comentarios a cada uno de los bloques de código que se presentan a continuación.
# </div>
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
# <div class="alert alert-warning">
# En el espacio encerrado entre comillas en cada una de las siguientes subrutinas indique el significado de cada uno de los parámetros y su tipo de dato.
# </div>
def gpoints2():
    """Two-point Gauss-Legendre quadrature on the reference interval.

    Returns
    -------
    xw : ndarray (float)
        Weights of the two points (both 1.0).
    xp : ndarray (float)
        Evaluation points, -1/sqrt(3) and +1/sqrt(3) in [-1, 1].
    """
    xw = np.array([1.0, 1.0])
    xp = np.array([-0.577350269189626, 0.577350269189626])
    return xw, xp
def transform(a, b, r):
    """Map a Gauss point r in [-1, 1] to the physical interval [a, b].

    Returns
    -------
    xr : float
        Physical coordinate corresponding to r.
    h : float
        Interval length b - a (needed for the dx = (h/2) dr Jacobian).
    """
    h = b - a
    midpoint = (a + b) / 2.0
    xr = midpoint + 0.5 * h * r
    return xr, h
def myfun(x):
    """Integrand f(x) = x**3 + 4*x**2 - 10 used in the quadrature example."""
    return x**3 + 4*x**2 - 10
# <div class="alert alert-warning">
# Adicione comentarios al código de integración.
# </div>
# Integrate f(x) = x^3 + 4x^2 - 10 over [a, b] with the 2-point Gauss rule.
ngpts = 2            # number of Gauss points
a = -1.0             # lower integration limit
b = +1.0             # upper integration limit
integral = 0.0       # accumulator for the weighted sum
xw, xp = gpoints2()  # weights and reference-interval points
for i in range(0, ngpts):
    # Map the Gauss point to [a, b]; h/2 is the Jacobian of that mapping.
    xr, h = transform(a, b, xp[i])
    fx = myfun(xr)
    integral = integral + fx*h/2.0*xw[i]
print(integral)
# <div class="alert alert-warning">
#
# **Preguntas:**
#
# 1. Modificar el código anterior para calcular la integral con una cuadratura de 3 puntos.
#
# 2. Repetir el cálculo de la integral anterior si ahora los límites de integración son $a =0$ y $b=2$.
#
# 3. Usando la cuadratura Gaussiana calcular la siguiente integral:
#
# $$I=\int\limits_{x=3.0}^{x=6.0} \mathrm{d}x$$
#
# 4. ¿Cómo sería la generalización de la cuadratura Gaussiana sobre un cuadrilátero?
#
# </div>
# ## Glosario de términos
# **Cuadratura:** Fórmula de integración numérica compuesta por un conjunto de puntos de evaluación y factores de ponderación.
#
# **Punto de integración:** Punto de evaluación de la función a integrar mediante una cuadratura numérica.
#
# **Punto de Gauss:** Punto de integración en una cuadratura Gaussiana.
#
# **Factor de ponderación:** Constante que pondera la contribución de la función a la integral cuando esta es evaluada en un punto de integración determinado.
# ## Actividad para la clase
#
# La figura muestra el problema de una cuña de semi-ángulo interno $\phi=\frac\pi4$ y lado $\ell = 10.0$ sometida a tracciones en las superficies inclinadas de magnitud $S = 1.0$.
#
#
# <center><img src="img/wedge.png"
# alt="Esquema de la cuña."
# style="width:300px">
# </center>
#
#
# Considerando que la relaciónes deformación-desplazamiento y tensión-deformación están dadas por:
#
# \begin{align}
# \varepsilon_{xx} &= \frac{\partial u}{\partial x}\, ,\\
# \varepsilon_{yy} &= \frac{\partial v}{\partial y}\, ,\\
# \varepsilon_{xy} &= \frac{1}{2}\left(\frac{\partial u}{\partial y}
# + \frac{\partial v}{\partial x}\right)\, ,\\
# \sigma_{xx} &= \frac E{1 + \nu}\varepsilon_{xx} + \frac{\nu E}{(1+\nu)(1-2\nu)}(\varepsilon_{xx} + \varepsilon_{yy})\, ,\\
# \sigma_{yy} &= \frac E{1+\nu}\varepsilon_{yy} + \frac{\nu E}{(1+\nu)(1-2\nu)}(\varepsilon_{xx} + \varepsilon_{yy})\, ,\\
# \sigma_{xy} &= \frac{E}{2(1 + \nu)} \varepsilon_{xy}\, ,
# \end{align}
#
# se pide:
#
# 1. Calcular la energía de deformación del sistema dada por:
#
# $$I = \frac{1}{2}\int\limits_S (\sigma_{xx}\varepsilon_{xx} + \sigma_{yy}\varepsilon_{yy}
# + 2\sigma_{xy}\varepsilon_{xy})\mathrm{d}S\, ,$$
#
# asumiendo que los desplazamientos en los puntos izquierdo y derecho están dados por
#
# $$\vec{u}_\text{izq} = -2.0 \hat{\imath}\, ,$$
#
# y
#
# $$\vec{u}_\text{der} = +2.0\hat{\imath}\, ,$$
#
# mientras que los de los puntos superior e inferior corresponden a
#
# $$\vec{u}_\text{sup} = -2.0 \hat{\jmath}\, ,$$
#
# y
#
# $$\vec{u}_\text{inf}=+2.0\hat{\jmath}\, .$$
#
# 2. Verifique que su resultado es correcto comparando con la solución analítica del problema.
# ## Formato del notebook
# La siguiente celda cambia el formato del Notebook.
from IPython.core.display import HTML
def css_styling():
    """Load ./nb_style.css and return it as HTML to restyle the notebook."""
    # BUG FIX: use a context manager so the file handle is closed promptly
    # (the original left the file object open).
    with open('./nb_style.css', 'r') as style_file:
        styles = style_file.read()
    return HTML(styles)
css_styling()
|
notebooks/03_integracion_numerica.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# importing the libraries
import numpy as np
import peakutils
import syntheticdata
import threegaussians
import lorentzian
from peakutils.plot import plot as pplot
from matplotlib import pyplot as plt
# %matplotlib inline
from scipy.optimize import curve_fit
from scipy import interpolate
from astropy.modeling import models, fitting
import pandas as pd
# load some data
def loaddata(data_filename):
    """Read a tab-separated matrix file and split it into axes and data.

    The first column (below the corner cell) holds the wavelengths in nm,
    the first row (right of the corner cell) the time delays, and the
    remaining cells the signal matrix.

    Returns
    -------
    (data_nm, data_time, data_z) : wavelength axis, time axis, z matrix.
    """
    raw = np.genfromtxt(data_filename, delimiter='\t')
    data_nm = raw[1:, 0]    # wavelength axis (nm)
    data_time = raw[0, 1:]  # time-delay axis
    data_z = raw[1:, 1:]    # signal matrix
    return data_nm, data_time, data_z
def find_nearest(array,value):
    """Return the index of the element of `array` closest to `value`."""
    distances = np.abs(array - value)
    idx = distances.argmin()
    return idx
# add noise
def add_noise(nm_array, y_array, noise_coefficient):
    """Return y_array plus reproducible Gaussian noise.

    The RNG is reseeded (seed 1800) on every call, so identical arguments
    always produce the same noisy curve; nm_array only determines the
    number of noise samples drawn.
    """
    np.random.seed(1800)  # fixed seed -> deterministic noise
    noise = noise_coefficient * np.random.normal(size=nm_array.size)
    return y_array + noise
def Earth_Smoothing(nm_array, y_array,noise_coefficient):
    """Denoise a spectrum with a smoothed MARS (py-earth) fit.

    A reproducible noisy copy of ``y_array`` is generated (seed 42,
    amplitude ``noise_coefficient``), an Earth model with smoothing
    enabled is fitted against ``nm_array``, and the model prediction over
    ``nm_array`` is returned as the smoothed curve.

    Parameters
    ----------
    nm_array : wavelength array (also sets the number of noise samples)
    y_array : signal array to smooth
    noise_coefficient : amplitude of the synthetic noise added before fitting

    Returns
    -------
    ndarray of model predictions (the denoised signal)
    """
    from pyearth import Earth  # third-party dependency, imported lazily
    model = Earth(smooth=True)
    np.random.seed(42)  # reproducible synthetic noise
    ydata = y_array + noise_coefficient*np.random.normal(size=nm_array.size)
    model.fit(nm_array, ydata)
    y_hat = model.predict(nm_array)
    return y_hat
# * py-earth
def earth_smooth_matrix(nm_array,data_matrix,noise_coefficient):
    """Build noisy and smoothed DataFrames for every time slice.

    Each column of ``data_matrix`` is one spectrum: a noisy copy
    (add_noise) and a py-earth smoothed copy (Earth_Smoothing) are stored
    as the corresponding columns of two DataFrames.

    Returns
    -------
    (noise_matx, smooth_matx) : pandas.DataFrame pair with one column per
    time slice.
    """
    num_rows = np.shape(data_matrix)[0]
    # BUG FIX: iterate over the actual number of columns instead of the
    # hard-coded range(500) used originally.
    num_cols = np.shape(data_matrix)[1]
    # Placeholder column 'a' seeds the joins and is dropped at the end.
    smooth_matx = pd.DataFrame(np.empty((num_rows,1)), columns = ['a'])
    noise_matx = pd.DataFrame(np.empty((num_rows,1)), columns = ['a'])
    for i in range(num_cols):
        data_array = data_matrix[:, i]
        # Noisy and smoothed versions of this time slice.
        noise_array = add_noise(nm_array, data_array, noise_coefficient).tolist()
        smooth_array = Earth_Smoothing(nm_array,data_array,noise_coefficient).tolist()
        noise_matx = noise_matx.join(pd.DataFrame(noise_array, columns=[i]))
        smooth_matx = smooth_matx.join(pd.DataFrame(smooth_array, columns=[i]))
    # Drop the placeholder first column.
    noise_matx = noise_matx.drop(columns='a')
    smooth_matx = smooth_matx.drop(columns='a')
    return noise_matx, smooth_matx
def findpeak(data_z_array, threshold, min_dist):
    """Locate peaks in a 1-D signal.

    Thin wrapper around peakutils.indexes: returns indices of peaks above
    ``threshold`` that are at least ``min_dist`` samples apart.
    """
    return peakutils.indexes(data_z_array, thres=threshold, min_dist=min_dist)
def Earth_Peakutils(nm_array, timedelay,threshold,min_dist):
    """Compare peak indices found on the raw versus the smoothed signal.

    A reproducible noisy copy of ``timedelay`` (seed 1729, amplitude 0.1)
    is fitted with a smoothed py-earth model; peakutils then locates peaks
    both in the original signal and in the model prediction.

    Parameters
    ----------
    nm_array : wavelength array
    timedelay : signal array (one time slice)
    threshold, min_dist : peak-finding parameters passed to peakutils

    Returns
    -------
    (peak_indices_true, peak_indices_smooth) : index arrays from the raw
    and the smoothed curves respectively.

    Fixes over the original: the docstring documented a nonexistent
    ``noise_coefficient`` parameter, and unused in-function imports of
    numpy and matplotlib were removed.
    """
    from pyearth import Earth  # third-party dependency, imported lazily
    np.random.seed(1729)  # reproducible synthetic noise
    y_noise = 0.1 * np.random.normal(size=nm_array.size)
    ydata = timedelay + y_noise
    # Fit an Earth model with smoothing enabled.
    model = Earth(max_degree=2, minspan_alpha=.5, smooth=True)
    model.fit(nm_array, ydata)
    y_hat = model.predict(nm_array)
    # Peak indices on the raw and on the smoothed curve.
    peak_indices_true = peakutils.indexes(timedelay, thres=threshold, min_dist=min_dist)
    peak_indices_smooth = peakutils.indexes(y_hat, thres=threshold, min_dist=min_dist)
    return peak_indices_true,peak_indices_smooth
def earth_peak_matrix(nm_array,data_matrix,noise_coefficient,threshold, min_dist):
    """Find peak indices for every time slice, raw versus smoothed.

    For each column of ``data_matrix``, peak indices are found in the raw
    signal and in its py-earth smoothed version (Earth_Smoothing).

    Returns
    -------
    (true_df, smooth_df) : DataFrames with one row per time slice and one
    column per detected peak index.
    """
    # BUG FIX: iterate over the real column count rather than the
    # hard-coded range(500) of the original; the unused noise_array
    # computation was also dropped.
    num_array = np.shape(data_matrix)[1]
    true_peak = []
    smooth_peak = []
    for i in range(num_array):
        data_array = data_matrix[:, i]
        smooth_array = Earth_Smoothing(nm_array, data_array,noise_coefficient)
        true_peak.append(findpeak(data_array, threshold, min_dist).tolist())
        smooth_peak.append(findpeak(smooth_array, threshold, min_dist).tolist())
    # Ragged rows are NaN-padded by the DataFrame constructor.
    true_df=pd.DataFrame(true_peak)
    smooth_df=pd.DataFrame(smooth_peak)
    return true_df, smooth_df
# # -------------------------------------------------------------------------------------------
# # * Get peak dataframe
# Load the synthetic two-Gaussian dataset and build the noisy/smoothed
# matrices (one column per time slice, noise amplitude 0.1).
matx_filename = '20180418_twogaussian_spectralshfit.txt'
datanm, datatime, dataz_matx = loaddata(matx_filename)
noisez_matx, smooth_matx = earth_smooth_matrix(datanm,dataz_matx,0.1)
# * py-earth and peakutils
## get the peak position dataframe of true data set
# Peak finder parameters: threshold 0, minimum distance 10 samples.
true_df, smooth_df = earth_peak_matrix(datanm, dataz_matx, 0.1, 0, 10)
true_df
smooth_df
# # Peak width and fwhm Dataframe
def peakchar(data_nm, data_z_array, peak_index):
    """Find the height and FWHM of each previously-located peak.

    Parameters
    ----------
    data_nm : wavelength axis corresponding to data_z_array
    data_z_array : 1-D signal values
    peak_index : indices of located peaks, assumed in ascending order

    Returns
    -------
    (height, fwhm) : list of peak heights and array of full widths at
    half maximum, in the units of data_nm.
    """
    num_peaks = len(peak_index)
    #array of peak height
    height = [data_z_array[idx] for idx in peak_index]
    #array of peak width
    half_height = [ht / 2 for ht in height]
    # Index/coordinate buffers for the left (1) and right (2) half-height
    # crossings of each peak.
    fwhm_idx_1 = np.empty_like(half_height)
    fwhm_idx_2 = np.empty_like(fwhm_idx_1)
    fwhm_nm_1 = np.empty_like(fwhm_idx_1)
    fwhm_nm_2 = np.empty_like(fwhm_idx_1)
    for i in range(num_peaks):
        #find the index and nm of the left side of the fwhm
        # Search left of the peak only back to the previous peak so a
        # neighbouring feature's crossing is not picked up.
        if i == 0:
            fwhm_idx_1[i] = find_nearest(data_z_array[0:peak_index[i]], half_height[i])
        else:
            fwhm_idx_1[i] = find_nearest(data_z_array[peak_index[i-1]:peak_index[i]], half_height[i]) + peak_index[i-1]
        fwhm_nm_1[i] = data_nm[int(fwhm_idx_1[i])]
        #find the index and nm of the right side of the fwhm
        fwhm_idx_2[i] = find_nearest(data_z_array[peak_index[i]:], half_height[i]) + peak_index[i]
        fwhm_nm_2[i] = data_nm[int(fwhm_idx_2[i])]
    #find fwhm: distance between the two half-height crossings
    fwhm = fwhm_nm_2 - fwhm_nm_1
    return height, fwhm
def earth_peak_matrix(nm_array,data_matrix,noise_coefficient,threshold, min_dist):
    """Find peak indices for every time slice, raw versus smoothed.

    Second definition in this notebook; it shadows the earlier one.

    Returns
    -------
    (true_df, smooth_df) : DataFrames with one row per time slice and one
    column per detected peak index.
    """
    num_array = np.shape(data_matrix)[1]
    true_peak = []
    smooth_peak = []
    # BUG FIX: loop over num_array (the actual column count) instead of
    # the hard-coded range(500).
    for i in range(num_array):
        data_array = data_matrix[:, i]
        noise_array = add_noise(nm_array, data_array, noise_coefficient)
        smooth_array = Earth_Smoothing(nm_array, data_array,noise_coefficient)
        indexes=findpeak(data_array, threshold, min_dist).tolist()
        true_peak.append(indexes)
        # Consistency fix: convert to a list like the true-peak branch
        # (the original appended a raw ndarray here).
        indexes1=findpeak(smooth_array, threshold, min_dist).tolist()
        smooth_peak.append(indexes1)
    # transfer to dataframe (ragged rows are NaN-padded)
    true_df=pd.DataFrame(true_peak)
    smooth_df=pd.DataFrame(smooth_peak)
    return true_df, smooth_df
def peak_matrix(nm_array,data_matrix, threshold, mindist):
    """Find peak indices, heights and FWHMs for every time slice.

    ``data_matrix`` is a DataFrame with one column per time slice; for
    each column the peak indices (findpeak) and their heights and FWHMs
    (peakchar) are collected.

    Returns
    -------
    (peak_idx_df, peak_height_df, peak_fwhm_df) : DataFrames with one row
    per time slice and one column per detected peak.
    """
    peak_idx_matx = []
    peak_height_matx = []
    peak_fwhm_matx = []
    # BUG FIX: iterate over the actual number of time slices instead of
    # the hard-coded range(500).
    num_slices = data_matrix.values.shape[1]
    for i in range(num_slices):
        data_timeslice = data_matrix.values[:, i]
        peak_idx = findpeak(data_timeslice, threshold, mindist).tolist()
        peak_idx_matx.append(peak_idx)
        peak_height, peak_fwhm = peakchar(nm_array, data_timeslice, peak_idx)
        peak_height_matx.append(peak_height)
        peak_fwhm_matx.append(peak_fwhm)
    # transfer to dataframe (ragged rows are NaN-padded)
    peak_idx_df=pd.DataFrame(peak_idx_matx)
    peak_height_df=pd.DataFrame(peak_height_matx)
    peak_fwhm_df=pd.DataFrame(peak_fwhm_matx)
    return peak_idx_df, peak_height_df, peak_fwhm_df
smooth_matx.values[:, 200]  # spot-check one smoothed time slice
# Peak index/height/FWHM tables for every smoothed slice
# (threshold 0, minimum peak separation 50 samples).
peak_idx_df, peak_height_df, peak_fwhm_df = peak_matrix(datanm,smooth_matx, 0.00, 50)
peak_height_df
peak_fwhm_df
peak_idx_df
# # --------------------------------------------------------
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
from scipy.optimize import differential_evolution
from scipy.special import gamma
from sklearn.cluster import KMeans
def id_outliers_replacewith_interp(x_array, data, m, win_len):
    """Remove outliers from `data` window-by-window, then re-interpolate.

    The series is split into consecutive windows of ``win_len`` samples;
    within each window, points further than a threshold number of
    standard deviations from the window mean are deleted (the global
    endpoints of ``x_array`` are always kept as interpolation anchors),
    and the cleaned series is linearly interpolated back onto the
    original ``x_array`` grid.

    Parameters
    ----------
    x_array : abscissa values (e.g. time slices), same length as data
    data : series to clean (e.g. peak positions per time slice)
    m : outlier threshold in standard deviations
        (NOTE(review): the calls below hard-code 1 instead of using this
        parameter -- confirm whether that is intentional)
    win_len : window length in samples

    Returns
    -------
    ndarray: cleaned data interpolated back onto x_array.
    """
    reshape_x_array = []
    reshape_data = []
    quotient_array = np.empty(len(data))
    remainder_array = np.empty(len(data))
    quotient_array[0] = 0
    remainder_array[0] = 0
    #print divmod(len(data), win_len)
    quotient_max = divmod(len(data), win_len)[0]  # number of full windows
    print (quotient_max)
    #quotient_array_new = []
    data_idx = np.arange(0, len(data), 1)
    # Slice the series into consecutive windows of win_len samples: a new
    # window is emitted whenever the integer quotient idx // win_len changes.
    for i in range(1, len(data_idx)):
        quotient = divmod(data_idx[i], win_len)[0]
        quotient_array[i] = quotient
        remainder = divmod(data_idx[i], win_len)[1]
        remainder_array[i] = remainder
        if quotient != quotient_array[i-1]:
            newslice = data[i - win_len: i]
            newslice_x = x_array[i - win_len: i]
            #print newslice
            reshape_data.append(newslice)
            reshape_x_array.append(newslice_x)
        else:
            pass
    # Trailing (possibly partial) window: all indices with the maximal quotient.
    quotient_max_idx = np.where(quotient_array == quotient_max)
    #print quotient_max_idx
    reshape_data.append(data[quotient_max_idx[0]])
    reshape_x_array.append(x_array[quotient_max_idx[0]])
    #print reshape_data
    reshape_data_shape = np.shape(reshape_data)[0]  # number of windows
    #print reshape_data_shape
    def id_outliers_and_delete(d,x, m):
        # Delete points of window d further than m std-devs from the window
        # mean; the global endpoints of x_array are never deleted so the
        # final interpolation has anchors at both ends.
        d_mean = np.mean(d)
        d_stdev = np.std(d)
        new_d = np.empty_like(d)
        for i in range(len(d)):
            d_pt = d[i]
            if abs(d_pt - d_mean) > m * d_stdev and x[i] != x_array[0] and x[i] != x_array[len(x_array) - 1]:
                new_d[i] = 1  # mark as outlier
            else:
                new_d[i] = 0
        outlier_idx = np.nonzero(new_d)[0]
        d_delete = np.delete(d, outlier_idx)
        x_delete = np.delete(x, outlier_idx)
        #print data2[outlier_idx]
        return x_delete, d_delete
    new_x_array = []
    new_data = []
    for i in range(reshape_data_shape):
        new_data.append(id_outliers_and_delete(reshape_data[i],reshape_x_array[i], 1)[1])#(id_outliers_replacewith_mean(reshape_data[i], m))
        new_x_array.append(id_outliers_and_delete(reshape_data[i],reshape_x_array[i],1)[0])
    # Flatten all full windows, then re-attach the last (partial) window.
    new_data_flat = np.concatenate(new_data[:-1]).ravel().tolist()#.flatten()
    new_x_array_flat = np.concatenate(new_x_array[:-1]).ravel().tolist()#.flatten()
    new_data_final = np.concatenate((new_data_flat, new_data[reshape_data_shape - 1]))
    new_x_array_final = np.concatenate((new_x_array_flat, new_x_array[reshape_data_shape - 1]))
    # Linearly interpolate the cleaned points back onto the original grid.
    new_data_final_interp = np.interp(x_array, new_x_array_final, new_data_final)
    return new_data_final_interp
# +
# Clean the first peak's trajectory and compare before/after.
peak_pos = np.array(peak_idx_df.iloc[:, 0])
outlier_interp = id_outliers_replacewith_interp(datatime, peak_pos, 1, 25)
plt.figure(dpi = 300)
plt.plot(datatime, peak_pos, 'o', markersize = 3, label = 'before')
plt.plot(datatime, outlier_interp, 'o', markersize = 3, label = 'after')
# NOTE(review): peak_pos_true is defined only in a later cell; running the
# notebook top-to-bottom raises NameError here -- confirm cell order.
plt.plot(datatime, peak_pos_true, linewidth = 4, label = 'true peak position')
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.ylabel('Peak index', fontsize = 20, fontweight = 'bold')
plt.xlabel('Time slices', fontsize = 20, fontweight = 'bold')
plt.legend()
# -
# True first-peak positions taken from the raw (unsmoothed) data.
peak_pos_true = np.array(true_df.iloc[:, 0])
def peak_pos_eval(original, result):
    """Element-wise relative error |original - result| / original."""
    return np.abs(original - result) / original
# +
peak1_eval = peak_pos_eval(np.array(peak_pos_true), np.array(peak_pos))
corrected_peak1_eval = peak_pos_eval(np.array(peak_pos_true), np.array(outlier_interp))
plt.figure()
plt.xlabel('Time-slice', fontsize = 20, fontweight = 'bold')
plt.ylabel('Relative error', fontsize = 20, fontweight = 'bold')
plt.xticks(fontsize = 18, fontweight = 'bold')
plt.yticks(fontsize = 18, fontweight = 'bold')
plt.plot(datatime, peak1_eval, 'o', markersize = 3, label = 'before')
plt.plot(datatime, corrected_peak1_eval, 'o', markersize = 3, label = 'after')
plt.legend()
# +
# Second peak's index trace: outlier removal with a wider window (50).
peak_pos_2 = np.array(peak_idx_df.iloc[:, 1])
outlier_interp_2 = id_outliers_replacewith_interp(datatime, peak_pos_2, 1, 50)
# Bug fix: `plt.figure` (no parentheses) only referenced the function and
# never created a figure, so this plot was drawn into the previous figure.
plt.figure()
plt.plot(datatime, peak_pos_2, '-o', markersize = 2, label = 'output from peak-finding')
plt.plot(datatime, outlier_interp_2, '-o', markersize = 2, label = 'removing outlier by method 2')
#plt.plot(datatime, peak_pos_true, '-o', markersize = 2, label = 'true peak position')
plt.legend()
# -
# Write the corrected traces back into the peak-index dataframe
peak_idx_df.iloc[:, 0] = outlier_interp
peak_idx_df.iloc[:, 1] = outlier_interp_2
# Build an (N x 4) feature table [position, height, fwhm, time-slice index]
# by stacking the three peak components on top of each other.
# NOTE(review): 500 time slices are hard-coded here — confirm against the data.
first_comp = np.array([[peak_idx_df.loc[i][0], peak_height_df.loc[i][0], peak_fwhm_df.loc[i][0], i] for i in range(500)])
sec_comp = np.append(first_comp, [[peak_idx_df.loc[i][1], peak_height_df.loc[i][1], peak_fwhm_df.loc[i][1], i] for i in range(500)], axis=0)
all_points = np.append(sec_comp, [[peak_idx_df.loc[i][2], peak_height_df.loc[i][2], peak_fwhm_df.loc[i][2], i] for i in range(500)], axis=0)
np.shape(all_points)
all_points_df = pd.DataFrame(all_points, columns=['Position', 'Height', 'Width', 'Time'])
all_points_df.describe()
all_points_df
# Replace NaNs (missing peaks) with zeros before clustering
corrected_output = all_points_df.fillna(value=0)
all_points_df.describe()
corrected_output.describe()
corrected_output.iloc[:,:-1]
# k-means with 3 clusters on (position, height, width); time is excluded
cluster = KMeans(n_clusters=3).fit(corrected_output.iloc[:,:-1])
cluster.labels_
# +
# 3-D scatter of the clustered peak features (position, height, width),
# coloured by k-means cluster label.
# NOTE(review): Axes3D is not imported anywhere in this chunk — presumably
# `from mpl_toolkits.mplot3d import Axes3D` runs earlier in the notebook; confirm.
fignum = 1
fig = plt.figure(fignum, figsize=(7, 6))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=50, azim=123)
labels = cluster.labels_
# Bug fix: the `np.float` alias was removed in NumPy 1.24; the builtin
# `float` is the documented replacement (it is what np.float aliased).
# Also dropped a stray no-op `cluster` expression that was here.
ax.scatter(all_points[:, 0], all_points[:, 1], all_points[:, 2],
           c=labels.astype(float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Position', fontsize = 20, fontweight = 'bold')
ax.set_ylabel('Height', fontsize = 20, fontweight = 'bold')
ax.set_zlabel('Width', fontsize = 20, fontweight = 'bold')
ax.dist = 12
# +
# Assign every row of the feature table to one of the three k-means
# clusters, keeping only rows with non-negligible signal (|height| >= 0.001).
# NOTE(review): 1500 = 3 components x 500 time slices is hard-coded — keep in
# sync with the table construction above.
peak1_list = []
peak2_list = []
peak3_list = []
for i in range(1500):
    peak = cluster.predict([corrected_output.iloc[i,:-1]])
    signal = corrected_output.iloc[i][1]
    if ( peak == 0 and (signal >= 0.001 or signal <= -0.001)):
        peak1_list.append(corrected_output.iloc[i])
    elif ( peak == 1 and (signal >= 0.001 or signal <= -0.001)):
        peak2_list.append(corrected_output.iloc[i])
    elif ( peak == 2 and (signal >= 0.001 or signal <= -0.001)):
        peak3_list.append(corrected_output.iloc[i])
    else:
        pass
# +
# One dataframe per cluster; keep at most one row per time slice.
peak1_unfilt = pd.DataFrame(peak1_list, columns=['Position', 'Height', 'Width', 'Time'])
peak1 = peak1_unfilt.drop_duplicates(subset='Time')
peak2_unfilt = pd.DataFrame(peak2_list, columns=['Position', 'Height', 'Width', 'Time'])
peak2 = peak2_unfilt.drop_duplicates(subset='Time')
peak3_unfilt = pd.DataFrame(peak3_list, columns=['Position', 'Height', 'Width', 'Time'])
peak3 = peak3_unfilt.drop_duplicates(subset='Time')
# -
peak1.describe()
peak2.describe()
peak3.describe()
# Kinetics: peak height vs time for clusters 1 and 3 (cluster 2 disabled)
plt.figure(dpi = 300)
plt.plot(peak1['Time'], peak1['Height'], 'o', markersize = 3, label = 'Peak 1')
#plt.plot(peak2['Time'], peak2['Height'], 'o', markersize = 2, label = 'Peak 2')
plt.plot(peak3['Time'], peak3['Height'], 'o', markersize = 3, label = 'Peak 2')
#plt.title('Kinetics of Identified Peaks')
plt.xlabel('Time', fontsize = 20, fontweight = 'bold')
plt.ylabel('Intensity (a.u.)', fontsize = 20, fontweight = 'bold')
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend()
plt.show()
# Spectral shift: peak position vs time for the same clusters
plt.figure(dpi = 300)
plt.plot(peak1['Time'], peak1['Position'], 'o', markersize = 3, label = 'Peak 1')
#plt.plot(peak2['Time'], peak2['Position'], 'o', markersize = 2, label = 'Peak 2')
plt.plot(peak3['Time'], peak3['Position'], 'o', markersize = 3, label = 'Peak 2')
#plt.title('Shift of Identified Peaks')
plt.xlabel('Time', fontsize = 20, fontweight = 'bold')
plt.ylabel('Position', fontsize = 20, fontweight = 'bold')
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend()
plt.show()
# +
def singleexpfunc(t, params):
    """Single-exponential decay model.

    Evaluates a * exp(-t / tau), where params = [a, tau].
    *t* may be a scalar or a NumPy array.
    """
    amplitude, tau = params[0], params[1]
    return amplitude * np.exp((-1.0 / tau) * t)
def fit_single_exp_diffev(t, data, bounds):
    """Fit a single-exponential decay a*exp(-t/tau) to *data* vs *t*.

    Uses SciPy's differential evolution (a global optimiser) to minimise
    the sum of squared residuals, prints the best-fit parameters and R^2,
    and plots the data with the fit on linear and log-x axes.

    Parameters
    ----------
    t : array-like
        Time axis (ps).
    data : array-like
        Decay trace to fit (same length as *t*).
    bounds : sequence of (low, high) pairs
        Search bounds for [a, tau].

    Returns
    -------
    tuple
        (bestfit_params, bestfit_model, data_array, time_array)
    """
    # Robustness fix: differential_evolution has no visible import in this
    # notebook; importing locally guarantees it resolves and is harmless if
    # the notebook's import cell already brought it in.
    from scipy.optimize import differential_evolution
    time_array = t
    data_array = data
    def fit(params):
        # sum-of-squares cost between the model and the data
        decaymodel = singleexpfunc(time_array, params[:])
        cost = np.sum(((data_array - decaymodel) ** 2.0))
        return cost
    bestfit = differential_evolution(fit, bounds = bounds, polish = True)
    bestfit_params = bestfit.x
    def bestfit_decay(params):
        decaymodel = singleexpfunc(time_array, params[:])
        return decaymodel
    bestfit_model = bestfit_decay(bestfit_params)
    # goodness of fit (coefficient of determination)
    ss_res = np.sum((data_array - bestfit_model) ** 2.0)
    ss_tot = np.sum((data_array - np.mean(data_array)) ** 2.0)
    rsquare = 1 - (ss_res / ss_tot)
    print ('a = %.5f \ntau = %.5f ps \nR-square = %.5f' %(bestfit_params[0], bestfit_params[1], rsquare))
    # linear-axis plot of the data and the fit
    plt.figure(dpi=300)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.ylabel('Intensity', fontsize = 20, fontweight = 'bold')
    plt.xlabel('Time (ps)', fontsize = 20, fontweight = 'bold')
    plt.plot(time_array[:-1], data_array[:-1], 'o', color = 'b', label = 'Data')
    plt.plot(time_array[:-1], bestfit_model[:-1], color = 'r', linewidth = 4, label = 'Monoexponential')
    # NOTE(review): this annotation text and its position are hard-coded and
    # do not reflect the fitted tau — confirm whether it should be dynamic.
    plt.text(200, 0.15, 'tau = 200 ps', fontsize = 20)
    plt.legend(loc = 'best')
    # log-x plot of the same data and fit
    plt.figure()
    #plt.xlim(0, 200)
    plt.ylabel('Intensity')
    plt.xlabel('Time (ps)')
    plt.xscale('log')
    plt.plot(time_array, data_array, 'o', color = 'b', label = 'Data')
    plt.plot(time_array, bestfit_model, color = 'r', label = 'single exp fit')
    plt.legend(loc = 'best')
    return bestfit_params, bestfit_model, data_array, time_array
# +
# Fit the first peak's kinetics (height vs time) with a single exponential.
"""load TA data"""
#experiment name
experiment = ''
times, decaytrace = peak1['Time'], peak1['Height']
"""exponential decay parameters"""
# search bounds for amplitude a and lifetime tau (beta is only used by the
# commented-out stretched-exponential fit)
a1_bounds = (0, 2)
tau1_bounds = (0, 1000)
beta1_bounds = (0,1)
sing_expdec_bounds = [a1_bounds, tau1_bounds]
exp_stret_bounds = [a1_bounds, tau1_bounds, beta1_bounds]
"""fit data"""
fit_data_sing_expdec = fit_single_exp_diffev(times, decaytrace, sing_expdec_bounds)
#fit_data_exp_stretch = fit_exp_stretch_diffev(times, decaytrace, exp_stret_bounds)
# +
# Same fit repeated with a 10x wider tau search range (0-10000 ps).
"""load TA data"""
#experiment name
experiment = ''
times, decaytrace = peak1['Time'], peak1['Height']
"""exponential decay parameters"""
a1_bounds = (0, 2)
tau1_bounds = (0, 10000)
beta1_bounds = (0,1)
sing_expdec_bounds = [a1_bounds, tau1_bounds]
exp_stret_bounds = [a1_bounds, tau1_bounds, beta1_bounds]
"""fit data"""
fit_data_sing_expdec = fit_single_exp_diffev(times, decaytrace, sing_expdec_bounds)
#fit_data_exp_stretch = fit_exp_stretch_diffev(times, decaytrace, exp_stret_bounds)
# -
|
code_dev/Peak-Info/Get_Peak_and_Classify.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pruebas de bondad y ajuste
# # Introducción
#
# En las sesiones previas, hemos considerado diferentes familias de modelos que se puede usar como ingrediente clave para construir un simulador Monte Carlo. Aquí consideramos el lado cuantitativo de la moneda: después de *seleccionar una estructura de modelo*, **¿cómo debemos estimar sus parámetros?** Por lo tanto, entramos en el dominio de estadística inferencial, mediante la cual, dada una muestra de datos observados, participamos en tareas cada vez más difíciles:
# - Encontrar estimaciones de puntos e intervalos de momentos básicos como el valor esperado y varianza *(Pruebas de hipótesis e intervalos de confianza)*.
# - Estimar los parámetros de una distribución de probabilidad posiblemente complicada
# - Estimar los parámetros de un modelo de serie temporal
#
# ## Estadística inferencial básica
#
# ### 1. Intervalos de confianza
#
# La mayoría de nosotros conoce la estadística inferencial mediante el cálculo de un intervalo de confianza para un valor esperado $\mu$. Este concepto es relevante en ambos extremos de una simulación Monte Carlo, ya que se utiliza para definir las variables aleatorias de entrada, así como para analizar la salida. Dada una muestra $X_i, i = 1,\dots, n$ de variables aleatorias i.i.d. (independientes e idénticamente distribuidas), el procedimiento es el siguiente:
# 1. Calcular la media y varianza de la muestra
# $$ \bar X ={1\over n}\sum_{i=1}^nX_i,\qquad S^2={1\over n-1}\sum_{i=1}^n (X_i-\bar X )^2$$
# 2. Escoger un nivel de confianza $(1-\alpha)$ y tomar el correspondiente cuantil $t_{n-1,1-\alpha/2}$ de la distribución $t$ con $n-1$ grados de libertad.
# 3. Calcular el intervalo de confianza
# $$\bar X \pm t_{n-1,1-\alpha/2}{S\over\sqrt{n}}$$
# Este procedimiento es tan fácil de realizar que uno tiende a olvidar que se basa en algunas suposiciones importantes. Las siguientes observaciones están en orden:
# - Estrictamente hablando, el procedimiento anterior es correcto solo para variables aleatorias normales. De hecho, si las variables $X_i \sim N (\mu, \sigma^2)$ son independientes, entonces es cierto que la siguiente estadística estandarizada es normal:
# $$Z={\bar X -\mu \over \sigma/\sqrt{n}}\sim N\bigg(\mu,{\sigma^2\over n}\bigg)$$
#
# Si nosotros reemplazamos $\sigma$ por su contraparte $S$, encontramos una distribución *$t$ student*
# $$T={\bar X -\mu \over S/\sqrt{n}}\sim t_{n-1}, \qquad \qquad (1) $$
# El cual implica
# $$P\{-t_{n-1,1-\alpha/2}\leq T \leq t_{n-1,1-\alpha/2}\} = 1-\alpha$$
#
# Al reordenar esta relación obtenemos el intervalo de confianza dado anteriormente. Una gran parte de las estadísticas inferenciales se basa en resultados de distribución similares. Si aplicamos el procedimiento a una **distribución diferente**, lo que encontramos es, en el mejor de los casos, una buena aproximación para una muestra **adecuadamente grande**; con una muestra pequeña y una distribución sesgada, deberíamos repetir el ejercicio para las características específicas de esa distribución.
#
# - También es muy importante enfatizar el rol de la independencia. Es la independencia en la muestra lo que nos permite escribir
# $$Var(\bar X)={\sigma^2 \over n}$$
#
# - Al analizar el resultado de una simulación Monte Carlo, el tamaño de la muestra suele ser bastante grande. Por lo tanto, generalmente reemplazamos los cuantiles de la distribución $t$ con los cuantiles $z_{1-\alpha/2}$ de la distribución normal estándar. Sin embargo, esto no necesita aplicarse al análisis de datos de entrada.
import numpy as np
import scipy.stats as st # Librería estadística
import matplotlib.pyplot as plt
# Plot: Student's t pdf (df = 4) against the standard normal pdf, to show
# the heavier tails of the t distribution for a small sample.
# # %matplotlib inline
dat = np.arange(-4,4,.1)  # evaluation grid over roughly +/-4 standard deviations
y = st.t.pdf(dat,df=5-1)  # t pdf for a sample of size 5 (df = n - 1)
y1=st.norm.pdf(dat)
plt.plot(dat,y,label='t-student 5-1 df')
plt.plot(dat,y1,label='Normal')
plt.legend()
plt.grid()
plt.show()
# # Intervalos de confianza en python
#
# Utilizando la librería estadística `scipy.stats` y la función `t.interval(confianza, len(a)-1, loc=np.mean(a), scale=st.sem(a))`, se puede calcular un intervalo de confianza con un nivel de confianza definido en `confianza`, para un vector de entrada `a`. Se pueden construir intervalos con los cuantiles de la normal estándar usando `st.norm.interval(confianza, loc=np.mean(a), scale=st.sem(a))`
# Confidence-interval demo: draw a normal sample and compare the t-based
# and normal-based 95% confidence intervals for the mean.
np.random.seed(55555)  # fixed seed so the printed intervals are reproducible
media = 10; sd = 20; N = 100
confianza = 0.95
X = np.random.normal(media,sd,N)
# Fix: removed a stray bare `st.sem` expression that evaluated the function
# object without calling it (a no-op leftover).
# st.sem(X) is the standard error of the mean, sd(X, ddof=1)/sqrt(N).
i1 = st.t.interval(confianza,len(X)-1, loc=np.mean(X), scale=st.sem(X))
i2 = st.norm.interval(confianza, loc=np.mean(X), scale=st.sem(X))
print('Con una confianza de %2.2f la media estará en el intervalo t,\n %s' %(confianza,i1))
print('Con una confianza de %2.2f la media estará en el intervalo normal,\n %s' %(confianza,i2))
print('Media calculada' ,np.mean(X))
# ## 2. Pruebas de hipótesis
#
# La prueba de hipótesis básica que uno puede desear ejecutar se refiere al valor esperado:
# - Probamos la hipótesis nula Ho: $\mu = \mu_0$, para un $\mu_0$ dado,
# - contra la hipótesis alternativa Ha: $\mu\neq \mu_0$
#
# En el caso normal, confiamos en el resultado de distribución de Eq. (1), donde el el valor esperado desconocido $\mu$ se reemplaza por el valor hipotético $\mu_0$. Esto muestra que, si la hipótesis nula es verdadera, entonces
#
# $$P\bigg(-t_{n-1,1-\alpha/2}\leq {\bar X-\mu_0\over S/\sqrt n} \leq t_{n-1,1-\alpha/2}\bigg) = 1-\alpha$$
#
# En otras palabras, todo se reduce a analizar el estadístico de prueba estandarizado
# $$T={\bar X-\mu_0\over S/\sqrt n}$$
#
# si la hipótesis nula es verdadera, tiene una distribución de t Student con n - 1 grados de libertad, y debe estar dentro de los límites correspondientes a los cuantiles. Si T cae fuera de ese intervalo, hay dos posibles explicaciones: puede ser simplemente mala suerte, o tal vez la hipótesis nula es incorrecta. No podemos estar seguros de ninguno de los dos, y podemos cometer dos tipos de error: **podemos rechazar una hipótesis verdadera, o podemos aceptar una falsa**. El enfoque elemental es conservador y mantiene la probabilidad de rechazar una verdadera hipótesis nula bajo control. Por lo tanto, formamos una región de rechazo que consta de dos colas
#
# $$RJ = \{t:t<-t_{n-1,1-\alpha/2}\}U\{t:t>t_{n-1,1-\alpha/2}\}$$
#
# Se rechaza la hipótesis nula si el estadístico $T\in RJ$. Acá $\alpha$ juega un papel de el nivel de significancia o mejor dicho, la probabilidad de rechazar la hipótesis nula si esta es cierta. El valor típico para este nivel de significancia es tomarlo en 5%.
#
# $$p-value= P(T)=P\bigg({\bar X -\mu_0 \over S/\sqrt n}\bigg)\sim t_{n-1}$$
#
# La prueba mide si el puntaje promedio difiere significativamente entre las muestras. Si observamos un **valor de p grande**, por ejemplo mayor que 0.05 o 0.1, entonces **no podemos rechazar la hipótesis nula**. Si el valor p es menor que el umbral, p. 1%, 5% o 10%, luego rechazamos la hipótesis nula. Los pequeños valores p se asocian con grandes t-estadísticas.
# +
# One-sample t-test of H0: mu == media2 on the sample X from the cell above.
media2 = 20
ho = st.ttest_1samp(X,media2)
print('La prueba de hipótesis arroja como resultado\n',ho)
# Hand-computed test statistic, for comparison with ttest_1samp.
# Bug fix: the t statistic uses the *sample* standard deviation (ddof=1),
# which is what scipy's ttest_1samp uses; np.std defaults to ddof=0, so the
# hand computation disagreed slightly with the library result.
t = (np.mean(X)-media2)/(np.std(X, ddof=1)/np.sqrt(N))
print('Cálculo del estadístico de prueba teórico=',t)
# Reference plot: t pdf for the sample's degrees of freedom vs the normal pdf
# # %matplotlib inline
dat = np.arange(-4,4,.1)
y = st.t.pdf(dat,df=N-1)
y1=st.norm.pdf(dat)
plt.plot(dat,y,label='t-student %d df' %(N-1))
plt.plot(dat,y1,label='Normal')
plt.legend()
plt.show()
# -
# > ### Mostrar el efecto de la cantidad de muestras en las pruebas de hipótesis
# # Pruebas de bondad y ajuste
#
#
# 
# 
# # Uso
# - La prueba de chi-cuadrada nos permite probar si más de dos proporciones de población pueden ser consideradas iguales
# - Además, si calificamos una población en diferentes categorías respecto a dos atributos (ej, edad y desempeño laboral), entonces podemos utilizarla para determinar si los dos atributos son independientes entre sí
#
# # chi-cuadrada como prueba de independencia
# Suponga que en 4 regiones de una compañía de salud muestra la actitud de los empleados respecto a la evaluación del desempeño en el trabajo. Los trabajadores eligen el método actual (2 por año) y el nuevo método (1 Trimestral)
# 
# - La tabla anterior ilustra la respuesta a esta pregunta
# - Las 4 columnas proporcionan una base de clasificación – regiones gráficas
# - Los 2 renglones clasifican la información de otra manera: preferencia por los métodos de evaluación
# - Tabla de contingencia de 2X4
# 
# 
# # Determinación de frecuencias esperadas
#
# - Si el valor 0.6643 estima la proporción de población esperada que prefiere el método presente de evaluación, entonces 0.3357 es la estimación de la proporción esperada de la población que prefiere el nuevo método de evaluación, se puede estimar el número de empleados de la muestra de cada región que podríamos esperar que prefieran cada uno de los métodos de evaluación
# 
# # Chi-cuadrada Razonamiento intuitivo
#
# - En las tablas anteriores se ilustra las proporciones y frecuencias real y la teórica
# - Para probar Ho se deben comparar las frecuencias que se observaron con las frecuencias que se esperaría si Ho=V
# - Si los conjuntos de frecuencias observadas y esperadas son casi iguales, se puede razonar intuitivamente que la Ho=v
# - Si existe diferencias grandes entre estas frecuencias, podemos rechazar la Ho
#
# 
# 
# 
# # Interpretación del cálculo
# $$ \chi^2 = \sum \frac{(f_0-f_e)^2}{f_e}=2.764$$
#
# - Esta es el valor de chi-cuadrada en el problema de comparación de las preferencias de métodos de evaluación. Si este fuera muy grande, digamos 20, indicaría una diferencia sustancial entre valores reales y esperados.
# - Una chi-cuadra=0 significa observancias = valores reales
# - chi-cuadrada nunca puede ser negativa
#
# Plot the chi-squared pdf for several degrees of freedom.
df = [2,4,6,10]
x = np.linspace(0,20,100)
# Fix: build a plain 2-D ndarray (rows = df values, columns = x grid) with a
# comprehension instead of the deprecated np.matrix plus a lambda whose
# parameter shadowed the outer `df` list.
y = np.array([st.chi2.pdf(x, k) for k in df])
plt.plot(y.T)
plt.legend(df)
plt.title('Función chi cuadrado')
plt.show()
# # Descripción de distribución chi-cuadrada
#
# - Si Ho=V, entonces la distribución de chi-cuadrada puede aproximarse bastante bien a una curva continua conocida con el mismo nombre(DJC)
# - Existe una DJC por cada grado de libertad
# - Para un número pequeño de GDL, la DJC está seriamente sesgada a la derecha
# - Al aumentar GDL se hace simétrica
# - Si GDL es grande se asemeja a DNormal
#
# # Determinación de los GDL
#
# - GDL=(núm. Renglones – 1) (núm. Columnas - 1)
# - En el ejercicio anterior GDL=(r-1)(c-1)=(2-1)(4-1)=3
#
# - Si seleccionamos una nivel de significancia = 0.10
# - Se busca en columna 0.10 y 3GDL entonces
# Estadístico de chi=6.251, la región de aceptación de Ho va de la cola izquierda de la curva al valor chi-cuadrada
# - El valor calculado de chi-cuadrada de la muestra es de $\chi_{stat}^2=2.764$ y cae dentro de la región de aceptación. Por lo tanto se acepta la Ho.
# - P-value es la probabilidad de que un estadístico chi-cuadrado con GDL grados de libertad sea más extrema que $\chi_{stat}^2$, es decir
# $$P-value = P(\chi^2>\chi_{stat}^2)$$
# # Consideraciones con la utilización de datos recolectados
# - Si el valor de chi-cuadrada fuera cero, tendríamos que ser cuidadosos al preguntar si no existe absolutamente ninguna diferencia entre las frecuencias observadas y las esperadas
# - Es vital revisar si el sistema de muestreo fue el adecuado para validar o rechazar la Ho específica que se esta tratando
# # Chi cuadrado en python
# +
#### Chi-squared goodness-of-fit test for the worked example above
F_obse = [68,75,57,79,32,45,33,31]  # observed frequencies (2x4 table, flattened)
F_espe = [66.43,79.72,59.79,73.07,33.57,40.28,30.21,36.93]  # expected under H0
# NOTE(review): with 8 categories scipy uses dof = 8 - 1 - ddof = 4 here,
# while the 2x4 contingency analysis above uses (r-1)(c-1) = 3 dof — confirm ddof.
x2 = st.chisquare(F_obse,F_espe,ddof=3)
print('Valor de chi cuadrado = ',list(x2)[0],',p-value de la prueba=',list(x2)[1])
# Critical value at the 10% significance level with 3 degrees of freedom
Ji = st.chi2.ppf(q = 0.9,df=3)
print('Estadístico de Ji = ',Ji)
# st.t.interval
# -
# CDF of the observed statistic at 3 dof (1 minus this is the p-value)
x1 = st.chi2.cdf(list(x2)[0],df=3)
x1
# ## Interpretación de resultados
#
# Si los resultados de la muestra son poco probables, dada la hipótesis nula, el investigador rechaza la hipótesis nula. Por lo general, esto implica comparar el valor P con el nivel de significancia y **rechazar la hipótesis nula cuando el valor P es menor que el nivel de significancia.**
# # Consideraciones de prueba chi cuadrado
#
# ## - Tamaños de muestras grandes
# Para evitar incurrir en inferencias incorrectas de la prueba de Ho de chi-cuadrada, si la regla general de que una frecuencia esperada de menos de 5 en una celda de una tabla de contingencia, se considera **demasiado pequeña para utilizarse**
#
# ## - Utilización de datos recolectados
# - Si el valor de chi-cuadrada fuera cero, tendríamos que ser cuidadosos al preguntar si no existe absolutamente ninguna diferencia entre las frecuencias observadas y las esperadas
# - Es vital revisar si el sistema de muestreo fue el adecuado para validar o rechazar la Ho específica que se esta tratando
#
# # Ejercicio
# Para ver si la venta de chips de silicio son independientes del punto del ciclo de negocios en que se encuentre la economía del país se han recogido las ventas semanales de una empresa y datos acerca de la economía del país, y se reportan los siguientes resultados:
# 
#
# Realice la prueba de chi-cuadrado para validar la independencia del punto del ciclo de negocios en que se encuentre la economía.
# > Ver en este enlace la forma de probar independencia: https://stattrek.com/chi-square-test/independence.aspx?Tutorial=AP
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
|
TEMA-2/Clase15_PruebasDeBondadYAjuste.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import nltk
import random
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
import numpy as np
import pandas as pd
import heapq
import numpy as np
from collections import Counter
from random import sample
# -
# Load the cleaned podcast metadata and collapse all text fields into a
# single blob per podcast for downstream NLP.
podcasts_df = pd.read_pickle('../pickle_files/english_podcasts_detailed_cleaned.pkl')
podcasts_df['text'] = podcasts_df[['title', 'producer', 'genre', 'description', 'episode_titles', 'episode_descriptions']].apply(lambda x: ' '.join(x), axis=1)
# Drop the columns no longer needed and assign a sequential integer ID
podcasts_df = podcasts_df.drop(columns=['genre', 'description', 'num_episodes', 'rating', 'num_reviews', 'link', 'episode_titles', 'episode_descriptions'])
podcasts_df['ID'] = list(range(podcasts_df.shape[0]))
# +
# create list of stop words
stop = get_stop_words('en')
# remove non-alphanumeric, non-space
stop = [re.sub(r'([^\s\w]|_)+', '', x) for x in stop]
# add in custom stop words: weekday/month names and podcast boilerplate
days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
months = ['january', 'february', 'march', 'april', 'may', 'june',
          'july', 'august', 'september', 'october', 'november', 'december']
other = ['nan', 'podcast', 'podcasts', 'every', 'new', 'weekly',
         'stories', 'story', 'episode', 'episodes', 'listen',
         'host', 'hosted', 'join']
# Idiom fix: extend the list in place instead of list comprehensions used
# only for their side effects (the entries are already strings).
stop.extend(days)
stop.extend(months)
stop.extend(other)
def topKFrequent(tokenized_text, k):
    """Return the k most frequent tokens in *tokenized_text*.

    Ties are broken by first appearance in the text (Counter insertion
    order), matching the previous heapq.nlargest behaviour.
    """
    # Idiom: Counter.most_common wraps the same heapq.nlargest call the
    # original made by hand, returning (token, count) pairs.
    count = Counter(tokenized_text)
    return [token for token, _ in count.most_common(k)]
def remove_stop(text, stop):
    """Return the tokens of *text* with every word in *stop* removed."""
    # (A previous experiment also filtered each document's 5 most frequent
    # tokens; kept here for reference.)
    # top5 = topKFrequent(text, 5)
    # custom_stop = custom_stop + top5
    return [token for token in text if token not in stop]
# create tokenizer: keeps runs of word characters, dropping punctuation
tokenizer = RegexpTokenizer(r'\w+')
# create stemmer and lemmatizer
# NOTE(review): only the lemmatizer appears to be used by preprocess_text
# in this chunk; the Porter stemmer may be dead weight — confirm.
p_stemmer = PorterStemmer()
l_stemmer = WordNetLemmatizer()
def stem_list(text, p_stemmer):
    """Apply *p_stemmer* to every token in *text*; return the stemmed list."""
    return [p_stemmer.stem(token) for token in text]
def lem_list(text, l_stemmer):
    """Apply *l_stemmer* to every token in *text*; return the lemmatized list."""
    return [l_stemmer.lemmatize(token) for token in text]
def preprocess_text(text):
    """Normalise a raw text blob for topic modelling.

    Pipeline: drop tokens that mix letters and digits, crude URL removal
    by common TLDs, punctuation removal, lowercase tokenisation,
    stop-word removal, lemmatisation, and a final pass dropping any
    remaining http/www fragments. Returns the cleaned tokens joined back
    into one space-separated string.

    Relies on the module-level `tokenizer`, `stop`, and `l_stemmer`.
    """
    # remove words containing at least one digit (e.g. "ep12", "2020s")
    text = re.sub(r"""(?x) # verbose regex
                \b # Start of word
                (?= # Look ahead to ensure that this word contains...
                    \w* # (after any number of alphanumeric characters)
                    \d # ...at least one digit.
                ) # End of lookahead
                \w+ # Match the alphanumeric word
                \s* # Match any following whitespace""",
                "", text)
    # crude URL removal by TLD (http/www remnants are filtered below)
    text = re.sub(r'\s([\S]*.com[\S]*)\b', '', text)
    text = re.sub(r'\s([\S]*.org[\S]*)\b', '', text)
    text = re.sub(r'\s([\S]*.net[\S]*)\b', '', text)
    text = re.sub(r'\s([\S]*.edu[\S]*)\b', '', text)
    text = re.sub(r'\s([\S]*.gov[\S]*)\b', '', text)
    # strip remaining punctuation and underscores
    text = re.sub(r'([^\s\w]|_)+', '', text)
    # lowercase and tokenize
    tokens = tokenizer.tokenize(text.lower())
    # stop-word removal, then lemmatisation
    tokens = remove_stop(tokens, stop)
    tokens = lem_list(tokens, l_stemmer)
    # drop any token that still contains an http/www fragment
    kept = [token for token in tokens
            if not re.search(r'http', token) and not re.search(r'www', token)]
    return ' '.join(kept)
# -
# Clean every podcast's text blob and drop rows that end up empty
podcasts_df['text'] = podcasts_df['text'].map(preprocess_text)
podcasts_df = podcasts_df[podcasts_df.text != '']
podcasts_df.head()
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from matplotlib import pyplot
# +
class MyTokenizer:
    """Minimal sklearn-style tokenizer: sentence-split, then word-tokenize."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Stateless; nothing to learn.
        return self

    def transform(self, X):
        """Tokenize each document into a NumPy array of word tokens."""
        tokenized = [
            np.array([token
                      for sentence in nltk.sent_tokenize(document)
                      for token in nltk.word_tokenize(sentence)])
            for document in X
        ]
        return np.array(tokenized)

    def fit_transform(self, X, y=None):
        return self.transform(X)
class MeanEmbeddingVectorizer(object):
    """Average word2vec embeddings over each document's tokens.

    Documents with no in-vocabulary token map to a zero vector with the
    same dimensionality as the embeddings.
    """

    def __init__(self, word2vec):
        self.word2vec = word2vec
        # Embedding dimensionality, used for the zero-vector fallback.
        self.dim = len(word2vec.wv.syn0[0])

    def fit(self, X, y=None):
        # Stateless; the word2vec model is already trained.
        return self

    def transform(self, X):
        tokenized_docs = MyTokenizer().fit_transform(X)
        means = []
        for tokens in tokenized_docs:
            vectors = [self.word2vec.wv[token]
                       for token in tokens if token in self.word2vec.wv]
            means.append(np.mean(vectors or [np.zeros(self.dim)], axis=0))
        return np.array(means)

    def fit_transform(self, X, y=None):
        return self.transform(X)
# -
# Train a skip-gram (sg=1) Word2Vec model on the tokenized podcast text
text_list = list(podcasts_df.text)
tokenized_text = [tokenizer.tokenize(i) for i in text_list]
w2v_model = Word2Vec(tokenized_text, sg=1)
# Pick 15 random vocabulary words to seed the similarity visualisation.
# NOTE(review): `sample` is unseeded, so the word list differs on every run.
model_word_list = list(w2v_model.wv.vocab)
sample_word_list = sample(model_word_list, 15)
sample_word_list
# ### Building Visualization
# +
# For each sampled key word, collect its 30 nearest neighbours and their
# embedding vectors (one cluster of points per key word).
# NOTE(review): `w2v_model.most_similar(...)` and `w2v_model[word]` are the
# pre-gensim-4 API; newer gensim requires `w2v_model.wv.most_similar` and
# `w2v_model.wv[word]` — confirm the pinned gensim version.
keys = sample_word_list
embedding_clusters = []
word_clusters = []
for word in keys:
    embeddings = []
    words = []
    for similar_word, _ in w2v_model.most_similar(word, topn=30):
        words.append(similar_word)
        embeddings.append(w2v_model[similar_word])
    embedding_clusters.append(embeddings)
    word_clusters.append(words)
# +
# Project the (n keys x m neighbours x k dims) embeddings to 2-D with t-SNE,
# flattening to (n*m, k) for the fit and reshaping back afterwards.
from sklearn.manifold import TSNE
import numpy as np
embedding_clusters = np.array(embedding_clusters)
n, m, k = embedding_clusters.shape
tsne_model_en_2d = TSNE(perplexity=15, n_components=2, init='pca', n_iter=3500, random_state=32)
embeddings_en_2d = np.array(tsne_model_en_2d.fit_transform(embedding_clusters.reshape(n * m, k))).reshape(n, m, 2)
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def tsne_plot_similar_words(title, labels, embedding_clusters, word_clusters, a, filename=None):
    """Scatter-plot 2-D t-SNE embeddings, one colour per key-word cluster.

    Each cluster's points are annotated with their words; when *filename*
    is given the figure is also saved as a PNG. *a* is the scatter alpha.
    """
    plt.figure(figsize=(16, 9))
    palette = cm.rainbow(np.linspace(0, 1, len(labels)))
    for cluster_label, points, cluster_words, colour in zip(labels, embedding_clusters, word_clusters, palette):
        xs, ys = points[:, 0], points[:, 1]
        plt.scatter(xs, ys, c=colour, alpha=a, label=cluster_label)
        for idx, word in enumerate(cluster_words):
            plt.annotate(word, alpha=0.5, xy=(xs[idx], ys[idx]), xytext=(5, 2),
                         textcoords='offset points', ha='right', va='bottom', size=8)
    plt.legend(loc=4)
    plt.title(title)
    plt.grid(True)
    if filename:
        plt.savefig(filename, format='png', dpi=150, bbox_inches='tight')
    plt.show()
# Render and save the t-SNE neighbourhood plot for the sampled key words
tsne_plot_similar_words('Similar Words from Podcasts', keys, embeddings_en_2d, word_clusters, 0.7,
                        '../images/similar_words.png')
# -
# fit a 2d PCA model to the vectors
# NOTE(review): `w2v_model[w2v_model.wv.vocab]` is the pre-gensim-4 indexing
# API; newer gensim exposes the matrix as `w2v_model.wv.vectors` — confirm.
X = w2v_model[w2v_model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
# (`pyplot` and `plt` are the same module imported under two aliases)
pyplot.figure(figsize=(50, 50))
pyplot.scatter(result[:, 0], result[:, 1], alpha=1)
plt.savefig("hhh.png", format='png', dpi=150, bbox_inches='tight')
pyplot.show()
|
notebooks/w2v_visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kanjulakshmi/understanding-dataset/blob/master/KNN_algo_iris_dataset_anju.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="cAKN3yMM5m5E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="eec20175-0ccc-4934-c16a-61df395e00b7"
import numpy as np
import pandas as pd
# simple graph, bar chart, line , scattered plot, pie, histogram
import matplotlib.pyplot as plt
# complex graph and data distribution, violin, pairplt, heatmap etc
import seaborn as sns
# + id="5A39n6tG59r9" colab_type="code" colab={}
# Load the iris dataset directly from GitHub (requires network access)
iris_data=pd.read_csv('https://raw.githubusercontent.com/kusumikakd/Datasets/master/Datasets/iris.csv')
# + id="p8_ZOW-M6WS7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="35ddca6e-a45e-4e01-e88c-95cfd7c250d7"
iris_data.describe()
# + id="6gkgqhXE6dhq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 152} outputId="23fa4da5-9f7b-4bc1-94c1-d5f0d7c6e95b"
# Fix: this cell was garbled into invalid syntax (likely an OCR/paste error);
# reconstructed from the working version of the same plot later in the notebook.
# NOTE(review): that working cell reads the column as 'sepal width' — confirm
# the actual CSV header ('sepal_width' vs 'sepal width').
plt.scatter(x=iris_data['sepal_length'], y=iris_data['sepal width'], label='sepal width', marker='x', c='red', s=49)
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_width'], label='petal width', marker='^', c='green')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_length'], label='petal_length', marker='o', c='blue')
plt.xlabel("sepal length")
plt.legend(loc=3)
# + id="0XiLBV7c8lWn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="303c7983-604e-40ae-97b9-9139be46761a"
# Fix: invalid syntax throughout this cell (garbled text); reconstructed from
# the working version of the same plot at the end of the notebook.
plt.figure(figsize=(10,10))
sns.set_style('darkgrid')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['sepal width'], label='sepal width', marker='x', c='red', s=49)
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_width'], label='petal width', marker='^', c='green')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_length'], label='petal_length', marker='o', c='blue')
plt.xlabel("sepal length")
plt.legend(loc=3)
# + id="yVAs40Gh8zub" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="b7ec7a94-099e-4e39-e967-da5ee4d1048f"
# Fix: the scatter calls in this cell had invalid syntax (`x-iris data[...]`,
# unbalanced quotes/brackets); repaired to match the working cell below.
plt.figure(figsize=(10,10))
sns.set_style('darkgrid')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['sepal width'], label='sepal width', marker='x', c='red', s=49)
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_width'], label='petal width', marker='^', c='green')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['petal_length'], label='petal_length', marker='o', c='blue')
plt.xlabel("sepal length")
plt.legend(loc=3)
# + id="Kyu1JWBM-64F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 737} outputId="6be16380-57be-413f-aa2e-e2166d3fd626"
# Working version of the three-feature scatter against sepal length.
# NOTE(review): 'sepal width' (with a space) differs from the underscored
# names of the other columns — confirm against the CSV header.
plt.figure(figsize=(10,10))
sns.set_style ('darkgrid')
plt.scatter(x=iris_data['sepal_length'], y=iris_data['sepal width'], label ='sepal width', marker='x', c='red', s=49)
plt.scatter(x=iris_data['sepal_length'],y=iris_data['petal_width'], label='petal width', marker='^',c='green')
plt.scatter(x=iris_data[ 'sepal_length'], y=iris_data['petal_length'], label= 'petal_length', marker='o', c='blue')
plt.xlabel("sepal length")
plt. legend (loc=3)
# + id="9T67zJQv_CwL" colab_type="code" colab={}
|
KNN_algo_iris_dataset_anju.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bite Size Bayes
#
# Copyright 2020 <NAME>
#
# License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Review
#
# In [the previous notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/10_joint.ipynb) I introduced cross tabulation, joint distributions, conditional distributions, and marginal distributions.
#
# In this notebook, we'll apply these ideas to Bayesian inference.
#
# But first I want to introduce a computational tool we will need, outer operations.
# ## Outer operations
#
# Suppose you have two sequences, like `t1` and `t2`:
# Two small sequences used to demonstrate NumPy's outer operations
t1 = [1,3,5]
t2 = [2,4]
# Many useful operations can be expressed in the form of an "outer operation" of these sequences.
#
# The most common outer operation is the outer product, which computes the product of every pair of values, one from each sequence.
#
# For example, here is the outer product of `t1` and `t2`:
a = np.multiply.outer(t1, t2)
a
# We can understand this result more easily if we put it in a DataFrame:
pd.DataFrame(a, index=t1, columns=t2)
# The values from `t1` appear along the rows of the result; the values from `t2` appear along the columns.
#
# Each element of the result is the product of an element from `t1` and an element from `t2`.
#
# The outer sum is similar, except that each element is the *sum* of an element from `t1` and an element from `t2`.
a = np.add.outer(t1, t2)
a
pd.DataFrame(a, index=t1, columns=t2)
# We can do the same thing with almost any operation. For example, the "outer greater than" operation compares each element from `t1` to each element of `t2`; the result is an array of Boolean values.
a = np.greater.outer(t1, t2)
a
pd.DataFrame(a, index=t1, columns=t2)
# These outer operations work with Python lists and tuples, and NumPy arrays, but not Pandas Series.
#
# However, the following function works with Pandas Series, and puts the result into a DataFrame.
def outer_product(s1, s2):
    """Return the outer product of two Series as a DataFrame.

    The first Series indexes the rows; the second indexes the columns.

    s1: Series
    s2: Series

    return: DataFrame
    """
    values = np.multiply.outer(s1.to_numpy(), s2.to_numpy())
    return pd.DataFrame(values, index=s1.index, columns=s2.index)
# It might not be obvious yet why these operations are useful, but we'll see some examples soon.
#
# With that, we are ready to take on a new Bayesian problem.
# ## How tall is A?
#
# Suppose I choose two people from the population of adult males in the U.S.; I'll call them A and B. If we see that A is taller than B, how tall is A?
#
# To answer this question:
#
# 1. I'll use background information about the height of men in the U.S. to form a prior distribution of height,
#
# 2. I'll construct a joint distribution of height for A and B, and update it with the information that A is taller, and
#
# 3. I'll extract from the posterior joint distribution the posterior distribution of height for A.
# In the U.S. the average height of male adults is 178 cm and the standard deviation is 7.7 cm. The distribution is not exactly normal, because nothing in the real world is, but the normal distribution is a pretty good model of the actual distribution, so we can use it as a prior distribution for A and B.
#
# Here's an array of equally-spaced values from roughly 3 standard deviations below the mean to 3 standard deviations above.
# +
mean = 178
std = 7.7
xs = np.arange(mean-24, mean+24, 0.5)
# -
# SciPy provides a function called `norm` that represents a normal distribution with a given mean and standard deviation, and provides `pdf`, which evaluates the probability distribution function (PDF), which we will use as the prior probabilities.
# +
from scipy.stats import norm
ps = norm(mean, std).pdf(xs)
# -
# I'll store the `xs` and `ps` in a Series that represents the prior PMF.
prior = pd.Series(ps, index=xs)
# And normalize it:
prior /= prior.sum()
# And here's what it looks like.
# +
prior.plot()
plt.xlabel('Height in cm')
plt.ylabel('Probability')
plt.title('Distribution of height for men in U.S.');
# -
# We can think of this prior distribution as the marginal distribution for A and B, but what we want is the joint probability of their heights.
# ## Joint distribution
#
# As we saw in the previous notebook, it is not *generally* possible to construct a joint distribution if we only have the marginals, because the marginals don't contain information about correlations between the variables.
#
# However, in the special case where there are no correlations, or they are small enough to ignore, it *is* possible to construct the joint distribution.
# To see how, let's consider one element of the joint distribution,
#
# $P(A_y~\mathrm{and}~B_x)$
#
# which is the probability that `A` is $y$ cm tall and `B` is $x$ cm tall. We can rewrite this conjunction in terms of conditional probability:
#
# $P(A_y)~P(B_x~|~A_y)$
#
# We can compute $P(A_y)$ from the marginal distribution, but how should we compute the conditional probability, $P(B_x~|~A_y)$?
# In this case, the heights of `A` and `B` are "independent", which means that knowing the height of `A` provides no additional information about the height of `B`.
#
# And that means that the conditional probability, $P(B_x~|~A_y)$, is just the marginal probability $P(B_y)$.
#
# Which means that in this case, the joint probability is just the product of the marginal probabilities.
#
# $P(A_y~\mathrm{and}~B_x) = P(A_y)~P(B_x)$
#
# Now, to compute the joint distribution, we have to compute this product for all values of $x$ and $y$. And we can do that by computing the outer product of the marginal distributions, like this:
joint = outer_product(prior, prior)
joint.shape
# If the prior is normalized, the joint prior should also be normalized.
joint.to_numpy().sum()
# The following function uses `pcolormesh` to plot the joint distribution.
#
# Recall that `outer_product` puts the values of `A` along the rows and the values of `B` across the columns.
def plot_joint(joint):
    """Render a joint PMF as a pseudocolor mesh.

    joint: DataFrame representing a joint PMF
    """
    grid = joint.index
    plt.pcolormesh(grid, grid, joint)
    plt.xlabel('B height in cm')
    plt.ylabel('A height in cm')
#
# And here's what the result looks like.
plot_joint(joint)
plt.colorbar()
plt.title('Joint prior distribution of height for A and B');
# As you might expect, the probability is highest near the mean and drops off away from the mean.
#
# Another way to visualize the joint distribution is a contour plot.
def plot_contour(joint):
    """Draw a joint PMF as a contour plot (lines of equal probability).

    joint: DataFrame representing a joint PMF
    """
    grid = joint.index
    plt.contour(grid, grid, joint)
    plt.xlabel('B height in cm')
    plt.ylabel('A height in cm')
plot_contour(joint)
plt.title('Joint prior distribution of height for A and B');
# Each circle represents a level of equal probability.
# ## Likelihood
#
# Now that we have a joint PMF that represents the prior distribution, we can update it with the data, which is that `A` is taller than `B`.
#
# Each element in the joint distribution represents a hypothesis about the heights of `A` and `B`; for example:
#
# * The element `(180, 170)` represents the hypothesis that `A` is 180 cm tall and `B` is 170 cm tall. Under this hypothesis, the probability that `A` is taller than `B` is 1.
#
# * The element `(170, 180)` represents the hypothesis that `A` is 170 cm tall and `B` is 180 cm tall. Under this hypothesis, the probability that `A` is taller than `B` is 0.
#
# To compute the likelihood of every pair of values, we can extract the values from the prior, like this:
Y = prior.index.to_numpy()
X = prior.index.to_numpy()
# And then apply the `outer` version of `np.greater`, which compares every element of `Y` (height of `A`) to every element of `X` (height of `B`).
a = np.greater.outer(Y, X)
# The result is an array, which we can put in a DataFrame with the corresponding `index` and `columns`.
likelihood = pd.DataFrame(a, index=Y, columns=X)
# Here's what it looks like:
plot_joint(likelihood)
plt.title('Likelihood of A>B');
# The likelihood of the data is 1 where `Y>X` and 0 otherwise.
# ## The update
#
# We have a prior, we have a likelihood, and we are ready for the update. As usual, the unnormalized posterior is the product of the prior and the likelihood.
unnorm_posterior = joint * likelihood
# And we can get the normalized posterior by dividing through by the total.
total = unnorm_posterior.to_numpy().sum()
joint_posterior = unnorm_posterior / total
total
# The total probability of the data is a little less than $1/2$.
#
# Here's what the normalized posterior looks like.
# +
plot_joint(joint_posterior)
plt.colorbar()
plt.title('Joint posterior distribution of height for A and B');
# -
# It looks like a sunrise as seen from the deck of a [heeling sailboat](https://en.wikipedia.org/wiki/Sailing#Heeling).
# ## The marginals
#
# From the posterior joint distribution we can extract the posterior marginal distribution of `A` and `B`.
def marginal(joint, axis):
    """Extract a marginal distribution from a joint PMF.

    axis=0 sums down the rows (marginal of the second variable);
    axis=1 sums across the columns (marginal of the first variable).

    joint: DataFrame representing a joint PMF
    axis: int axis to sum along

    returns: Series representing a marginal PMF
    """
    summed = joint.sum(axis=axis)
    return summed
marginal_A = marginal(joint_posterior, axis=1)
marginal_B = marginal(joint_posterior, axis=0)
# Here's what they look like.
# +
prior.plot(label='Prior')
marginal_A.plot(label='Posterior for A')
marginal_B.plot(label='Posterior for B')
plt.xlabel('Height in cm')
plt.ylabel('Probability')
plt.title('Prior and posterior distributions for A and B')
plt.legend();
# -
# As you might expect, the posterior distribution for `A` is shifted to the right and the posterior distribution for `B` is shifted to the left.
#
# We can summarize the results by computing the posterior means:
def pmf_mean(pmf):
    """Return the mean of the distribution represented by a PMF.

    pmf: Series representing a PMF (values are probabilities,
         index holds the corresponding quantities)

    return: float
    """
    weighted_values = pmf.index * pmf
    return np.sum(weighted_values)
pmf_mean(prior)
pmf_mean(marginal_A), pmf_mean(marginal_B)
# Based on the observation that `A` is taller than `B`, we are inclined to believe that `A` is a little taller than average, and `B` is a little shorter.
# Notice that the posterior distributions are a little taller and narrower than the prior. We can quantify that by computing their standard deviations.
def pmf_std(pmf):
    """Return the standard deviation of the distribution of a PMF.

    pmf: Series representing a PMF

    return: float
    """
    centered = pmf.index - pmf_mean(pmf)
    variance = np.sum(centered**2 * pmf)
    return np.sqrt(variance)
pmf_std(prior), pmf_std(marginal_A)
# The standard deviations of the posterior distributions are a little smaller, which means we are a little more certain about the heights of `A` and `B` after we compare them.
# ## Conditional posteriors
#
# Now suppose we measure `B` and find that he is 185 cm tall. What does that tell us about `A`?
#
# We can answer that question by extracting the conditional posterior distribution for `A`, conditioned on `B=185`.
#
# Possible heights for `A` run down the rows of the joint PMF, so each row is an unnormalized posterior distribution conditioned on `A`.
#
# And possible heights for `B` run across the columns, so each column is an unnormalized posterior distribution conditioned on `B`.
#
# So we can condition on `B` by selecting a column and normalizing it.
cond_A = joint_posterior[185].copy()
cond_A /= cond_A.sum()
# +
prior.plot(label='Prior')
marginal_A.plot(label='Posterior for A')
cond_A.plot(label='Posterior for A given B=185', color='C4')
plt.xlabel('Height in cm')
plt.ylabel('Probability')
plt.title('Prior, posterior and conditional distribution for A')
plt.legend();
# -
# The posterior conditional distribution is cut off at 185 cm, because we have established that `A` is taller than `B` and `B` is 185 cm.
#
# And the posterior conditional is substantially different from the unconditional posterior; that is, for each value of $y$
#
# $P(A_y | B_x) \ne P(A_y)$
#
# which means that in the posterior distribution, `A` and `B` are not independent.
# ## Elo rating
#
# [The Elo rating system](https://en.wikipedia.org/wiki/Elo_rating_system) is a way to quantify the skill level of players for games like chess.
#
# It is based on a model of the relationship between the ratings of players and the outcome of a game. Specifically, if $R_A$ is the rating of player $A$ and $R_B$ is the rating of player $B$, the probability that $A$ beats $B$ is given by the [logistic function](https://en.wikipedia.org/wiki/Logistic_function):
#
# $P(A~\mathrm{wins}) = 1 / (1 + 10^{(R_B-R_A)/400})$
#
# The parameters $10$ and $400$ are arbitrary choices that determine the range of the ratings. In chess, values range from 100 to 2800.
#
# Notice that the probability of winning depends only on the difference in rankings. As an example, if $R_A$ exceeds $R_B$ by 100 points, the probability that $A$ wins is
1 / (1 + 10**(-100/400))
# **Exercise:** Suppose `A` has a current rating of 1600, but we are not sure it is accurate. We could describe their true rating with a normal distribution with mean 1600 and standard deviation 100, to indicate our uncertainty.
#
# And suppose `B` has a current rating of 1800, with the same level of uncertainty.
#
# Finally, `A` and `B` play and `A` wins. How should we update their ratings?
#
# To answer this question:
#
# 1. Construct prior distributions for `A` and `B`.
#
# 2. Use them to construct a joint distribution, assuming that the prior distributions are independent.
#
# 3. Use the logistic function above to compute the likelihood of the outcome under each joint hypothesis. Hint: use `np.subtract.outer`.
#
# 4. Use the joint prior and likelihood to compute the joint posterior.
#
# 5. Extract and plot the marginal posteriors for `A` and `B`.
#
# 6. Compute the posterior means for `A` and `B`. How much should their ratings change based on this outcome?
# +
# Solution
xs = np.arange(1300, 2100, 10)
ps = norm(1600, 100).pdf(xs)
prior_A = pd.Series(ps, index=xs)
prior_A /= prior_A.sum()
ps = norm(1800, 100).pdf(xs)
prior_B = pd.Series(ps, index=xs)
prior_B /= prior_B.sum()
# +
# Solution
prior_A.plot(label='Prior for A')
prior_B.plot(label='Prior for B')
plt.xlabel('Elo rating')
plt.ylabel('Probability')
plt.title('Prior distributions for A and B')
plt.legend();
# +
# Solution
joint = outer_product(prior_A, prior_B)
joint.shape
# +
# Solution
plt.pcolormesh(joint.index, joint.index, joint)
plt.ylabel('A rating')
plt.xlabel('B rating');
# +
# Solution
diff = np.subtract.outer(prior_A.index, prior_B.index)
# +
# Solution
likelihood = 1 / (1 + 10**(-diff/400))
plt.pcolormesh(joint.index, joint.index, likelihood)
plt.ylabel('A rating')
plt.xlabel('B rating');
# +
# Solution
joint_posterior = joint * likelihood
joint_posterior /= joint_posterior.to_numpy().sum()
# +
# Solution
plt.pcolormesh(joint.index, joint.index, joint_posterior)
plt.ylabel('A rating')
plt.xlabel('B rating');
# +
# Solution
marginal_A = marginal(joint_posterior, axis=1)
marginal_B = marginal(joint_posterior, axis=0)
# +
# Solution
marginal_A.plot(label='Posterior for A')
marginal_B.plot(label='Posterior for B')
plt.xlabel('Elo rating')
plt.ylabel('Probability')
plt.title('Posterior distributions for A and B')
plt.legend();
# +
# Solution
pmf_mean(marginal_A), pmf_mean(marginal_B)
# +
# Solution
pmf_std(prior_A), pmf_std(marginal_A)
# -
# ## Summary
#
# In this notebook I started with the "outer" operations, like outer product and outer sum; then we used them to construct a joint distribution.
#
# In general, you cannot construct a joint distribution from two marginal distributions, but in the special case where the distributions are independent, you can.
#
# We extended the Bayesian update process we've seen in previous notebooks and applied it to a joint distribution. Then from the posterior joint distribution we extracted posterior marginal distributions and posterior conditional distributions.
#
# As an exercise, you had a chance to apply the same process to a slightly more difficult problem, updating Elo ratings based on the outcome of a chess game.
#
# [In the next notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/12_binomial.ipynb) we'll get back to a problem we left half-finished: the Euro problem.
|
11_faceoff_soln.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ----
# <img src="../../../files/refinitiv.png" width="20%" style="vertical-align: top;">
#
# # Data Library for Python
#
# ----
# + [markdown] tags=[]
# ## Content layer - Estimates - Actuals KPI
# This notebook demonstrates how to retrieve Estimates.
#
# I/B/E/S (Institutional Brokers' Estimate System) delivers a complete suite of Estimates content with a global view and is the largest contributor base in the industry. RDP I/B/E/S Estimates API provides information about consensus and aggregates data(26 generic measures, 23 KPI measures), company guidance data and advanced analytics. With over 40 years of collection experience and extensive quality controls that include thousands of automated error checks and stringent manual analysis, RDP I/B/E/S gives the clients the content they need for superior insight, research and investment decision making.
#
# The I/B/E/S database currently covers over 56,000 companies in 100 markets.
# More than 900 firms contribute data to I/B/E/S, from the largest global houses to regional and local brokers, with US data back to 1976 and international data back to 1987.
# + [markdown] tags=[]
# #### Learn more
#
# To learn more about the Refinitiv Data Library for Python please join the Refinitiv Developer Community. By [registering](https://developers.refinitiv.com/iam/register) and [logging](https://developers.refinitiv.com/content/devportal/en_us/initCookie.html) into the Refinitiv Developer Community portal you will have free access to a number of learning materials like
# [Quick Start guides](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/quick-start),
# [Tutorials](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/learning),
# [Documentation](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/docs)
# and much more.
#
# #### Getting Help and Support
#
# If you have any questions regarding using the API, please post them on
# the [Refinitiv Data Q&A Forum](https://community.developers.refinitiv.com/spaces/321/index.html).
# The Refinitiv Developer Community will be happy to help.
#
# ----
# + [markdown] tags=[]
# ## Set the configuration file location
# For a better ease of use, you have the option to set initialization parameters of the Refinitiv Data Library in the _refinitiv-data.config.json_ configuration file. This file must be located beside your notebook, in your user folder or in a folder defined by the _RD_LIB_CONFIG_PATH_ environment variable. The _RD_LIB_CONFIG_PATH_ environment variable is the option used by this series of examples. The following code sets this environment variable.
# -
import os
os.environ["RD_LIB_CONFIG_PATH"] = "../../../Configuration"
# <div style="color:white;background-color:crimson;">
# <strong>Important note: </strong>
# <br>
# Data retrieved in this example requires a specific Refinitiv Data Platform (RDP) commercial license or a RDP Data Exploration license. If you do not have this license, please contact your account representative.
# </div>
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Some Imports to start with
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
import refinitiv.data as rd
from refinitiv.data.content import estimates
# -
# ## Open the data session
#
# The open_session() function creates and open sessions based on the information contained in the refinitiv-data.config.json configuration file. Please edit this file to set the session type and other parameters required for the session you want to open.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
rd.open_session("platform.rdp")
# -
# ## Retrieve Data
# ### Actuals KPI - Annual
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
response = estimates.view_actuals_kpi.annual.Definition("BNPP.PA").get_data()
response.data.df
# -
# ### Actuals KPI - Interim
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
response = estimates.view_actuals_kpi.interim.Definition("BNPP.PA").get_data()
response.data.df
# -
# ### Close the session
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
rd.close_session()
# -
|
Examples/2-Content/2.11-Estimates/EX-2.11.04-Estimates-ActualsKPI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Interface
# For the C++ interface, see [here](http://www.open3d.org/docs/release/cpp_api.html).
#
# ## Install Open3D Python package
# For installing Open3D Python package, see [here](../../getting_started.rst).
#
# ## Install Open3D from source
# For installing from source, see [here](../../compilation.rst).
# ## Getting started
# This tutorial shows how to import the `open3d` module and use it to load and inspect a point cloud.
import open3d as o3d
# <div class="alert alert-info">
#
# **Note:**
#
# Depending on the environment, the name of the Python library may not be `open3d.so`. Regardless of the file name, `import open3d` should work.
#
# </div>
sample_pcd_data = o3d.data.SamplePointCloudPCD()
pcd = o3d.io.read_point_cloud(sample_pcd_data.path)
print(pcd)
# This imports the `read_point_cloud` function from the `open3d` module. It reads a point cloud file and returns an instance of the `PointCloud` class. `print(pcd)` prints some brief information about the point cloud.
# ## Using built-in help function
#
# ### Browse Open3D
# `help(open3d)` prints a description of the `open3d` module.
help(o3d)
# ### Description of a class in Open3D
# `help(open3d.PointCloud)` provides a description of the `PointCloud` class.
help(o3d.geometry.PointCloud)
# ### Description of a function in Open3D
# `help(open3d.read_point_cloud)` provides a description of the input arguments and return type of the `read_point_cloud` function.
help(o3d.io.read_point_cloud)
|
docs/jupyter/geometry/python_interface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Image processing (Hough line detection)
# +
# -*- coding: UTF-8 -*-
import cv2, math
import numpy as np

DIR = 'samples\\'
# Lines whose slope falls inside this (near-horizontal) angle range are ignored.
ANGLE_RANGE = (-math.pi/6, math.pi/6)

# Image preprocessing: grayscale -> Gaussian blur -> Canny edge map.
img = cv2.imread( DIR + 'pic (1).jpg')
if img is None:
    # cv2.imread returns None (no exception) when the file is missing/unreadable.
    raise FileNotFoundError(DIR + 'pic (1).jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
edges = cv2.Canny(blur,80,150,apertureSize = 3)

# Probabilistic Hough transform; returns None when no line is detected.
lines = cv2.HoughLinesP(edges,1,np.pi/180,threshold = 100, minLineLength = 10,maxLineGap = 100)
if lines is not None:
    for line in lines:
        for x1,y1,x2,y2 in line:
            # Skip near-horizontal lines; perfectly vertical lines (x1 == x2) are always kept.
            if abs(x1 - x2) != 0:
                slope = (y1 - y2) / (x1 - x2)
                if slope > math.tan(ANGLE_RANGE[0]) and slope < math.tan(ANGLE_RANGE[1]):
                    continue
            cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)

# Shrink tall images proportionally so the preview window fits on screen.
height, width, channels = img.shape
if height > 600:
    ratio = 600 / height
    img = cv2.resize(img, (0,0), fx=ratio, fy=ratio)
cv2.imshow('houghlines5.jpg',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# ### 图片高度等比缩小至指定值 默认为600pixel
# +
# -*- coding: UTF-8 -*-
import cv2
from os import listdir
from os.path import isfile, join
mypath = r'Images\004(crop)'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for file in onlyfiles:
img = cv2.imread(mypath + '\\' + file)
img = cv2.resize(img, (450, 600))
cv2.imwrite(mypath + '\\' + file.split('.')[0] + '.jpg', img)
# -
# ### BBox-Label -> YOLO dataset format
# +
# -*- coding: UTF-8 -*-
import re
from os import listdir
from os.path import isfile, join, exists

# CHANGE TO YOUR OWN DIRECTORY
label_src = r'Labels\001'
image_src = r'Images\001'

# CHANGE TO UNIX IF YOU'RE USING MAC OR LINUX
file_path_format = 'WIN' # possible values('UNIX','WIN')
if file_path_format == 'WIN':
    slash = '\\'
elif file_path_format == 'UNIX':
    slash = '/'
else:
    raise ValueError('Invalid value for file_path_format')

dst = label_src + '(YOLO).txt'
onlyfiles = [f for f in listdir(label_src) if isfile(join(label_src, f))]
# `with` guarantees the output file is closed even if a label file is malformed.
with open(dst, 'w') as fdst:
    for file in onlyfiles:
        outputString = ''
        # Each useful label line holds box coordinates; single-number lines
        # (object counts) are skipped. Class id is hard-coded to 0.
        with open(label_src + slash + file) as fsrc:
            for line in fsrc.readlines():
                numbers = line.split()
                if len(numbers) == 1:
                    continue
                for number in numbers:
                    outputString += '%d,' % int(number)
                outputString += '%d ' % 0
        # BUGFIX: use the configured `slash` instead of a hard-coded '\\',
        # so the UNIX setting actually produces UNIX-style paths.
        output = image_src + slash + file.split('.')[0] + '.jpg ' + outputString
        fdst.write(output + '\n')
        # Brightness/noise-augmented variants share the same bounding boxes;
        # cropped variants do not, so they are excluded.
        if not re.match(r'.*(crop\d).*', file.split('.')[0]):
            for trans in ['(bright30)', '(dark30)', '(gauss)']:
                output = image_src + trans + slash + file.split('.')[0] + trans + '.jpg ' + outputString
                fdst.write(output + '\n')
# -
# ### Data augmentation (flip, Gaussian blur, crops)
# +
# -*- coding: UTF-8 -*-
import cv2, re
import numpy as np
from os import listdir, mkdir
from os.path import isfile, join, exists
# IMAGE SOURCE AND DESTINATION
src_dir = r'RealSenseCamera'
# def add_gauss_noise(image, mean=0, var=0.001):
# image = np.array(image/255, dtype=float)
# noise = np.random.normal(mean, var ** 0.5, image.shape)
# out = image + noise
# low_clip = -1. if out.min() < 0 else 0.
# out = np.clip(out, low_clip, 1.0)
# out = np.uint8(out*255)
# return out
# def increase_brightness(img, value):
# hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# h, s, v = cv2.split(hsv)
# if value > 0:
# lim = 255 - value
# v[v > lim] = 255
# v[v <= lim] += value
# else:
# v[v + value < 0] = 0
# v[v != 0] -= value*-1
# final_hsv = cv2.merge((h, s, v))
# img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
# return img
def resize_img(img, _height):
    """Proportionally rescale a 3-channel image so its height equals _height.

    Returns the image unchanged when it already has the target height.
    """
    height, width, channels = img.shape
    if height == _height:
        return img
    ratio = _height / height
    return cv2.resize(img, (0, 0), fx=ratio, fy=ratio)
def crop_and_resize(img):
    """Cut four overlapping 80%-size corner crops and scale each back to the original height."""
    height, width, channels = img.shape
    top, bottom = int(height*0.2), int(height*0.8)
    left, right = int(width*0.2), int(width*0.8)
    corners = [
        img[top:, :right, :],     # bottom-left region
        img[:bottom, left:, :],   # top-right region
        img[top:, left:, :],      # bottom-right region
        img[:bottom, :right, :],  # top-left region
    ]
    resized = [resize_img(corner, height) for corner in corners]
    return tuple(resized)
def main():
    """Augment every JPEG in src_dir by writing four resized corner crops next to it.

    Output files reuse the source name with an 'a'/'b'/'c'/'d' suffix.
    Earlier augmentation passes (flip / gauss / brightness) are kept below,
    commented out, for reference.
    """
    # flip
    # safe = True
    # onlyfiles = [f for f in listdir(src_dir) if isfile(join(src_dir, f))]
    # for file in onlyfiles:
    #     if not re.match(r'.*(flip).*', file):
    #         img = cv2.imread(src_dir + '\\' + file)
    #         # img = resize_img(img)
    #         # flip
    #         flip_img = cv2.flip(img, 1)
    #         cv2.imwrite(src_dir + '\\' + file.split('.')[0] + '.jpg', img)
    #         cv2.imwrite(src_dir + '\\' + file.split('.')[0] + '(flip).jpg', flip_img)
    # gauss, cropped, etc
    onlyfiles = [f for f in listdir(src_dir) if isfile(join(src_dir, f))]
    for file in onlyfiles:
        # Only process JPEG files (case-insensitive extension match).
        if re.match(r'.*\.(jpg|jpeg)', file, re.I):
            # NOTE(review): assumes cv2.imread succeeds; it returns None for
            # unreadable files, which would crash crop_and_resize — confirm inputs.
            img = cv2.imread(src_dir + '\\' + file)
            # # gauss
            # gauss_img = add_gauss_noise(img)
            # cv2.imwrite(gauss_dir + '\\' + file.split('.')[0] + '(gauss).jpg', gauss_img)
            # # bright
            # bright_img = increase_brightness(img, 30)
            # cv2.imwrite(bright_dir + '\\' + file.split('.')[0] + '(bright30).jpg', bright_img)
            # # dark
            # dark_img = increase_brightness(img, -30)
            # cv2.imwrite(dark_dir + '\\' + file.split('.')[0] + '(dark30).jpg', dark_img)
            # crop: four overlapping corner crops, rescaled to the original height
            crop_img1, crop_img2, crop_img3, crop_img4 = crop_and_resize(img)
            cv2.imwrite(src_dir + '\\' + file.split('.')[0] + 'a.jpg', crop_img1)
            cv2.imwrite(src_dir + '\\' + file.split('.')[0] + 'b.jpg', crop_img2)
            cv2.imwrite(src_dir + '\\' + file.split('.')[0] + 'c.jpg', crop_img3)
            cv2.imwrite(src_dir + '\\' + file.split('.')[0] + 'd.jpg', crop_img4)
if __name__ == "__main__":
    main()
# -
# ### File Rename
# +
# -*- coding: UTF-8 -*-
import re
from os import listdir, rename, remove
from os.path import isfile, join
i = 1
path = 'Images1.0/003'
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
for file in onlyfiles:
if re.match(r'pic1.*', file):
remove(path + '/' + file)
# -
# ### XML processing
# +
# -*- coding: UTF-8 -*-
import re, os
import xml.etree.ElementTree as ET
from os import listdir, rename, mkdir
from os.path import isfile, join, exists
src = 'Images1.4'
path = '/SomeDirectory'
onlyfiles = [f for f in listdir(src) if isfile(join(src, f))]
for file in onlyfiles:
if not re.match(r'.*x\.xml', file, re.I):
os.remove(src + '/' + file)
# tree = ET.parse(src + '/' + file)
# root = tree.getroot()
# filename = file.split('.xml')[0] + 'x.jpg'
# # root[1] - file name
# root[1].text = filename
# # root[2] - path
# root[2].text = path + '/' + filename
# tree.write(src + '/' + file.split('.xml')[0] + 'x.xml')
# -
# ### Json to dataset
# +
# -*- coding: UTF-8 -*-
import base64
import json
import os, re
import os.path as osp
import PIL.Image
import yaml
import utils
def draw_mask(filename):
    """Convert a labelme JSON annotation into a PNG label mask.

    filename: path prefix without extension; reads `filename + '.json'`
    and writes `filename + '.png'`.
    """
    json_file = filename + '.json'
    data = json.load(open(json_file))
    # Base64-encoded image embedded in the labelme JSON.
    # NOTE(review): assumes imageData is always present; the fallback that
    # reads imagePath from disk is commented out below — confirm before reuse.
    imageData = data.get('imageData')
    # if not imageData:
    #     imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
    #     with open(imagePath, 'rb') as f:
    #         imageData = f.read()
    #     imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)
    # Fixed label-name -> integer-value mapping shared across the dataset;
    # the dynamic mapping built from the shapes is commented out below.
    label_name_to_value = {'_background_': 0, 'p': 1, 'c': 2, 't': 3, 'o': 4, 'd':5, 's': 6}
    # for shape in sorted(data['shapes'], key=lambda x: x['label']):
    #     label_name = shape['label']
    #     if label_name in label_name_to_value:
    #         label_value = label_name_to_value[label_name]
    #     else:
    #         label_value = len(label_name_to_value)
    #         label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
    # Invert the mapping so that label_names[value] == name.
    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    # NOTE(review): lbl_viz (overlay visualization) is computed but never saved.
    lbl_viz = utils.draw_label(lbl, img, label_names)
    utils.lblsave(filename + '.png', lbl)
if __name__ == '__main__':
path = 'mask_imgs_json/'
onlyfiles = [f for f in os.listdir(path) if osp.isfile(osp.join(path, f))]
for file in onlyfiles:
if not re.match(r'\..*', file, re.I):
draw_mask(path + file.split('.')[0])
|
HardWare/tools.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# This notebook gives you an overview in which order the scripts should be executed to get all results. The scripts can in principle be executed independently of each other, in a few cases results from another script are needed.
# <div style="width:100%; background-color: #D9EDF7; border: 1px solid #CFCFCF; text-align: left; padding: 10px;">
# <b>CO2 notebook process order overview:</b>
# <ul>
# <li>Main Notebook</li>
# <li><a href="Matching EUETS and ENTSO E.ipynb">1) Matching EUETS and ENTSO-E data</a></li>
# <li><a href="CI calculation top down method.ipynb">2) CI calculation top down method</a></li>
# <li><a href="EF calculation bottom up method.ipynb">3) EF caluclation bottom up method</a></li>
# <li><a href="CI calculation bottom up method.ipynb">4) CI calculation bottom up method</a></li>
# <li><a href="CO2 Signals.ipynb">5) CO2 Signals</a></li>
# </ul>
# </div>
# # Notebook overview
# - Matching EUETS and ENTSO E.ipynb
#
# Contains information on how the matching was performed and shows some quantitative results of the matching process.
#
#
# - CI calculation top down method.ipynb
#
# Calculates the CO2 intensity of electricity production using the top-down method for EU countries.
#
#
# - EF calculation bottom up method.ipynb
#
# Calculates the emission factors of electricity production for different countries and technologies using the bottom-up method for EU countries.
#
#
# - CI calculation bottom up method.ipynb
#
# Calculates the CO2 intensity of electricity production using the EF from the bottom-up method for EU countries.
#
#
# - CO2 Signals.ipynb
#
# Uses the calculated EF to generate a CO2 signal for countries of the EU.
|
.ipynb_checkpoints/main-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.datasets import mnist, cifar10
from keras.optimizers import SGD
import keras.utils
import keras.backend as K
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
print(K.tensorflow_backend._get_available_gpus())
print('img data fmt: ', K.image_data_format())
# +
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train.shape = ', X_train.shape, ' y_train.shape = ', y_train.shape)
print('X_test.shape = ', X_test.shape, ' y_test.shape = ', y_test.shape)
print('data type: ', type(X_train[0][0][0][0]))
print('label type: ', type(y_train[0][0]))
# +
# Reshape data and normalize between [0.0 , 1.0] before feeding it to our model
# (CIFAR-10 already loads as (n, 32, 32, 3), so the reshapes stay commented out)
#X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
#X_test = X_test.reshape(X_test.shape[0], 32, 32, 3)
# convert target labels from scalar class ids to one-hot vectors of length 10
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# convert img data from ints in range [0,255] to floats in range [0,1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255
X_test = X_test / 255
print('X_train.shape = ', X_train.shape, ' y_train.shape = ', y_train.shape)
print('X_test.shape = ', X_test.shape, ' y_test.shape = ', y_test.shape)
# check the type of the first element in our 4-D array
print('data type: ', type(X_train[0][0][0][0]))
print('label type: ', type(y_train[0][0]))
# -
# # MultiLayer Perceptron (MLP)
#
# * 4 fully connected (dense) layers
# * 1 flattened input layer (3072,) — 32×32×3 CIFAR-10 images
# * 3 hidden layers (128,) ReLU
# * 1 output layer (10,) softmax
#
# Outputs a probability for each class
# +
# create an empty sequential model
model = Sequential()
# add layers: flatten each 32x32x3 image into 3072 features, then three
# ReLU hidden layers of 128 units and a 10-way softmax output
model.add(Flatten(input_shape=(32,32,3)))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
# Stochastic Gradient Descent (SGD) optimizer
sgd = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False)
# Compile the model - this is where memory is allocated and a compute graph is generated
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# print out a summary of our model
model.summary()
# -
# if you get a ResourceExhaustedError, decrease the batch_size
hist = model.fit(X_train, y_train, epochs=10, batch_size=64)
# model.fit() returns a history object, which is a dict containing loss and acc metrics (and any other metrics we pass in)
plt.style.use('seaborn')
plt.figure(figsize=(8,5)) # figsize is in inches
# plot the history object returned by the fit() method
# NOTE(review): 'acc' is the old Keras history key; newer versions use
# 'accuracy' — confirm against the installed Keras version.
plt.plot(hist.history['loss'], 'r')
plt.plot(hist.history['acc'], 'r--')
plt.xlabel('Training Epoch')
plt.ylabel('Training Accuracy/Loss')
plt.legend(['Loss (categorical cross-entropy)', 'Accuracy'])
plt.grid(True)
plt.ylim(0,1)
plt.show()
# +
# Held-out evaluation on the 10k test images.
loss, accuracy = model.evaluate(X_test, y_test, batch_size=128)
print('test loss: ', loss)
print('test accuracy: ', accuracy)
# -
# # Convolutional Neural Net
#
# * 2 Convolutional Layers
# * 2 Max-Pooling Layers
# * 2 Fully-Connected layers
#
# in $\rightarrow$ Conv2d $\rightarrow$ Max Pooling $\rightarrow$ Conv2d $\rightarrow$ Max Pooling $\rightarrow$ Fully Connected $\rightarrow$ out
# +
cnn = Sequential()
# Stage 1: 32 3x3 filters over the 32x32x3 input, then 2x2 max-pooling.
cnn.add(Conv2D(32, kernel_size=(3,3),
               activation='relu',
               input_shape= (32,32,3)))
cnn.add(MaxPooling2D(pool_size=(2,2)))
# Stage 2: 64 3x3 filters, then 2x2 max-pooling.
cnn.add(Conv2D(64, (3,3), activation='relu'))
cnn.add(MaxPooling2D(pool_size=(2,2)))
# Classifier head: flatten feature maps, one hidden layer, 10-way softmax.
cnn.add(Flatten())
cnn.add(Dense(128, activation='relu'))
cnn.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False)
cnn.compile(loss=keras.losses.categorical_crossentropy,
            optimizer=sgd,
            metrics=['accuracy'])
cnn.summary()
# +
hist_cnn = cnn.fit(X_train, y_train, batch_size=64, epochs=10)
# -
# Compare CNN vs MLP training curves (hist comes from the MLP run above).
plt.figure(figsize=(8,5))
#plt.plot(hist.history['loss'], 'r')
#plt.plot(hist.history['acc'], 'r--')
plt.plot(hist_cnn.history['loss'], 'b')
plt.plot(hist.history['loss'], 'r')
plt.legend(['CNN', 'MLP'])
plt.xlabel('Training Epoch')
plt.grid(True)
plt.ylim(0,2)
plt.title('loss')
plt.show()
# NOTE(review): 'acc' is the old Keras history key; newer versions use 'accuracy'.
plt.plot(hist_cnn.history['acc'], 'b')
plt.plot(hist.history['acc'], 'r')
plt.legend(['CNN', 'MLP'])
plt.xlabel('Training Epoch')
plt.grid(True)
plt.ylim(0,1)
plt.title('accuracy')
#plt.savefig('MNIST_cnn_mlp.png', dpi=150, bbox_inches='tight')
plt.show() # if you call plt.savefig() after plt.show(), you'll save a blank figure
# +
# Final held-out evaluation of the CNN.
loss, accuracy = cnn.evaluate(X_test, y_test, batch_size=128)
print('test loss: ', loss)
print('test accuracy: ', accuracy)
# -
|
keras/keras_CIFAR-10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''env'': venv)'
# name: python3
# ---
# ## Object Detection - YOLOv3
import numpy as np
import cv2 as cv
from motrackers.detectors import YOLOv3
import pandas
import pandas as pd
import csv
# Accumulator: one row per detection, holding "label:confidence" and its bbox.
COLS = ['id','bounding_box']
df = pd.DataFrame(columns=COLS)
# +
# Paths to the input video and the pretrained YOLOv3 weights/config/labels.
VIDEO_FILE = "./../video_data/walking.mp4"
WEIGHTS_PATH = './../pretrained_models/yolo_weights/yolov3.weights'
CONFIG_FILE_PATH = './../pretrained_models/yolo_weights/yolov3.cfg'
LABELS_PATH = "./../pretrained_models/yolo_weights/coco_names.json"
USE_GPU = False
CONFIDENCE_THRESHOLD = 0.5  # drop detections below this confidence
NMS_THRESHOLD = 0.2         # non-maximum-suppression overlap threshold
DRAW_BOUNDING_BOXES = True
# -
# Build the detector and open the video stream.
model = YOLOv3(
    weights_path=WEIGHTS_PATH,
    configfile_path=CONFIG_FILE_PATH,
    labels_path=LABELS_PATH,
    confidence_threshold=CONFIDENCE_THRESHOLD,
    nms_threshold=NMS_THRESHOLD,
    draw_bboxes=DRAW_BOUNDING_BOXES,
    use_gpu=USE_GPU
)
cap = cv.VideoCapture(VIDEO_FILE)
# +
# Main loop: read frames, run detection, log detections to CSV, display.
while True:
    ok, image = cap.read()
    if not ok:
        print("Cannot read the video feed.")
        break

    bboxes, confidences, class_ids = model.detect(image)
    updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)

    # Collect one row per detection ("label:confidence", bbox).  The original
    # shadowed the builtin `iter`, used a bare except, appended via the
    # deprecated DataFrame.append, and rewrote the CSV once per detection.
    rows = []
    for det_idx, cid in enumerate(class_ids):
        try:
            label = "{}:{:.4f}".format(model.object_names[cid], confidences[det_idx])
            rows.append([label, bboxes[det_idx]])
        except (KeyError, IndexError):
            # Skip detections whose class id has no known name or whose
            # index is out of range; don't let one bad entry kill the stream.
            continue
    if rows:
        df = pd.concat([df, pd.DataFrame(rows, columns=COLS)], ignore_index=True)
    # Persist once per frame so the file survives an interrupted run.
    df.to_csv('bounding_box_collection_walking.csv', columns=COLS, index=False)

    cv.imshow("image", updated_image)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv.destroyWindow("image")
# -
|
examples/example_notebooks/detector_YOLOv3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.9 64-bit
# name: python3
# ---
# # Задания
from typing import List, Any, Dict, Optional
from collections import Counter, namedtuple
import itertools
import re
import dataclasses
# ### Задание 1: <a class="anchor" id="task1"></a>
# Все пункты нужно ввыполнить с использованием ТОЛЬКО срезов
# * Получите подстроку 'string' из строки test_string
# * Получите подстроку 'Just' из строки test_string
# * Получите подстроку 'simple' из строки test_string
# * Получите каждый 5 символ из строки test_string
# * Получите каждый 3 символ из строки test_string, начиная с конца.
# +
# Slice-only exercises over a fixed sample string.
test_string = 'Just a simple string'
word_string = test_string[-6:]            # 'string' — last six characters
word_just = test_string[0:4]              # 'Just'
word_simple = test_string[7:13]           # 'simple'
every_fifth_char = test_string[0::5]      # every 5th character from index 0
every_third_char_from_end = test_string[::-3]  # every 3rd character, walking backwards
# -
# ### Задание 2: <a class="anchor" id="task3"></a>
#
# Написать функцию, которая будет приниметь одно значение с логическими типы, а затем ковертировать их в строковые 'True' и 'False' и возвращать эти значения.
# +
def bool_to_str(input: bool) -> str:
    """Convert a boolean value to its string form: 'True' or 'False'."""
    return str(bool(input))

bool_to_str(True)
# -
# ### Задание 3: <a class="anchor" id="task3"></a>
#
# Написать функцию, которая будет приниметь одно значение - список. Вычислить разницу между максимальным и минимальным значением и вернуть его.
# +
def count_diff(input: List[int]) -> int:
    """Return the spread of *input*: largest element minus smallest element."""
    smallest, largest = min(input), max(input)
    return largest - smallest

count_diff([1, 17, 83, 2])
# -
# ### Задание 4: <a class="anchor" id="task4"></a>
#
# Написать функцию, которая будет принимать одно значение - число. Функция должна возвращать список всех четных чисел в диапозоне от 1 до полученного числа. В этом задании нужно использовать list comprehension.
# +
def get_even_to_num(input: int) -> List[int]:
    """Return every even number in the range 1..input inclusive."""
    return list(range(2, input + 1, 2))

get_even_to_num(8)
# -
# ### Задание 5: <a class="anchor" id="task2"></a>
#
# Напишите функцию, который имеет два аргумента - x и y. Функция должна выводить координатный угол, в котором находятся координаты (x, y).
# Точки внутри координатного угла I имеют положительные абсциссы и ординаты.
# Точки внутри координатного угла II имеют отрицательные абсциссы и положительные ординаты.
# Точки внутри координатного угла III имеют отрицательные абсциссы и ординаты
# Точки внутри координатного угла IV имеют положительные абсциссы и отрицательные ординаты.
# +
class Point:
    '''A point on the Cartesian plane.'''

    def __init__(self, x: int, y: int):
        self._x = x
        self._y = y

    def get_coordinate_quarter(self) -> int:
        '''
        Return the coordinate quarter (1-4) this Point lies in.

        Quarter I: x > 0, y > 0;   II: x < 0, y > 0;
        Quarter III: x < 0, y < 0; IV: x > 0, y < 0.
        (The original implementation swapped quarters II and IV.)

        : raise: ValueError if either x or y equals zero.
        '''
        if self._x == 0 or self._y == 0:
            raise ValueError('It\'s impossible to get the coordinate quarter for the Point with one of the coordinates being Zero.')
        if self._x > 0:
            return 1 if self._y > 0 else 4
        return 3 if self._y < 0 else 2

point = Point(x=3, y=-4)
print(point.get_coordinate_quarter())
point_with_zero = Point(x=0, y=-2)
try:
    point_with_zero.get_coordinate_quarter()
except Exception as ex:
    print(ex)
# -
# ### Задание 6: <a class="anchor" id="task2"></a>
#
# Напишите функцию, которая принимает одно значение - число . Функция должна возвращать строку - полученное число в двоичном виде
# +
def to_binary(input: int) -> str:
    """Return *input* in binary, zero-padded to at least three digits."""
    return format(input, '03b')

to_binary(6)
# -
# ### Задание 7:<a class="anchor" id="task5"></a>
#
# Написать функцию, которая будет принимать одно значение - список. Список содержит числа. Все числа, кроме двух, повторяются как минимум два раза. Вернуть список из этих двух неповторяющихся чисел
# +
def get_only_unique(input: List[int]) -> List[int]:
    """Return the values that occur exactly once in *input*, in first-seen order.

    The task asks for the non-repeating numbers; the original returned the
    *repeating* ones (count > 1), which is the opposite.
    """
    return [num for num, count in Counter(input).items() if count == 1]

get_only_unique([5, 8, 4, 8, 1, 13, 5, 76, 1, 3])
# + [markdown] tags=[]
# ### Задание 8: <a class="anchor" id="task2"></a>
#
# Напишите функцию, которая принимает два значения - числа **num**, **length** (основное число, количество умножений). Функция должна возвращать список перемножений числа **num** **length** раз. Пример: test_function(7, 5) ➞ [7, 14, 21, 28, 35]
# +
def multiply_n_times(input_num: int, times_to_multiply: int) -> List[int]:
    """Return the first *times_to_multiply* multiples of *input_num*."""
    return list(itertools.accumulate(itertools.repeat(input_num, times_to_multiply)))

multiply_n_times(7, 5)
# -
# ### Задание 9: <a class="anchor" id="task6"></a>
#
# Написать функцию, которая будет принимать одно значение - строку. Функция должна возвращать представление полученной строки закодированной азбукой Морзе. Входная строка может содержать буквы как нижнего, так и верхнего регистра. Междк всеми словами присутствует пробел
# +
# Lookup table from characters to their Morse-code representation.
char_to_dots = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..', ' ': ' ', '0': '-----',
    '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
    '6': '-....', '7': '--...', '8': '---..', '9': '----.',
    '&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
    ':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
    '-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.'
}

def convert_to_morse(input: str) -> str:
    '''
    : params: Any string in English (upper or lower case)
    : return: Morse encoded string with one space between chars and 2 spaces
              between words.

    The original joined words with a single space, making word boundaries
    indistinguishable from character boundaries; this version uses the
    two-space word separator its docstring promised.
    '''
    words = input.upper().split(' ')
    encoded_words = [' '.join(char_to_dots[char] for char in word) for word in words]
    return '  '.join(encoded_words)

convert_to_morse("This is a test string")
# -
# ### Задание 10: <a class="anchor" id="task6"></a>
#
# Написать функцию, которая будет принимать одно значение - список. Функция должна возвращать самое частое значение в списке (встречается > N/2). Пример: test_function(["A", "A", "A", "B", "C", "A"]) ➞ "A"
# +
def get_the_most_frequent(input: List[Any]):
    '''
    Return the most frequent element of *input*.

    : return: the single most frequent item, or a list of items when
              several share the top frequency.
    '''
    frequencies = Counter(input)
    top = max(frequencies.values())
    winners = [value for value, freq in frequencies.items() if freq == top]
    return winners if len(winners) > 1 else winners[0]

print(get_the_most_frequent(['A', 3, 3, 'B', 8, 'A']))
print(get_the_most_frequent(['A', 3, 'B', 8, 'A']))
# -
# ### Задание 11:
# Создайте функцию для выполнения основных арифметических операций, которая применяет сложение, вычитание, умножение и деление к строковому значению (например, "12 + 24" или "23-21" или "12 // 12" или "12 * 21").
#
# Здесь у нас есть 1 число, за которым следует пробел, затем оператор, за которым следует другой пробел, и 2 число. Возвращаемое значение должно быть числом.
#
# Применение функции eval() не допускается. В случае деления, всякий раз, когда второе число равно "0", возвращайте -1.
# +
Expression = namedtuple('Expression', 'first_operand operator second_operand')

class OperationOptions(object):
    Sum = '+'
    Subtract = '-'
    Multiply = '*'
    Divide = '//'

def perform_operation(input: str) -> int:
    """Evaluate an "<int> <op> <int>" expression without eval().

    Supported operators: + - * //.  Integer division by zero yields -1.
    Raises ValueError for any unsupported operator.
    """
    parts = input.split(' ')
    exp = Expression(int(parts[0]), parts[1], int(parts[2]))
    handlers = {
        OperationOptions.Sum: lambda a, b: a + b,
        OperationOptions.Subtract: lambda a, b: a - b,
        OperationOptions.Multiply: lambda a, b: a * b,
        OperationOptions.Divide: lambda a, b: -1 if b == 0 else a // b,
    }
    if exp.operator not in handlers:
        raise ValueError("Wrong operator")
    return handlers[exp.operator](exp.first_operand, exp.second_operand)

print(perform_operation('12 + 14'))
print(perform_operation('23 * 17'))
print(perform_operation('23 // 0'))
print(perform_operation('76 - 12'))
try:
    perform_operation('23 % 0')
except Exception as ex:
    print(ex)
# -
# ### Задание 12:
# Напишите функцию, которая принимает список списков и возвращает значение всех символов в нем, где каждый символ добавляет или отнимает что-то от общего балла. Значения символов:
#
# * \# = 5
# * О = 3
# * Х = 1
# * ! = -1
# * !! = -3
# * !!! = -5
#
# Если итоговый результат отрицательный, верните 0 (например, 3 ``#``, 3 ``!!``, 2 ``!!!`` и ``X`` будет (5 + 5 + 5 - 3 - 3 - 3 - 5 - 5 + 1) = -3, так что верните 0.
# +
# Point value of each scoring symbol.
symbol_points = {
    '#': 5,
    'O': 3,
    'X': 1,
    '!': -1,
    '!!': -3,
    '!!!': -5
}

test_list = [['#', 'O', '!', '!'], ['!!', 'O'], ['#', '#', '!!!']]
test_list_negative = [['#', 'O', '!', '!'], ['!!', 'O'], ['#', '!!!', '!!!', '!!!']]

def process_symbols(input: List[List[str]]) -> int:
    """Sum the point values of every symbol; negative totals clamp to 0."""
    score = sum(symbol_points[symbol] for row in input for symbol in row)
    return score if score >= 0 else 0

print(process_symbols(test_list))
print(process_symbols(test_list_negative))
# -
# ### Задание 13: <a class="anchor" id="task7"></a>
#
# Написать функцию, которая будет принимать одно значение - строку. Функция определяет свободные и занятые участки пляжа. Строка состоит из двух символов 0 - свободный участок, 1 - занятый участок. Из-за недавних ограничений новый человек не может занять место рядом с другим. Должно быть одно свободное место между двумя людьми, отдыхающими на пляже. Функция должна вернуть число - количество новых людей, которые могут воспользоваться местами на пляже.
# +
# The input string represents a single row of beach spots: '0' free, '1' taken.
# A newcomer may only take a spot whose immediate neighbours are both free
# (the string's edges count as free on the outside).
test_string = '0101000100'

def count_vaccant(input: str) -> int:
    """Return how many newcomers can be seated, placing greedily left to right.

    The original regex heuristic (count '00' at the edges plus non-overlapping
    '000' runs) undercounts longer free runs such as '00000' and misses the
    single-seat case '0'; the greedy scan below handles every run length.
    """
    spots = list(input)
    seated = 0
    for i, spot in enumerate(spots):
        if spot != '0':
            continue
        left_free = i == 0 or spots[i - 1] == '0'
        right_free = i == len(spots) - 1 or spots[i + 1] == '0'
        if left_free and right_free:
            spots[i] = '1'  # occupy the spot so later checks see it as taken
            seated += 1
    return seated

count_vaccant(test_string)
# -
# ### Задание 14: <a class="anchor" id="task7"></a>
#
# Написать функцию, которая будет принимать одно значение - строку или список. Необходимо зашифровать строку. Первый элемент строки - код буквы в ascii (например 'a' = 97, a 'A' = 65). Следующий элемент - закодированная с помощью таблицы разница между текущим и предыдущим символом, итд. Если подается список - необходимо расшифровать его. Алгоритм такой же - первое число перекодируется в соответствием с таблицей ascii, второй символ - сумма первого и второго числа перекодированная с помощью таблицы ascii.
# +
# test_function("Hello") ➞ [72, 29, 7, 0, 3]
# test_function([ 72, 33, -73, 84, -12, -3, 13, -13, -68 ]) ➞ "Hi there!"
ascii_value_char = {i: chr(i) for i in range(128)}
ascii_char_value = {chr(i): i for i in range(128)}
def encrypt_decrypt(input):
final = []
if type(input) is str:
for ind in range(len(input)):
previous_char = ascii_char_value[input[ind - 1]] if ind != 0 else 0
current_char = ascii_char_value[input[ind]]
encrypted_val = current_char if ind == 0 else max(current_char, previous_char) - min(current_char, previous_char)
final.append(encrypted_val)
return final
if type(input) is list:
buffer_num = 0
for ind in range(len(input)):
current_num = input[ind]
buffer_num = current_num + buffer_num
decrypted_char = ascii_value_char[current_num] if ind == 0 else ascii_value_char[buffer_num]
final.append(decrypted_char)
return ''.join(final)
print(encrypt_decrypt("Hello"))
print(encrypt_decrypt([ 72, 33, -73, 84, -12, -3, 13, -13, -68 ]))
# -
# ### Задание 15:
# Создайте функцию, которая определяет, может ли c каждого места в концертном заде видеть сцену. С места можно увидеть сцену, если значение этого места (указано во входном списке) строго больше, чем значение перед ним.
#
# Каждый может увидеть сцену в примере ниже:
#
# ``[[1, 2, 3, 2, 1, 1],
# [2, 4, 4, 3, 2, 2],
# [5, 5, 5, 5, 4, 4],
# [6, 6, 7, 6, 5, 5]]``
#
# Не все видят сцену:
#
# ``[[1, 2, 3, 2, 1, 1],
# [2, 4, 4, 3, 2, 2],
# [5, 5, 5, 10, 4, 4],
# [6, 6, 7, 6, 5, 5]]``
#
# Функция должна возвращать True, если абсолютно все видят сцену, ичане False
# +
test_true = [[1, 2, 3, 2, 1, 1],
             [2, 4, 4, 3, 2, 2],
             [5, 5, 5, 5, 4, 4],
             [6, 6, 7, 6, 5, 5]]

test_false = [[1, 2, 3, 2, 1, 1],
              [2, 4, 4, 3, 2, 2],
              [5, 5, 5, 10, 4, 4],
              [6, 6, 7, 6, 5, 5]]

def can_all_see_stage(input: List[List[int]]) -> bool:
    """Return True if every seat is strictly taller than the seat in front of it.

    Compares each adjacent pair of rows column by column.  A single-row hall
    trivially returns True (the original paired the lone row with itself and
    wrongly returned False).
    """
    for front_row, back_row in zip(input, input[1:]):
        if any(back <= front for front, back in zip(front_row, back_row)):
            return False
    return True

print(can_all_see_stage(test_true))
print(can_all_see_stage(test_false))
# -
# ### Задание 16:
# Создать функции, которая будет строить лестницу, используя знаки ‘_’ и ‘#’. Положительное значение обозначают, что направление лестницы направленно вверх и вниз для отрицательных значений.
# Пример
#
# ``staircase(3) ➞ "__#\n_##\n###"
# __#
# _##
# ###``
#
# ``staircase(7) ➞ "______#\n_____##\n____###\n___####\n__#####\n_######\n#######"
# ______#
# _____##
# ____###
# ___####
# __#####
# _######
# #######``
#
# ``staircase(2) ➞ "_#\n##"
# _#
# ##``
#
# ``staircase(-8) ➞ "########\n_#######\n__######\n___#####\n____####\n_____###\n______##\n_______#"
# ########
# _#######
# __######
# ___#####
# ____####
# _____###
# ______##
# _______#``
#
# Замечания:
# Возвращаемая строка дополняется символом перехода на новую строку \n
# +
def staircase(input: int) -> str:
    """Build an ASCII staircase of |input| steps.

    Positive input ascends (hashes grow per row); negative input descends.
    Raises ValueError for zero.
    """
    if input == 0:
        raise ValueError('Zero is not supported')
    size = abs(input)
    steps = range(1, size + 1) if input > 0 else range(size, 0, -1)
    return '\n'.join(('#' * step).rjust(size, '_') for step in steps)

print(staircase(3))
print(staircase(-7))
# -
# ### Задание 17:
#
# Имеется строк из символов в нижнем регистре ascii[["a".."z"]]. Нужно сократить строку следующим образом: берется пара соседних символов и если они одинаковы, то они удаляются. Например aab должно превратится в b.
# Нужно удалить как можно больше символов. Если результирующая строка пустая, нужно вернуть "Empty String"
#
# Пример:
#
# ``super_reduced_string("aaabccddd") ➞ "abd"``
# +
@dataclasses.dataclass
class ProcessedChar:
    # One character of the input string plus its position and a removal flag;
    # helper record for the adjacent-pair reduction below.
    # NOTE(review): the explicit __init__ defeats @dataclass, which would
    # otherwise generate one from annotated fields — the decorator is
    # effectively a no-op here.
    def __init__(self, char: str, index: Optional[int], is_removed: bool = False):
        self.index = index
        self.is_removed = is_removed
        self.char = char
        # Unique key: the same character at different positions must not collide.
        self.hash = f'{char}_{index}'
test_string_not_empty = "aaabccddd"
test_string_empty = "aabbcc"
test_string_multy_steps = "abba"

def super_reduced_string(input: str) -> str:
    """Repeatedly delete adjacent equal pairs and return the reduced string.

    A stack yields the fully-reduced result in a single O(n) pass.  The
    original multi-pass dictionary approach looped forever on inputs such as
    "abab", whose characters repeat but are never adjacent (its termination
    test required all remaining characters to be distinct).
    Returns "Empty String" when everything cancels out.
    """
    stack: List[str] = []
    for char in input:
        if stack and stack[-1] == char:
            stack.pop()  # adjacent duplicate pair cancels out
        else:
            stack.append(char)
    return ''.join(stack) if stack else "Empty String"

print(super_reduced_string(test_string_not_empty))
print(super_reduced_string(test_string_empty))
print(super_reduced_string(test_string_multy_steps))
# -
# ### Задание 18:
#
# Создать функцию, которая вернет ближайшую к текущей странице главу. Если две главы одинаково близко, то выбирается та, которая находится на большей по порядку странице.
# Пример
#
# ``nearest_chapter({
# "Chapter 1" : 1,
# "Chapter 2" : 15,
# "Chapter 3" : 37
# }, 10) ➞ "Chapter 2"``
#
#
# ``nearest_chapter({
# "New Beginnings" : 1,
# "Strange Developments" : 62,
# "The End?" : 194,
# "The True Ending" : 460
# }, 200) ➞ "The End?"``
#
#
# ``nearest_chapter({
# "Chapter 1a" : 1,
# "Chapter 1b" : 5
# }, 3) ➞ "Chapter 1b"``
# +
# Kept for backward compatibility with any external users of this helper type.
ChapterWithGap = namedtuple("ChapterWithGap", "Chapter Gap")

def nearest_chapter(chapters_pages: Dict[str, int], input_page: int) -> str:
    """Return the chapter whose start page is closest to *input_page*.

    Ties are broken in favour of the chapter on the larger page number, as
    the task requires.  (The original picked the last tied entry in dict
    insertion order, which is only correct when chapters happen to be listed
    in ascending page order.)
    """
    return max(
        chapters_pages.items(),
        key=lambda item: (-abs(input_page - item[1]), item[1]),
    )[0]

res1 = nearest_chapter({
    "Chapter 1" : 1,
    "Chapter 2" : 15,
    "Chapter 3" : 37
}, 10)
res2 = nearest_chapter({
    "Chapter 1a" : 1,
    "Chapter 1b" : 5
}, 3)
res3 = nearest_chapter({
    "New Beginnings" : 1,
    "Strange Developments" : 62,
    "The End?" : 194,
    "The True Ending" : 460
}, 200)
print(res1, res2, res3, sep='|')
# -
|
Tasks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Convert the local Excel workbook to CSV once so later runs can read the CSV.
data_set = pd.read_excel('Data.xlsx') # this file is only in my local machine
import os
path = 'Dataset/'
data_set.to_csv(os.path.join(path+r'data.csv'))
data = pd.read_csv("Dataset/data.csv")
data
# Data cleaning
# There is no null data and unrelated data , so we dont need to drop or change anything
# just look up the datatypes
data.info()
# We can convert the relative headache to a boolean value
# we drop Rh+ cause they dont have any meaning for only this table - no negative blood type
data["relative_headache"]= data["relative_headache"].map(dict(yes=1, no=0))
data
data.info()
# Strip the last four characters from each blood-type string — presumably the
# " Rh+" suffix; confirm against the actual data.
# NOTE(review): data.Blood[i] = ... is chained-indexing assignment; pandas may
# raise SettingWithCopyWarning — data.loc[i, 'Blood'] would be safer.
for i in range(data.shape[0]):
    data.Blood[i] = data.Blood[i][:-4]
data
# +
# we have 17 unique values like male-female- 18 19 A B etc
# we can use them as original names
# or we can convert all the possible values to an integer like 100 - 117
# like male = 100, female = 101, 19 = 102 , 19 = 103 ... no = 116
# or we can give them alphabetical chracters like A B C D
# -
# NOTE(review): `df` is never defined in this file — the cleaning above builds
# `data`. Presumably every `df` below should be `data` (or a `df = data` cell
# is missing). Confirm before running.
for i in df.columns:
    print(df[i].unique())
#we need a dict to store these as an order
for i in df.columns:
    print(df[i].nunique())
total_unique_values = 0
for i in df.columns:
    total_unique_values += df[i].nunique()
total_unique_values
# we'll have 17 after data binning - for weight range
bins = [50, 75, 90, 100]
labels = ["W1","W2","W3"]
df['Weight'] = pd.cut(df['Weight'], bins=bins, labels=labels)
print (df)
# we'll have 17 after data binning - for height range
bins2 = [160, 167, 175, 183]
labels2 = ["H1","H2","H3"]
df['Height'] = pd.cut(df['Height'], bins=bins2, labels=labels2)
print (df)
# run unique counts again
total_unique_values = 0
for i in df.columns:
    total_unique_values += df[i].nunique()
total_unique_values
# there is no problem anymore
for i in df.columns:
    for j in range(df[i].nunique()):
        print(df[i].unique()[j])
# +
# Assign a distinct integer id to every unique value across all columns.
unique_int = 0
our_dict = {}
for i in df.columns:
    for j in range(df[i].nunique()):
        our_dict[df[i].unique()[j]] = unique_int
        unique_int += 1
print(our_dict)
#now assign the integer values for each unique values
# -
# there is category datatypes in data- we have to convert them to str for further operations like conditional things
df.info()
# conversion
df['Height'] = df.Height.astype(str)
df['Weight'] = df.Weight.astype(str)
df.info()
# Replace every cell with its integer id from our_dict.
# NOTE(review): df[i][j] = ... is chained indexing; df.loc[j, i] would be safer.
for i in df.columns:
    for j in range(df.shape[0]):
        df[i][j] = our_dict[df[i][j]]
print(df)
print(our_dict)
df
unique_int
# +
L1 = {}
# btw we have %40 support value as a threshold value - choose 5 as thresh - couse this dataset has 13 item/row
# check the count for each value in data set
# this is first iteration there will be more
for i in range(total_unique_values):
    for j in df.columns:
        count = 0
        for k in range(df.shape[0]):
            if int(df[j][k]) == int(i):
                count += 1
        if count > 4:
            print(count)
            print("greater")
# I am just trying to make some improvements - this is not actual algorithm solution and also this one is data specific.
# Still updating
|
Algorithms/AprioriAlgorithm/AprioriAlgorithm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Market Basket Analysis
#
# The dataset can be found here: [Online Retail Dataset](https://archive.ics.uci.edu/ml/datasets/online+retail)
import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
# Takes a little while to load
df = pd.read_excel('online_retail.xlsx')
df.head()
df.shape
df.describe()
df.isna().sum()
df['InvoiceNo'].nunique()
# TODO: Check for credit/debit transactions
# ## Data Cleaning
df['Description'] = df['Description'].str.strip() # removing the blank spaces before and after
df.dropna(axis=0, subset=['InvoiceNo'], inplace=True) # removing the missing invoices
df['InvoiceNo'] = df['InvoiceNo'].astype('str') # converting to strings
# Invoices whose number contains 'C' are credit notes (returns) — drop them.
df = df[~df['InvoiceNo'].str.contains('C')] # removing the credit transaction data
df.head()
df.shape
df['Country'].value_counts()
# ## Running the Analysis on the German Data
#
# 1st iteration for the speed of analysis purposes.
#
# Let's start with transforming our dataset - we're grouping by the invoice numbers and descriptions, indexed by the product. In a nutshell, each invoice now represents a "basket", with the counts of individual product quantities purchased.
# One row per invoice ("basket"), one column per product description,
# values = total quantity of that product on the invoice (0 when absent).
mybasket = (df[df['Country'] == 'Germany']
            .groupby(['InvoiceNo', 'Description'])['Quantity'] # grouping by the quantity
            .sum().unstack().reset_index().fillna(0)
            .set_index('InvoiceNo'))
mybasket.head()
mybasket.shape
# The association analysis algorithm expects our numbers to be within the 0 to 1 range, and therefore we need to convert our actual counts to comply with this.
# converting all positive values to 1 and the rest to 0
def my_encode_units(value):
    """One-hot encode a basket quantity: 1 for any positive amount, else 0.

    The original returned None (NaN after applymap) for fractional values in
    the open interval (0, 1); apriori requires strictly 0/1 input, so every
    value is clamped explicitly.
    """
    return 1 if value > 0 else 0
# using the applymap function to substitute the values
my_basket_sets = mybasket.applymap(my_encode_units)
# removing the postage item (it appears in nearly every order, so it only adds noise)
my_basket_sets.drop('POSTAGE', inplace=True, axis=1)
# a sanity check
my_basket_sets.describe()
# ## Training the Model
# generating frequent itemsets (support >= 7% of the German baskets)
frequent_itemsets = apriori(my_basket_sets, min_support=0.07, use_colnames=True)
# generating association rules ranked by lift
rules = association_rules(frequent_itemsets, metric='lift', min_threshold=1)
rules.head(100)
my_basket_sets['ROUND SNACK BOXES SET OF4 WOODLAND'].sum()
my_basket_sets['SPACEBOY LUNCH BOX'].sum()
# filtering rules based on high lift and confidence
rules[ (rules['lift'] >= 3) &
       (rules['confidence'] >= 0.3)]
# ## Summary
#
# In summary, we were able to effectively apply the apriori algorithm to find and recommend similar items based on the purchase data. The example above showcases an item with a recommended pair that we're highly confident in being generally purchased together.
#
# This analysis has potential implications for online shopping, retail shelving placement and optimization, and will ideally significantly increase the sales of the products offered.
|
market-basket-analysis/python-implementation/market-basket-analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NAVIGATION-->
#
# <a href="https://colab.research.google.com/github/bpesquet/machine-learning-handbook/blob/master/deep-learning/Convolutional_Neural_Networks.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
#
# # Convolutional Neural Networks (aka CNN or convnets)
# ## Package setup
# +
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.datasets import mnist
from keras.utils import to_categorical
# Display plots inline, change default figure size and change plot resolution to retina
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import seaborn as sns
# Set Seaborn aesthetic parameters to defaults
sns.set()
# -
# ## Utility functions
def plot_loss_acc(history):
    """Plot training and (optionally) validation loss and accuracy.

    Args:
        history: a Keras ``History`` object (returned by ``model.fit``);
            its ``history`` dict holds per-epoch metric lists.

    Draws a two-row figure: loss on top, accuracy below. Validation
    curves are added only when the corresponding keys are present.
    """
    loss = history.history['loss']
    epochs = range(1, len(loss) + 1)
    plt.figure(figsize=(10, 10))
    plt.subplot(2, 1, 1)
    plt.plot(epochs, loss, '.--', label='Training loss')
    final_loss = loss[-1]
    title = 'Training loss: {:.4f}'.format(final_loss)
    plt.ylabel('Loss')
    if 'val_loss' in history.history:
        val_loss = history.history['val_loss']
        plt.plot(epochs, val_loss, 'o-', label='Validation loss')
        final_val_loss = val_loss[-1]
        title += ', Validation loss: {:.4f}'.format(final_val_loss)
    plt.title(title)
    plt.legend()
    # Keras 2.3 renamed the metric keys 'acc' -> 'accuracy' and
    # 'val_acc' -> 'val_accuracy'; support both spellings so the helper
    # keeps working across Keras versions.
    acc_key = 'acc' if 'acc' in history.history else 'accuracy'
    acc = history.history[acc_key]
    plt.subplot(2, 1, 2)
    plt.plot(epochs, acc, '.--', label='Training acc')
    final_acc = acc[-1]
    title = 'Training accuracy: {:.2f}%'.format(final_acc * 100)
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    val_acc_key = 'val_acc' if 'val_acc' in history.history else 'val_accuracy'
    if val_acc_key in history.history:
        val_acc = history.history[val_acc_key]
        plt.plot(epochs, val_acc, 'o-', label='Validation acc')
        final_val_acc = val_acc[-1]
        title += ', Validation accuracy: {:.2f}%'.format(final_val_acc * 100)
    plt.title(title)
    plt.legend()
# ## Convnets for images
# +
# Load the Keras MNIST digits dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print(f'Training images: {train_images.shape}. Training labels: {train_labels.shape}')
# +
# Reshape train and test data
x_train = train_images.reshape((60000, 28, 28, 1))
x_train = x_train.astype('float32') / 255
x_test = test_images.reshape((10000, 28, 28, 1))
x_test = x_test.astype('float32') / 255
# One-hot encoding of expected results
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)
print(f'x_train: {x_train.shape}. y_train: {y_train.shape}')
print(f'x_test: {x_test.shape}. y_test: {y_test.shape}')
# +
# Create a Convnet
# Three conv/pool stages: channel depth grows 32 -> 64 -> 64 while the
# spatial resolution shrinks.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
# Add dense classifier on top of it
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
# +
# Train the convnet
# categorical_crossentropy matches the one-hot encoded labels created above
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=5, batch_size=64)
plot_loss_acc(history)
# +
# Evaluate the model on test data
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f'Test accuracy: {test_acc * 100}%')
# +
# Plot the first 10 digits with associated predictions
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.figure(figsize=(12, 6))
    for i in range(10):
        digit = test_images[i]
        # Reshape image into a 4D tensor of dimensions (1, 28, 28, 1)
        # NOTE(review): "input" shadows the builtin of the same name;
        # harmless in this cell but worth renaming.
        input = digit.reshape(28, 28, 1)[np.newaxis, :]
        prediction = np.argmax(model.predict(input))
        fig = plt.subplot(2, 5, i + 1)
        plt.title('It\'s a {:d}'.format(prediction), fontsize=24)
        plt.imshow(digit, cmap=plt.cm.binary)
# -
# ## Visualizing convnet filters
#
# Inspired by this [Keras blog article](https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html).
# Map layer names to layer objects so layers can be looked up by name below
layer_dict = dict([(layer.name, layer) for layer in model.layers])
print(layer_dict.keys())
# +
from keras.models import load_model
from keras import backend as K
# Set the matplotlib figure size
plt.rc('figure', figsize = (12.0, 12.0))
# Set the learning phase to false, the model is pre-trained.
K.set_learning_phase(False)
# A placeholder for the input images
# (the model's symbolic input tensor, used as the variable for gradient ascent)
input_img = model.input
# Dimensions of the images
img_width = 28
img_height = 28
# A constant size step function for gradient ascent
def constant_step(total_steps, step, step_size = 1):
    """Constant schedule: always return step_size.

    total_steps and step are accepted only so the signature matches the
    step_fn callback expected by visualize_filter; neither affects the
    result.
    """
    return step_size
# Define an initial divisor and decay rate for a varied step function
# This function works better than constant step for the output layer
init_step_divisor = 100
decay = 10
def vary_step(total_steps, step):
    """Decaying schedule: 1 / (init_step_divisor + decay * step).

    total_steps is accepted for signature compatibility with
    constant_step but is not used.
    """
    denominator = init_step_divisor + decay * step
    return 1.0 / denominator
# +
# Function from the Keras blog that normalizes and scales
# a filter before it is rendered as an image
def normalize_image(x):
    """Rescale a float array in place into a displayable uint8 image.

    Standardizes (zero mean, std ~0.1 via the augmented assignments below,
    which mutate the caller's array), recentres on 0.5, clips to [0, 1],
    then scales to the 0-255 byte range.
    NOTE(review): near-duplicate of deprocess_image further down, except
    this version uses K.epsilon() and only transposes for channels-first
    backends.
    """
    # Normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.1
    # Clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # Convert to grayscale image array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# Create a numpy array that represents the image of a filter
# in the passed layer output and loss functions. Based on the
# core parts of <NAME>'s blog post.
def visualize_filter(layer_output, loss, steps = 256, step_fn = constant_step, input_initialization = 'random'):
    """Run gradient ascent on the model input so as to maximize `loss`.

    Args:
        layer_output: NOTE(review): not used in the body — the objective is
            fully described by `loss`; kept for call-site symmetry.
        loss: scalar Keras tensor to maximize (e.g. a mean filter activation).
        steps: number of gradient-ascent iterations.
        step_fn: schedule called as step_fn(steps, i) for each iteration.
        input_initialization: 'random' (default) or 'zeros' starting image.

    Returns:
        A (28, 28, 1) float numpy array — the optimized input image.
    """
    # Compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]
    # Normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # This function returns the loss and grads given the input picture
    iterate = K.function([input_img], [loss, grads])
    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 1)
    # Initialize the input image. Random works well for the conv layers,
    # zeros works better for the output layer.
    input_img_data = np.random.random(input_shape) * 255.
    if input_initialization == "zeros":
        input_img_data = np.zeros(input_shape)
    # Add the leading batch dimension expected by the Keras function
    input_img_data = np.array(input_img_data).reshape(1, 28, 28, 1)
    # Run gradient ascent for the specified number of steps
    for i in range(steps):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step_fn(steps, i)
    final_img = input_img_data[0]
    return final_img
# Define a function that stitches the 28 * 28 numpy arrays
# together into a collage of filters for each layer.
def stitch_filters(layer_filters, y_img_count, x_img_count, tile_width=None, tile_height=None):
    """Arrange per-filter images into a y_img_count x x_img_count collage.

    Args:
        layer_filters: sequence of same-shaped 2-D arrays, row-major indexed
            (filter i * x_img_count + j lands on row i, column j).
        y_img_count: number of tile rows in the collage.
        x_img_count: number of tile columns in the collage.
        tile_width, tile_height: tile dimensions in pixels. Default to the
            module-level img_width / img_height (28) for backward
            compatibility; passing them explicitly generalizes the helper
            to any tile size.

    Returns:
        A 2-D numpy array holding the stitched collage, with a 2-pixel
        zero-valued (black) margin between tiles.
    """
    if tile_width is None:
        tile_width = img_width
    if tile_height is None:
        tile_height = img_height
    margin = 2
    width = y_img_count * tile_width + (y_img_count - 1) * margin
    height = x_img_count * tile_height + (x_img_count - 1) * margin
    stitched_filters = np.zeros((width, height))
    # Fill the picture with our saved filters
    for i in range(y_img_count):
        for j in range(x_img_count):
            img = layer_filters[i * x_img_count + j]
            stitched_filters[(tile_width + margin) * i: (tile_width + margin) * i + tile_width,
                             (tile_height + margin) * j: (tile_height + margin) * j + tile_height] = img
    return stitched_filters
# +
# Start by visualizing the first convolutional layer
layer_name = 'conv2d_1'
layer_filters = []
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Standardize an array in place and rescale it into a uint8 image.

    NOTE(review): near-duplicate of normalize_image above, but with a fixed
    1e-5 epsilon and an unconditional (1, 2, 0) transpose — it therefore
    assumes a channels-first (C, H, W) input; confirm before reuse.
    """
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to RGB array
    x *= 255
    x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# For each filter in this layer
# NOTE(review): this cell looks like a broken leftover — it feeds the
# *symbolic* layer output tensor into deprocess_image (which performs numpy
# operations), and the loop index i is never used. The next cell performs
# the same visualization correctly via visualize_filter.
for i in range(32):
    layer_output = layer_dict[layer_name].output
    img = deprocess_image(layer_output)
    layer_filters.append(img.reshape(28,28))
layer_filters = [normalize_image(image) for image in layer_filters]
# Arrange the 32 filter images into a 4 x 8 grid
layer_image = stitch_filters(layer_filters, 4, 8)
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.imshow(layer_image, cmap = 'gray')
    plt.show()
# +
# Start by visualizing the first convolutional layer
layer_name = 'conv2d_1'
layer_filters = []
# For each filter in this layer
for i in range(32):
    layer_output = layer_dict[layer_name].output
    # Objective: mean activation of the i-th filter map
    loss = K.mean(layer_output[:, :, :, i])
    img = visualize_filter(layer_output, loss)
    layer_filters.append(img.reshape(28,28))
layer_filters = [normalize_image(image) for image in layer_filters]
# 32 filters laid out as a 4 x 8 grid
layer_image = stitch_filters(layer_filters, 4, 8)
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.imshow(layer_image, cmap = 'gray')
    plt.show()
# +
# The second convolutional layer
layer_name = 'conv2d_2'
layer_filters = []
# For each filter in this layer
# NOTE(review): conv2d_2 was built with 64 filters (see the model above);
# this renders only the first 32 so they fit the 4 x 8 grid below.
for i in range(32):
    layer_output = layer_dict[layer_name].output
    loss = K.mean(layer_output[:, :, :, i])
    img = visualize_filter(layer_output, loss)
    layer_filters.append(img.reshape(28,28))
layer_filters = [normalize_image(image) for image in layer_filters]
layer_image = stitch_filters(layer_filters, 4, 8)
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.imshow(layer_image, cmap = 'gray')
    plt.show()
# +
# The final output layer of the model
# Gradient-ascend each of the 10 class scores; a zeros start image and the
# decaying vary_step schedule are used here (per the comments on vary_step,
# they work better for the output layer than the conv-layer defaults).
output_filters = []
for i in range(10):
    output = model.output
    loss = K.mean(output[:, i])
    img = visualize_filter(output, loss,
                           steps = 4096,
                           step_fn = vary_step,
                           input_initialization = 'zeros')
    output_filters.append(img.reshape(28,28))
output_image_raw = stitch_filters(output_filters, 2, 5)
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.imshow(output_image_raw, cmap = 'gray')
    plt.show()
# +
# The above output filters are very grey, which isn't the way the
# original MNIST digits are represented.
def deaverage_digit(digit):
    """Subtract the mean, clamp negatives to zero, then stretch to [0, 255]."""
    centered = np.clip(digit - digit.mean(), 0, 255)
    return centered * (255.0 / centered.max())
# Contrast-stretch each class image, then re-stitch into a 2 x 5 grid
deaveraged_outputs = [deaverage_digit(x) for x in output_filters]
output_image_deaveraged = stitch_filters(deaveraged_outputs, 2, 5)
# Temporary hide Seaborn grid lines
with sns.axes_style('white'):
    plt.imshow(output_image_deaveraged, cmap = 'gray')
    plt.show()
# -
|
deep-learning/Convolutional_Neural_Networks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import os
# Build the data/{train,test}/{0..5} directory tree for the six gesture
# classes. exist_ok=True makes the cell idempotent and also repairs a
# partially created tree: the original version skipped everything whenever
# the top-level "data" folder already existed, so missing subfolders were
# never created.
for _split in ("train", "test"):
    for _label in range(6):
        os.makedirs(os.path.join("data", _split, str(_label)), exist_ok=True)
# +
# train or test
mode = 'train'
directory = 'data/'+mode+'/'
# Open the default webcam (device 0)
cap = cv2.VideoCapture(0)
while True:
    # NOTE(review): the success flag from cap.read() is discarded — frame is
    # None if the camera fails, which would crash cv2.flip; confirm acceptable.
    _, frame = cap.read()
    frame = cv2.flip(frame, 1) #mirror image
    # Getting count of existing images
    # (each count doubles as the filename for the next captured sample)
    count = {'zero': len(os.listdir(directory+"/0")),
             'one': len(os.listdir(directory+"/1")),
             'two': len(os.listdir(directory+'/2')),
             'three': len(os.listdir(directory+'/3')),
             'four': len(os.listdir(directory+'/4')),
             'five': len(os.listdir(directory+'/5'))}
    # On-screen HUD: current mode and per-class image counts
    cv2.putText(frame, "MODE : "+mode, (10,10), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "IMAGE COUNT", (10,22), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "ZERO : "+str(count['zero']), (10,34), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "ONE : "+str(count['one']), (10,46), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "TWO : "+str(count['two']), (10,58), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "THREE : "+str(count['three']), (10,70), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "FOUR : "+str(count['four']), (10,82), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    cv2.putText(frame, "FIVE : "+str(count['five']), (10,94), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,255), 1)
    # coordinates of the ROI
    x1 = int(0.6*frame.shape[1])
    y1 = 120
    x2 = frame.shape[1]-10
    y2 = int(0.5*frame.shape[1])
    #Drawing the ROI
    # The increment/decrement by 1 is to compensate for the bounding box
    cv2.rectangle(frame, (x1-1, y1-1), (x2+1, y2+1), (255, 0, 0), 1)
    # Extracting the ROI
    roi = frame[y1:y2, x1:x2]
    roi = cv2.resize(roi, (68, 68))
    cv2.imshow("Frame", frame)
    #do the processing after capturing the image
    # grayscale + fixed-threshold binarization before saving the sample
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    _, roi = cv2.threshold(roi, 120, 255, cv2.THRESH_BINARY)
    cv2.imshow("ROI", roi)
    # Poll the keyboard: digits 0-5 save the ROI under that class; ESC quits
    interrupt = cv2.waitKey(10)
    if interrupt & 0xFF == 27: #esc key
        break
    if interrupt & 0xFF == ord('0'):
        cv2.imwrite(directory+'0/'+str(count['zero'])+'.jpg', roi)
    if interrupt & 0xFF == ord('1'):
        cv2.imwrite(directory+'1/'+str(count['one'])+'.jpg', roi)
    if interrupt & 0xFF == ord('2'):
        cv2.imwrite(directory+'2/'+str(count['two'])+'.jpg', roi)
    if interrupt & 0xFF == ord('3'):
        cv2.imwrite(directory+'3/'+str(count['three'])+'.jpg', roi)
    if interrupt & 0xFF == ord('4'):
        cv2.imwrite(directory+'4/'+str(count['four'])+'.jpg', roi)
    if interrupt & 0xFF == ord('5'):
        cv2.imwrite(directory+'5/'+str(count['five'])+'.jpg', roi)
cap.release()
cv2.destroyAllWindows()
# -
|
Hand_gusture.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Construct school interaction networks for reduced class sizes
# +
import networkx as nx
import pandas as pd
from os.path import join
import os
# network construction utilities
from scseirx import construct_school_network as csn
# for progress bars
from ipywidgets import IntProgress
from IPython.display import display
import time
# -
# In this script, contact networks of "average" Austrian schools, depending on school type are created. These characteristics (mean number of classes, mean students per class) were determined from [statistics about Austrian schools](https://www.bmbwf.gv.at/Themen/schule/schulsystem/gd.html) (year 2017/18, page 10) and confirmed in interviews with a range of Austrian teachers and school directors conducted in December 2020. The school types modeled here are
# * Primary schools (Volksschule), ```primary```
# * Primary schools with daycare (Volksschule mit Ganztagesbetreuung), ```primary_dc```
# * Lower secondary schools (Unterstufe), ```lower_secondary```
# * Lower secondary schools with daycare (Unterstufe mit Ganztagesbetreuung), ```lower_secondary_dc```
# * Upper secondary schools (Oberstufe), ```upper_secondary```
# * Secondary schools (Gymnasium), ```secondary```
# * Secondary schools with daycare (Gymnasium mit Ganztagesbetreuung), ```secondary_dc```
#
# For every school type, one network is created.
#
# **NOTE**: A more detailed description about the design decisions entering the modeling of each school type can be found in the document ```school_type_documentation```. In the following, "students" always refers to the number of students per class.
# ## Background information
# ### School characteristics
# Descriptive school statistics are taken from [statistics](https://www.bmbwf.gv.at/Themen/schule/schulsystem/gd.html) about Austrian schools from 2017/18 and from a series of stakeholder-interviews with Austrian teachers and school directors conducted in December 2020.
# different age structures in Austrian school types
# NOTE(review): there is no 'secondary_dc' entry here although the markdown
# above lists that school type; confirm downstream consumers never look it up.
age_brackets = {'primary':[6, 7, 8, 9],
                'primary_dc':[6, 7, 8, 9],
                'lower_secondary':[10, 11, 12, 13],
                'lower_secondary_dc':[10, 11, 12, 13],
                'upper_secondary':[14, 15, 16, 17],
                'secondary':[10, 11, 12, 13, 14, 15, 16, 17],
               }
# average number of classes per school type and students per class
# (numbers derived from the Austrian school statistics 2017/18 cited in the
# markdown above; daycare variants share the base type's characteristics)
school_characteristics = {
    # Primary schools
    # Volksschule: schools 3033, classes: 18245, students: 339382
    'primary': {'classes':8, 'students':19},
    'primary_dc': {'classes':8, 'students':19},
    # Lower secondary schools
    # Hauptschule: schools 47, classes 104, students: 1993
    # Mittelschule: schools 1131, classes: 10354, students: 205905
    # Sonderschule: schools 292, classes: 1626, students: 14815
    # Total: schools: 1470, classes: 12084, students: 222713
    'lower_secondary': {'classes':8, 'students':18},
    'lower_secondary_dc': {'classes':8, 'students':18},
    # Upper secondary schools
    # Oberstufenrealgymnasium: schools 114, classes 1183, students: 26211
    # BMHS: schools 734, classes 8042, students 187592
    # Total: schools: 848, classes 9225, students: 213803
    'upper_secondary': {'classes':10, 'students':23}, # rounded down from 10.8 classes
    # Secondary schools
    # AHS Langform: schools 281, classes 7610, students 179633
    'secondary': {'classes':28, 'students':24}, # rounded up from 27.1 classes
}
# ### Characteristics of Austrian families
# Family sizes with children < 18 years old from the [Austrian microcensus 2019](https://www.statistik.at/web_de/statistiken/menschen_und_gesellschaft/bevoelkerung/haushalte_familien_lebensformen/familien/index.html) (Note: 63.45 % of all households have no children), file ```familien_nach_familientyp_und_zahl_der_kinder_ausgewaehlter_altersgruppen_```:
#
# * 1 child: 48.15 % (81.95 % two parents, 18.05 % single parents)
# * 2 children: 38.12 % (89.70 % two parents, 10.30% single parents)
# * 3 children: 10.69 % (88.26 % two parents, 11.74 % single parents)
# * 4 or more children: 3.04 % (87.44 % two parents, 12.56 % single parents)
# +
# given the precondition that the family has at least one child, how many
# children does the family have?
# (keys: number of children, 4 meaning "4 or more"; values sum to 1)
p_children = {1:0.4815, 2:0.3812, 3:0.1069, 4:0.0304}
# probability of being a single parent, depending on the number of children
# outer key: number of children; inner key: number of parents (1 = single
# parent, 2 = couple)
p_parents = {1:{1:0.1805, 2:0.8195},
             2:{1:0.1030, 2:0.8970},
             3:{1:0.1174, 2:0.8826},
             4:{1:0.1256, 2:0.8744}
             }
# -
# General household sizes of households with one family (2.51% of households have more than one family) [Austrain household statistics 2019](https://www.statistik.at/web_de/statistiken/menschen_und_gesellschaft/bevoelkerung/haushalte_familien_lebensformen/haushalte/index.html), files
# * ```ergebnisse_im_ueberblick_privathaushalte_1985_-_2019```
# * ```familien_nach_familientyp_und_zahl_der_kinder_ausgewaehlter_altersgruppen_```
#
# Percentages:
# * single $\frac{(3950 - 2388)}{3959}$ = 39.54 %
# * couple, no kids $\frac{1001}{3959}$ = 25.28 %
# * single parent with one kid < 18: $\frac{277}{3950} \cdot \frac{87.0}{137.4}$ = 4.44 %
# * single parent with two kids < 18: $\frac{277}{3950} \cdot \frac{37.3}{137.4}$ = 1.9%
# * single parent with three or more kids < 18: $\frac{277}{3950} \cdot \frac{13.1}{137.4}$ = 0.67%
# * couples with one kid < 18: $\frac{1050}{3950} \cdot \frac{252.4}{606.7}$ = 11.06 %
# * couples with two kids < 18: $\frac{1050}{3950} \cdot \frac{255.5}{606.7}$ = 11.19 %
# * couples with three or more kids <18: $\frac{1050}{3950} \cdot \frac{98.9}{606.7}$ = 4.33 %
# * households with three adults (statistic: household with kids > 18 years): 1.59 %
# probability of a household having a certain size, independent of having a child
# teacher_p_adults: number of adults in a teacher's household -> probability
teacher_p_adults = {1:0.4655, 2:0.5186, 3:0.0159}
# teacher_p_children: P(number of children | number of adults);
# outer key: adults, inner key: children (each inner dict sums to 1)
teacher_p_children = {1:{0:0.8495, 1:0.0953, 2:0.0408, 3:0.0144},
                      2:{0:0.4874, 1:0.2133, 2:0.2158, 3:0.0835},
                      3:{0:1, 1:0, 2:0, 3:0}}
# ### Link type <-> contact type mapping
# The simulation relies on specified contact strengths (close, intermediate, far, very far) to determine infection risk. Nevertheless, depending on the setting, there are a multitude of different contacts (link types) between different agent groups and during different activities. The below dictionary provides a complete list of all link types that exist in the school setting, and a mapping of every link type to the corresponding contact type.
# +
# Maps every link type occurring in the school setting to one of the
# contact strengths (close / intermediate / far) used by the simulation
# to determine infection risk.
contact_map = {
    'student_household':'close',
    'student_student_intra_class':'far',
    'student_student_table_neighbour':'intermediate',
    'student_student_daycare':'far',
    'teacher_household':'close',
    'teacher_teacher_short':'far',
    'teacher_teacher_long':'intermediate',
    'teacher_teacher_team_teaching':'intermediate',
    'teacher_teacher_daycare_supervision':'far',
    'teaching_teacher_student':'far',
    'daycare_supervision_teacher_student':'intermediate'
    }
# Note: student_student_daycare overwrites student_student_intra_class and
# student_student_table_neighbour
# Note: teacher_teacher_daycare_supervision and teacher_teacher_team_teaching
# overwrite teacher_teacher_short and teacher_teacher_long
# -
# ### Teacher social contacts
# Network density scores from an [article about interactions between teachers](https://academic.oup.com/her/article/23/1/62/834723?login=true) for "socialize with outside of school" (```r_friend```) and "engage in conversation regularly" (```r_conversation```).
# density of the "socialize outside of school" teacher network
r_teacher_friend = 0.059
# density of the "engage in conversation regularly" teacher network
r_teacher_conversation = 0.255
# ## Compose representative schools with reduced class sizes
# +
# destination folder for all generated contact networks
dst = '../../data/contact_networks/reduced_class_size'
# in principle there is functionality in place to generate contacts
# between students in different classes, depending on the floor the
# classes are on. We currently don't use this functionality, as
# schools all implement measures to keep between-class-contacts to
# a minimum- Therefore floor specifications are not important for our
# school layout and we just assume that all classes are on the same
# floor.
N_floors = 1
school_types = ['primary', 'primary_dc', 'lower_secondary','lower_secondary_dc',
                'upper_secondary', 'secondary']
for school_type in school_types:
    print(school_type)
    # one output subfolder per school type (created on first run)
    school_dst = join(dst, school_type)
    try:
        os.mkdir(school_dst)
    except FileExistsError:
        pass
    # sweep the fraction of students removed per class from 0% to 90%
    for class_size_reduction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
        print(class_size_reduction)
        N_classes = school_characteristics[school_type]['classes']
        class_size = school_characteristics[school_type]['students']
        school_name = '{}_classes-{}_students-{}'.format(school_type,\
                N_classes, class_size)
        # generate the contact graph given all the information about the
        # school layout, household characteristics and contact character-
        # istics of teachers
        G, teacher_schedule, student_schedule = csn.compose_school_graph(\
                        school_type, N_classes, class_size, N_floors, p_children,
                        p_parents, teacher_p_adults, teacher_p_children,
                        r_teacher_conversation, r_teacher_friend)
        # map the link types to contact types
        csn.map_contacts(G, contact_map)
        # for the interactive visualization, we also need a list of all
        # agents (nodes) in the contact graph and their attributes
        node_list = csn.get_node_list(G)
        node_list.to_csv(join(school_dst,'{}_node_list.csv'.format(school_name)),
                         index=False)
        # remove a random number of students from the classes every day
        csn.reduce_class_size(class_size_reduction, class_size, N_classes,
                              G, student_schedule)
        # save the graph
        # NOTE(review): the .bz2 suffix suggests the pickle is written
        # compressed — confirm against the networkx write_gpickle docs.
        nx.readwrite.gpickle.write_gpickle(G, \
                join(school_dst,'{}_removed-{}_network.bz2'\
                .format(school_name, class_size_reduction)), protocol=4)
        # for the interactive visualization, we also need the respective
        # schedules of students and teachers for teaching days (i.e. non-
        # weekends)
        for schedule, agent_type in zip([teacher_schedule, student_schedule],
                                        ['teachers', 'students']):
            schedule.to_csv(join(school_dst,'{}_schedule_removed-{}_{}.csv'
                            .format(school_name, class_size_reduction, agent_type)))
|
code/sensitivity_analysis/construct_school_networks_reduced_class_sizes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GB9hB7hZjNFM"
# ###**Project: evaluating salary of developers over time**
#
# + [markdown] id="3lxXlP9CjVLn"
# ##**Business understanding**
#
# **My goal in this notebook is to evaluate how developer's salaries and other characteristics have changed over time, according to Stackoverflow’s Annual Developer Survey of 2015 and 2020.**
#
# - Average salary 2015 x 2020
# - Profile and salary by Education
# - Profile and salary by age
# - Profile and salary by years coding
#
#
# + [markdown] id="3ivbmWQMdGWi"
# **Set up Google directory**
# + colab={"base_uri": "https://localhost:8080/"} id="Kz7c6afsdIrs" outputId="1a578f3b-ce94-4209-fb63-09d952b2d194"
# Mount Google Drive so the survey CSVs are readable from this notebook
from google.colab import drive
drive.mount('/content/drive')
# + id="_enjCJx1dRr3"
import os
workdir_path = '/content/drive/My Drive/Project1' # Set this to the folder holding the input files (train and test)
os.chdir(workdir_path)
# + [markdown] id="XnQB080BdbNL"
# **Import necessary libraries**
#
#
#
#
#
#
#
# + id="f0i32NyQdb_g"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# %matplotlib inline
# + [markdown] id="xGS4st5OmSHQ"
# ###**Data Understanding**
# + colab={"base_uri": "https://localhost:8080/", "height": 632} id="-9nD6L9vdgmt" outputId="99cb2ec4-2007-41c0-b416-0d68c9a4e8a7"
#Load 2020 file and a quick look
df = pd.read_csv('survey_results_public.csv')
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="98PPTxBmdgxJ" outputId="9bef6679-aa9f-4585-cc6a-631bdedde689"
# (rows, columns) of the 2020 survey
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 797} id="tAqAOca8dg0P" outputId="a418ecbd-357a-48b4-bd53-b97e47db38f8"
#Load 2015 file and a quick look
# header=1: take the column names from the file's second row
df2 = pd.read_csv('2015 Stack Overflow Developer Survey Responses.csv', header=1)
df2.head()
# + colab={"base_uri": "https://localhost:8080/"} id="DCyrzvOPdg2r" outputId="18c3226a-ea73-4eab-c913-553b1e019181"
df2.shape
# + [markdown] id="F7VB-2lMwiTu"
# The files includes survey of 2015 and 2020 related to some developers questions. The focus of this analysis is: average salary, salary and Education, salary and age and salary and years coding.
#
# We have all of this information in our dataset, we just need to clean and prepare our data.
# + [markdown] id="vlnf_4WXds8o"
# ###**Data preparation**
# + [markdown] id="1vRWqh1od2wj"
# **First, df (2020 survey)**
# + colab={"base_uri": "https://localhost:8080/"} id="oyyJKGNndg6O" outputId="5f51cb6e-32b4-4326-8ffc-dcd1c5c68d8e"
#Quick look at the columns
df.columns
# + colab={"base_uri": "https://localhost:8080/"} id="VqQ5F7v2dg9I" outputId="c867ca11-a60d-418c-e7d6-81912f2a58a2"
#Select only the important columns to our analysis and check null values
# .copy() makes new_df an independent DataFrame rather than a view of df,
# so the later in-place dropna/assignment steps are guaranteed to apply
# (and pandas' SettingWithCopyWarning is avoided).
new_df = df[['Age', 'ConvertedComp', 'YearsCodePro', 'Country', 'EdLevel']].copy()
new_df.isnull().mean()
# + [markdown] id="pZAYIHMSepMQ"
# We can see that we have more missing values in the most important column to our analysis, ConvertedComp. This way, it doesn't seem to be a good idea fill in almost 50% of our values with the mean or other statistic about the data - it can bias our results. The chosen approach is to drop missing values and keep only the original ones.
# + colab={"base_uri": "https://localhost:8080/"} id="0Apqnn_kdg_p" outputId="7d7c6d5a-384e-455e-f932-b54c15543093"
#Drop missing values by ConvertedComp
# (keeps only respondents that reported a salary)
new_df.dropna(subset=['ConvertedComp'],inplace=True)
new_df.isnull().mean()
# + [markdown] id="RwYPQUv4ej-K"
# Looking into Age
# + colab={"base_uri": "https://localhost:8080/"} id="y2m_Y047dhCA" outputId="213db8f5-cb08-41b0-9456-141ebc0e76b4"
# format stats as plain floats (suppresses scientific notation)
new_df["Age"].describe().apply(lambda x: format(x, 'f'))
# + [markdown] id="_BEsvkBte3nn"
# We can already see from min and max values (1 - 279) that some data are inconsistent. Anyway, lets check tukey test in this variable.
# + colab={"base_uri": "https://localhost:8080/"} id="ix2Sow28fBtl" outputId="21a9a9db-6946-49f9-93d9-ce8471927239"
#Tukey test on Age data to check min and max values to identify outliers
# Fetch both quartiles in a single quantile() call, then derive the
# 1.5 * IQR whisker bounds.
Q1, Q3 = new_df['Age'].quantile([0.25, 0.75])
IQR = Q3 - Q1
Min, Max = Q1 - 1.5*IQR, Q3 + 1.5*IQR
print('Min value {} , Max value {}'.format(Min,Max))
# + colab={"base_uri": "https://localhost:8080/"} id="CZpBJHkcfIQ_" outputId="83669a81-4e55-4d5c-84a1-da7be0f7115c"
# share of respondents older than the upper Tukey bound (~51)
new_df[new_df['Age'] > 51]['Age'].count() / new_df['Age'].shape[0]
# + colab={"base_uri": "https://localhost:8080/"} id="UuYT4ihdfLW-" outputId="e19e48c7-f11d-4543-c99d-37e6d0e83799"
new_df[new_df['Age'] > 60]['Age'].count() / new_df['Age'].shape[0]
# + [markdown] id="KJWNhSLm0CNO"
# Since we still have a substantial percentage of data between 51 and 60, let's keep the threshold to 60.
# + colab={"base_uri": "https://localhost:8080/"} id="QqeGrhwbzoA_" outputId="5de2346e-1476-4094-915d-60484da09465"
new_df[new_df['Age'] < 11]['Age'].count() / new_df['Age'].shape[0]
# + colab={"base_uri": "https://localhost:8080/"} id="Ta4u1HYVzoG6" outputId="e160cbd3-c5e2-40ab-b622-0075382e0586"
new_df[new_df['Age'] < 15]['Age'].count() / new_df['Age'].shape[0]
# + [markdown] id="pFZnP2TVfQoU"
# For the minimum value, even if we change from 11 to 15 we still have less than 1% of data. Considering that the more older more plausible is the age to start working, we are going to consider more than 15 as minimum age.
#
# + colab={"base_uri": "https://localhost:8080/"} id="7hhv4PdnfL3O" outputId="3fd96bd6-c5e0-4b34-8e42-57ffa8436344"
# keep ages strictly between 15 and 60 (both bounds exclusive)
new_df = new_df.query('Age < 60 & Age > 15')
new_df.Age.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="g4q_8AU62YB9" outputId="9b714554-a620-4fe3-a512-b3b234f85ad2"
new_df.isnull().mean()
# + [markdown] id="FKURE3mf3OOV"
# After the modifications above we don't have missing data on Age anymore.
# + [markdown] id="_UZLLX4IfYNg"
# Looking at ConvertedComp - compensation in USD
# + colab={"base_uri": "https://localhost:8080/"} id="mOfyfEuvfZdh" outputId="c4ce2593-9976-4d34-a26b-1bb5b9eda878"
# format stats as plain floats (suppresses scientific notation)
new_df["ConvertedComp"].describe().apply(lambda x: format(x, 'f'))
# + colab={"base_uri": "https://localhost:8080/"} id="KhhqD8E-ffPG" outputId="4e18f014-3f23-45eb-aa57-0195aa6996b8"
#Tukey rule for Outliers
Q1 = new_df['ConvertedComp'].quantile(0.25)
Q3 = new_df['ConvertedComp'].quantile(0.75)
IQR = Q3 -Q1
Max = Q3 + 1.5*IQR
Min = Q1 - 1.5*IQR
print('Min value {} , Max value {}'.format(Min,Max))
# + colab={"base_uri": "https://localhost:8080/"} id="1ym90Qs_hPLd" outputId="ca8e4446-f35a-43e8-afab-bc89ac1c4d44"
#Quantity of values above 200000 (changed from 197431.5 to 200000)
new_df[new_df['ConvertedComp'] > 200000]['ConvertedComp'].count()
# + colab={"base_uri": "https://localhost:8080/"} id="orILgYa6hSPu" outputId="c2933913-59c6-47d3-8afc-5418b42b61f1"
#Percentage of values above 350000
new_df[new_df['ConvertedComp'] > 350000].shape[0] / new_df['ConvertedComp'].shape[0]
# + [markdown] id="JyjSR5HohXg4"
# In order to keep our data of ConvertedComp closer to the range of tukey rule we are going to set out threshold to 350.000 (losing about 4% of that column)
# + colab={"base_uri": "https://localhost:8080/"} id="aJYyiJYYhYiQ" outputId="7b56c21b-190c-4691-8dd5-b987667511f6"
# drop compensations at or above the 350k threshold chosen above
new_df = new_df.query('ConvertedComp < 350000')
new_df.ConvertedComp.describe()
# + [markdown] id="iU-12oRchiXm"
# Looking at YearsCodePro (years coding professionally)
# + colab={"base_uri": "https://localhost:8080/"} id="ABDnCws4hcXW" outputId="e0b8e3ee-eb37-45ae-8da8-d27f1e8b33f6"
# raw distribution — values are strings, including two non-numeric labels
new_df['YearsCodePro'].value_counts()
# + [markdown] id="JGvQ0OfrhvRc"
# To keep only numerical values, lets transform "less than 1 year" to 1 year and
# "more than 50 years" to 50
# + colab={"base_uri": "https://localhost:8080/"} id="ZvyRrV0FhlDY" outputId="b1889cb6-0242-4668-9866-afed3c3a1449"
# Use .loc for the conditional assignments: the original chained indexing
# (new_df['YearsCodePro'][mask] = value) raises SettingWithCopyWarning and
# pandas does not guarantee the write actually reaches new_df.
new_df.loc[new_df['YearsCodePro'] == 'Less than 1 year', 'YearsCodePro'] = 1
new_df.loc[new_df['YearsCodePro'] == 'More than 50 years', 'YearsCodePro'] = 50
new_df['YearsCodePro'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="-JDV1U9zszDY" outputId="59195a00-49b9-4787-e8a9-d1592b81ddd2"
new_df['YearsCodePro'].isnull().mean()
# + id="FMFRskwfiyu-"
#Since we have few percentage of missing and it won't influence our analysis, we'll drop them
new_df.dropna(subset=['YearsCodePro'], inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="yy_4GAfJi6uh" outputId="42279479-7628-4c8b-e15b-9e39261dcffd"
#Changing the column to int
# (all remaining values are numeric strings after the replacements above)
new_df['YearsCodePro'] = new_df['YearsCodePro'].astype('int')
new_df['YearsCodePro'].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="KF2QCbRhi7Nm" outputId="a0632ab0-4fb2-4dd7-a3dc-fc99fe549279"
new_df['YearsCodePro'].plot(kind ='box')
# + colab={"base_uri": "https://localhost:8080/"} id="y30hR44YjAID" outputId="327940b0-7de4-40db-ee3b-f0fc872a24b6"
#Tukey rule for outliers
Q1 = new_df['YearsCodePro'].quantile(0.25)
Q3 = new_df['YearsCodePro'].quantile(0.75)
IQR = Q3 -Q1
Max = Q3 + 1.5*IQR
Min = Q1 - 1.5*IQR
print('Min value {} , Max value {}'.format(Min,Max))
# + colab={"base_uri": "https://localhost:8080/"} id="63v-lKuQjLx7" outputId="d8438141-4f9d-4518-ee59-d21195918615"
np.sort(new_df[new_df['YearsCodePro'] > 23]['YearsCodePro'].unique())
# + [markdown] id="9Tht078ojQ8D"
# After checking the above-listed values, we found a problem in YearsCodePro = 47, the Age is only 2 years higher than 47
# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="fSM86fokjMZc" outputId="4b770acf-152f-482c-f49a-9f0c1c88a38a"
new_df[new_df['YearsCodePro'] == 47]
# + colab={"base_uri": "https://localhost:8080/"} id="oeTHRAD7jUJh" outputId="18d5de80-0867-4ddd-e592-57e2b2ca7b16"
# Drop the inconsistent row found above (YearsCodePro == 47 while the
# respondent's Age is only 2 years higher — implausible data point).
new_df = new_df[new_df.YearsCodePro != 47]
new_df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="24ae6EJ-jXJQ" outputId="d5c74194-d101-4ef0-efea-4f3063f9769a"
#Numerical Data from df (2020 survey)
new_df.describe()
# + [markdown] id="jHcwG7XjjhJw"
# Looking into "Country" data
# + colab={"base_uri": "https://localhost:8080/"} id="6Zvv5LUDjgTP" outputId="11b30635-99cb-457b-9d27-f13b88dbca30"
new_df.Country.value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="iduvPTNWjjfX" outputId="f057a3fe-10d2-4d3c-f2c7-9c2d5de997ae"
list_countries = list(new_df.Country.value_counts().loc[lambda x : x > 100].to_frame().index)
(new_df.shape[0] - new_df[new_df['Country'].isin(list_countries)].shape[0]) / new_df.shape[0]
# + [markdown] id="jCO-SfgejpuE"
# We are going to discard countries with fewer than 100 respondents, since having so few samples to train on could bias a future model
# + id="de9g8m43jm1a"
new_df = new_df[new_df['Country'].isin(list_countries)]
# + [markdown] id="5vxiDXjejvsF"
# Looking into EdLevel (Education data)
# + colab={"base_uri": "https://localhost:8080/"} id="BHIWBiHJjtEz" outputId="493569a2-ca36-4bf0-a208-b0c8c4b1c564"
new_df.isnull().mean()
# + [markdown] id="xh8Su6WL4nBu"
# Since we have less than 2% of missing values in Ed. Level, we are going to drop them (it is a very small percentage and won't affect our analysis).
#
#
#
# + id="TCau9rmYj4wD"
#Dropping remaining missing values from new_df
new_df.dropna(subset=['EdLevel'], inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="vLV_zjpzkAmp" outputId="b42012c7-6f86-43f1-b69e-3e5239187878"
new_df['EdLevel'].value_counts(normalize=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 706} id="I1tlCDvdkKV0" outputId="b4f7aef5-0aad-49d5-9243-a2cfa6713983"
new_df['EdLevel'].value_counts(normalize=True).plot(kind='bar')
# + [markdown] id="CVGBXhAlj2GZ"
# **Clean df2 (2015 survey)**
# + colab={"base_uri": "https://localhost:8080/", "height": 763} id="Sgslp5w1kVU5" outputId="92947b0d-bce7-44f3-e714-7386ab38bde2"
df2.head()
# + colab={"base_uri": "https://localhost:8080/"} id="mkn7o53mkXFd" outputId="fcfe4612-47f4-416d-f136-1f52230e517e"
df2.shape
# + [markdown] id="plaIdcbDC-a3"
# Since we have too many columns, let's check columns with values missing in df2 (dataframe 2015) > 65%
# + colab={"base_uri": "https://localhost:8080/"} id="WAt_ysCpC_sT" outputId="5e6bdf8c-51c9-4fa6-c50a-fa23bb0f6bf8"
#columns with values missing in df2 (dataframe 2015) > 65%
missing_df2 = list(df2.columns[df2.isnull().mean() > 0.65])
missing_df2
# + colab={"base_uri": "https://localhost:8080/"} id="xWBGNikADG3d" outputId="62db1bfc-5ecb-4d6a-eb36-809b3bfb92fe"
#Drop the columns with more than 65% of missing values in df2 (dataframe 2015)
new_df2 = df2.drop(missing_df2, axis=1)
new_df2.shape
# + colab={"base_uri": "https://localhost:8080/"} id="kBK5IPFyDJ_9" outputId="f309ddac-6144-4122-fc5a-8f44ceee7ae8"
#Select only the important columns to our analysis
new_df2 = new_df2[['Country', 'Age','Years IT / Programming Experience', 'Compensation: midpoint']]
new_df2.shape
# + [markdown] id="1ZVszUVMDVEp"
# From the original DataFrame we can see the Education information in the columns named as Training & Education: (as prefix). Then, we are going to separate these columns in another DataFrame
# + colab={"base_uri": "https://localhost:8080/"} id="SJm2KZXHDSST" outputId="e1b25e13-6ea2-4046-9a4d-9b581cbfd833"
educ_list = list(df2.columns[df2.columns.str.contains('Training & Education:')])
educ_list
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="vzyyUt2GDXYP" outputId="a0671849-57bb-4738-d315-c574944804fa"
#Concatenating both Dataframes
educ_df2 = df2[educ_list]
new_df2 = pd.concat([new_df2, educ_df2], axis=1)
new_df2.head()
# + colab={"base_uri": "https://localhost:8080/"} id="xfZ7ERSlDgBb" outputId="4f7760d0-1e67-4736-fb78-f0bf7ffa3aae"
#Checking missing values from new_df2 dataframe
new_df2.isnull().mean()
# + [markdown] id="3w_5mIsGDq1E"
# We can see that we have about 25% of missing values in the most important column to our analysis, Compensation: Midpoint. This way, it doesn't seem to be a good idea fill in 25% of our values with the mean or other statistic about the data - it can bias our results. The chosen approach is to drop missing values and keep only the original ones, as we did in df related to 2020 survey
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="e1rohCa_DnHQ" outputId="950aa831-09e1-48e0-ccef-d215cb482471"
new_df2.dropna(subset=['Compensation: midpoint'], inplace=True)
new_df2.isnull().sum()
# + [markdown] id="qDYMQ7vUDxrE"
# We still have too many missing values, mainly in Training & Educations columns. This is due to the method used to fill in these columns, which are only filled if the person is into that category, and the remaining columns from training and education can be all missing
# + colab={"base_uri": "https://localhost:8080/"} id="-MR3fwUhDt66" outputId="c5168071-4bce-42f7-c7d9-a0a5226de4f8"
new_df2.dropna(subset=['Country', 'Age', 'Years IT / Programming Experience'], inplace=True)
new_df2.isnull().sum()
# + [markdown] id="kY1ju5TkEI0P"
# Looking into Age
# + colab={"base_uri": "https://localhost:8080/"} id="3PCEBwB6D_Yg" outputId="44c52095-ad27-46e7-ce37-5c2302eaf5c7"
new_df2.Age.value_counts(normalize=True, dropna=True)
# + [markdown] id="2rmsV2K7EUQh"
# Looking into Years IT / Programming Experience
# + colab={"base_uri": "https://localhost:8080/"} id="BqPkhwBUELUe" outputId="bedd82b1-c442-4cdc-ba7d-6c56641a4633"
new_df2['Years IT / Programming Experience'].value_counts(normalize=True, dropna=True)
# + [markdown] id="7F-Z-0VlEj-D"
# Looking into Compensation (MidPoint)
# + colab={"base_uri": "https://localhost:8080/"} id="pO_NNOuwEgUt" outputId="cd830cf3-b38a-4735-e612-0eb3d3870983"
new_df2['Compensation: midpoint'].value_counts(normalize=True, dropna=True)
# + [markdown] id="OyYuBrU4EqIQ"
# We can delete "rather not say" data since it does not mean anything to our analysis
#
# We will change Unemployed to 0
# + id="lQh6HrqYEnW5"
# Remove 'Rather not say' answers (carry no salary information), map
# 'Unemployed' to 0, then cast the column to int.
new_df2 = new_df2[new_df2['Compensation: midpoint'] != 'Rather not say']
# Use .loc for the assignment: chained indexing (df[col][mask] = value)
# raises SettingWithCopyWarning in pandas and may write to a copy.
new_df2.loc[new_df2['Compensation: midpoint'] == 'Unemployed', 'Compensation: midpoint'] = 0
new_df2['Compensation: midpoint'] = new_df2['Compensation: midpoint'].astype('int')
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="xWSbo_yzEtmE" outputId="1a8e497c-da95-47ea-c451-2ac7c99aafa4"
#overview of Compensation Midpoint (but zero values)
new_df2[new_df2['Compensation: midpoint'] != 0].describe()
# + [markdown] id="Me0LURDaE0Bt"
# Looking into Country
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="hs1_NEr0EwqU" outputId="bf665000-e3e9-4361-f614-a6926db2d6a1"
#as did in the first dataframe, let's keep countries with more than 100 samplings
new_df2.Country.value_counts().loc[lambda x : x > 100].to_frame()
# + id="jjT0vOGFE15d"
list_countries2 = list(new_df2.Country.value_counts().loc[lambda x : x > 100].to_frame().index)
new_df2 = new_df2[new_df2['Country'].isin(list_countries2)]
# + [markdown] id="xUM2MCzjFBpU"
# Looking into Training and Education variables
# + id="hhDv4zLMFA-C"
# Aggregate the Training & Education indicator columns: for each column,
# count how many respondents ticked it (non-null entries mark membership
# in that category).
Total_educ = {col: new_df2[col].notnull().sum() for col in educ_list}
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="cuq0xHHRE-qT" outputId="d1e69940-b787-4938-e0ce-b451b5cba512"
Count_educ_2015 = pd.DataFrame(list(Total_educ.items()),columns = ['Education','Quantity'])
Count_educ_2015.sort_values(by='Quantity', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="M2iLRzTTFGPc" outputId="c1ca2661-5631-47fb-f6d3-78dec28c9b14"
Count_educ_2015.set_index('Education', inplace=True)
Count_educ_2015.sort_values(by='Quantity', ascending=False).plot(kind='bar')
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="8R2BdoIhFjq0" outputId="a45ed692-6ea9-4407-f786-ce6dc3684f07"
normalized_educ = Count_educ_2015 / Count_educ_2015.Quantity.sum()
normalized_educ.sort_values(by='Quantity', ascending=False).plot(kind='bar')
# + [markdown] id="wZ5qagasFqjL"
# ##**Data analysis and project motivation answers - 2015 x 2020**
# + [markdown] id="kZvL3x0AF1Ob"
# **1 - Average Salary 2015 x 2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="8eoa5fIWFnbY" outputId="751e8c90-52b3-4658-c4fd-2cb621416e2d"
#Average salary 2015
new_df2['Compensation: midpoint'].describe().to_frame()
# + colab={"base_uri": "https://localhost:8080/"} id="LMwMf9taF4Li" outputId="50fd5560-f700-47d5-e64a-10dad179a583"
avg_sal_2015 = new_df2['Compensation: midpoint'].describe().to_frame().loc['mean'][0]
print("{:.2f}".format(avg_sal_2015))
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ut6S6-ztGFrO" outputId="9635a065-9af9-4950-850b-696b1fcd61f6"
#Average salary 2020
new_df['ConvertedComp'].describe().to_frame()
# + colab={"base_uri": "https://localhost:8080/"} id="1JBgPCGRF-R6" outputId="c0f6efe0-8158-4332-b7d7-b95a91a8a60b"
avg_sal_2020 = new_df['ConvertedComp'].describe().to_frame().loc['mean'][0]
print("{:.2f}".format(avg_sal_2020))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="GghflMSGGTtk" outputId="5fd9595b-43ea-4ea2-e723-809f391b58ff"
Dif_avg_sal = avg_sal_2020 - avg_sal_2015
"The difference of average salary between 2015 and 2020 is: {:.2f}".format(Dif_avg_sal)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="kh7w8VbDGlSV" outputId="db5278a1-4d1d-427f-886a-70cfa0987c3c"
"The salaries in 2020 are {:.2f} % higher in average.".format(Dif_avg_sal / avg_sal_2015 * 100)
# + [markdown] id="2LDjJrIGnnsa"
# We can clearly see that salaries are growing over time.
# + [markdown] id="gcK5kvCMLqGY"
# **2 - Education profile 2015 x 2020**
# + [markdown] id="WowVa7KjLwDk"
# **2015**
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="TS1g7_dqLk6P" outputId="613f22ae-c52e-4867-859a-d766ca1b6c58"
normalized_educ = Count_educ_2015 / Count_educ_2015.Quantity.sum()
normalized_educ.sort_values(by='Quantity', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="nR6-TaiLLy6t" outputId="1dc08abf-c0d5-46a5-fc82-7087d89b781b"
normalized_educ.sort_values(by='Quantity', ascending=False).plot(kind='bar')
# + [markdown] id="7n6vik1YMNOK"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="j5NJNH-zL5Zd" outputId="1b17732f-b3cd-4b22-cec1-e32274aeb62e"
new_df['EdLevel'].value_counts(normalize=True).to_frame()
# + colab={"base_uri": "https://localhost:8080/", "height": 706} id="hJ1Ip-2oMRHC" outputId="31f0c27c-0b93-421d-d8c5-ad4dc875e19a"
new_df['EdLevel'].value_counts(normalize=True).plot(kind='bar')
# + [markdown] id="0N99TiD1MisC"
# Analyzing the education profile, we can clearly see that formal education is much more present in 2020 than in 2015 - no formal education is in the top of the ranking in 2015.
#
# Adding only BS and MS from 2020 dataset, we have almost 75% of our sampling, whereas we have only 40% (adding college that is not in CS + BS in CS + MS) in 2015.
# + [markdown] id="qZNtXEVVNjh7"
# **2.1 - Salary by profile Education**
# + [markdown] id="_Wa_kojLNnEF"
# **2015**
# + colab={"base_uri": "https://localhost:8080/"} id="p_vjsbUAMW1d" outputId="00608043-9025-414c-98c4-9b53f7252b13"
educ_2015_list = list(normalized_educ.index)
educ_2015_list
# + id="t6wmAupuNp6U"
# Average salary per Training & Education category (2015): each indicator
# column has a single non-null group, so the groupby mean yields exactly
# one value per column — take it with .iloc[0].
avg_sal_educ_2015 = {
    col: new_df2.groupby([col])['Compensation: midpoint'].mean().iloc[0]
    for col in educ_2015_list
}
# + colab={"base_uri": "https://localhost:8080/"} id="lQ_57ISvNr6a" outputId="9b2435b5-688d-45a2-f876-5210286fe3ce"
avg_sal_educ_2015
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="Bboaet3kNtk7" outputId="8c7ac647-7ee9-4793-f8f2-1956727fa767"
avg_sal_educ_2015_df = pd.DataFrame(list(avg_sal_educ_2015.items()),columns = ['Education','Avg Salary'])
avg_sal_educ_2015_df.sort_values(by='Avg Salary', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 572} id="jG8saosQN86i" outputId="528fdcc8-b72b-4b6c-a11a-8ba50689aa5a"
avg_sal_educ_2015_df.set_index('Education').sort_values(by='Avg Salary', ascending=False).plot(kind='bar', title='Avg salary by education 2015')
# + [markdown] id="vFZMWI-dN2D5"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="J_twISZFNwFN" outputId="b7018e18-a19c-4f43-c1a3-2c97d0dd9d7b"
new_df.groupby('EdLevel')['ConvertedComp'].mean().to_frame().sort_values(by='ConvertedComp', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 736} id="fucN4v1BN6gx" outputId="0a41e293-df85-434e-feae-28cff9be1995"
new_df.groupby('EdLevel')['ConvertedComp'].mean().to_frame().sort_values(by='ConvertedComp', ascending=False).plot(kind='bar', title='Avg salary by education 2020')
# + [markdown] id="-DVQl1i4OpnR"
# Since the average salary is higher in 2020, this is also reflected in the average salary by education profile. People with formal education earn higher salaries in the 2020 sample, but it is very interesting to note that in 2015 respondents with no formal education — e.g. industry certification or boot camps / night school — also earned elevated salaries, second only to PhD developers.
# + [markdown] id="MgkrNR14O61B"
# **3 - Age Profile 2015 x 2020**
# + [markdown] id="f0I8WIa0PPR4"
# **2015**
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="st3YT-k6OecN" outputId="7bc10d35-9a43-4b08-92e0-3516ddc591cb"
new_df2.Age.value_counts(normalize=True, dropna=True).to_frame()
# + [markdown] id="bTMoRIB_PW-V"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="uyxhLuW8PTc_" outputId="1f8843b2-97fa-45d0-ef95-29d8cf948e79"
label_ranges = [0, 19, 24, 29, 34, 39, 50, 60, np.inf]
label_names = ['<20', '20-24', '25-29', '30-34', '35-39','40-50', '51-60', '>60']
new_df['Age group'] = pd.cut(new_df['Age'], bins = label_ranges, labels = label_names)
new_df['Age group'].value_counts(normalize=True, dropna=True).to_frame()
# + [markdown] id="xvgE9jNXPhJv"
# When it comes to compare the Age profile between 2015 and 2020, overall there is not that much difference, but we can note that it is getting a bit older in 2020.
# + [markdown] id="_Uduh9x6QUcl"
# **3.1 - Salary by Age**
# + [markdown] id="YjAaVXChQiDt"
# **2015**
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="nybSZP79Pckp" outputId="9d6a00be-3099-4346-f430-0a7a869bcdb2"
new_df2.groupby('Age')['Compensation: midpoint'].mean().to_frame().sort_values(by='Compensation: midpoint', ascending=False)
# + [markdown] id="dp55SYzGQobc"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="9YDdS5BmQg30" outputId="c6497e5d-a6cb-4d17-ac95-1b81068b84cc"
new_df.groupby('Age group')['ConvertedComp'].mean().to_frame().sort_values(by='ConvertedComp', ascending=False)
# + [markdown] id="grnst0KqQwJN"
# When it comes to compare the range that can be found in both samplings, we note that the ranking of average salary by age does not change significantly.
# + [markdown] id="3d8WiIcUQ27h"
# **4 - Years coding profile 2015 x 2020**
# + [markdown] id="AKD8tBCXQ6U3"
# **2015**
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="UZI2wbIyQrWM" outputId="ff704f67-aea1-480a-fd82-ef29c76806f0"
new_df2['Years IT / Programming Experience'].value_counts(normalize=True, dropna=True).to_frame()
# + [markdown] id="lzKvWw7BRBxp"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Lp4pdyMhQ9Na" outputId="a5f27d38-82fa-4eb9-b4a0-e0c45a0597c7"
# Bin 2020 professional-coding years to mirror the 2015 survey's groups.
# NOTE(review): with right-closed bins, the last bin (10, inf] contains
# every value above 10 (i.e. 11 and up), so '>11' is a slight mislabel —
# '11+' would be accurate; confirm before relabelling, since later cells
# display and group by these labels.
label_ranges2 = [0, 1, 2, 5, 10, np.inf]
label_names2 = ['0-1', '1-2', '2-5', '6-10', '>11']
new_df['years coding group'] = pd.cut(new_df['YearsCodePro'], bins = label_ranges2, labels = label_names2)
new_df['years coding group'].value_counts(normalize=True, dropna=True).to_frame()
# + [markdown] id="bQiMRvnnpXf8"
# Looking to the tables above, we can note that we have more experienced developers in 2020 than in 2015.
# + [markdown] id="7S0eoneVRMHb"
# **4.1 - Salary by years coding**
# + [markdown] id="2-qAYHEMRO-L"
# **2015**
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="FhOk-yiCRFID" outputId="b56b1b2b-cf66-4b85-b086-549b72a43cc9"
new_df2.groupby('Years IT / Programming Experience')['Compensation: midpoint'].mean().to_frame().sort_values(by='Compensation: midpoint', ascending=False)
# + [markdown] id="dpnBTHX4VowW"
# **2020**
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="Zy7cmINsRTgn" outputId="a1e29cf0-ef53-49c2-f63c-c569a1c5c094"
new_df.groupby('years coding group')['ConvertedComp'].mean().to_frame().sort_values(by='ConvertedComp', ascending=False)
# + [markdown] id="RnPVn-UQV4_v"
# Comparing both samples we see no difference in the ranking, as expected: the more years of experience, the higher the salary we expect. The only difference is the average salary within each group (higher for 2020).
# + [markdown] id="FULfCtgldFYz"
# ##**Data Modeling**
# + [markdown] id="zGWBHJPMWfYZ"
# **Using a linear regression model including only the variables we separed previously, let's check which one is the most important in terms of affecting salaries**
# + [markdown] id="RGWpOh3jXY6t"
# **2015**
# + [markdown] id="AmgXs6IVXk2-"
# Since the dataframe of 2015 survey has its numeric variables separated into groups, we are going to get their midpoint and transform them into int numbers
#
# Obs: it is important to note that we are doing this as a simple analysis, in order to create a more efficient model to predict the salaries, we'd better analyze other points
# + colab={"base_uri": "https://localhost:8080/"} id="7oiXUA-yVuS5" outputId="d07642f1-16f6-4a03-d4ad-faf8e6fb8eeb"
#Age data of 2015
new_df2['Age'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="kTFtHovGXXTf" outputId="85bcabfd-9244-4305-dd13-a9033ad778af"
#Eliminating 'prefer not to disclose'
new_df2 = new_df2[new_df2['Age'] != 'Prefer not to disclose']
new_df2['Age'].value_counts()
# + id="-FnDX82-XeVn"
#Transforming to numerical and int data
def label_age (row):
    '''
    INPUT:
    row - a dataframe row (or mapping) with an "Age" column holding a
          bracket string such as '25-29'
    OUTPUT:
    the bracket midpoint as a string, e.g. '25-29' -> '27',
    '30-34' -> '32', '20-24' -> '22', '35-39' -> '37',
    '40-50' -> '45', '51-60' -> '56', '< 20' -> '18', '> 60' -> '60';
    None for any unrecognised value.
    '''
    # Table-driven lookup replaces the long if-chain; .get returns None
    # for unmapped brackets, matching the fall-through of the original.
    midpoints = {
        '25-29': '27',
        '30-34': '32',
        '20-24': '22',
        '35-39': '37',
        '40-50': '45',
        '51-60': '56',
        '< 20': '18',
        '> 60': '60',
    }
    return midpoints.get(row['Age'])
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="BN14cM2iY8xk" outputId="154a82df-183b-4c3b-f98a-e5fcb08995fb"
new_df2['Age_midpoint'] = new_df2.apply (lambda row: label_age(row), axis=1)
new_df2.head()
# + colab={"base_uri": "https://localhost:8080/"} id="8IHJ2ejeZA1i" outputId="dccf8fb4-c587-4010-d42c-a3e0f869f17b"
#Dealing with years of experience
new_df2['Years IT / Programming Experience'].value_counts()
# + id="nrg8QvvOZE4K"
def label_yearscod (row):
    '''
    INPUT:
    row - a dataframe row (or mapping) with a
          "Years IT / Programming Experience" column holding a bracket
          string such as '2 - 5 years'
    OUTPUT:
    the bracket midpoint as a string: '11+ years' -> '11',
    '2 - 5 years' -> '4', '6 - 10 years' -> '8', '1 - 2 years' -> '2',
    'Less than 1 year' -> '1'; None for any unrecognised value.
    '''
    # Table-driven lookup replaces the if-chain; .get returns None for
    # unmapped brackets, matching the fall-through of the original.
    midpoints = {
        '11+ years': '11',
        '2 - 5 years': '4',
        '6 - 10 years': '8',
        '1 - 2 years': '2',
        'Less than 1 year': '1',
    }
    return midpoints.get(row['Years IT / Programming Experience'])
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="Sae477bBZT8p" outputId="66465605-454e-47f0-e0bf-36e3758cd578"
new_df2['yearscod_midpoint'] = new_df2.apply (lambda row: label_yearscod(row), axis=1)
new_df2.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="NaSm6dHDZVcD" outputId="128e3bd9-f822-4fa3-d72f-06eefdb4631e"
new_df2['yearscod_midpoint'] = new_df2['yearscod_midpoint'].astype('int')
new_df2['Age_midpoint'] = new_df2['Age_midpoint'].astype('int')
new_df2.describe()
# + id="FkS14Bo9Zbiv"
#Working with categorical variables
new_df2_cat = pd.get_dummies(new_df2[['Training & Education: No formal training',
'Training & Education: On the job',
'Training & Education: Boot camp or night school',
'Training & Education: Online Class',
'Training & Education: Mentorship',
'Training & Education: Industry certification',
'Training & Education: Some college, but no CS degree',
'Training & Education: BS in CS', 'Training & Education: Masters in CS',
'Training & Education: PhD in CS', 'Training & Education: Other']])
# + id="3Gak0lyJZwgN"
new_df2_cat2 = pd.get_dummies(new_df2['Country'], drop_first=True)
# + id="hQzgBLUCZw7m"
X = pd.concat([new_df2[['yearscod_midpoint', 'Age_midpoint']], new_df2_cat, new_df2_cat2], axis=1)
y = new_df2['Compensation: midpoint']
# + id="sTri_uQcZ5Bd"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="568J5S-oZ77U" outputId="ea905910-e499-451c-9fa7-aace08c2a821"
#Apply a linear regression model
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit
y_test_preds = lm_model.predict(X_test)
print(r2_score(y_test, y_test_preds))
# + id="GesMwO0QaPjG"
#Function to analyze the coefs that affects the salary the most
def coef_weights(coefficients, X_train):
    '''
    INPUT:
    coefficients - the coefficients of the linear model (array-like,
                   one value per column of X_train)
    X_train - the training data, so the column names can be used
    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate),
               sorted by absolute magnitude (most influential first)

    Provides a dataframe that can be used to understand the most influential
    coefficients in a linear model by providing the coefficient estimates
    along with the name of the variable attached to the coefficient.
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    # Bug fix: use the passed-in coefficients instead of the global
    # lm_model.coef_, so the function works for any fitted model (the
    # original silently ignored its first argument).
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="XvR_ZatKad_l" outputId="ad9b76f8-14f9-41f5-ae16-1ec4921d6f8b"
#Use the function
coef_df = coef_weights(lm_model.coef_, X_train)
#A quick look at the top results
coef_df.head(20)
# + [markdown] id="OwfoTgssadNF"
# **2020**
# + id="gUt_XgodatlZ"
#working with categorical
new_df_cat = pd.get_dummies(new_df[['EdLevel','Country']], drop_first=True)
# + id="W2wTu5exax-o"
X = pd.concat([new_df[['Age', 'YearsCodePro']], new_df_cat], axis=1)
y = new_df['ConvertedComp']
# + colab={"base_uri": "https://localhost:8080/"} id="rZ0sikI9az5z" outputId="286a7468-dd14-486e-d6af-e8b9793c6731"
#Apply linear regression model
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit
y_test_preds = lm_model.predict(X_test)
print(r2_score(y_test, y_test_preds))
# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="sNJsSI7ua7zm" outputId="bb9836da-aea6-4ee1-f6d1-35a4343815a7"
coef_df = coef_weights(lm_model.coef_, X_train)
#A quick look at the top results
coef_df.head(20)
# + [markdown] id="CjMVSyw9zDOl"
# ##**Evaluating our model**
# + [markdown] id="hICVIdU9bGUo"
# Looking into this comparison, coefficients from 2015 and 2020, we can note that Country was the most important variable to influence the salaries - **according to the variables chosen in this model.**
#
# In order to predict salaries, we built a model of 0.552 of r2 score for 2015 data, and 0.532 for 2020 data.
#
# Obs.: It is important to remember once more that our focus here is not to create the best model to predict salaries; we would need to analyze other factors if that were the case. For example, Age and years coding are highly correlated variables, and we could eliminate one of them, but we decided to keep both since, as mentioned, the goal is not to build a salary-prediction model but rather to compare 2015 data with 2020.
# + id="-5HQOrrHa8mq"
|
Project1_Thiago_Gavioli_UdacityDSV2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="a_VmAfWSBJs7"
# <img width=150 src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/NumPy_logo.svg/200px-NumPy_logo.svg.png"></img>
# + [markdown] colab_type="text" id="7WpTwrIPwPA5"
# * 教學目標:主要說明 matplotlib 的基礎操作
# 1. 使用常見的子圖與軸圖來做畫面配置
# 2. 等高線圖
# * 範例重點
# * 如何使用亂數, 資料集來操作
# + id="cu78CXS6mRpg"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
# # 【基礎19】
# 有效的視覺化可以幫助用戶分析和推理資料和證據。它使複雜的資料更容易理解、理解和使用。—— Wikipedia
# 為了要將資料視覺化所需的前置工作:
# * 制定好主題以及清楚目的
# * 釐清測量尺度
# * 準確的資料處理與探勘
# 情境:
# * 利用熱點圖可看出在Deep learning 的 model 在圖片中哪部分特徵較為重要
# * 在降為後將資料視覺化看資料空間的分布,進而決定分析的下一步該如何優化
# Python資料視覺化主要有三大套件:
# * Matplotlib
# * 是一個最基礎的Python 可視化庫
# * 作圖風格接近 MATLAB
# * 一般都是從 matplotlib 上手 Python 數據可視化,然後開始做縱向與橫向拓展
# * matplotlib 的圖像都位於 Figure 物件中
# * 套件:
# * 基礎的套件:經典圖表(直方、分箱、…)、多重子圖表(subplot)
# * 其他圖型:密度圖、等高線圖
# * Seaborn (靜態的套件)
# * 基於 matplotlib 的進階視覺化效果庫
# * 可用短小的代碼去繪製描述更多維度資料的可視化效果圖
# * 可搭配 Matplotlib 建構更直觀的視覺化效果
# * Plotly
# * 用於繪製互動式圖表的工具庫
# * 具備高互動性的圖表以及版面
# * 將複雜的圖表,用簡單的語法呈現出來
# * Bokeh (動態的套件,類似於 D3.js)
# * 交互資訊可視化的工具
# * 用於做瀏覽器端交互可視化的庫,實現分析師與數據的交互
# * 不再需要編輯 HTML 與 JavaScript 便能製作網頁前端視覺化
# * Basemap
# * 地理資訊數據
# * 傳統的 matplotlib/seaborn、互動屬性的 bokeh 可能無法很好地對這類數據進行處理
# 在 Matplotlib 有兩種主要的操作方式,分別為:
# * Matplotlib 的全域 pyplot 模組互動操作
# * 若是只有一張圖可以以下語法操作
# ```
# import matplotlib.pyplot as plt
# ```
# * 完整指令
# ```
# matplotlib.pyplot.plot(args, scalex=True, scaley=True, data=None, kwargs)
# ```
# * `args`:x、y的特徵
# * `kwargs`:指定線標籤、線寬、標記面顏色等屬性設定
# * 物件導向形式的操作方式
# * 若是有多張圖的話,透過控制 figure (*.SVG)和 axis (資料維度) 來操作。其中 figure 和全域 pyplot 部分屬性相同。例如: fig.text() 對應到 plt.fig_text()
# ## Matplotlib
# ### Plot
# * 指令
# ```
# plot(x, y, data=data, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=12)
# ```
# * 參數說明
# * x、y:特徵或數據
# * data:dataframe
# * color:標記顏色
#
# | 顏色字元 | 說明 | 顏色字元 | 說明 |
# |---------|:----:|:-------:|:----:|
# | 'b' | blue | 'm' | magenta |
# | 'g' | green | 'y' | yellow |
# | 'r' | red | 'k' | black |
# | 'c' | cyan | 'w' | white |
# | '#008000' | 'RGB色碼' | '0.8' | 灰度值字串 |
#
# * maker:標記的型態
#
# | 標記字元 | 說明 | 標記字元 | 說明 | 標記字元 | 說明 |
# |---------|:----:|:-------:|:----:|:-------:|:----:|
# | '.' | 點 | 'D' | 菱形 | 'h' | 豎六邊形 |
# | ',' | 像素(小點) | 'd' | 長菱形 | 'H' | 橫六邊形 |
# | 'o' | 實心圈 | '|' | 垂直線 | '+' | 十字 |
# | 'v' | 倒三角 | '1' | 下花三角 | 'x' | x |
# | '^' | 上三角 | '2' | 上花三角 | 's' | 實心方形 |
# | '>' | 右三角 | '3' | 左花三角 | 'p' | 實心五角 |
# | '<' | 左三角 | '4' | 右花三角 | `'*'` | 星形 |
#
# * linestyle:線條的型態
#
# | 風格字元 | 說明 | 風格字元 | 說明 |
# |---------|:----:|:-------:|:----:|
# | '-' | 實線 | ':' | 虛線 |
# | '—' | 破折線 | '"' | 無線條 |
# | '-.' | 點劃線 | | |
#
# * linewidth:線寬
# * markersize:標記的大小
# +
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
# + colab={} colab_type="code" id="u_C6XjFlwPA5" outputId="6b27d09c-47b8-49e9-c412-6ce91b0b2b9a"
# 準備數據 ... 假設我要畫一個sin波 從0~180度
x = np.arange(0,180)
y = np.sin(x * np.pi / 180.0)
# 開始畫圖
# 設定要畫的的x,y數據list....
plt.plot(x,y)
# 在這個指令之前,都還在做畫圖的動作
# 這個指令算是 "秀圖"
plt.show()
# +
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.figure(figsize=(8,6),
facecolor='c')
plt.plot(x,y)
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Plot with figsize (8,6)')
plt.show()
# + colab={} colab_type="code" id="HkerUPvOwPA-" outputId="22cda52b-1495-4a2f-ba5f-13ba3fe3b509"
# 準備數據 ... 假設我要畫一個sin波 從0~180度
x = np.arange(0,180)
y = np.sin(x * np.pi / 180.0)
# 開始畫圖
# 設定要畫的的x,y數據list....
plt.plot(x,y)
# 設定圖的範圍, 不設的話,系統會自行決定
plt.xlim(-30,390)
plt.ylim(-1.5,1.5)
# 照需要寫入x 軸和y軸的 label 以及title
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('The Title')
# 在這個指令之前,都還在做畫圖的動作
# 這個指令算是 "秀圖"
plt.show()
# -
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y, 'go-', label='line 1', linewidth=2)
plt.show()
x = np.arange(10)
plt.plot(x, x*1.5, 'go:'
, x, x*2.5, 'rx'
, x, x*3.5, '^'
, x, x*4.5, 'bd-.')
plt.show()
# ### Figure
# * 指令
# ```
# figure(num = None, figsize=None, dpi=None, facecolor=None, edgecolor=None, frameon=True, FigureClass=<class 'matplotlib.figure.Figure'>, clear=False, kwargs)
# ```
# * 參數說明<br>
#
# | 參數 | 型態 | 意義 | 預設值 |
# |------|:----:|:----:|:----:|
# | num | Integer(string) | 設定圖號 | 會自動給予 |
# | figsize | tuple | 設定圖形長寬(英吋) | figsize=(8,6) |
# | dpi | interger | 解析度(revolution) | dpi=800 |
# | facecolor | | 背景顏色 | |
# | edgecolor | | 邊線顏色 | |
# | frameon | Boolean/None | 設定邊框 | TRUE |
#
# ### Subplot
# * 指令
# ```
# subplot(nrows, ncols, index, kwargs)
# ```
# * 參數說明<br>
#
# | 參數 | 型態 | 意義 | 預設值 |
# |------|:----:|:----:|:----:|
# | nrows | integer/string | 設定列數 | 小於等於10欄時,使用subplot(2,2,1)/subplot(221) |
# | ncols | tuple | 設定欄數 | 同nrows |
# | nindex | integer | 子圖序號 | 同nrows |
# | facecolor | | 背景顏色 | 同colors |
# | polar | boolean/None | 極座標圖 | |
# | projection | string | 投影設定 | 同projections |
# +
fig, axes = plt.subplots(2, 2) # 建立 2*2 多維視窗
data = pd.Series(np.random.rand(5), index=list('12345'))
#ax為選擇畫圖視窗,color為顏色,alpha為透明度設定
data.plot.bar(ax=axes[0,1], color='b', alpha = 1)
data.plot.barh(ax=axes[1,1], color='b', alpha=0.5)
data.plot.bar(ax=axes[1,0], color='c', alpha = 0.8)
data.plot.barh(ax=axes[0,0], color='r', alpha=0.5)
plt.show()
# + [markdown] colab_type="text" id="3Xi7N-1vwPBA"
# ## 散點圖:Scatter Plots
# -
# * 指令
# ```
# scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, verts=<deprecated parameter>, edgecolors=None, *, plotnonfinite=False, data=None, kwargs)
# ```
# * 參數說明<br>
#
# | 參數 | 意義 | 預設值 | 參數|
# |------|:----:|:----:|:----:|
# | x,y | 數值 | | |
# | s | 尺度 | | |
# | c | 顏色 | blue(藍色) | r:紅色;b:藍色;g:綠色 |
# | marker | 符號 | 'o' | s:方塊;^:三角形;--:虛線 |
# | cmap | 色彩 | | |
# | alpha | 透明度(0~1間) | | |
# | linewidths | 線寬 | | |
# | edgecolor | 邊緣顏色,用於空心符號| | |
#
# * 適用:呈現相關數值間的關係
# * 實際應用:性別與體重的關係
# + colab={} colab_type="code" id="8FpoOD91wPBB" outputId="2ddd92d3-0213-48f2-8773-a8a285f89cdd"
n = 1024
X = np.random.normal(0,1,n)
Y = np.random.normal(0,1,n)
plt.scatter(X,Y)
plt.title('Scatter plot')
plt.show()
# -
df = pd.DataFrame(np.random.rand(10,2), columns=['A', 'B'])
print(df)
df.plot.scatter(x='A', y='B')
X = np.random.normal(0, 1, 100)
Y = np.random.normal(0, 1, 100)
plt.scatter(X, Y, color='b', alpha=0.5, s=100, edgecolors='red')
plt.title('Scatter plot')
plt.show()
# + [markdown] colab_type="text" id="mYR7SBHAwPBD"
# ### 給定顏色與圖形形狀
#
# * 產出數值介於 0~10 的 array,並以 0.4 為間隔,畫出y=x, y=x², y=x³ 的圖表
# + colab={} colab_type="code" id="eyMCGKFHwPBE" outputId="cc94b06a-1adb-4c7f-b85c-9e7eb7978bfe"
t = np.arange(0., 10., 0.7)
print('t: '+str(t))
plt.plot(t, t, 'r--', t, t**2, 'bs', t**3, 'g^' )
plt.show()
# -
# # 【進階19】
# ## 長條圖:Bar Plots
# * 用 plt.bar 及 plt.scatter 裡面放資料
# * 適用:不同種類資料在不同時間點的變化
# * 實際應用:人口成長變化
# * 函數:.plot.bar()
# + colab={} colab_type="code" id="WiYAsSyHwPBG" outputId="de19ed0c-74b7-4a39-ec4c-43621bbdcbfb"
x = np.arange(0., 10., 0.7)
y = np.arange(0., 10., 0.7)
plt.bar(x, y)
plt.show()
# -
df = pd.DataFrame(np.random.rand(10,2), columns=['A', 'B'])
print(df)
df.plot.bar()
df.plot.bar(stacked=True)
# ## 圓餅圖:Pie Plots
# * 參數
# * index:圓餅圖的類別
# * name:圓餅圖的名稱
df = pd.Series(np.random.rand(4), index=['A','B','C','D'], name='title')
print(df)
df.plot.pie()
# ## 箱型圖
# * 適用:完整呈現數值分布的統計圖表
# * 實際應用:薪資水平
# * 函數:.boxplot()
# * 參數
# * index:x軸
df = pd.DataFrame(np.random.rand(10,2), columns=['A','B'])
print(df)
df.boxplot()
# ## 折線圖
#
# * 適用:會隨時間變動的值
# * 函數:.plot()
# Time series of 200 daily points; cumsum turns the noise into a random
# walk so the line has a visible trend.
ts = pd.Series(np.random.randn(200), index=pd.date_range('1/1/2020', periods=200))
ts = ts.cumsum()
print(ts)
ts.plot()
# Draw several line plots at once (one line per column).
df = pd.DataFrame(np.random.randn(200, 3), index=pd.date_range('1/1/2020', periods=200), columns=["A","B","C"])
df = df.cumsum()
print(df)
df.plot()
|
Sample/Day_19_Sample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numbers and more in Python!
#
# In this lecture, we will learn about numbers in Python and how to use them.
#
# We'll learn about the following topics:
#
# 1.) Types of Numbers in Python
# 2.) Basic Arithmetic
# 3.) Differences between classic division and floor division
# 4.) Object Assignment in Python
# ## Types of numbers
#
# Python has various "types" of numbers (numeric literals). We'll mainly focus on integers and floating point numbers.
#
# Integers are just whole numbers, positive or negative. For example: 2 and -2 are examples of integers.
#
# Floating point numbers in Python are notable because they have a decimal point in them, or use an exponential (e) to define the number. For example 2.0 and -2.1 are examples of floating point numbers. 4E2 (4 times 10 to the power of 2) is also an example of a floating point number in Python.
#
# Throughout this course we will be mainly working with integers or simple float number types.
#
# Here is a table of the two main types we will spend most of our time working with some examples:
# <table>
# <tr>
# <th>Examples</th>
# <th>Number "Type"</th>
# </tr>
#
# <tr>
# <td>1,2,-5,1000</td>
# <td>Integers</td>
# </tr>
#
# <tr>
# <td>1.2,-0.5,2e2,3E2</td>
# <td>Floating-point numbers</td>
# </tr>
# </table>
#
#
# Now let's start with some basic arithmetic.
# ### Basic Arithmetic
# Addition
2+1  # -> 3
# Subtraction
2-1  # -> 1
# Multiplication
2*2  # -> 4
# Division (true division always returns a float in Python 3)
3/2  # -> 1.5
# Floor Division (truncates toward negative infinity, returns an int here)
7//4  # -> 1
# **Whoa! What just happened? Last time I checked, 7 divided by 4 equals 1.75 not 1!**
#
# The reason we get this result is because we are using "*floor*" division. The // operator (two forward slashes) truncates the decimal without rounding, and returns an integer result.
# **So what if we just want the remainder after division?**
# Modulo
7%4  # -> 3
# 4 goes into 7 once, with a remainder of 3. The % operator returns the remainder after division.
# ### Arithmetic continued
# Powers
2**3  # -> 8
# Can also do roots this way
4**0.5  # -> 2.0 (square root; fractional exponent)
# Order of Operations followed in Python
2 + 10 * 10 + 3  # -> 105 (multiplication binds tighter than addition)
# Can use parentheses to specify orders
(2+10) * (10+3)  # -> 156
# ## Variable Assignments
#
# Now that we've seen how to use numbers in Python as a calculator let's see how we can assign names and create variables.
#
# We use a single equals sign to assign labels to variables. Let's see a few examples of how we can do this.
# Let's create an object called "a" and assign it the number 5
a = 5
# Now if I call *a* in my Python script, Python will treat it as the number 5.
# Adding the objects
a+a  # -> 10
# What happens on reassignment? Will Python let us write it over?
# Reassignment
a = 10
# Check
a  # -> 10
# Yes! Python allows you to write over assigned variable names. We can also use the variables themselves when doing the reassignment. Here is an example of what I mean:
# Check
a  # -> 10
# Use A to redefine A
a = a + a  # the right-hand side is evaluated first, so a becomes 20
# Check
a  # -> 20
# The names you use when creating these labels need to follow a few rules:
#
# 1. Names can not start with a number.
# 2. There can be no spaces in the name, use _ instead.
# 3. Can't use any of these symbols :'",<>/?|\()!@#$%^&*~-+
# 4. It's considered best practice (PEP8) that names are lowercase.
# 5. Avoid using the characters 'l' (lowercase letter el), 'O' (uppercase letter oh),
# or 'I' (uppercase letter eye) as single character variable names.
# 6. Avoid using words that have special meaning in Python like "list" and "str"
#
#
# Using variable names can be a very useful way to keep track of different variables in Python. For example:
# +
# Use object names to keep better track of what's going on in your code!
my_income = 100
tax_rate = 0.1
my_taxes = my_income*tax_rate  # 100 * 0.1 -> 10.0
# -
# Show my taxes!
my_taxes  # -> 10.0
# So what have we learned? We learned some of the basics of numbers in Python. We also learned how to do arithmetic and use Python as a basic calculator. We then wrapped it up with learning about Variable Assignment in Python.
#
# Up next we'll learn about Strings!
|
00-Python Object and Data Structure Basics/01-Numbers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python388jvsc74a57bd07338908a6901250255932625ba4b5c32a9d91564d69b39dc5095100e5c96b0b4
# ---
import wandb
import nltk
from nltk.stem.porter import *
from torch.nn import *
from torch.optim import *
import numpy as np
import pandas as pd
import torch,torchvision
import random
from tqdm import *
from torch.utils.data import Dataset,DataLoader
# Shared Porter stemmer instance used by stem()/bag_of_words() below.
stemmer = PorterStemmer()
# Weights & Biases project name for experiment logging.
PROJECT_NAME = 'NLP-with-Disaster-Tweets-V3'
# assumes a CUDA-capable GPU is available — TODO confirm before running on CPU-only hosts
device = 'cuda'
def tokenize(sentence):
    """Lower-case *sentence* and split it into word tokens with NLTK."""
    lowered = sentence.lower()
    return nltk.word_tokenize(lowered)

# Quick sanity check on a token containing punctuation.
tokenize('$100')
def stem(word):
    """Return the Porter stem of *word* (lower-cased first)."""
    normalized = word.lower()
    return stemmer.stem(normalized)

# Quick sanity check.
stem('organic')
def bag_of_words(tokenized_words, words):
    """Return a binary bag-of-words vector for *tokenized_words*.

    The vector has one slot per entry of *words* (the vocabulary, which
    may contain duplicates); a slot is 1.0 when the stemmed token set
    contains that vocabulary word, else 0.0.
    """
    stems = {stem(token) for token in tokenized_words}
    bag = np.zeros(len(words))
    for position, vocab_word in enumerate(words):
        if vocab_word in stems:
            bag[position] = 1.0
    return bag

bag_of_words(['hi'],['hi','how','hi'])
# Load the first 5000 rows and shuffle them (sample(frac=1)).
data = pd.read_csv('./data.csv')[:5000].sample(frac=1)
X = data['text']
y = data['target']
words = []
# NOTE(review): `data` (the DataFrame) is rebound to a plain list here and
# rebuilt below as [tokens, one_hot] pairs.
data = []
labels = {}    # label value -> 1-based index
labels_r = {}  # index -> label value (reverse lookup)
idx = 0
# Assign a 1-based index to each distinct label, in order of appearance.
for label in y:
    if label not in list(labels.keys()):
        idx += 1
        labels[label] = idx
        labels_r[idx] = label
        print(idx,label)
labels
labels_r
# Tokenize + stem every tweet; collect the vocabulary and build
# (token-list, one-hot-target) pairs.
for X_batch,y_batch in tqdm(zip(X,y)):
    X_batch = tokenize(X_batch)
    new_X = []
    for Xb in X_batch:
        new_X.append(stem(Xb))
    words.extend(new_X)
    data.append([
        new_X,
        # np.eye(k, n)[k-1] selects a length-n one-hot row with the 1 at
        # position k-1, i.e. a one-hot encoding of label index k.
        np.eye(labels[y_batch],len(labels))[labels[y_batch]-1]
    ])
# Deduplicate the vocabulary, then shuffle it (ordering is arbitrary but
# must stay fixed once the model's input layer is sized from it).
words = sorted(set(words))
np.random.shuffle(words)
np.random.shuffle(data)
X = []
y = []
# Vectorize every example against the final vocabulary.
for d in tqdm(data):
    X.append(bag_of_words(d[0],words))
    y.append(d[1])
from sklearn.model_selection import *
# 12.5% held out; shuffle=False because `data` was already shuffled above.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.125,shuffle=False)
# Move everything to the GPU as float tensors.
X_train = torch.from_numpy(np.array(X_train)).to(device).float()
y_train = torch.from_numpy(np.array(y_train)).to(device).float()
X_test = torch.from_numpy(np.array(X_test)).to(device).float()
y_test = torch.from_numpy(np.array(y_test)).to(device).float()
def get_loss(model,X,y,criterion):
    """Evaluate *criterion* on model(X) vs *y*; return a Python float."""
    return criterion(model(X), y).item()
def get_accuracy(model,X,y):
    """Return the argmax-agreement between model(X) and *y* as a percentage.

    Both model(X) and *y* are expected to be 2-D tensors of per-class
    scores / one-hot targets (batch, classes) — the argmax of each row is
    compared. The per-row Python loop of the original was replaced with a
    single vectorized comparison; the result is identical.
    """
    preds = model(X)
    pred_labels = torch.argmax(preds, dim=1)
    true_labels = torch.argmax(y, dim=1)
    correct = int((pred_labels == true_labels).sum())
    total = len(y)
    # Rounding before scaling (round(x, 3) * 100) is kept from the original
    # implementation so returned values match it exactly.
    acc = round(correct/total,3)*100
    return acc
class Model(Module):
    """Fully-connected classifier: bag-of-words input -> label scores.

    Input width is len(words) (the vocabulary built above); output width
    is len(labels). Names Module/Linear/ReLU come from the file's
    `from torch.nn import *`.
    """
    def __init__(self):
        super().__init__()
        self.hidden = 128       # width of every hidden layer
        self.activation = ReLU()
        self.input = Linear(len(words),self.hidden)
        self.l1 = Linear(self.hidden,self.hidden)
        self.l2 = Linear(self.hidden,self.hidden)
        self.l3 = Linear(self.hidden,self.hidden)
        self.l4 = Linear(self.hidden,self.hidden)
        self.l5 = Linear(self.hidden,self.hidden)
        self.output = Linear(self.hidden,len(labels))
    def forward(self,X):
        # NOTE(review): no activation is applied between `input` and `l1`,
        # so those two linear layers compose into one — confirm whether an
        # activation was intended here.
        preds = self.input(X)
        preds = self.activation(self.l1(preds))
        preds = self.activation(self.l2(preds))
        preds = self.activation(self.l3(preds))
        preds = self.activation(self.l4(preds))
        preds = self.activation(self.l5(preds))
        # Raw logits (no softmax); the training loop uses MSELoss on these.
        preds = self.output(preds)
        return preds
# Build the model, loss and optimizer, then train with mini-batches while
# logging metrics to Weights & Biases once per epoch.
model = Model().to(device)
criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
wandb.init(project=PROJECT_NAME,name='baseline')
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size):
        X_batch = X_train[i:i+batch_size]
        y_batch = y_train[i:i+batch_size]
        preds = model(X_batch)
        loss = criterion(preds,y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    model.eval()
    torch.cuda.empty_cache()
    # FIX: the original computed a + b/2 instead of (a + b)/2, so the
    # logged 'Loss' was not the intended average (the 'Acc' log below
    # shows the intended (a + b) / 2 form). Note X_batch/y_batch here are
    # the last mini-batch of the epoch.
    wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
    torch.cuda.empty_cache()
    wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)})
    torch.cuda.empty_cache()
    model.train()
wandb.finish()
torch.cuda.empty_cache()
# Persist the model (whole object and state_dict) plus the artifacts
# needed to rebuild inputs at inference time, in both .pt and .pth names.
torch.save(model,'model.pt')
torch.save(model,'model.pth')
torch.save(model.state_dict(),'model-sd.pt')
torch.save(model.state_dict(),'model-sd.pth')
torch.save(words,'words.pt')
torch.save(words,'words.pth')
torch.save(data,'data.pt')
torch.save(data,'data.pth')
torch.save(labels,'labels.pt')
torch.save(labels,'labels.pth')
torch.save(idx,'idx.pt')
torch.save(idx,'idx.pth')
torch.save(y_train,'y_train.pt')
torch.save(y_test,'y_test.pth')
torch.save(y,'y.pt')
torch.save(y,'y.pth')
|
00.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## machine IRM intelligente: predection et dépistage des maladie
#
# # presentation génerale du project
#
# Suite à une absence de médicaments pour certaines maladies, il reste toujours nécessaire de les prévoir à un stade précoce afin d'augmenter les chances de guérison.
# C'est pour ça on a pensé à créer un Modéle de détéction et dépistage à partir d'imagerie par exemple si un patient consulte un rhumatologue pour une fracture du crane et ce dernier recommande une IRM notre programme est capable de detecter une tumeur au niveau du cerveau s'il existe .
# Nons seulement cela , les IRM ne fournissent pas généralements toutes les informatrions suffisantes pour faire le diagnostic !
# Notre programme donnera la main pour aider :
#
# * Le radiologue : pour recomander un spécialiste
# * medcin : pour une meilleure diagnostic
# * patient : pour une détection précoce de maladie
#
# * c'est pour cela on a collecté une une dataset a partie de plusieurs dataset telque brain-tumor , alzheimer , covid_19 , chest tumor
# (a cause du contrainte de temp on a just collecter une dataset d'images IRM de 4 type de maladie différente mais on peut donner une base de donné enormes de tous les maldies puisque l'algorithme a montré ces performances) afin que l'IRM peut détecter tous types de maladies d'une maniére automatique donc un patient deviendra cappable de faire un diagnostique génerale sans l'intervention du medcin aau bout de quelques minutes.
#
# * ensuite nous avons construire le modéle deep learning a base CNN
# puis on a construit l'nterface homme machine qui sera implémenter sur le scannaire IRM
#
# # etude de marché
#
#
# aprés une etude de l'existant on a remarqué que tous les algorithmes traitesn un seul type de maladie(cancer de cerveau uniquement par example) d'ici on a pensé a un algorithme englobant touts types de maladies pouvant étre detectés par l'IRM
#
# # les etapes suivie dans le code :
#
# ### I - creation du dataset
#
# * 1- collecte des datasets des maladie qui peuvent étre detecter par le scannaire IRM
# * 2- création du dataset global qui englobe tout les datasets collecter
# * 3- nettoyage du dataset global
# * 4- redimentionnage des image
# * 5- visualisation du dataset
#
#
# ### II- creation du modele machine learning
#
# * 1- faire l'apprentissage du modéle d'apprentissage deep learning
# * 2- evaluation du modele
# * 3- test manuelle du model et visualisation des images
# * 4- enregistrement du model
#
# ### III- creation de l'interface homme machine
#
#
# ### IIII- test et validation de l'application
#
#
# (l'interface homme machine est developper sur pycharm avec le model créé mais on a mis tous le code dans la feuille notebook a fin que tous le code soit claire)
#
#
#
# # Resources des datasets
# * https://www.kaggle.com/legendahmed/alzheimermridataset
# * https://www.kaggle.com/hamdijarboui/comp-tition-script/edit
# * https://www.kaggle.com/mohamedhanyyy/chest-ctscan-images
# * https://www.kaggle.com/luisblanche/covidct
# * https://www.kaggle.com/hamdallak/the-iqothnccd-lung-cancer-dataset
#
# ## install turicreate platform
#
# Turi Create est une platforme developper par apple qui simplifie le développement de modèles d'apprentissage automatique personnalisés.
#
# * Facile à utiliser: concentrez-vous sur les tâches plutôt que sur les algorithmes
# * Visuel: visualisations intégrées en continu pour explorer vos données
# * Flexible: prend en charge le texte, les images, l'audio, la vidéo et les données de capteur
# * Rapide et évolutif: travaillez avec de grands ensembles de données sur une seule machine
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
#instalation des bibliotheque necissaire
# !pip install llvmlite --ignore-installed
# ! pip install turicreate
# -
#
# ## importation des libraries necissaire
import turicreate as tr
import numpy
import pandas
import tensorflow
import matplotlib
#
#
# ## add brain tumor to the global Sframe
# +
import turicreate as tc
# Load images
train_data = tc.image_analysis.load_images('../input/brain-tumor-detection', with_path=True)
# Resize every image to 28x28 grayscale so all source datasets match.
image_sarray = train_data["image"]
resized_images = tc.image_analysis.resize(image_sarray, 28, 28, 1)
train_data['image']=resized_images
# From the path-name, create a label column
train_data['label'] = train_data['path'].apply(lambda path: 'brain_tumor positif' if '/yes' in path else 'brain_tumor negatif' if '/no' in path else 'none')
# Save the data for future use
train_data.save('/content/brain-tumor.sframe')
# -
# Keep only the positive (tumor) examples for the combined dataset.
train_data=train_data[train_data['label']=='brain_tumor positif']
train_data.explore()
# # add the Chest CT-Scan images Dataset to the global data
# +
# Load images
train_data_chest = tc.image_analysis.load_images('../input/chest-ctscan-images', with_path=True)
image_sarray = train_data_chest["image"]
resized_images = tc.image_analysis.resize(image_sarray, 28, 28, 1)
train_data_chest['image']=resized_images
# From the path-name, create a label column
train_data_chest['label'] = "chest_cancer"
# Save the data for future use
train_data_chest.save('/content/chest-tumor.sframe')
# -
train_data_chest.explore()
train_data=train_data.append(train_data_chest)
train_data['label'].show()
# ## add covid 19 data set to the global data
# +
# Load images
train_data_covid = tc.image_analysis.load_images('../input/covidct', with_path=True)
# Resize to the common 28x28 grayscale format.
image_sarray = train_data_covid["image"]
resized_images = tc.image_analysis.resize(image_sarray, 28, 28, 1)
train_data_covid['image']=resized_images
# From the path-name, create a label column.
# FIX: the negative label was misspelled 'covid_&9 negatif' in the
# original; negatives are filtered out below, so the rename is safe.
train_data_covid['label'] = train_data_covid['path'].apply(lambda path: 'covid_19 positif' if '/CT_COVID' in path else 'covid_19 negatif')
# Save the data for future use
train_data_covid.save('/content/covid.sframe')
# -
train_data_covid.show()
# Keep only the positive (COVID) CT scans.
train_data_covid=train_data_covid[train_data_covid['label']=='covid_19 positif']
train_data_covid.show()
#
# add the train_data_covid to the global train_data
train_data=train_data.append(train_data_covid)
# # add the alzeimer dataset
# +
import turicreate as tc
# Load images
train_data_alzheimer = tc.image_analysis.load_images('../input/alzheimermridataset/Alzheimer_s Dataset', with_path=True)
# Resize to the common 28x28 grayscale format.
image_sarray = train_data_alzheimer["image"]
resized_images = tc.image_analysis.resize(image_sarray, 28, 28, 1)
train_data_alzheimer['image']=resized_images
# From the path-name, create a label column (four severity classes).
train_data_alzheimer['label'] = train_data_alzheimer['path'].apply(lambda path: 'Mild Demented alzeighmer' if '/MildDemented' in path else 'moderate Demented alzeighmer' if '/ModerateDemented' in path else 'verry mild demented alzeighmer' if '/VeryMildDemented' in path else 'not affected with alzheimer')
# Save the data for future use
train_data_alzheimer.save('/content/alzheimer.sframe')
# -
train_data_alzheimer.explore()
train_data_alzheimer.show()
# Keep only the mild and very-mild classes; `+` concatenates SFrames.
train_data_alzheimer=train_data_alzheimer[train_data_alzheimer['label']=='Mild Demented alzeighmer']+train_data_alzheimer[train_data_alzheimer['label']=='verry mild demented alzeighmer']
train_data_alzheimer['label'].show()
train_data=train_data.append(train_data_alzheimer)
# # visualisation des donnés
# Explore the combined SFrame interactively
train_data.explore()
# Plot the distribution of label values in train_data
train_data['label'].show()
# Number of rows in the combined train_data SFrame
len(train_data)
# # II- création du modele machine learning
#
# dans cette partie nous créons un modéle deep learning a l'aide de l'algorithme image_classifier qui est a base un CNN qui utlise les solveur suivant de la regression
# * newton : Newton-Raphson
# * lbfgs : mémoire limitée BFGS
# * fista : descente de gradient accélérée
# nous entrons comme paramétre le nombre des époches a effectuer , le train_data et les autres paramétres seront selecter d'une maniére automatique afin d'obtenir les performance optimal (nous pouvent entré chaque paramétre manuellemnt)
# on a tourné le model plusieurs fois ont modeifions les paramétre pour atteindre cette performance de 96.12% de précision su le train et de 89% sur le test
# # building the model [](http://)
# +
# Split into 80% train / 20% test (seed fixed for reproducibility)
train_data1,test_data1=train_data.random_split(0.8,seed=0)
# +
# Use all available GPUs
tc.config.set_num_gpus(-1)
# Create the image classifier (transfer-learning based)
model = tc.image_classifier.create(train_data1, target='label',max_iterations=100)
# -
# # Model evaluation on the held-out split
model.evaluate(test_data1)
# # 3- Manual test of the model and image visualisation
# # Test on a COVID-positive CT image
image=tc.image_analysis.load_images('../input/covidct/CT_COVID/2020.02.23.20026856-p17-115%4.png')
image.explore()
model.predict(image)
# Test on a mild-dementia MRI image
image=tc.image_analysis.load_images('../input/alzheimermridataset/Alzheimer_s Dataset/train/MildDemented/mildDem10.jpg')
model.predict(image)
#
# # Save the model
# Export for use in Core ML
model.export_coreml('MyCustomImageClassifier.mlmodel')
# # III- creation de l'interface homme machine
#
# dans cette partie on a crée une interfacce homme machine qui va etre impémenter sur le scannaire IRM a l'aide de PYQT5
# c'est une interface simple qui prend comme paramétre l'image scannée et la prédiction comme sortie si une telle maladie existe
# 
# +
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog, QApplication, QFileDialog, QMainWindow
import sys
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon, QPixmap
from PIL import Image
class MyApp(QMainWindow):
    """Minimal PyQt5 front-end for the MRI classifier.

    Shows a 'Predict' button that runs the module-level `model` on the
    module-level `image` and displays a referral recommendation, and a
    'path' button that opens a file picker.
    """
    def __init__(self):
        super(MyApp, self).__init__()
        self.label = QtWidgets.QLabel(self)
        self.b1 = QtWidgets.QPushButton(self)
        self.b2 = QtWidgets.QPushButton(self)
        self.setGeometry(0, 0, 900, 600)
        self.setWindowTitle("Smart scanner/MRI")
        self.initUI()
    def predicted(self):
        """Classify the loaded image and show a specialist recommendation."""
        # FIX: the original used `else if` (a SyntaxError in Python — must
        # be `elif`) and called predict on three different names
        # (`image`, `model`, `img`); the prediction is now computed once
        # on `image`.
        # NOTE(review): the tested labels ('bone tumor', 'covid 19', ...)
        # do not match the labels the classifier was trained on
        # ('brain_tumor positif', 'covid_19 positif', 'chest_cancer',
        # 'Mild Demented alzeighmer', ...) — confirm against the model's
        # actual classes.
        prediction = model.predict(image)[1]
        if prediction == 'bone tumor':
            var = "brain tumor detected please consult a neuro surgeon"
        elif prediction == 'covid 19':
            var = 'covid_19 detected please consult a pneumologiste '
        elif prediction == 'alzheimer':
            var = 'alzheimer detected please consult a neurologist'
        elif prediction == 'chest':
            var = 'chest cancer detected please consult a pulmonologist'
        elif prediction == 'breast cancer':
            var = "breast cancer detected please consult a gynecologist "
        else:
            # FIX: without a fallback, `var` was unbound when no label
            # matched, raising UnboundLocalError at setText.
            var = 'no disease detected'
        self.label.setText(var)
        self.update()
    def initUI(self):
        """Lay out the result label and the two buttons."""
        self.label.setText("Result")
        self.label.move(425, 250)
        self.b1.setText("Predict")
        self.b1.clicked.connect(self.predicted)
        self.b1.move(400,400)
        self.b1.setStyleSheet("QPushButton"
                              "{"
                              "background-color : lightblue;"
                              "}")
        self.b2.setText("path")
        self.b2.clicked.connect(self.getpath)
        self.b2.move(400 , 100)
        self.b2.setStyleSheet("QPushButton"
                              "{"
                              "background-color : green;"
                              "}")
    def update(self):
        # NOTE(review): this shadows QWidget.update(); kept for
        # compatibility with the clicked-handler above.
        self.label.adjustSize()
    def getpath(self):
        """Open a file dialog and print the chosen path."""
        file = QFileDialog.getOpenFileName()
        print(file)

def window():
    """Create the Qt application and run the main window's event loop."""
    app = QApplication(sys.argv)
    win = MyApp()
    win.show()
    sys.exit(app.exec_())

window()
|
smart mri machine .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Support Vector Machines - Example Project
# Support Vector Machines (SVM) are a method that uses points in a transformed problem space that best separate classes into two groups. Classification for multiple classes is then supported by a one-vs-all method (just like we previously did for Logistic Regression for Multi Class Classification).
#
# This lecture section will be broken up by the following sections:
#
# <br>
# Part 1: Introduction to Support Vector Machines
# <br>
# Part 2: SVM General Explanation
# <br>
# Part 3: Computing the Hyperplane
# <br>
# Part 4: Additional Math Resources
# <br>
# Part 5: SVM with Sci Kit Learn
# <br>
# Part 6: Additional Resources
# <br>
# ## Part 1: Introduction to Support Vector Machines¶
# Formal Explanation:
#
# In machine learning, support vector machines (SVMs) are supervised learning models with associated learning algorithms that analyze data and recognize patterns, used for classification and regression analysis. Given a set of training examples, each marked for belonging to one of two categories, an SVM training algorithm builds a model that assigns new examples into one category or the other, making it a non-probabilistic binary linear classifier. An SVM model is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall on.
#
# The advantages of support vector machines are:
#
# Effective in high dimensional spaces.
# Still effective in cases where number of dimensions is greater than the number of samples.
# Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.
# Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.
# The disadvantages of support vector machines include:
#
# If the number of features is much greater than the number of samples, the method is likely to give poor performances.
# SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation (see Scores and probabilities, below).
#
# Let's go ahead and break down the main idea of SVMs!
# ## Part 2: SVM General Explanation
# We'll start by imagining a situation in which we want to seperate a training set with two classes. We have two classes in our set, blue and red. We plot them out in the feature space and we try to place a green line that seperates both classes.
#
# We have multiple ways of drawing this line (which we will call a hyperplane) that can seperate the classes completely. So which is the optimal line?
#
# We decide that the optimal hyperplane that seperates these two classes is the one with the maximum margin between the two classes.
#
# So how do we actually mathematically compute that optimal hyperplane? I'll explain with a very brief overview below in Part 3, but I highly suggest you check out the full explanation on <a href="https://en.wikipedia.org/wiki/Support-vector_machine" target="_blank">Wikipedia</a> or in the lecture videos following Part 3.
# ## Part 3: Computing The Hyperplane
# Let's go ahead and start by defining the Hyperplane in this case with the equation of a line, where Beta tranposed is the known weight vector of the features we've seen before and Beta nought is the bias.
#
# f(x)=β0+βTx
#
# There are an infinite number of ways we could scale the weight vector and the bias, but remember we want to maximize the margin between the two classes. So we realize through some math (explained in detail the videos below) can set this as:
#
# |β0+βTx|=1
#
# where x symbolizes the training examples closest to the hyperplane. In general, the training examples that are closest to the hyperplane are called support vectors. These support vectors are filled in with color in the image above. This representation is known as the canonical hyperplane.
#
# From geometry we know that the distance betweeen a point x and the hyperplane (Beta,Beta0)is: distance=|β0+βTx|||β||.
#
# In particular, for the canonical hyperplane, the numerator is equal to one and the distance to the support vectors is
# distance support vectors=|β0+βTx|||β||=1||β||
#
# Recall that the margin introduced in the previous section, here denoted as M, is twice the distance to the closest examples:
#
# M=2||β||
#
# Finally, the problem of maximizing M is equivalent to the problem of minimizing a function L(Beta) subject to some constraints. The constraints model the requirement for the hyperplane to classify correctly all the training examples xi.
#
# Formally,
#
# minβ,β0L(β)=12||β||2 subject to yi(βTxi+β0)≥1 ∀i
#
# where yi represents each of the labels of the training examples.
#
# This is a problem of Lagrangian optimization that can be solved using <a href="https://en.wikipedia.org/wiki/Lagrange_multiplier" target="_blank">Lagrange multipliers</a> to obtain the weight vector Beta and the bias Beta0 of the optimal hyperplane.
#
# If we want to do non-linear classification we can employ the <a href="https://en.wikipedia.org/wiki/Kernel_method" target="_blank">kernel trick</a>. Using the kernel trick we can "slice" the feature space with a Hyperplane. For a quick illustraion of what this looks like, check out both the image and the video below!
#
# Kernel Trick for the Feature Space
# Display an external illustration of the kernel trick.
# FIX: `Image` was never imported anywhere in this notebook, so the
# original cell raised NameError at runtime.
from IPython.display import Image
url='http://i.imgur.com/WuxyO.png'
Image(url)
# Kernel Trick Visualization
# Embed YouTube lectures directly in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('3liCbRZPrZA')
# ## Part 4: Additional Math Resources
# Below are video links to free resources for a deeper dive into the mathematics of Support Vector Machines. First a lecture by <NAME> and then a lecture from MIT Open CourseWare
# MIT Lecture
YouTubeVideo('_PwhiWxHK8o')
# ## Part 5: SVM with Sci Kit Learn
# Now we are ready to jump into some Python code and Sci Kit Learn, we'll start with some basic imports and we will import Sci Kit Learn along the way while we use it.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn import datasets
# +
# Load the iris dataset: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
X = iris.data
Y = iris.target
# -
print(iris.DESCR)
from sklearn.svm import SVC
model = SVC()
from sklearn.model_selection import train_test_split
# 60/40 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.4,random_state=3)
model.fit(X_train,Y_train)
from sklearn import metrics
# +
predicted = model.predict(X_test)
expected = Y_test
# -
print(metrics.accuracy_score(expected,predicted))
from sklearn import svm
# +
# Keep only the first two features (sepal length/width) so the decision
# boundaries can be drawn in 2-D below.
X = iris.data[:,:2]
Y = iris.target
# -
# Regularization parameter shared by all four classifiers.
C = 1.0
svc = svm.SVC(kernel='linear',C=C).fit(X,Y)
rbf_svc = svm.SVC(kernel='rbf',gamma=0.7,C=C).fit(X,Y)
poly_svc = svm.SVC(kernel='poly',degree=3,C=C).fit(X,Y)
lin_svc = svm.LinearSVC(C=C).fit(X,Y)
# +
# Mesh-grid parameters: step size and plot bounds padded by 1 unit.
h = 0.02
x_min = X[:,0].min() -1
x_max = X[:,0].max() +1
# -
y_min = X[:,1].min() -1
y_max = X[:,1].max() +1
# FIX: the x-axis arange was missing the step `h`, so the grid had step 1
# on x but 0.02 on y, producing a very coarse decision surface horizontally.
xx, yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))
# One subplot per classifier: shaded decision regions + the training points.
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']
for i,clf in enumerate((svc,lin_svc,rbf_svc,poly_svc)):
    plt.figure(figsize=(15,15))
    plt.subplot(2,2,i+1)
    plt.subplots_adjust(wspace=0.4,hspace=0.4)
    # Classify every grid point, then reshape back to the mesh shape.
    Z = clf.predict(np.c_[xx.ravel(),yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx,yy,Z,cmap=plt.cm.terrain,alpha=0.5)
    plt.scatter(X[:,0],X[:,1],c=Y,cmap=plt.cm.Dark2)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    # FIX: the original called plt.ylim with the x-range (and then set
    # ylim again on the next line), so the x limits were never applied.
    plt.xlim(xx.min(),xx.max())
    plt.ylim(yy.min(),yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
|
support-vector-machines/Support Vector Machines - Example Project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Connect to the database
import pymysql
from sqlalchemy import create_engine
import pandas as pd
import getpass # To get the password without showing the input
password = getpass.getpass()
# NOTE(review): `engine` is only a connection *string*, not an Engine —
# recent pandas accepts a URI in read_sql, but `create_engine` is imported
# and never used; confirm which was intended.
engine = f'mysql+pymysql://root:{password}@localhost/sakila'
# Read the data into a Pandas dataframe.
# Query 1: per-film rental statistics (avg cost, avg hours rented, count).
query_1 = '''
select i.film_id,
round(avg(p.amount),2) avg_rental_cost,
round(avg(timestampdiff(hour, r.rental_date, r.return_date)),2) as hours_rented,
count(ifnull(r.rental_id, 0)) as num_rent_times
from rental r
join payment p on p.rental_id = r.rental_id
join inventory i on i.inventory_id = r.inventory_id
group by 1
order by 1,2,3,4;
'''
# Query 2: per-film actor aggregates — actor list, summed "fame"
# (films per actor) and "influence" (distinct co-actors).
query_2 = '''
select
act2.film_id,
group_concat(act2.actor_id separator ',') actor_list,
sum(act2.actor_fame) total_actor_fame,
sum(act2.actor_influence) total_actor_influence
from (
select fa.film_id, act1.*
from (
select
fa1.actor_id,
count(distinct(fa1.film_id)) actor_fame,
count(distinct(fa2.actor_id)) actor_influence
from
film_actor fa1
join film_actor fa2 on fa2.film_id = fa1.film_id
group by fa1.actor_id
) act1
join film_actor fa on fa.actor_id = act1.actor_id
) act2
group by act2.film_id;
'''
# Query 3: per-film descriptive features (category, language, length,
# replacement cost, rating, number of actors).
query_3 = '''
select f.film_id,
f.title,
f.description,
fc.category_id,
f.language_id,
avg(f.rental_duration) * 24 as avg_hours_rental_allowed,
f.length / 60 as hours_length,
avg(f.replacement_cost) as avg_replacement_cost,
f.rating,
f.special_features,
count(fa.actor_id) actors_in_film
from film f
join film_category fc on fc.film_id = f.film_id
join film_actor fa on fa.film_id = f.film_id
group by 1,2,3,4,5,7,9,10
order by 1,4,5,6,7;'''
# +
# Run the three queries into separate frames.
data = pd.read_sql(query_1, engine)
data
# +
data_1 = pd.read_sql(query_2, engine)
data_1
# +
data_2 = pd.read_sql(query_3, engine)
data_2
# +
# First concat stacks the frames vertically (axis=0)...
frames = [data, data_1, data_2]
df = pd.concat(frames)
# -
df
# +
# ...then it is redone column-wise (axis=1); only this result is kept.
frames = [data, data_1, data_2]
df = pd.concat(frames,axis=1)
# -
df
df.describe()
df.shape
df.isnull().sum()
df.columns
# NOTE(review): this dropna is not assigned and not inplace — it has no
# effect; the inplace call two lines below does the actual cleaning.
df.dropna(axis=0)
df
df.dropna(axis=0, inplace=True)
df
df.isnull().sum()
df.corr()
# +
#Splitting into test and train set?
# +
## 3- Analyze extracted features and transform them.
# You may need to encode some categorical variables, or scale numerical variables.
X = df[['avg_rental_cost', 'hours_rented', 'category_id', 'hours_length', 'actors_in_film', 'total_actor_fame']]
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
scaled_x = StandardScaler().fit_transform(X)
# +
#target query
target_query = '''
select rental_id, film_id, rental_date, rank() over (partition by film_id order by rental_date desc) recent
from film left join inventory using (film_id) left join rental using (inventory_id)
where rental_date > '2006-02-01'
'''
target = pd.read_sql(target_query, engine)
# -
target
target['rental_id'].isna().to_frame()
df.columns
df = df.drop('description', 1)
df.dtypes
df = df.drop("actor_list", 1)
df = df.drop("title", 1)
df = df.drop("rating", 1)
df = df.drop("special_features", 1)
df
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
corr_matrix=df.corr(method='pearson') # default
fig, ax = plt.subplots(figsize=(10, 8))
ax = sns.heatmap(corr_matrix, annot=True)
plt.show()
df = df.drop("language_id", 1)
df = df.drop("total_actor_influence", 1)
corr_matrix=df.corr(method='pearson') # default
fig, ax = plt.subplots(figsize=(10, 8))
ax = sns.heatmap(corr_matrix, annot=True)
plt.show()
df = df.drop("total_actor_fame", 1)
corr_matrix=df.corr(method='pearson') # default
fig, ax = plt.subplots(figsize=(10, 8))
ax = sns.heatmap(corr_matrix, annot=True)
plt.show()
df = df.drop("film_id", 1)
sns.distplot(df['actors_in_film'])
plt.show()
from sklearn.preprocessing import Normalizer
# from sklearn.preprocessing import StandardScaler
import numpy as np
X = df.select_dtypes(include = np.number)
# Normalizing data
# Normalizer rescales each ROW to unit norm (unlike StandardScaler, which
# standardizes each column).
transformer = Normalizer().fit(X)
x_normalized = transformer.transform(X)
x = pd.DataFrame(x_normalized)
# NOTE(review): this re-plots the raw column -- the normalized values live
# in `x`, not in `df`, so this chart is identical to the previous one.
sns.distplot(df['actors_in_film'])
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Define the target and feature matrix BEFORE splitting: the original called
# train_test_split while `y` was still undefined (NameError), and `X` would
# otherwise have been the stale matrix built earlier in the notebook that
# did not exclude the target column.
y = df["num_rent_times"]
X = df.drop(columns="num_rent_times")

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=100)

# One-vs-rest logistic regression on the raw (unscaled) features.
classification = LogisticRegression(random_state=0, solver='lbfgs',
                                    multi_class='ovr').fit(X_train, y_train)
classification.score(X_test, y_test)
predictions = classification.predict(X_test)
classification.score(X_test, y_test)
print(y_test.value_counts())
# +
pd.Series(predictions).value_counts()
# -
from sklearn.metrics import confusion_matrix
cf_matrix = confusion_matrix(y_test, predictions)
print(cf_matrix)
sns.heatmap(cf_matrix, annot=True)
# Same matrix, annotated as percentages of all predictions.
sns.heatmap(cf_matrix/np.sum(cf_matrix), annot=True,
            fmt='.2%', cmap='Blues')
# Build combined "name / count / percent" annotations for each cell.
# NOTE(review): the 2x2 reshape and the TN/FP/FN/TP names assume a BINARY
# target; this will fail if num_rent_times has more than two classes.
group_names = ['True Neg','False Pos','False Neg','True Pos']
group_counts = ["{0:0.0f}".format(value) for value in
                cf_matrix.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in
                     cf_matrix.flatten()/np.sum(cf_matrix)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in
          zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(cf_matrix, annot=labels, fmt='', cmap='Blues')
# +
import sklearn.metrics as metrics
import matplotlib.pyplot as pyplt
# Probability of the positive class, used as the ROC score.
y_pred_proba = classification.predict_proba(X_test)[::,1]
# NOTE(review): pos_label='B' must be an actual class label of y_test --
# confirm; otherwise roc_curve raises.
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba, pos_label='B')
pyplt.plot(fpr,tpr)
# -
|
week3/3.08_lab_predictions_logistic_regression_solution.md.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import shelve
import os
import scipy.stats as stats
from itertools import combinations
plt.style.use('seaborn-dark')
# -
clean_adult = pd.read_hdf('results/df1.h5', 'clean_adult/')
clean_adult.head()
# ## Sex, Race, and Education
#
# Is there a relationship between sex/race and education? Let's take a closer look.
ax = sns.boxplot(x="sex", y="education.num", data=clean_adult, palette="muted")
ax.set_title("Years of Education since 4th Grade vs. Sex");
plt.savefig("fig/education_sex.png")
# Wow! They look quite similar, with the same mean of education.num = 10 years, although it seems that males finish college more often (education.num = 13 years).
#
# What about race?
ax = sns.boxplot(x="race", y="education.num", data=clean_adult, palette="muted")
ax.set_title("Years of Education since 4th Grade vs Race")
plt.savefig("fig/education_race.png")
# Some observations are:
# * Black, Native American/Eskimo, and Other groups have a high variance and low mean compared to the White and Asian/Pacific Islander groups. This is not surprising. Unfortunately not much has changed since 1994.
# * The Asian/Pacific Islander group has the highest mean education. This reflects both the fact that immigrants in this group tend to have a high education and also the culture valuing education by this group.
#
# Now it's time to some hypothesis testing. We want to see if there is a difference in education between groups. For this case we will use the two sample t-test. The conditions for the test are:
# * The sampling method for each sample is simple random sampling.
# * The samples are independent.
# * Each population is at least 20 times larger than its respective sample.
# * The sampling distribution is approximately normal.
#
# We have no reason to believe that these assumptions are not met.
# +
def two_sample_t_test(group1, group2, group1_name, group2_name, alpha = 0.05):
    """Two-sided, two-sample (Welch-style) t-test for a difference in means.

    Parameters
    ----------
    group1, group2 : sequences of observations for each group
    group1_name, group2_name : labels used in the printed summary
    alpha : significance level, default 0.05

    Returns
    -------
    (t, p, reject) : the t-statistic, the two-sided p-value, and whether
    the null hypothesis of equal means is rejected at level ``alpha``.

    NOTE(review): variances use np.var's default ddof=0 (population
    variance); a textbook Welch test uses the sample variance (ddof=1).
    Kept as-is because the notebook's unit tests pin the current p-values
    -- confirm before reusing elsewhere.
    """
    size_a = len(group1)
    size_b = len(group2)
    assert(size_a > 0)
    assert(size_b > 0)
    var_a = np.var(group1)
    var_b = np.var(group2)
    mean_a = np.mean(group1)
    mean_b = np.mean(group2)
    # Standard error of the difference between the two sample means.
    scaled_vars = var_a / size_a + var_b / size_b
    std_err = np.sqrt(scaled_vars)
    # Welch-Satterthwaite degrees of freedom, truncated to an integer.
    dof = (np.square(scaled_vars)
           / (np.square(var_a / size_a) / (size_a - 1)
              + np.square(var_b / size_b) / (size_b - 1))).astype(int)
    t = (mean_a - mean_b) / std_err
    p = stats.t.sf(np.abs(t), dof) * 2
    reject = p < alpha
    if reject:
        print(f"The mean difference is statistically significant for Group {group1_name} and Group {group2_name}")
        print(f"p-value is {p}")
        print()
    else:
        print(f"There is no statistically significant difference between Group {group1_name} and Group {group2_name}")
        print()
    return (t, p, reject)
# -
male = clean_adult[clean_adult["sex"] == "Male"]
female = clean_adult[clean_adult["sex"] == "Female"]
t, p, reject = two_sample_t_test(male["education.num"], female["education.num"], "Male", "Female")
# As expected from looking at the box plot, males and females do not have statistically different years of education.
races = clean_adult.groupby("race")
pairs = [",".join(map(str, comb)).split(",") for comb in combinations(races.groups.keys(), 2)]
for pair in pairs:
race1_name = pair[0]
race2_name = pair[1]
race1 = races.get_group(pair[0])
race2 = races.get_group(pair[1])
two_sample_t_test(race1["education.num"], race2["education.num"], race1_name, race2_name)
# The results match with looking at the box plots again: The only non-statistically significant difference is the Black group and the Native American/Eskimo group.
#
# Thus, we saw that there is a relationship between education and race, but not a relationship between education and sex.
# ## Testing Section
import unittest
class MyTests(unittest.TestCase):
    """Unit tests for two_sample_t_test (defined earlier in this notebook)."""
    def test_same_population(self):
        # Identical samples: zero t-statistic, p-value of 1, no rejection.
        group1 = [1, 2, 3]
        group2 = group1
        t, p, reject = two_sample_t_test(group1, group2, "group1", "group2")
        self.assertAlmostEqual(0, t)
        self.assertAlmostEqual(1, p)
        self.assertTrue(not reject)
    def test_obvious_difference(self):
        # Means ~1000 apart: p should be ~0 and the null rejected.
        group1 = [1, 2, 3]
        group2 = [1000, 1001, 1001]
        t, p, reject = two_sample_t_test(group1, group2, "group1", "group2")
        self.assertAlmostEqual(0, p)
        self.assertTrue(reject)
    def test_significance_level(self):
        # Same data gives p ~= 0.1: rejected at alpha=0.1 but not at the 0.05 default.
        t, p, reject = two_sample_t_test([1, 2, 3], [4,9, 5], "group1", "group2", 0.1)
        self.assertAlmostEqual(0.1, p, places = 1)
        self.assertTrue(reject)
        t, p, reject = two_sample_t_test([1, 2, 3], [4,9, 5], "group1", "group2")
        self.assertAlmostEqual(0.1, p, places = 1)
        self.assertTrue(not reject)
    def test_same_population_different_order(self):
        # Observation order must not affect the statistics.
        group1 = [1, 2, 4]
        group2 = [2, 4, 1]
        t, p, reject = two_sample_t_test(group1, group2, "group1", "group2")
        self.assertAlmostEqual(0, t)
        self.assertAlmostEqual(1, p)
# argv/exit settings let unittest run inside a notebook kernel without
# swallowing sys.argv or killing the process.
unittest.main(argv=["foo"], exit = False, verbosity = 2)
|
demographics-p3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Engineering
#
# In this notebook we'll start with a lightly cleaned version of the CSC dataset. We'll go through the following steps:
#
# * Examine and visualize missing data.
# * Determine where it's appropriate to drop records, and the right way to impute missing values for remaining records.
# * Bin some of our numeric features.
# * Condense our offence descriptions.
# * OneHot & ordinal encode our variables.
# * Reshape dataframe so that each record is an inmate in a particular year.
#
# Let's start by importing our packages and reading in the data.
# +
# Import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import missingno as msno
# +
#Read in CSV
df = pd.read_csv('cleaned_data.csv', index_col=0)
#Make sure all rows are displayed
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
# -
df.head()
# ## Visualizing Missing Data
#
# Let's get a summary of our null values, and then visualize it using the `missingno` package.
df.shape
df.isnull().sum()
msno.matrix(df)
plt.show()
# From the counts we can see that the 'supervision type' and 'instutitional security level' columns have the most NaNs, with a couple more in the 'offender security level' and 'religion' columns. Interestingly, our NaN matrix shows us that there are a couple of records that are missing data in many of our records of interest ('offender security level', 'dynamic/need', 'static/risk', 'reintegration potential', 'motivation').
#
# Let's plot a NaN heatmap to look at the correlation in the existence of NaNs across columns.
msno.heatmap(df)
plt.show()
# We have confirmation about the correlation between missing scores of various kinds - if a score is missing in one column, it's likely to be missing in others. Additionally, we have a strong negative correlation between 'institutional security level' and 'supervision type'. This is because if an offender is supervised, they are out in the community, which means they shouldn't have an institutional security level assigned since they're not in custody.
#
# We have some NaNs in the 'court' and 'religion' columns. Let's see how many values there are in these columns.
print(df['COURT'].nunique(), df['RELIGION'].nunique())
# There are quite a number of values in both of these columns - from the EDA we've also seen that there isn't an overwhelming number of individuals practising a particular religion/ attending a particular court, so it would be difficult to impute these values. That said, it's more likely that the features around an individual's security risk, prison term, etc. are indicative of their reintegration potential than their faith or the particular court they were convicted in, so I'm not too troubled by this result. But we can include a smaller dataframe with the records that don't have NaNs in these two columns as one of our modelling options to ensure we're exploring all possibilities.
#
# After examining the structure of our NaNs, we have a good sense of next steps:
#
# * Since there's such a strong relationship between NaNs in the 'offender security level', 'dynamic/need', 'static/risk', 'reintegration potential', and 'motivation' columns, we're going to drop all records that have are NaNs for 'reintegration potential', which will take care of the majority of NaNs in the remaining columns. We've chosen this column since it's going to be our target.
# * We'll code the 'supervision type' NaNs to return the category of 'In Custody' and the institutional security level NaNs to return the category of 'Community'.
# * We'll drop the 'religion' and 'court' columns.
# * Where NaNs remain in the categorical columns, we'll impute using KNN imputer.
#
# ## Dealing with NaNs
#
# Let's begin by dropping the records that have NaNs in the 'reintegration potential' column.
df.dropna(subset = ["REINTEGRATION POTENTIAL"], inplace=True)
df.shape
df.isnull().sum()
# Now we have significantly fewer NaNs in the other assessment columns. We'll impute these with KNN imputer down the line. Let's move on to accounting for the NaNs in 'supervision type' and 'institutional security level'.
# +
#Define function to replace supervision NaNs
def replace_sup_nans(x):
    """Map a missing supervision type to 'In Custody'; pass others through.

    A NaN here means the offender is not under community supervision.
    """
    if pd.isnull(x):
        return 'In Custody'
    else:
        return x
#Define function to replace security level NaNs
def replace_security_nans(x):
    """Map a missing institutional security level to 'Community'.

    A NaN here means the offender is not held in an institution.
    """
    if pd.isnull(x):
        return 'Community'
    else:
        return x
#Map our functions on their respective columns
df['SUPERVISION TYPE'] = df['SUPERVISION TYPE'].map(replace_sup_nans)
df['INSTITUTIONAL SECURITY LEVEL'] = df['INSTITUTIONAL SECURITY LEVEL'].map(replace_security_nans)
df.isnull().sum()
# -
# Now we're left with the offender security level before imputing. Let's look at what this column looks like:
df.groupby('OFFENDER SECURITY LEVEL')['OFFENDER NUMBER'].count()
# It looks like the majority of individuals are in the 'Medium' classification of offender security level so our first instinct might be to assign that to our NaN values. However, from our EDA notebook we saw that 93% of offenders had the same offender security level as their institutional security level. Since we have no NaNs in that column, let's use those values to fill the NaNs for offender security level.
df['OFFENDER SECURITY LEVEL'].fillna(df['INSTITUTIONAL SECURITY LEVEL'],inplace=True)
df.isnull().sum()
# Finally, we will impute the couple of hundred remaining values in the 'dynamic/need', 'static/risk', and 'motivation' columns by taking the mode of the column when grouped by the reintegration potential.
# +
df['STATIC/RISK'] = df.groupby(['OFFENDER SECURITY LEVEL','REINTEGRATION POTENTIAL'], sort=False)['STATIC/RISK'].apply(lambda x: x.fillna(x.mode().iloc[0]))
df['DYNAMIC/NEED'] = df.groupby(['OFFENDER SECURITY LEVEL','REINTEGRATION POTENTIAL'], sort=False)['DYNAMIC/NEED'].apply(lambda x: x.fillna(x.mode().iloc[0]))
df['MOTIVATION'] = df.groupby(['OFFENDER SECURITY LEVEL','REINTEGRATION POTENTIAL'], sort=False)['MOTIVATION'].apply(lambda x: x.fillna(x.mode().iloc[0]))
df.isnull().sum()
# -
# Now we only have NaNs in our 'religion' and 'court' columns.
#
# ## Feature Engineering
#
# In this section of the exploration we will:
#
# * Bin our numeric features (Age, Sentence Length).
# * Condense our offence descriptions.
# * Condense the number of racial categories.
#
# We'll be using adaptive binning for our age and sentence length columns. This way we can avoid having overly sparse or packed bins, which would happen if we were manually binning. The adaptive binning technique that's used below is quantile based binning, using 20% quantiles throughout.
# +
#Get list of quantiles
quantile_list = [0, .2, .4, .6, .8, 1.]
#Labels for our values
quantile_labels = ['0-20Q', '20-40Q', '40-60Q', '60-80Q', '80-100Q']
#Create new columns with quantile cuts
df['AGE_QRANGE'] = pd.qcut(df['AGE'], q=quantile_list, labels=quantile_labels)
df['SL_QRANGE'] = pd.qcut(df['SENTENCE LENGTH (YEARS)'], q=quantile_list, labels=quantile_labels)
df.head()
# -
# Now that we've dealt with binning the continuous numeric variables, we can move onto the categorical variables. Let's start by condensing our 'racial category' column from 10 unique values to 4 (grouping as 'White', 'Indigenous', 'Black', and 'Other'). We will then look at the top offence descriptions and keep the unique values that account for the majority of our offenders.
# +
#Define a dict to map races to 'other' category
race_cat = {('Middle Eastern/ West Asian','East Asian','South Asian','Hispanic', 'Oceania', 'Multi-Ethic'):'Other'}
race_dict = {}
for k, v in race_cat.items():
for key in k:
race_dict[key] = v
race_dict['Other'] = 'Other'
race_dict['White'] = 'White'
race_dict['Black'] = 'Black'
race_dict['Indigenous'] = 'Indigenous'
#Map dict to current 'racial category' column
df['RACIAL CATEGORY'] = df['RACIAL CATEGORY'].map(race_dict)
#Look at unique values - should be 4
df['RACIAL CATEGORY'].unique()
# -
# Let's now get a sense of how many unique offence descriptions we have.
df['OFFENCE DESCRIPTION'].nunique()
# Quite a bit - that would be computationally expensive to encode into dummies, and might make our models inefficient. Let's see how many records are accounted for if we take the top 100 offence descriptions.
top_desc = df.groupby('OFFENCE DESCRIPTION')['OFFENCE ID'].count().sort_values(ascending=False)[:100]
bottom_desc = df.groupby('OFFENCE DESCRIPTION')['OFFENCE ID'].count().sort_values(ascending=False)[100:]
top_desc.sum()
# The top 100 offence descriptions account for 87% of our records - let's keep these and encode the rest of the offence descriptions as 'other'.
# +
#Turn our groupby object into a list
not_list_desc = bottom_desc.index.tolist()
#Map all descriptions in the list to the 'other' category
off_desc = {tuple(not_list_desc):'Other'}
off_dict = {}
for k, v in off_desc.items():
for key in k:
off_dict[key] = v
#Create a condensed offence description column mapped to our 'other' description, then fill the NaNs with their original values
df['CONDENSED OFFENCE DESCRIPTION'] = df['OFFENCE DESCRIPTION'].map(off_dict)
df['CONDENSED OFFENCE DESCRIPTION'].fillna(df['OFFENCE DESCRIPTION'],inplace=True)
#Show that we have 101 descrptions - our 100 top counts plus 1 for 'other'
df['CONDENSED OFFENCE DESCRIPTION'].nunique()
# -
# Finally, let's change our 'year' field to something more readable:
df['YEAR'] = df['FISCAL YEAR'].apply(lambda x: '20' + x[-2:])
# Now that we've dealt with NaNs and finished up our feature engineering, let's move on to encoding our features.
#
# ## Encoding
#
# Before we encode, we'll filter our records and drop some columns that aren't going to be used in the exploration. We will filter for:
#
# * Male offenders
# * Federal offenders
#
# We will then drop the following columns:
#
# * Age
# * Fiscal Year
# * Gender
# * Jurisdiction
# * Sentence ID
# * Offence ID
# * Race
# * Race grouping
# * Sentence type
# * Aggregate sentence length
# * Province
# * Religion
# * Warrant ID
# * Court
# * Offence Description
# * Sentence length (years)
#
# We'll make the following adjustments to our variables:
#
# * Ordinal encode 'institutional security level', 'offender security level', 'dynamic/need', 'static/risk', 'reintegration potential', and 'motivation'.
# * One Hot encode 'racial category', 'age qrange', 'sentence length qrange', 'in custody/community', 'supervision type', and 'condensed offence description'.
#
# Let's filter our records:
# +
#Filter for only male records
df = df[df['GENDER']=='MALE'].reset_index(drop=True)
#Filter for only federal offenders
df = df[df['JURISDICTION']=='FEDERAL'].reset_index(drop=True)
# -
# Now let's condense our dataframe to only relevant columns
df_clean = df.drop(['GENDER', 'AGE', 'JURISDICTION', 'SENTENCE ID', 'LOCATION TYPE', 'OFFENCE ID', 'RACE', 'RACE GROUPING', 'SENTENCE TYPE', 'AGGREGATE SENTENCE LENGTH', 'PROVINCE', 'RELIGION', 'WARRANT ID', 'COURT', 'OFFENCE DESCRIPTION', 'SENTENCE LENGTH (YEARS)', 'FISCAL YEAR'], axis=1)
# Finally, let's make encode our variables so we can feed them into our models. We'll one-hot encode and ordinal encode where appropriate.
#
# #### Ordinal Encoding
#
# We will be ordinal encoding 'institutional security level', 'offender security level', 'dynamic/need', 'static/risk', 'reintegration potential', and 'motivation'.
# +
#Define dicts for ordinal encoding
security_dict = {'Community': 0, 'MINIMUM': 1, 'MEDIUM':2, 'MULTI-LEVEL': 3, 'MAXIMUM':4}
dyn_stat_dict = {'LOW': 0, 'MEDIUM': 1, 'HIGH': 2}
reint_motiv_dict = {'HIGH':0, 'MEDIUM':1, 'LOW': 2}
#Security dict mapping
df_clean['OFFENDER SECURITY LEVEL'] = df_clean['OFFENDER SECURITY LEVEL'].map(security_dict)
df_clean['INSTITUTIONAL SECURITY LEVEL'] = df_clean['INSTITUTIONAL SECURITY LEVEL'].map(security_dict)
#Dynamic/ static mapping
df_clean['DYNAMIC/NEED'] = df_clean['DYNAMIC/NEED'].map(dyn_stat_dict)
df_clean['STATIC/RISK'] = df_clean['STATIC/RISK'].map(dyn_stat_dict)
#Reintegration potential/ motivation mapping
df_clean['REINTEGRATION POTENTIAL'] = df_clean['REINTEGRATION POTENTIAL'].map(reint_motiv_dict)
df_clean['MOTIVATION'] = df_clean['MOTIVATION'].map(reint_motiv_dict)
# -
# #### One-Hot Encoding
#
# We will be one-hot encoding 'racial category', 'age qrange', 'sentence length qrange', 'in custody/community', 'supervision type', and 'condensed offence description'.
#
# First, we will one-hot encode the 'condensed offence description', grouping by offender number and year given that the reintegration potential score is calculated annually for each offender.
# +
# Dummify offence description column
offence_dummies = pd.get_dummies(df_clean['CONDENSED OFFENCE DESCRIPTION'], prefix='offence')
# Select all columns other than Offence Description
df_other = df_clean.drop(['CONDENSED OFFENCE DESCRIPTION'], axis=1)
# Group dataframe by Offender ID and year (i.e., one row for each unique reintegration potential assessment)
# All values are the same across each set of grouped rows, so taking the max will retrieve those values
df_other_grouped = df_other.groupby(['OFFENDER NUMBER', 'YEAR']).max()
# Add dummy columns to dataframe's Offender ID & year columns
df_off_dummies = pd.concat([df_clean[['OFFENDER NUMBER', 'YEAR']], offence_dummies], axis=1)
# Group dataframe and take the sum
df_off_sum = df_off_dummies.groupby(['OFFENDER NUMBER', 'YEAR']).sum()
# Add summed dummy columns to others
df_final = pd.concat([df_other_grouped, df_off_sum], axis=1).reset_index(drop= True)
# -
# Now we will one-hot encode the remaining columns.
df_final = pd.get_dummies(df_final, prefix = ['RACE','AGE','SENLENGTH','CUSTODY','SUPERVISION'], columns=['RACIAL CATEGORY','AGE_QRANGE','SL_QRANGE','IN CUSTODY/COMMUNITY','SUPERVISION TYPE'])
df_final.head()
# Now we will write our final dataframe into a CSV.
df_final.to_csv('modelling_data.csv', index=False)
# ## Conclusion
#
# In this notebook we've completed the following steps:
#
# * Examined and visualized missing data.
# * Determined where it's appropriate to drop records, and the right way to impute missing values for remaining records.
# * Binned some of our numeric features.
# * Condensed our offence descriptions.
# * OneHot & ordinal encoded our variables.
# * Reshaped dataframe so that each record is an inmate in a particular year.
|
working_notebooks/anastasias_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#libraries
import requests
import json
import time
import pandas as pd
import numpy as np
import os
from bs4 import BeautifulSoup
# # Data Cleaning
#
# ## *Classify together*
# - violent & gore & sexual content & nudity -- 18+
# - action & adventure -- Act_Adv
#
#
#
#
#
# ## *Remove*
# - accounting
# - animation & modeling
# - game development
# - video production
# - photo editing
# - web-publishing
# - utilities
# - audio production
# - software training
# +
#file is file to be manipulated, ANSI Encoding
""" NOTE: YOU WILL GENERATE A NEW CSV FILE, NOT OVERWRITE YOUR CSV """
genres = ["genre1","genre2","genre3","genre4","genre5","genre6","genre7"]
#file = open("GameData_cleaned.txt", "a" , encoding="ANSI")
#################### INSERT YOUR FILE HERE ################
file = pd.read_csv('GameData.csv' ,dtype = {'name': str,'owners':int,'developer ' : str,'positive':int ,'publisher':str , 'price ':float ,'initialprice ':float }, header=0 , keep_default_na=False , encoding='ANSI')

""" PART 1 Removing data from Name"""
# Drop rows whose name marks them as non-game entries.
# Boolean masks fix a bug in the original: it collected *positional* row
# counters but passed them to df.drop(), which drops by *index label* --
# after the first drop the two no longer agree (wrong rows / KeyError).
name_terms = ('Test Server', 'Dedicated Server', 'Playtest', 'Game Development')
name_mask = file['name'].apply(lambda s: any(term in s for term in name_terms))
file = file[~name_mask]

""" PART 2 Removing data from Genre """
# Remove non-game software categories.
# NOTE(review): the original tested the lowercase string "accounting" while
# every other genre is title-cased, so it could never match -- changed to
# 'Accounting'; confirm against the raw data.
genre_terms = ('Accounting', 'Animation & Modeling', 'Game Development',
               'Video Production', 'Photo Editing', 'Web Publishing',
               'Utilities', 'Audio Production', 'Software Training')
genre_mask = file['genre'].apply(lambda g: any(term in g for term in genre_terms))
file = file[~genre_mask].reset_index(drop=True)

""" PART 3 Combining data """
# One-hot flag for mature content (violent/gore/sexual content/nudity -> 18+).
file['18+'] = [int('Violent' in g or 'Gore' in g or 'Sexual Content' in g
                   or 'Nudity' in g) for g in file['genre']]
# One-hot flag for the combined Action/Adventure genre.
file['Act_Adv'] = [int('Action' in g or 'Adventure' in g) for g in file['genre']]

##################### SAVE CSV FILE, NEW CSV FILE NAME ######################
# Keep the DataFrame bound to `file`: the original rebound it to the return
# value of to_csv (None), so the subsequent print showed "None".
file.to_csv('GameData_cleaned1.csv') #New CSV file
print(file)
#DEBUG, CAN IGNORE
# count = 0
# tempList = [] #new tempList
# for entry in file[genres].values.tolist():
#     print(entry)
#print(file)
# -
|
.ipynb_checkpoints/Cleanupdatajw-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Name: <NAME>
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
df = pd.read_excel(r'C:\Users\Harryzhu\Desktop\Study Files\Graduate UChicago\FINM 36700\MIDTERM\proshares_analysis_data.xlsx', sheet_name ='hedge_fund_series').set_index('date')
# 1.Short Answers
# 1)False
# The MV optimization doesn't tell you to long the highest Sharpe Ratio assets and short the lowest Sharpe Ratio assets. Rather, it tells you that the correlation between each single asset is important to look at. Investors love assets with low volatility and high returns, and MV methods build a tangency portfolio that can give you the optimal choice based on volatility and mean returns. The MV optimization tells you that you should invest in assets that have small correlation with other assets within your portfolio.
# 2)True
# It is because long term is more profitable than short term
# 3)
# I suggest that we should include an intercept here because we want the regressors to variance of BITO, not both the mean and the variance of BITO, given the fact the we don't trust the estimate of the mean return. Therefore, we should help the regressors and let it only estimate the variance of BITO.
# 4)
# HDG is not very effective when tracking HFRI in-sample. The Sharpe Ratio and mean of HDG are very different from those of HFRI. However, it is good for out-of-sample use since it has high correlation with the target HFRI.
# 5)
# If the alpha is negative, it means the hedge funds do not beat the market. This could suggest that this hedge fund does not explain the market.
# 2. Allocation
# +
rets = pd.read_excel(r'C:\Users\Harryzhu\Desktop\Study Files\Graduate UChicago\FINM 36700\MIDTERM\proshares_analysis_data.xlsx', sheet_name ='merrill_factors').set_index('date')
retsx = rets.subtract(rets["USGG3M Index"], axis=0)
retsx = retsx.drop(columns=["USGG3M Index"])
retsx.head()
# +
#1)calculate tangency portfolio
retsx_annual = retsx * 12
def compute_tangency(retsx):
    """Return (weights, mean excess returns, covariance) of the tangency portfolio.

    Weights are proportional to Sigma^{-1} mu and rescaled so they sum to
    one, i.e. the portfolio is fully invested in the risky assets.

    Parameters
    ----------
    retsx : pd.DataFrame of excess returns (rows = dates, cols = assets).
    """
    mu_tilde = retsx.mean()
    Sigma = retsx.cov()
    inv_cov = np.linalg.inv(Sigma)
    raw_weights = inv_cov @ mu_tilde
    # Normalize so the weights sum to exactly 1.
    omega_tangency = pd.Series(raw_weights / raw_weights.sum(), index=mu_tilde.index)
    return omega_tangency, mu_tilde, Sigma
omega_tangency, mu_tilde, Sigma = compute_tangency(retsx)
omega_tangency
# +
#2)
def target_mv_portfolio(df_tilde, target_return):
    """Scale the tangency portfolio to hit `target_return` per period.

    Multiplies the fully-invested tangency weights by the scalar
    delta_tilde that sets the portfolio's expected excess return to
    `target_return`. The resulting weights need not sum to 1; the
    remainder is implicitly held in the risk-free asset.
    """
    omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde)
    Sigma_adj = Sigma.copy()
    Sigma_inv = np.linalg.inv(Sigma_adj)
    N = Sigma_adj.shape[0]
    # Scaling factor: (1' Sigma^-1 mu) / (mu' Sigma^-1 mu) * target.
    delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return
    omega_star = delta_tilde * omega_tangency
    return omega_star
# TODO: Annualized target return in footnote is not up to date
omega_star = target_mv_portfolio(retsx, target_return=0.02)
omega_star
# +
# No, the optimal portfolio doesn't invested in the risk_free rate since we drop the risk free assets "USGG3M Index"
# -
#3)
opt_portf = retsx @ omega_star
#mean annualized
mean_annual = opt_portf.mean() * 12
print(mean_annual)
#volatility annualized
vol_annual = opt_portf.std() * np.sqrt(12)
print(vol_annual)
#Sharp Ratio
SR = mean_annual/vol_annual
print(SR)
# +
#4)
def target_mv_portfolio(df_tilde, target_return):
    """Scale the tangency portfolio to hit `target_return` per period.

    NOTE(review): identical re-definition of target_mv_portfolio from the
    cell above -- redundant but harmless.
    """
    omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde)
    Sigma_adj = Sigma.copy()
    Sigma_inv = np.linalg.inv(Sigma_adj)
    N = Sigma_adj.shape[0]
    delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return
    omega_star = delta_tilde * omega_tangency
    return omega_star
# TODO: Annualized target return in footnote is not up to date
omega_star = target_mv_portfolio(retsx, target_return=0.02)
omega_star
# -
df_temp_18 = retsx.loc[:'2018',:]
df_temp_18
# +
def target_mv_portfolio(df_tilde, target_return):
    """Scale the tangency portfolio to hit `target_return` per period.

    NOTE(review): third identical re-definition of target_mv_portfolio in
    this notebook -- redundant but harmless.
    """
    omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde)
    Sigma_adj = Sigma.copy()
    Sigma_inv = np.linalg.inv(Sigma_adj)
    N = Sigma_adj.shape[0]
    delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return
    omega_star = delta_tilde * omega_tangency
    return omega_star
omega_star_18 = target_mv_portfolio(df_temp_18, target_return=0.02)
omega_star_18
# +
df_temp_19 = retsx.loc['2019':, :]
omega_tangency_19, mu_tilde_19, Sigma_19 = compute_tangency(df_temp_19)
mean_19 = omega_star_18 @ mu_tilde_19
vol_19 = np.sqrt(omega_star_18 @ Sigma_19 @ omega_star_18)/np.sqrt(12)
sharpe_ratio_out_of_sample = mean_19/vol_19
print('The mean of 2019 is')
print(mean_19)
print('The volatility of 2019 is')
print(vol_19)
print('The sharp ratio of 2019 is')
print(sharpe_ratio_out_of_sample)
# +
df_temp_20 = retsx.loc['2020':, :]
omega_tangency_20, mu_tilde_20, Sigma_20 = compute_tangency(df_temp_20)
mean_20 = omega_star_18 @ mu_tilde_20
vol_20 = np.sqrt(omega_star_18 @ Sigma_20 @ omega_star_18)/np.sqrt(12)
sharpe_ratio_out_of_sample = mean_20/vol_20
print('The mean of 2020 is')
print(mean_20)
print('The volatility of 2020 is')
print(vol_20)
print('The sharp ratio of 2020 is')
print(sharpe_ratio_out_of_sample)
# +
df_temp_21 = retsx.loc['2021':, :]
omega_tangency_21, mu_tilde_21, Sigma_21 = compute_tangency(df_temp_21)
mean_21 = omega_star_18 @ mu_tilde_21
vol_21 = np.sqrt(omega_star_18 @ Sigma_21 @ omega_star_18)/np.sqrt(12)
sharpe_ratio_out_of_sample = mean_21/vol_21
print('The mean of 2021 is')
print(mean_21)
print('The volatility of 2021 is')
print(vol_21)
print('The sharp ratio of 2021 is')
print(sharpe_ratio_out_of_sample)
# -
# #5)
# The out-of-sample performance would be better than that of those five risky assets, because those commodities are not risky and it is safe to invest in them. Thus the out-of-sample result will be good.
# 3.Hedging & Replication
#1)
retsx.head()
X = retsx['SPY US Equity']
y = retsx['EEM US Equity']
model1 = sm.OLS(y,X).fit()
model1.params
# for every dollar invested in EEM, you should invest 0.927 dollar in SPY
# +
#2)
hedged_position = retsx['SPY US Equity']
#mean
mean1 = hedged_position.mean() * 12
#vol
vol1 = hedged_position.std()/np.sqrt(12)
#SR
SR1 = mean1/vol1
print('The mean is')
print(mean1)
print('The volatility is')
print(vol1)
print('The sharp ratio is')
print(SR1)
# -
hedged_position1 = retsx['EEM US Equity']
#mean
mean2 = hedged_position1.mean() * 12
#vol
vol2 = hedged_position1.std()/np.sqrt(12)
#SR
SR2 = mean2/vol2
print('The mean is')
print(mean2)
print('The volatility is')
print(vol2)
print('The sharp ratio is')
print(SR2)
# It doesn't have the same mean as EEM because there might be some skills or other factors that can not be replicated.
# #4)
# The reason why it is difficult to use multifactor regression to hedge is that when including every asset in one regression, those assets have correlation with each other, which could make each beta lose its power.
#4)Modeling Risk
# Load the 'merrill_factors' sheet of monthly factor returns, indexed by date.
# NOTE(review): absolute Windows path — runs only on the author's machine;
# prefer a path relative to the notebook.
rets = pd.read_excel(r'C:\Users\Harryzhu\Desktop\Study Files\Graduate UChicago\FINM 36700\MIDTERM\proshares_analysis_data.xlsx', sheet_name ='merrill_factors').set_index('date')
rets.head()
# +
#1)
def prob(mu, sigma, h, val):
    """Probability that the h-period average return falls below ``val``.

    Models the per-period return as Normal(mu, sigma^2); the standard
    deviation of the h-period *average* shrinks by sqrt(h).

    Parameters
    ----------
    mu : float
        Annualized mean (log) return.
    sigma : float
        Annualized volatility of (log) returns.
    h : float
        Horizon length in periods/years.
    val : float
        Threshold return to compare against.

    Returns
    -------
    float
        P(average return over h periods < val).
    """
    # Local import: the notebook header typically binds `stats`, not `scipy`,
    # so referencing scipy.stats without this import would raise NameError.
    import scipy.stats
    z = (val - mu) / (sigma / np.sqrt(h))
    return scipy.stats.norm.cdf(z)
# Annualized mean log returns for SPY and EFA, and SPY's annualized log vol.
mu_spy = np.log(1 + rets).mean()['SPY US Equity'] * 12
mu_efa = np.log(1 + rets).mean()['EFA US Equity'] * 12
sigma_spy = np.log(1 + rets).std()['SPY US Equity'] *np.sqrt(12)
# Probability that SPY's average log return over a 10-year horizon falls
# below EFA's (full-sample) mean log return.
prob_smaller_than_real = prob(mu_spy,sigma_spy,10,mu_efa)
print(prob_smaller_than_real)
# +
#2)
|
solutions/mid1/submissions/zhuzihan_170941_6241852_Midterm Zihan Zhu.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="../../img/ods_stickers.jpg">
# ## Open Machine Learning Course
# <center>Author: [<NAME>](https://www.linkedin.com/in/festline/), Data Scientist @ Mail.Ru Group <br>All content is distributed under the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license.
# # <center> Assignment #10 (demo)
# ## <center> Gradient boosting
#
# Your task is to beat at least 2 benchmarks in this [Kaggle Inclass competition](https://www.kaggle.com/c/flight-delays-spring-2018). Here you won’t be provided with detailed instructions. We only give you a brief description of how the second benchmark was achieved using Xgboost. Hopefully, at this stage of the course, it's enough for you to take a quick look at the data in order to understand that this is the type of task where gradient boosting will perform well. Most likely it will be Xgboost, however, we’ve got plenty of categorical features here.
#
# <img src='../../img/xgboost_meme.jpg' width=40% />
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score
train = pd.read_csv('../../data/flight_delays_train.csv')
test = pd.read_csv('../../data/flight_delays_test.csv')
train.head()
test.head()
# Given flight departure time, carrier's code, departure airport, destination location, and flight distance, you have to predict departure delay for more than 15 minutes. As the simplest benchmark, let's take Xgboost classifier and two features that are easiest to take: DepTime and Distance. Such model results in 0.68202 on the LB.
# +
# Baseline feature matrices: departure time and flight distance only.
X_train = train[['Distance', 'DepTime']].values
# Binary target: 1 if the departure was delayed by more than 15 minutes.
y_train = train['dep_delayed_15min'].map({'Y': 1, 'N': 0}).values
X_test = test[['Distance', 'DepTime']].values
# Hold out 30% of the training data for validation; fixed seed for
# reproducibility of the holdout split.
X_train_part, X_valid, y_train_part, y_valid = \
    train_test_split(X_train, y_train,
                     test_size=0.3, random_state=17)
# -
# We'll train Xgboost with default parameters on part of data and estimate holdout ROC AUC.
# +
# Train Xgboost with default hyperparameters on the training part only.
xgb_model = XGBClassifier(seed=17)
xgb_model.fit(X_train_part, y_train_part)
# Evaluate on the holdout using predicted probabilities of the positive class
# (ROC AUC needs scores, not hard labels).
xgb_valid_pred = xgb_model.predict_proba(X_valid)[:, 1]
roc_auc_score(y_valid, xgb_valid_pred)
# -
# Now we do the same with the whole training set, make predictions to test set and form a submission file. This is how you beat the first benchmark.
# +
# Refit on the full training set, predict the test set, and write the
# submission file.
xgb_model.fit(X_train, y_train)
xgb_test_pred = xgb_model.predict_proba(X_test)[:, 1]
# NOTE(review): the 'id' column written here is the Series' default
# RangeIndex — confirm it matches the ids expected by the competition.
pd.Series(xgb_test_pred,
          name='dep_delayed_15min').to_csv('xgb_2feat.csv',
                                           index_label='id', header=True)
# -
# The second benchmark in the leaderboard was achieved as follows:
#
# - Features `Distance` and `DepTime` were taken unchanged
# - A feature `Flight` was created from features `Origin` and `Dest`
# - Features `Month`, `DayofMonth`, `DayOfWeek`, `UniqueCarrier` and `Flight` were transformed with OHE (`LabelBinarizer`)
# - Logistic regression and gradient boosting (xgboost) were trained. Xgboost hyperparameters were tuned via cross-validation. First, the hyperparameters responsible for model complexity were optimized, then the number of trees was fixed at 500 and learning step was tuned.
# - Predicted probabilities were made via cross-validation using `cross_val_predict`. A linear mixture of logistic regression and gradient boosting predictions was set in the form $w_1 * p_{logit} + (1 - w_1) * p_{xgb}$, where $p_{logit}$ is a probability of class 1, predicted by logistic regression, and $p_{xgb}$ – the same for xgboost. $w_1$ weight was selected manually.
# - A similar combination of predictions was made for test set.
#
# Following the same steps is not mandatory. That’s just a description of how the result was achieved by the author of this assignment. Perhaps you might not want to follow the same steps, and instead, let’s say, add a couple of good features and train a random forest of a thousand trees.
#
# Good luck!
|
assignments/demo/assignment10_flight_delays_kaggle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# ### 26-Jan-2020
import os
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import ExponentialSmoothing
sns.set_style('whitegrid')
sns.set(rc={'figure.figsize':(16, 4)})
plt.rc('figure',figsize=(16,4))
plt.rc('font',size=13)
# import warnings
# warnings.filterwarnings("ignore")
# ## Data Preparation
# Load the raw satellite-tracking data and keep pristine copies so the
# cleaning steps below can be re-run from scratch without re-reading CSVs.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train_original = train.copy()
test_original = test.copy()
# Quick structural overview of both frames.
train.shape, test.shape
train.columns
test.columns
train.dtypes
test.dtypes
# +
# change epoch to datetime and id, sat_id to unsigned integers
# (downcasting the integer columns shrinks the memory footprint of these
# fairly large frames; epoch becomes a proper datetime64 for time indexing)
train['epoch'] = pd.to_datetime(train['epoch'], infer_datetime_format=True)
test['epoch'] = pd.to_datetime(test['epoch'], infer_datetime_format=True)
train['id'] = pd.to_numeric(train['id'], downcast='unsigned')
test['id'] = pd.to_numeric(test['id'], downcast='unsigned')
train['sat_id'] = pd.to_numeric(train['sat_id'], downcast='unsigned')
test['sat_id'] = pd.to_numeric(test['sat_id'], downcast='unsigned')
# +
# set indices to epoch
#train = train.set_index('epoch')
#test = test.set_index('epoch')
# -
# alternative method of setting indices without dropping epoch
# (set_index('epoch') would remove the column; assigning to .index keeps it)
train.index = train['epoch']
test.index = test['epoch']
# +
#train = train_original.copy()
#test = test_original.copy()
# -
# Cache the typed frames; pickles reload much faster than re-parsing CSVs.
train.to_pickle('train.pkl')
test.to_pickle('test.pkl')
# select the 300 satellites from train that are in test
# select the 300 satellites from train that are in test
sat_ids = test['sat_id'].unique()
# Vectorized membership test: Series.isin hashes sat_ids once and runs in
# O(n), whereas the original per-row Python loop scanned the numpy array for
# every row (O(n*m)). The resulting boolean mask is equivalent; the original
# variable names are kept for any downstream cells that reference them.
boolean = train['sat_id'].isin(sat_ids)
train_300 = train[boolean]
# check if 300 satellites in train exactly match the 300 satellites in test
# (0 means every position agrees)
sum(train_300['sat_id'].unique() != test['sat_id'].unique())
# make another dataset for the remaining 300 satellites (complement mask)
booleanB = ~boolean
train_300B = train[booleanB]
# save the prepared datasets for future use
train_300.to_csv('clean_train_300.csv')
train_300B.to_csv('clean_train_300B.csv')
test.to_csv('clean_test.csv')
train_300.to_pickle('clean_train_300.pkl')
train_300B.to_pickle('clean_train_300B.pkl')
test.to_pickle('clean_test.pkl')
# free memory before the next section reloads the cleaned pickles
del train, test, train_original, test_original, train_300, train_300B
# ### Start here
# load pickle files
# load pickle files (the cleaned subset of 300 satellites prepared above)
train = pd.read_pickle('clean_train_300.pkl')
test = pd.read_pickle('clean_test.pkl')
# make a copy
train_original = train.copy()
test_original = test.copy()
train.columns
test.columns
# remove simulated kinematic states
# (train keeps only its first 8 columns; test keeps just id and sat_id)
train.drop(train.columns[8:], axis=1, inplace=True)
test.drop(test.columns[2:], axis=1, inplace=True)
train.shape, test.shape
train.head()
train.info()
test.head()
test.info()
# index of first observation of satellite e.g. Sat_1 is 0, Sat_2 is 2108
# Rows are grouped by sat_id in ascending order, so searchsorted gives the
# first row of each satellite's block.
first_indices_train = list()
for sat_no in train['sat_id'].unique():
    first_id = train['sat_id'].searchsorted(sat_no)
    # NOTE(review): recent pandas returns a *scalar* from Series.searchsorted
    # for scalar input, in which case first_id[0] would raise — verify the
    # pandas version this was written against.
    first_indices_train.append(first_id[0])
first_indices_test = list()
for sat_no in test['sat_id'].unique():
    first_id = test['sat_id'].searchsorted(sat_no)
    first_indices_test.append(first_id[0])
# assign satellite numbers to their corresponding start locations in the main dataframes
train_ids = pd.DataFrame({'sat_id': train['sat_id'].unique(), 'start_location': first_indices_train})
test_ids = pd.DataFrame({'sat_id': test['sat_id'].unique(), 'start_location': first_indices_test})
train_ids.head()
# for example, sat515 and sat516 start at 283856 and 283967
# how can i do this with less code?
# (suggestion: a small helper, e.g.
#   def start(df, s): return df.loc[df['sat_id'] == s, 'start_location'].iloc[0]
# would replace the four lookups below)
a,b = (train_ids[train_ids['sat_id'] == 515]['start_location'].iloc[0],
       train_ids[train_ids['sat_id'] == 516]['start_location'].iloc[0])
c,d = (test_ids[test_ids['sat_id'] == 515]['start_location'].iloc[0],
       test_ids[test_ids['sat_id'] == 516]['start_location'].iloc[0])
a,b,c,d
# i want to get data for sat515 (rows a..b-1 of train)
plt.plot(train.iloc[a:b,:]['x'])
# Decompose sat515's x-coordinate into trend/seasonal/residual with a
# 24-observation period.
result = seasonal_decompose(train.iloc[a:b,:]['x'], model='additive', period=24)
result.plot()
plt.show()
train.head()
# calculate offsets based on max - min for each kinematic state and shift them by that amount
# NOTE(review): after the shift, each column's new minimum equals its old
# maximum, so the data are strictly positive only if the old maximum was
# positive — confirmed below by the (train <= 0).any() check.
offsets = list()
for k in range(2,8):
    # builtins max/min work on a Series but Series.max()/.min() would be faster
    offsets.append(max(train.iloc[:,k]) - min(train.iloc[:,k]))
offsets
#offsets = list()
cols = train.columns
for k in range(2,8):
    train[cols[k]] = train[cols[k]] + offsets[k-2] #(max(train.iloc[:,k]) - min(train.iloc[:,k]))
train.head()
# check that all data are strictly positive
# any() returns False iff all elements are False
(train <= 0).any()
# multiplicative decomposition requires strictly positive data — hence the shift above
result = seasonal_decompose(train.iloc[a:b,:]['x'], model='multiplicative', period=24)
result.plot()
plt.show()
testt = test.copy()
testt['x'] = np.nan
testt.head()
# Holt-Winters with additive seasonality (period 24), no trend.
# NOTE(review): the `damped` keyword was renamed `damped_trend` in newer
# statsmodels — this call may warn or fail depending on the version.
fit = ExponentialSmoothing(np.asarray(train.iloc[a:b,:]['x']),
                           trend=None, seasonal='add', seasonal_periods=24, damped=False).fit()
# forecast into arrays first, then combine them and put them into Test
fc = fit.forecast(d-c)
len(testt.iloc[c:d,:])
len(fit.forecast(d-c))
plt.plot(train.iloc[a:b,:]['x'])
# NOTE(review): c is a scalar start index, so this plots a single point —
# presumably the forecast `fc` was intended here; confirm.
plt.plot(c)
|
3. Data Exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial IV: Constructing variational algorithms
# Variational quantum algorithms are a broad set of methods which involve optimizing a parameterized quantum circuit ansatz applied to some initial state (called the "reference") in order to minimize a cost function defined with respect to the output state. In the context of quantum simulation, very often the goal is to prepare ground states and the cost function is the expectation value of a Hamiltonian. Thus, if we define the reference (initial state) as $\lvert \psi\rangle$, the Hamiltonian as $H$ and the parameterized quantum circuit as $U(\vec{\theta})$ where $\vec{\theta}$ are the varaitional parameters, then the goal is to minimize the cost function
# $$
# E(\vec \theta) = \langle \psi \rvert
# U^\dagger(\vec{\theta}) H U(\vec{\theta})
# \lvert \psi\rangle.
# $$
# A classical optimization algorithm can be used to find the $\vec{\theta}$ that minimizes the value of the expression. The performance of a variational algorithm depends crucially on the choice of ansatz circuit $U(\vec{\theta})$, the choice of reference, and the strategy for choosing the initial parameters $\vec{\theta}$ since typically global optimizing is challenging and one needs to begin reasonably close to the intended state. One possibility is to use an ansatz of the form
# $$
# U(\vec{\theta}) = \prod_j \exp(-i \theta_j H_j)
# $$
# where the $H = \sum_j H_j$. This ansatz is inspired by a low Trotter-number Trotter-Suzuki based approximation to adiabatic state preparation. OpenFermion-Cirq contains routines for constructing ansatzes of this form which use as templates the Trotter step algorithms implemented in the `trotter` module.
# ### Jellium with a Linear Swap Network
#
# We will first demonstrate the construction and optimization of a variational ansatz for a jellium Hamiltonian. We will use an ansatz based on the `LINEAR_SWAP_NETWORK` Trotter step, which takes as input a DiagonalCoulombHamiltonian. Later, we will show how one can create a custom circuit ansatz and apply it to the H$_2$ molecule in a minimal basis.
# +
import openfermion
import openfermioncirq
# Set parameters of jellium model.
# Physical parameters of the spinless 2-D jellium (uniform electron gas) model.
wigner_seitz_radius = 5. # Radius per electron in Bohr radii.
n_dimensions = 2 # Number of spatial dimensions.
grid_length = 2 # Number of grid points in each dimension.
spinless = True # Whether to include spin degree of freedom or not.
n_electrons = 2 # Number of electrons.
# Figure out length scale based on Wigner-Seitz radius and construct a basis grid.
length_scale = openfermion.wigner_seitz_length_scale(
    wigner_seitz_radius, n_electrons, n_dimensions)
grid = openfermion.Grid(n_dimensions, grid_length, length_scale)
# Initialize the model and compute its ground energy in the correct particle number manifold
# (exact diagonalization is feasible because the 2x2 grid has only 4 modes).
fermion_hamiltonian = openfermion.jellium_model(grid, spinless=spinless, plane_wave=False)
hamiltonian_sparse = openfermion.get_sparse_operator(fermion_hamiltonian)
ground_energy, _ = openfermion.jw_get_ground_state_at_particle_number(
    hamiltonian_sparse, n_electrons)
print('The ground energy of the jellium Hamiltonian at {} electrons is {}'.format(
    n_electrons, ground_energy))
# Convert to DiagonalCoulombHamiltonian type (required by the swap-network ansatz).
hamiltonian = openfermion.get_diagonal_coulomb_hamiltonian(fermion_hamiltonian)
# Define the objective function: expectation value of the Hamiltonian.
objective = openfermioncirq.HamiltonianObjective(hamiltonian)
# Create a swap network Trotter ansatz.
iterations = 1 # This is the number of Trotter steps to use in the ansatz.
ansatz = openfermioncirq.SwapNetworkTrotterAnsatz(
    hamiltonian,
    iterations=iterations)
print('Created a variational ansatz with the following circuit:')
print(ansatz.circuit.to_text_diagram(transpose=True))
# -
# In the last lines above we instantiated a class called SwapNetworkTrotterAnsatz which inherits from the general VariationalAnsatz class in OpenFermion-Cirq. A VariationalAnsatz is essentially a parameterized circuit that one constructs so that parameters can be supplied symbolically. This way one does not (necessarily) need to recompile the circuit each time the variational parameters change. We also instantiated a HamiltonianObjective which represents the objective function being the expectation value of our Hamiltonian.
#
# Optimizing an ansatz requires the creation of a VariationalStudy object. A VariationalStudy is responsible for performing optimizations and storing the results. By default, it evaluates parameters by simulating the quantum circuit and computing the objective function, in this case the expectation value of the Hamiltonian, on the final state. It includes an optional state preparation circuit to be applied prior to the ansatz circuit. For this example, we will prepare the initial state as an eigenstate of the one-body operator of the Hamiltonian. Since the one-body operator is a quadratic Hamiltonian, its eigenstates can be prepared using the `prepare_gaussian_state` method. The SwapNetworkTrotterAnsatz class also includes a default setting of parameters which is inspired by the idea of state preparation by adiabatic evolution from the mean-field state.
# +
# Use preparation circuit for mean-field state
import cirq
# Prepare the mean-field reference state: the one-body part of the
# Hamiltonian is quadratic, so its eigenstate with n_electrons occupied
# orbitals can be prepared exactly with a Gaussian-state circuit.
preparation_circuit = cirq.Circuit(
    openfermioncirq.prepare_gaussian_state(
        ansatz.qubits,
        openfermion.QuadraticHamiltonian(hamiltonian.one_body),
        occupied_orbitals=range(n_electrons)))
# Create a Hamiltonian variational study: preparation circuit, then the
# parameterized ansatz, scored by the Hamiltonian expectation value.
study = openfermioncirq.VariationalStudy(
    'jellium_study',
    ansatz,
    objective,
    preparation_circuit=preparation_circuit)
print("Created a variational study with {} qubits and {} parameters".format(
    len(study.ansatz.qubits), study.num_params))
print("The value of the objective with default initial parameters is {}".format(
    study.value_of(ansatz.default_initial_params())))
print("The circuit of the study is")
print(study.circuit.to_text_diagram(transpose=True))
# -
# As we can see, our initial guess isn't particularly close to the target energy. Optimizing the study requires the creation of an OptimizationParams object. The most import component of this object is the optimization algorithm to use. OpenFermion-Cirq includes a wrapper around the the `minimize` method of Scipy's `optimize` module and more optimizers will be included in the future. Let's perform an optimization using the COBYLA method. Since this is just an example, we will set the maximum number of function evaluations to 100 so that it doesn't run too long.
# Perform an optimization run.
from openfermioncirq.optimization import ScipyOptimizationAlgorithm, OptimizationParams
# Derivative-free COBYLA via scipy.optimize.minimize; the evaluation count is
# capped at 100 so the demo stays fast.
algorithm = ScipyOptimizationAlgorithm(
    kwargs={'method': 'COBYLA'},
    options={'maxiter': 100},
    uses_bounds=False)
optimization_params = OptimizationParams(
    algorithm=algorithm)
result = study.optimize(optimization_params)
# Best (lowest) energy found by the optimizer.
print(result.optimal_value)
# In practice, the expectation value of the Hamiltonian cannot be measured exactly due to errors from finite sampling. This manifests as an error, or noise, in the measured value of the energy which can be reduced at the cost of more measurements. The HamiltonianVariationalStudy class incorporates a realistic model of this noise (shot-noise). The OptimizationParams object can have a `cost_of_evaluate` parameter which in this case represents the number of measurements used to estimate the energy for a set of parameters. If we are interested in how well an optimizer performs in the presence of noise, then we may want to repeat the optimization several times and see how the results vary between repetitions.
#
# Below, we will perform the same optimization, but this time using the noise model. We will allow one million measurements per energy evaluation and repeat the optimization three times. Since this time the function evaluations are noisy, we'll also indicate that the final parameters of the study should be reevaluated according to a noiseless simulation. Finally, we'll print out a summary of the study, which includes all results obtained so far (including from the previous cell).
# Repeat the optimization under a shot-noise model: cost_of_evaluate is the
# number of measurements allotted per energy evaluation (one million here).
optimization_params = OptimizationParams(
    algorithm=algorithm,
    cost_of_evaluate=1e6)
# Three noisy repetitions; final parameters are re-scored noiselessly so the
# reported optima are comparable.
study.optimize(
    optimization_params,
    identifier='COBYLA with maxiter=100, noisy',
    repetitions=3,
    reevaluate_final_params=True,
    use_multiprocessing=True)
print(study)
# We see then that in the noisy study the optimizer fails to converge to the final result with high enough accuracy. Apparently then one needs more measurements, a more stable optimizer, or both!
# ### H$_2$ with a custom ansatz
# The above example shows one of the nice built-in ansatzes offered in OpenFermion-Cirq that can be applied to many different types of physical systems without much input from the user. In some research cases, however, one may wish to design their own parameterized ansatz. Here we will give an example of how to do this for the simple case of the H$_2$ molecule in a minimal basis.
#
# To provide some brief background, in a minimal basis H$_2$ is discretized into two slater-type spatial orbitals, each of which is expressed as a sum of 3 Gaussians (STO-3G). After pre-processing with a mean-field, Hartree-Fock, procedure, the best meanfield approximation of the ground state is found to be the symmetric superposition of these two spatial orbitals. After including spin in the problem by assigning each spatial orbital an alpha and beta spin, or equivalently the tensor product of the spatial and spin-$1/2$ degree of freedom, the mean-field state is expressed as
# \begin{equation}
# \vert \Psi_{\text{initial}} \rangle = a^\dagger_1 a^\dagger_0 \vert \rangle.
# \end{equation}
#
# Within the Jordan-Wigner encoding of fermionic systems, this is equivalent to a computational basis state with the first two qubits being in the 1 state and the second two qubits in the 0 state. This can be prepared via a simple circuit as
# \begin{equation}
# | \Psi_{\text{initial}} \rangle = X_1 X_0 \vert 0 0 0 0 \rangle = \vert 1 1 0 0 \rangle.
# \end{equation}
#
# As a result of the symmetries present in this system, only one transition is allowed, and it completely characterizes the freedom required to move from this initial guess to the exact ground state solution for all geometries of H$_2$ in the minimal basis. That is the concerted transitions of electrons from spin-orbitals 0, 1 to 2, 3. This corresponds to the fermionic operator $a_3^\dagger a_2^\dagger a_1 a_0$, which is of course not unitary, but one may lift this operation to the anti-hermitian generator of a rotation as in unitary coupled cluster to yield the unitary
# \begin{equation}
# \exp \left[ \theta \left(a_3^\dagger a_2^\dagger a_1 a_0 - a_0^\dagger a_1^\dagger a_2 a_3\right) \right]
# \end{equation}
#
# which may be decomposed exactly using a combination of the Jordan-Wigner transformation and standard identites from Nielsen and Chuang. However, as has been noted before, the essential action of concerted electron movement can be captured in only a single of the Jordan-Wigner terms, hence the simpler operation
# \begin{equation}
# \exp \left[ -i \theta Y_3 X_2 X_1 X_0 \right]
# \end{equation}
# suffices. This is what we use here in combination with standard gate identities to parameterize an ansatz for H$_2$.
#
# In the following code we first load up one example geometry of the H$_2$ molecule, as this data is included with OpenFermion. To compute such Hamiltonians for arbitrary molecules in different basis sets geometries, etc., one can use plugins such as [OpenFermion-Psi4](https://github.com/quantumlib/OpenFermion-Psi4) or [OpenFermion-PySCF](https://github.com/quantumlib/OpenFermion-PySCF). Later we will use these same techniques to load and evaluate the full curve with our ansatz.
# +
import openfermion
# H2 at (near-)equilibrium bond length, minimal STO-3G basis, singlet, neutral.
diatomic_bond_length = .7414
geometry = [('H', (0., 0., 0.)),
            ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1
charge = 0
# The description string keys the precomputed data shipped with OpenFermion.
description = format(diatomic_bond_length)
molecule = openfermion.MolecularData(
    geometry,
    basis,
    multiplicity,
    description=description)
# Load precomputed integrals/energies rather than running an electronic
# structure package.
molecule.load()
hamiltonian = molecule.get_molecular_hamiltonian()
print("Bond Length in Angstroms: {}".format(diatomic_bond_length))
print("Hartree Fock (mean-field) energy in Hartrees: {}".format(molecule.hf_energy))
print("FCI (Exact) energy in Hartrees: {}".format(molecule.fci_energy))
# -
# Now we design a custom ansatz with a single parameter based on the simplfied unitary above. The ansatz class makes convenient use of named parameters which are specified by the params routine. The parameterized circuit then makes use of these parameters within its operations method.
# +
import cirq
import openfermioncirq
import sympy
class MyAnsatz(openfermioncirq.VariationalAnsatz):
    """Single-parameter ansatz for H2 in a minimal basis (Jordan-Wigner).

    Implements exp(-i * theta * Y3 X2 X1 X0) — the simplified unitary
    coupled-cluster rotation derived in the markdown above — via basis
    changes, a CNOT parity ladder, a parameterized Z rotation, and the
    inverse ladder/basis changes.
    """
    def params(self):
        """The parameters of the ansatz."""
        return [sympy.Symbol('theta_0')]
    def operations(self, qubits):
        """Produce the operations of the ansatz circuit."""
        q0, q1, q2, q3 = qubits
        # Basis changes: H rotates q0..q2 into the X basis; X^(-1/2) rotates
        # q3 for the Y term of the Pauli string.
        yield cirq.H(q0), cirq.H(q1), cirq.H(q2)
        yield cirq.XPowGate(exponent=-0.5).on(q3)
        # CNOT ladder accumulates the joint parity onto q3.
        yield cirq.CNOT(q0, q1), cirq.CNOT(q1, q2), cirq.CNOT(q2, q3)
        # Parameterized rotation on the parity qubit.
        yield cirq.ZPowGate(exponent=sympy.Symbol('theta_0')).on(q3)
        # Uncompute the ladder and undo the basis changes.
        yield cirq.CNOT(q2, q3), cirq.CNOT(q1, q2), cirq.CNOT(q0, q1)
        yield cirq.H(q0), cirq.H(q1), cirq.H(q2)
        yield cirq.XPowGate(exponent=0.5).on(q3)
    def _generate_qubits(self):
        """Produce qubits that can be used by the ansatz circuit."""
        return cirq.LineQubit.range(4)
# -
# After this custom ansatz is designed, we can instantiate it and package it into a variational study class along with an initial state preparation circuit, which makes it more convenient to study parts of an ansatz. In this case our initial state is the doubly occupied computational basis state mentioned above.
ansatz = MyAnsatz()
objective = openfermioncirq.HamiltonianObjective(hamiltonian)
# Reference state |1100>: X on the first two qubits gives the doubly
# occupied Hartree-Fock configuration described in the markdown above.
q0, q1, _, _ = ansatz.qubits
preparation_circuit = cirq.Circuit(
    cirq.X(q0),
    cirq.X(q1))
study = openfermioncirq.VariationalStudy(
    name='my_hydrogen_study',
    ansatz=ansatz,
    objective=objective,
    preparation_circuit=preparation_circuit)
print(study.circuit)
# With this parameterized circuit and state preparation packaged into a variational study, it is now straightforward to attach an optimizer and find the optimal value as was done in the example above. Note that we can also set an initial guess for the angle as determined by any number of methods, and we demonstrate this here. Note that as the built-in simulator for Cirq is based on single precision, the solution may appear sub-variational past this precision due to round-off errors that accumulate; however, it is far below the accuracy one is typically concerned with for this type of problem.
# Perform optimization.
import numpy
from openfermioncirq.optimization import COBYLA, OptimizationParams
# COBYLA with a small nonzero initial angle (starting exactly at the
# Hartree-Fock point, theta=0, can stall a derivative-free optimizer).
optimization_params = OptimizationParams(
    algorithm=COBYLA,
    initial_guess=[0.01])
result = study.optimize(optimization_params)
# Compare the variational optimum against the mean-field and exact energies.
print("Initial state energy in Hartrees: {}".format(molecule.hf_energy))
print("Optimized energy result in Hartree: {}".format(result.optimal_value))
print("Exact energy result in Hartees for reference: {}".format(molecule.fci_energy))
# Using this same circuit and approach, we can now build a curve for the length of the H$_2$ molecule and plot it in the following way. Note that running the code in the cell above is required for this example.
# +
# Scan bond lengths 0.3..2.5 Angstrom. The lengths are kept as strings
# because they key the precomputed MolecularData files via `description`.
bond_lengths = ['{0:.1f}'.format(0.3 + 0.1 * x) for x in range(23)]
hartree_fock_energies = []
optimized_energies = []
exact_energies = []
for diatomic_bond_length in bond_lengths:
    # FIX: diatomic_bond_length is a *string* here; the geometry expects a
    # numeric z-coordinate (as in the single-point cell above), so coerce it.
    geometry = [('H', (0., 0., 0.)),
                ('H', (0., 0., float(diatomic_bond_length)))]
    description = format(diatomic_bond_length)
    molecule = openfermion.MolecularData(geometry, basis,
                                         multiplicity, description=description)
    # Load precomputed integrals for this geometry.
    molecule.load()
    hamiltonian = molecule.get_molecular_hamiltonian()
    # Fresh study per geometry; the ansatz and preparation circuit are reused.
    study = openfermioncirq.VariationalStudy(
        name='my_hydrogen_study',
        ansatz=ansatz,
        objective=openfermioncirq.HamiltonianObjective(hamiltonian),
        preparation_circuit=preparation_circuit)
    result = study.optimize(optimization_params)
    hartree_fock_energies.append(molecule.hf_energy)
    optimized_energies.append(result.optimal_value)
    exact_energies.append(molecule.fci_energy)
    print("R={}\t Optimized Energy: {}".format(diatomic_bond_length, result.optimal_value))
# -
# Now that we've collected that data, we can easily visualize it with standard matplotlib routines
# +
import matplotlib
import matplotlib.pyplot as pyplot
# %matplotlib inline
# Plot the energy mean and std Dev
# Plot the dissociation curve: mean-field vs. variational vs. exact energies.
fig = pyplot.figure(figsize=(10,7))
bkcolor = '#ffffff'
ax = fig.add_subplot(1, 1, 1)
pyplot.subplots_adjust(left=.2)
ax.set_xlabel('R (Angstroms)')
ax.set_ylabel(r'E Hartrees')
ax.set_title(r'H$_2$ bond dissociation curve')
# Hide the top/right spines for a cleaner frame.
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# bond_lengths were strings (dataset keys); convert for a numeric x-axis.
bond_lengths = [float(x) for x in bond_lengths]
ax.plot(bond_lengths, hartree_fock_energies, label='Hartree-Fock')
ax.plot(bond_lengths, optimized_energies, '*', label='Optimized')
ax.plot(bond_lengths, exact_energies, '--', label='Exact')
ax.legend(frameon=False)
pyplot.show()
|
examples/tutorial_4_variational.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import time
import argparse
import numpy as np
from pandas import DataFrame
import torch.nn.functional as F
# Device configuration
print(torch.__version__)
# -
def generate(name, window_size=None):
    """Load an HDFS log-key session file into fixed-length tuples.

    Each line of ``data/<name>`` is a whitespace-separated sequence of
    1-based log-key ids. Keys are shifted to 0-based and each sequence is
    right-padded with -1 up to ``window_size + 1`` entries.

    Parameters
    ----------
    name : str
        File name under the ``data/`` directory.
    window_size : int, optional
        Sliding-window length. Defaults to the notebook-level
        ``window_size`` global, matching the original behavior.

    Returns
    -------
    set of tuple
        Deduplicated sessions. NOTE: to replicate the DeepLog paper's
        results, use a list (keeping duplicates) instead of a set; the set
        is used here for speed.
    """
    if window_size is None:
        # fall back to the module-level global, as the original code did
        # implicitly (globals() is needed because the parameter shadows it)
        window_size = globals()['window_size']
    sessions = set()
    with open('data/' + name, 'r') as f:
        for ln in f:
            seq = [int(tok) - 1 for tok in ln.strip().split()]
            seq = seq + [-1] * (window_size + 1 - len(seq))
            sessions.add(tuple(seq))
    print('Number of sessions({}): {}'.format(name, len(sessions)))
    return sessions
# +
class DL(nn.Module):
    """DeepLog-style sequence model: stacked LSTM followed by a linear
    classifier over the hidden state of the final time step."""

    def __init__(self, input_size, hidden_size, num_layers, num_keys):
        super(DL, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_keys)
        # report the total number of trainable parameters at construction time
        size = sum(p.nelement() for p in self.parameters())
        print('Total param size: {}'.format(size))

    def forward(self, x):
        # fresh zero-initialized hidden/cell states for every forward pass
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        initial_state = (torch.zeros(state_shape), torch.zeros(state_shape))
        hidden_seq, _ = self.lstm(x, initial_state)
        # classify using the last time step's hidden state only
        return self.fc(hidden_seq[:, -1, :])
class Att(nn.Module):
    """LSTM with dot-product attention: hidden states are scored against the
    final hidden state, and the attention-pooled vector feeds the classifier.
    forward returns (logits, attention_weights)."""
    def __init__(self, input_size, hidden_size, num_layers, num_keys):
        super(Att, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_keys)
        # report the total number of trainable parameters at construction time
        size = 0
        for p in self.parameters():
            size += p.nelement()
        print('Total param size: {}'.format(size))
    def forward(self, x):
        # zero-initialized hidden/cell states (created on CPU; a device/dtype
        # mismatch would occur if the model were moved to GPU — note for review)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        out, (final_hidden_state, final_cell_state) = self.lstm(x, (h0, c0))
        # keep only the last layer's final hidden state as the attention query
        final_hidden_state = final_hidden_state[-1]
        out, att_weight = self.attention_net(out, final_hidden_state)
        out = self.fc(out)
        return out, att_weight
    def attention_net(self, lstm_output, final_state):
        """
        Now we will incorporate Attention mechanism in our LSTM model. In this new model, we will use attention to compute soft alignment score corresponding
        between each of the hidden_state and the last hidden_state of the LSTM. We will be using torch.bmm for the batch matrix multiplication.
        Arguments
        ---------
        lstm_output : Final output of the LSTM which contains hidden layer outputs for each sequence.
        final_state : Final time-step hidden state (h_n) of the LSTM
        ---------
        Returns : It performs attention mechanism by first computing weights for each of the sequence present in lstm_output and and then finally computing the
        new hidden state.
        Tensor Size :
        hidden.size() = (batch_size, hidden_size)
        attn_weights.size() = (batch_size, num_seq)
        soft_attn_weights.size() = (batch_size, num_seq)
        new_hidden_state.size() = (batch_size, hidden_size)
        """
        hidden = final_state
        # raw scores: dot product of each time step's hidden state with the query
        attn_weights = torch.bmm(lstm_output, hidden.unsqueeze(2)).squeeze(2)
        # normalize scores across time steps
        soft_attn_weights = F.softmax(attn_weights, 1)
        # attention-weighted sum of hidden states
        new_hidden_state = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)
        return new_hidden_state, soft_attn_weights
class Train_Att(nn.Module):
    """LSTM with a *learned* attention query vector.

    Unlike ``Att``, which scores hidden states against the final hidden
    state, this model scores tanh(hidden states) against a trainable
    vector ``att_w``. forward returns (logits, attention_weights).
    """
    def __init__(self, input_size, hidden_size, num_layers, num_keys):
        super(Train_Att, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_keys)
        # FIX: wrap the freshly created tensor directly. The original
        # nn.Parameter(torch.tensor(torch.randn(...))) triggers a UserWarning
        # and makes a needless copy (torch.tensor on an existing tensor).
        self.att_w = nn.Parameter(torch.randn(hidden_size))
        # report the total number of trainable parameters at construction time
        size = 0
        for p in self.parameters():
            size += p.nelement()
        print('Total param size: {}'.format(size))
    def forward(self, x):
        # zero-initialized hidden/cell states (created on CPU; move to
        # x.device if this model is ever used on GPU — note for review)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        out, (final_hidden_state, final_cell_state) = self.lstm(x, (h0, c0))
        final_hidden_state = final_hidden_state[-1]
        att_out, att_weights = self.attention_net(out, final_hidden_state)
        out = self.fc(att_out)
        return out, att_weights
    def attention_net(self, lstm_output, final_state):
        """Score each time step of ``lstm_output`` against the learned query
        ``att_w`` and return the attention-pooled hidden state.

        Shapes: lstm_output (batch, seq, hidden) -> returns
        new_hidden_state (batch, hidden) and soft_attn_weights (batch, seq).
        """
        batch_size = lstm_output.shape[0]
        # NOTE(review): computed but never used below — the learned vector
        # att_w replaces the final-state query used in Att.
        hidden = final_state.squeeze(0)
        # raw scores: tanh of each hidden state dotted with the learned query
        attn_weights = torch.bmm(torch.tanh(lstm_output), self.att_w.expand(batch_size, self.att_w.shape[0]).unsqueeze(2)).squeeze(2)
        soft_attn_weights = F.softmax(attn_weights, 1)
        # attention-weighted sum of hidden states, squashed by tanh
        new_hidden_state = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)
        new_hidden_state = torch.tanh(new_hidden_state)
        return new_hidden_state, soft_attn_weights
# -
# Evaluation hyper-parameters for the trained log-anomaly model.
num_classes = 28      # number of distinct log keys the classifier predicts
input_size = 1        # one log-key id per time step
model_path = 'model/window_size=7'
# parser = argparse.ArgumentParser()
# parser.add_argument('-num_layers', default=2, type=int)
# parser.add_argument('-hidden_size', default=64, type=int)
# parser.add_argument('-window_size', default=10, type=int)
# parser.add_argument('-num_candidates', default=9, type=int)
# args = parser.parse_args()
num_layers = 2
hidden_size = 64
window_size = 7       # length of the sliding window fed to the LSTM
num_candidates = 3    # a prediction is "normal" if the label is in the top-3
# +
# Select and load the checkpoint for one of the model variants.
# NOTE(review): DL, Att and generate() are defined elsewhere in the
# notebook/project and are not visible here.
model_type = 'train_att'
if model_type == 'dl':
    model = DL(input_size, hidden_size, num_layers, num_classes)
elif model_type == 'att':
    model = Att(input_size, hidden_size, num_layers, num_classes)
elif model_type == 'train_att':
    model = Train_Att(input_size, hidden_size, num_layers, num_classes)
model_path = model_path + '_' + model_type + '.pt'
# map_location forces the checkpoint onto CPU regardless of where it was saved.
check = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(check)
model.eval()
print('model_path: {}'.format(model_path))
test_normal_loader = generate('hdfs_test_normal')
test_abnormal_loader = generate('hdfs_test_abnormal')
# Confusion-matrix counters filled in by the evaluation cells below.
TP = 0
FP = 0
# -
# Test the model on normal sessions: a session counts as a false positive
# as soon as any window's true next key is outside the top-num_candidates
# predictions.
with torch.no_grad():
    for index, line in enumerate(test_normal_loader):
        if index % 2000 == 0:
            # ANSI escape "ESC[2J" clears the terminal before printing progress.
            print(chr(27) + "[2J")
            print(str(index/len(test_normal_loader) * 100)+'%')
        # Slide a window of length window_size over the session; the key
        # right after the window is the prediction target.
        for i in range(len(line) - window_size):
            seq = line[i:i + window_size]
            label = line[i + window_size]
            seq = torch.tensor(seq, dtype=torch.float).view(-1, window_size, input_size)
            label = torch.tensor(label).view(-1)
            output, _ = model(seq)
            # Indices of the num_candidates highest-scoring keys.
            predicted = torch.argsort(output, 1)[0][-num_candidates:]
            if label not in predicted:
                FP += 1
                break  # one miss is enough to flag the whole session
# Detected-anomaly bookkeeping for the attention visualisation below.
seq_list = []
attention_list = []
label_list = []
# Test on abnormal sessions: a session is a true positive as soon as one
# window's label falls outside the top predictions; the triggering window
# and its attention weights are kept for plotting.
with torch.no_grad():
    for index, line in enumerate(test_abnormal_loader):
        if index % 2000 == 0:
            print(chr(27) + "[2J")
            print(str(index/len(test_abnormal_loader) * 100)+'%')
        for i in range(len(line) - window_size):
            seq_raw = line[i:i + window_size]
            label_raw = line[i + window_size]
            seq = torch.tensor(seq_raw, dtype=torch.float).view(-1, window_size, input_size)
            label = torch.tensor(label_raw).view(-1)
            output, attention_weights = model(seq)
            predicted = torch.argsort(output, 1)[0][-num_candidates:]
            if label not in predicted:
                TP += 1
                seq_list.append(seq_raw)
                attention_list.append(attention_weights.data.numpy())
                label_list.append(label_raw)
                break
# Compute precision, recall and F1-measure
# NOTE(review): P raises ZeroDivisionError if no session is flagged at
# all (TP + FP == 0) — acceptable for this notebook, but worth guarding.
FN = len(test_abnormal_loader) - TP
P = 100 * TP / (TP + FP)
R = 100 * TP / (TP + FN)
F1 = 2 * P * R / (P + R)
print('false positive (FP): {}, false negative (FN): {}, Precision: {:.3f}%, Recall: {:.3f}%, F1-measure: {:.3f}%'.format(FP, FN, P, R, F1))
print('Finished Predicting')
# Bar-plot the attention weights of a sample of detected windows.
a = []
index = dict()
for i in range(1,41):
    # NOTE(review): this uses the window itself as a dict key, which only
    # works if seq_list entries are hashable (e.g. tuples/strings, not
    # lists) — confirm what generate() yields.
    index[seq_list[i*11]] = i*11
for key, value in index.items():
    i = value
    # DataFrame is presumably pandas.DataFrame imported elsewhere in the notebook.
    df = DataFrame({'weights':attention_list[value][0], 'key':key})
    a.append(df.plot.bar(x='key', y='weights'))
# Collect windows where some position receives more attention than a
# fixed reference position.
cases = []
for i, item in enumerate(attention_list):
    for j, value in enumerate(item[0]):
        # NOTE(review): "tenth" reads index 4 and the guard uses j != 9,
        # which looks inherited from a window_size=10 configuration; with
        # window_size=7 the j != 9 test is always true — verify intent.
        tenth = item[0][4]
        if j !=9 and item[0][j]>tenth:
            cases.append(i)
            break
# Plot at most 19 of the flagged cases.
c = []
i=0
for case in cases:
    if i == 19:
        break
    df = DataFrame({'weights':attention_list[case][0], 'key':seq_list[case]})
    c.append(df.plot.bar(x='key', y='weights'))
    i+=1
|
deepgravewell/att_vis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### CALCULATE FEATURES BASED ON FOOD INSPECTION DATA
# +
import numpy as np
import pandas as pd
import os.path
# Project root is the parent of the notebook's working directory.
root_path = os.path.dirname(os.getcwd())
# Load food inspection data
inspections = pd.read_csv(os.path.join(root_path, "DATA/food_inspections.csv"))
# Create basis for model_data
data = inspections.loc[:, ["inspection_id", "license", "inspection_date", "facility_type"]]
# -
# Create pass / fail flags
data["pass_flag"] = inspections.results.apply(lambda x: 1 if x == "Pass" else 0)
data["fail_flag"] = inspections.results.apply(lambda x: 1 if x == "Fail" else 0)
# Create risk flags
# NOTE(review): these compare the *results* column against "Risk ..."
# strings; if risk levels live in a separate `risk` column of the CSV,
# these flags will always be 0 — verify against the data schema.
data["risk_1"] = inspections.results.apply(lambda x: 1 if x == "Risk 1 (High)" else 0)
data["risk_2"] = inspections.results.apply(lambda x: 1 if x == "Risk 2 (Medium)" else 0)
data["risk_3"] = inspections.results.apply(lambda x: 1 if x == "Risk 3 (Low)" else 0)
# +
# Load violation data
values = pd.read_csv(os.path.join(root_path, "DATA/violation_values.csv"))
counts = pd.read_csv(os.path.join(root_path, "DATA/violation_counts.csv"))
# Merge with violation data, filtering missing data
# (inner merges drop inspections that lack violation records).
data = pd.merge(data, values, on="inspection_id")
data = pd.merge(data, counts, on="inspection_id")
# +
# Sort inspections by date
license_groups = data.sort_values("inspection_date").groupby("license")
# Find previous inspections by shifting each sorted group
# (row k of past_data is the license's previous inspection; NaN for the first).
past_data = license_groups.shift(1)
# +
# Add past fails, with 0 for first inspections
data["past_fail"] = past_data.fail_flag.fillna(0)
# Add past violation counts, with 0 for first records
data["past_critical"] = past_data.critical_count.fillna(0)
data["past_serious"] = past_data.serious_count.fillna(0)
data["past_minor"] = past_data.minor_count.fillna(0)
# +
# Select past violation values, remove past inspection id
# add_prefix("p") renames e.g. v_1 -> pv_1 to avoid column collisions.
past_values = past_data[values.columns].drop("inspection_id", axis=1).add_prefix("p")
# Add past values to model data, with 0 for first records
data = data.join(past_values.fillna(0))
# +
# Calculate time since previous inspection
deltas = pd.to_datetime(data.inspection_date) - pd.to_datetime(past_data.inspection_date)
# Add years since previous inspection, with 2 for first records
data["time_since_last"] = deltas.apply(lambda x: x.days / 365.25).fillna(2)
# -
# Check if first record
data["first_record"] = past_data.inspection_id.map(lambda x: 1 if pd.isnull(x) else 0)
# ### CALCULATE FEATURES BASED ON BUSINESS LICENSE DATA
# Load business license data
licenses = pd.read_csv(os.path.join(root_path, "DATA/business_licenses.csv"))
# +
# Business licenses have numbers on end preventing simple match
# so using street number instead
def get_street_number(address):
    """Return the leading whitespace-delimited token of *address* (the street number).

    Robustness fix: returns an empty string for blank/whitespace-only
    addresses instead of raising IndexError on ``split()[0]``.
    """
    parts = address.split()
    return parts[0] if parts else ""
# Derive a street_number column on both tables for the venue match below.
licenses["street_number"] = licenses.address.apply(get_street_number)
inspections["street_number"] = inspections.address.apply(get_street_number)
# +
# Match based on DBA name and street number
venue_matches = pd.merge(inspections, licenses, left_on=["dba_name", "street_number"], right_on=["doing_business_as_name", "street_number"])
# Match based on license numbers
license_matches = pd.merge(inspections, licenses, left_on="license", right_on="license_number")
# Join matches, reset index, drop duplicates
# NOTE(review): DataFrame.append was removed in pandas 2.x; this cell
# requires pandas < 2 (or a switch to pd.concat).
matches = venue_matches.append(license_matches, sort=False)
matches.reset_index(drop=True, inplace=True)
matches.drop_duplicates(["inspection_id", "id"], inplace=True)
# Restrict to matches where inspection falls within license period
matches = matches.loc[matches.inspection_date.between(matches.license_start_date, matches.expiration_date)]
# +
# Select retail food establishment inspection IDs
retail = matches.loc[matches.license_description == "Retail Food Establishment", ["inspection_id"]]
retail.drop_duplicates(inplace=True)
# FILTER: ONLY CONSIDER INSPECTIONS MATCHED WITH RETAIL LICENSES
data = pd.merge(data, retail, on="inspection_id")
# +
# Convert dates to datetime format
matches.inspection_date = pd.to_datetime(matches.inspection_date)
matches.license_start_date = pd.to_datetime(matches.license_start_date)
def get_age_data(group):
    """Per-license age feature: years between each inspection and the
    license's earliest known start date.

    Intended for groupby("license").apply; returns only the
    inspection_id / age_at_inspection columns of the group.
    """
    earliest_start = group.license_start_date.min()
    # Timedelta -> fractional years (365.25 accounts for leap years).
    group["age_at_inspection"] = (group.inspection_date - earliest_start).dt.days / 365.25
    return group[["inspection_id", "age_at_inspection"]]
# Calculate (3 mins), drop duplicates
age_data = matches.groupby("license").apply(get_age_data).drop_duplicates()
# -
# Merge in age_at_inspection
# (left merge keeps inspections without a matched license; age stays NaN).
data = pd.merge(data, age_data, on="inspection_id", how="left")
# +
# Translate categories to snake-case titles
# Maps raw license_description strings to the model's feature-column names.
categories = {
    "Consumption on Premises - Incidental Activity": "consumption_on_premises_incidental_activity",
    "Tobacco": "tobacco",
    "Package Goods": "package_goods",
    "Limited Business License": "limited_business_license",
    "Outdoor Patio": "outdoor_patio",
    "Public Place of Amusement": "public_place_of_amusement",
    "Children's Services Facility License": "childrens_services_facility_license",
    "Tavern": "tavern",
    "Regulated Business License": "regulated_business_license",
    "Filling Station": "filling_station",
    "Caterer's Liquor License": "caterers_liquor_license",
    "Mobile Food License": "mobile_food_license"
}
# Create binary markers for license categories
def get_category_data(group):
    """Collapse one inspection's matched license rows into a single row
    of 0/1 flags, one column per recognized license category.

    Intended for groupby("inspection_id").apply; column names come from
    the module-level ``categories`` map, and only categories present in
    the group get a column (absent ones are filled with 0 afterwards).
    """
    row = group[["inspection_id"]].iloc[[0]]
    for description in group.license_description:
        snake_name = categories.get(description)
        if snake_name is not None:
            row[snake_name] = 1
    return row
# group by inspection, get categories (2 mins)
category_data = matches.groupby("inspection_id").apply(get_category_data)
# Reset index, set absent categories to 0
category_data.reset_index(drop=True, inplace=True)
category_data.fillna(0, inplace=True)
# -
# Merge in category data, fill nan with 0
data = pd.merge(data, category_data, on="inspection_id", how="left").fillna(0)
# ### ATTACH KDE DATA
# +
# Load violation data
# (kernel-density-estimate features precomputed per inspection).
burglary_kde = pd.read_csv(os.path.join(root_path, "DATA/burglary_kde.csv"))
cart_kde = pd.read_csv(os.path.join(root_path, "DATA/cart_kde.csv"))
complaint_kde = pd.read_csv(os.path.join(root_path, "DATA/complaint_kde.csv"))
# FILTER: only consider data since 2012 (with good kde data)
# (the inner merges below implement this filter by dropping unmatched rows).
data = pd.merge(data, burglary_kde, on="inspection_id")
data = pd.merge(data, cart_kde, on="inspection_id")
data = pd.merge(data, complaint_kde, on="inspection_id")
# -
# ### ATTACH WEATHER DATA, SAVE RESULT
# +
# Load weather data
weather = pd.read_csv(os.path.join(root_path, "DATA/weather.csv"))
# Merge weather data with model data
data = pd.merge(data, weather, on="inspection_id")
# -
# Save Result
data.to_csv(os.path.join(root_path, "DATA/model_data.csv"), index=False)
# Quick sanity checks: list the final feature columns and spot-check one.
for thing in data.columns:
    print(thing)
data.v_1.sum()
|
CODE/23_generate_model_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Machine Intelligence II - Team MensaNord
# ## Sheet 08
# - <NAME>
# - <NAME>
# - <NAME>
# - <NAME>
# - <NAME>
from __future__ import division, print_function
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats
import numpy as np
# ## Exercise 1
def E(W, s):
    """Energy -1/2 * s^T W s of spin configuration *s* under coupling matrix *W*.

    Vectorized replacement for the original nested generator sum:
    ``np.sum(<generator>)`` relies on a deprecated NumPy fallback, and
    the quadratic form is algebraically identical since
    E = -0.5 * sum_ij W[i, j] * s[i] * s[j].  Accepts *s* as a list or
    ndarray (ints for +/-1 spins or floats for mean-field values).
    """
    s = np.asarray(s, dtype=float)
    return -0.5 * float(s @ W @ s)
# Annealing parameters: N spins, initial inverse temperature beta_0,
# geometric cooling factor tau, convergence threshold epsilon, t_max steps.
N = 6
beta_0 = 0.007
tau = 1.06
epsilon = 1e-20
t_max = 150
# +
# Random symmetric coupling matrix with zero diagonal (no self-coupling).
W = np.random.random(size=(N, N))
W = (W + W.T) / 2 # make symmetric
for i in range(N):
    W[i, i] = 0
plt.imshow(W)
# -
# ### Simulation with M=1
# One Metropolis-style flip attempt per temperature step.
M = 1
beta = beta_0
s = np.random.choice([-1, 1], N)  # random +/-1 start configuration
temperatures = np.zeros(t_max)
energies = np.zeros(t_max)
# +
# %%time
# Simulated annealing: propose single-spin flips, accept with the
# Glauber probability 1 / (1 + exp(beta * dE)), then cool (beta *= tau).
validation_min = E(W, s)  # best energy seen so far (tracked, not used below)
for t in range(t_max):
    for m in range(M):
        i = np.random.randint(0, 6)   # pick a random spin to flip
        s_local = np.copy(s)
        s_local[i] *= -1
        E_1 = E(W, s)
        E_2 = E(W, s_local)
        E_d = E_2 - E_1                # energy change of the proposed flip
        P = 1 / (1 + np.exp(beta*E_d))
        # print("\nt:", t, " i:", i, "\n s1:", s, "\tE1:", E_1, "\n s2:", s_local, "\tE2:", E_2)
        if np.random.random() < P:
            s = np.copy(s_local)       # accept the flip
            # print("new s")
        if E(W, s) < validation_min:
            validation_min = E(W, s)
    temperatures[t] = 1 / beta
    energies[t] = E(W, s)
    beta *= tau  # geometric cooling schedule
# -
# Temperature schedule and energy trace over the annealing run.
plt.figure(figsize=(10, 5))
plt.plot(temperatures)
plt.xlabel('t')
plt.ylabel('Temperature')
plt.figure(figsize=(10, 5))
plt.plot(energies, '.-')
plt.xlabel('t')
plt.ylabel('Energy')
# Display the final spin configuration (notebook cell output).
s
# ### Simulation with M=500
# Same annealing run as above but with 500 flip attempts per temperature
# step, giving the chain time to equilibrate before each cooling step.
M = 500
beta = beta_0
s = np.random.choice([-1, 1], N)
temperatures = np.zeros(t_max)
energies = np.zeros(t_max)
# +
# %%time
validation_min = E(W, s)
for t in range(t_max):
    for m in range(M):
        i = np.random.randint(0, 6)
        s_local = np.copy(s)
        s_local[i] *= -1
        E_1 = E(W, s)
        E_2 = E(W, s_local)
        E_d = E_2 - E_1
        P = 1 / (1 + np.exp(beta*E_d))  # Glauber acceptance probability
        # print("\nt:", t, " i:", i, "\n s1:", s, "\tE1:", E_1, "\n s2:", s_local, "\tE2:", E_2)
        if np.random.random() < P:
            s = np.copy(s_local)
            # print("new s")
        if E(W, s) < validation_min:
            validation_min = E(W, s)
    temperatures[t] = 1 / beta
    energies[t] = E(W, s)
    beta *= tau
# -
plt.figure(figsize=(10, 5))
plt.plot(temperatures)
plt.xlabel('t')
plt.ylabel('Temperature')
plt.figure(figsize=(10, 5))
plt.plot(energies, '.-')
plt.xlabel('t')
plt.ylabel('Energy')
# Final configuration (notebook cell output).
s
# ### All possible states
# +
# Enumerate every spin configuration and its energy.  Bug fixes vs. the
# original cell: (1) states are mapped from binary digits {0,1} to the
# Ising spins {-1,+1} actually used by the annealing above, so the
# energies are comparable with the simulation, and (2) the Boltzmann
# probabilities are normalized by the partition function Z, which was
# computed but never used before.
all_states = [[0, 0, 0, 0, 0, 0] for i in range(2**6)]
all_energies = [0.0 for i in range(2**6)]
for si in range(2**6):
    # '{0:06b}' gives the 6 binary digits of si; 2*b - 1 maps 0 -> -1, 1 -> +1.
    all_states[si] = [2 * int(x) - 1 for x in '{0:06b}'.format(si)]
    all_energies[si] = E(W, all_states[si])
plt.figure(figsize=(10, 5))
plt.scatter(range(2**6), all_energies)
plt.title('histogram of all possible energies')
plt.grid()
plt.show()
probab_beta = [0.005, 1, 3]
for beta in probab_beta:
    # Partition function Z = sum_s exp(-beta * E(s)).
    Z = 0
    for en in all_energies:
        Z += np.exp(-beta * en)
    all_probabilities = [0.0 for i in range(2**6)]
    for si in range(2**6):
        # Normalized Boltzmann probability of state si.
        all_probabilities[si] = np.exp(-beta * all_energies[si]) / Z
    plt.figure(figsize=(10, 5))
    plt.scatter(range(2**6), all_probabilities)
    plt.title('histogram of all possible probabilities for beta {}'.format(beta))
    plt.grid()
    plt.show()
# -
# ## Exercise 2
# Other parameters and W from exercise 1.
# Mean-field annealing state: continuous spins s in [-1, 1] and mean
# fields e, iterated to self-consistency at each temperature.
epsilon = 1e-50
s = np.random.choice([-1., 1.], N)
e = np.zeros_like(s)
beta = beta_0
temperatures = np.zeros(t_max)
energies = np.zeros(t_max)
# +
# %%time
# Mean-field annealing: iterate the self-consistency equations
#   e_i = -sum_{j != i} W_ij s_j,   s_i = tanh(-beta * e_i)
# until the mean-field vector stops changing, then lower the temperature.
for t in range(t_max):
    distance = np.inf
    while distance >= epsilon:
        e_old = e.copy()
        for i in range(N):
            # Python-3 fix: range objects have no .remove() (that worked
            # only on Python 2 lists); build the neighbor list with a
            # comprehension instead.  The builtin sum also avoids
            # np.sum's deprecated generator fallback.
            neighbors = [j for j in range(N) if j != i]
            e[i] = -sum(W[i, j] * s[j] for j in neighbors)
            s[i] = np.tanh(-beta * e[i])
        # Converged when the mean fields stop moving.
        distance = np.linalg.norm(e - e_old)
    temperatures[t] = 1 / beta
    energies[t] = E(W, s)
    beta *= tau
# -
# Temperature schedule and energy trace for the mean-field run.
plt.figure(figsize=(10, 5))
plt.plot(temperatures)
plt.xlabel('t')
plt.ylabel('Temperature')
plt.figure(figsize=(10, 5))
plt.plot(energies, '.-')
plt.xlabel('t')
plt.ylabel('Energy')
# Display the final (continuous) spin values (notebook cell output).
s
# ## Comparison
# **Number of iterations until convergence:**
#
# Simulated annealing converges more quickly (ca 105 iterations for M=1 and 85 iterations for M=500), mean-field annealing takes a bit longer (ca 130 iterations).
#
# **Total runtime:**
#
# Simulated annealing takes ca 40 ms for M=1 and 15 s for M=500. Mean-field annealing takes ca 18 ms. Therefore, simulated annealing is slower and depends heavily on the parameter M.
#
# **Final result:**
# As can be seen from the outputs above, the final state $s$ is the same for all methods ($s_i$ = 1 for all $i$).
|
sheet08/sheet08.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
def clean_ma(ma):
    """Normalize a morphological-analysis tag to a compact canonical form.

    Empties bracketed content (keeping the brackets), trims stray
    bracket/space characters, drops active/passive markers, collapses
    ambiguous number tags (sgpl/sgdu -> sg), expands 'i.' to 'inst.',
    and finally strips all dots and spaces.  The replacement order is
    significant and matches the original implementation exactly.
    """
    ma = re.sub(r"([\(\[]).*?([\)\]])", r"\g<1>\g<2>", ma)
    ma = ma.replace('[] ', '').strip(' []')
    for old, new in ((' ac', ''), (' ps', ''), ('sgpl', 'sg'), ('sgdu', 'sg'),
                     ('i.', 'inst.'), ('.', ''), (' ', '')):
        ma = ma.replace(old, new)
    return ma
# Collect the set of cleaned tags present in the 1-hot POS embedding file
# (each line is "<tag>@<vector>"; the first line is a header).
ma_set = set()
word = None
with open('../data/multilingual_word_embeddings/pos_embedding_1_hot.475', 'r', encoding='utf-8') as f:
    # skip first line
    for i, line in enumerate(f):
        if i == 0:
            continue
        word, vec = line.split('@', 1)
        ma_set.add(clean_ma(word))
# ma_set
import os
import pandas as pd
# Collect the cleaned tags used in the 20k training CSVs (column 3 holds
# the morphological analysis).
paths= ['../data/train_20k/']
cng_map = dict()
ma_20 = set()
for path in paths:
    for file in os.listdir(path):
        df = pd.read_csv(path+file,sep=',')
        for i in range(0,len(df)):
            ma_20.add(clean_ma(df.iloc[i,3]))
# if df.iloc[i,3] not in cng_map.keys():
# cng_map[df.iloc[i,3]] = []
# if df.iloc[i,-1] == df.iloc[i,-2]:
# if df.iloc[i,-1] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-1])
# else:
# if df.iloc[i,-1] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-1])
# if df.iloc[i,-2] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-2])
# else:
# if df.iloc[i,-1] == df.iloc[i,-2]:
# if df.iloc[i,-1] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-1])
# else:
# if df.iloc[i,-1] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-1])
# if df.iloc[i,-2] not in cng_map[df.iloc[i,3]]:
# cng_map[df.iloc[i,3]].append(df.iloc[i,-2])
# Report training tags missing from the embedding tag set and count them.
i=0
for ma in ma_20:
    if ma not in list(ma_set):
        print(clean_ma(ma))
        i=i+1
i
# NOTE(review): `lines` is not defined earlier in this chunk — it is
# assigned inside the "Prepare 50k data" cell further down, so these
# cells were presumably executed out of order in the notebook.
lines[0].split('\t')
# Collect the distinct CNG codes (third tab-separated field) from the
# CoNLL-style lines.
cng_set = set()
for line in lines:
    if line == '\n':
        continue
    cng_set.add(line.split('\t')[2])
n= '''0 Verbs
1 not set
2 indecl.
3 comp.
28 xt?
29 nom. sg. m.
30 nom. sg. f.
31 nom. sg. n.
32 nom. sg. *.
33 xt?
34 nom. du. m.
35 nom. du. f.
36 nom. du. n.
37 nom. du. *.
38 xt?
39 nom. pl. m.
40 nom. pl. f.
41 nom. pl. n.
42 nom. pl. *.
48 xt?
49 voc. sg. m.
50 voc. sg. f.
51 voc. sg. n.
54 voc. du. m.
55 x`
56 voc. du. n.
58 xt?
59 voc. pl. m.
60 voc. pl. f.
61 voc. pl. n.
68 xt?
69 acc. sg. m.
70 acc. sg. f.
71 acc. sg. n.
72 acc. sg. *.
73 xt?
74 acc. du. m.
75 acc. du. f.
76 acc. du. n.
77 acc. du. *.
78 xt?
79 acc. pl. m.
80 acc. pl. f.
81 acc. pl. n.
82 acc. pl. *.
88 xt?
89 i. sg. m.
90 i. sg. f.
91 i. sg. n.
92 i. sg. *.
93 xt?
94 i. du. m.
95 i. du. f.
96 i. du. n.
97 i. du. *.
98 xt?
99 i. pl. m.
100 i. pl. f.
101 i. pl. n.
102 i. pl. *.
108 xt?
109 dat. sg. m.
110 dat. sg. f.
111 dat. sg. n.
112 dat. sg. *.
114 dat. du. m.
115 dat. du. f.
116 dat. du. n.
117 dat. du. *.
118 xt?
119 dat. pl. m.
120 dat. pl. f.
121 dat. pl. n.
122 dat. pl. *.
128 xt?
129 abl. sg. m.
130 abl. sg. f.
131 abl. sg. n.
132 abl. sg. *.
134 abl. du. m.
135 abl. du. f.
136 abl. du. n.
137 abl. du. *.
138 xt?
139 abl. pl. m.
140 abl. pl. f.
141 abl. pl. n.
142 abl. pl. *.
148 xt?
149 g. sg. m.
150 g. sg. f.
151 g. sg. n.
152 g. sg. *.
153 xt?
154 g. du. m.
155 g. du. f.
156 g. du. n.
157 g. du. *.
158 xt?
159 g. pl. m.
160 g. pl. f.
161 g. pl. n.
162 g. pl. *.
168 xt?
169 loc. sg. m.
170 loc. sg. f.
171 loc. sg. n.
172 loc. sg. *.
173 xt?
174 loc. du. m.
175 loc. du. f.
176 loc. du. n.
177 loc. du. *.
178 xt?
179 loc. pl. m.
180 loc. pl. f.
181 loc. pl. n.
182 loc. pl. *.'''
# Parse the tab-separated "code<TAB>tag" table in the string `n` into a
# CNG-code -> noun-tag lookup.
noun = dict()
for l in n.split('\n'):
    a,b = l.split('\t',1)
    noun[int(a)] = b
# +
# noun
# +
v = '''1 pr.
2 opt.
3 imp.
4 impft.
5 fut.
6 cond.
7 per. fut.
8 aor.
9 aor.
10 aor.
11 aor.
12 aor.
13 aor.
14 ben.
15 pft.
16 per. pft.
19 pp.
20 ppa.
21 pfp.
22 inf.
23 abs.
24 pr.
26 imp.
27 impft.
28 aor.
29 opt.
30 ou.'''
# Parse the "code<TAB>tag" table in the string `v` into a verb-tag lookup.
verb = dict()
for l in v.split('\n'):
    a,b = l.split('\t',1)
    verb[int(a)] = b
# -
# Number/person suffixes for finite verb codes (last digit of |code|).
no_person = {0:'',1:'sg. 1',2:'sg. 2',3:'sg. 3',4:'du. 1',5:'du. 2',6:'du. 3',7:'pl. 1',8:'pl. 2',9:'pl. 3'}
def cng_to_UOH_tag(no):
    """Translate a CNG code into a cleaned UOH-style morphological tag.

    Non-negative codes index the ``noun`` table directly; negative codes
    encode a verb: |code| // 10 selects the ``verb`` tense/mood entry and
    |code| % 10 the number/person suffix from ``no_person``.  The
    resulting tag is normalized with ``clean_ma``.
    """
    code = int(no)
    if code >= 0:
        return clean_ma(noun[code])
    code = -code
    tag = verb[code // 10] + ' ' + no_person[code % 10]
    return clean_ma(tag)
# Report CNG codes whose translated tag is not covered by the embedding tags.
for c in cng_set:
    if cng_to_UOH_tag(c) not in ma_set:
        print(cng_to_UOH_tag(c))
# ### Prepare 50k data
# Rewrite each CoNLL file with the CNG code translated to a UOH tag and
# a fresh per-sentence token index.
files = ['train.conll','test.conll','dev.conll']
for file in files:
    # NOTE(review): the output handle `w` is only closed on the happy
    # path, and `id`/`file` shadow builtins — fine for a notebook, but
    # worth tidying if this graduates to a script.
    f = open('../../../DCST_scratch/data/'+file)
    w = open('../../../DCST_scratch/data/prepare_50k_'+file,'w')
    lines =f.readlines()
    f.close()
    id=0
    for line in lines:
        if line=='\n':
            # Blank line = sentence boundary; reset the token counter.
            w.write('\n')
            id =0
            continue
        line = line.split('\t')
        # get_case_from_244(cng_to_UOH_tag(line[2]))
        # Output columns: index, surface form, UOH tag, lemma, two
        # placeholder zeros, and the original fifth field.
        temp = [str(id),line[0],cng_to_UOH_tag(line[2]),line[1],'0', '0',line[4]]
        id+=1
        w.write('\t'.join(temp) + '\n')
    w.close()
def get_case_from_244(ma):
    """Reduce a cleaned morphological tag to a coarse case label.

    Returns one of: 'comp' (compound), 'IND' (indeclinable), a case name
    from ``case_list``, 'adv' (adverb), 'FV' (finite verb: has a number
    or person marker), or 'IV' (other verb form) as a fallback.

    Cleanup vs. the original: the duplicated 'prep' entry in the
    indeclinable list, a literally repeated finite-verb check, and the
    unused gender list were removed — none of which changes the result.
    """
    indeclinable = ['ind', 'prep', 'interj', 'conj', 'part', 'indecl']
    case_list = ['nom', 'voc', 'acc', 'inst', 'dat', 'abl', 'loc', 'i', 'g']
    person_list = ['1', '2', '3']
    no_list = ['du', 'sg', 'pl']
    ma = ma.replace('sgpl', 'sg').replace('sgdu', 'sg')
    case = ''
    if ma == 'comp':
        case = 'comp'
    for tag in indeclinable:
        if tag in ma:
            case = "IND"
    # Get case: later matches in case_list overwrite earlier ones, as in
    # the original implementation.
    if case == '':
        for tag in case_list:
            if tag in ma:
                if tag == 'i':
                    # A bare 'i'/'g' matches too easily inside other tags;
                    # require a number suffix (isg/idu/ipl, gsg/gdu/gpl).
                    if tag + 'sg' in ma or tag + 'du' in ma or tag + 'pl' in ma:
                        case = 'i'
                elif tag == 'g':
                    if tag + 'sg' in ma or tag + 'du' in ma or tag + 'pl' in ma:
                        case = 'g'
                else:
                    case = tag
    if case == '':
        if 'adv' in ma:
            case = 'adv'
    # A number marker (sg/du/pl) or a person marker means a finite verb.
    if case == '':
        for tag in no_list:
            if tag in ma:
                case = 'FV'
    if case == '':
        for tag in person_list:
            if tag in ma:
                case = 'FV'
    if case == '':
        case = 'IV'
    return case
# Pair every embedding tag with its coarse case label for inspection.
s = []
for m in ma_set:
    s.append([m,get_case_from_244(m)])
# Sorted view displayed as the cell's output (the sorted list is not stored).
sorted(s)
|
notebooks/multi_task_setup.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tdBxw5Nn5ySP"
# # Import stuffs
# + id="nHpz38mv5c5y" executionInfo={"status": "ok", "timestamp": 1625406276585, "user_tz": 240, "elapsed": 170, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import os
def print_(dataset):
    """Print every element of an iterable (e.g. a tf.data.Dataset), one per line."""
    for element in iter(dataset):
        print(element)
# + [markdown] id="vSiC0GHQ_D3g"
# # ***Dataset methods do not do anything to the original dataset***
# + [markdown] id="TacY2jgk534c"
# # Examples
# + colab={"base_uri": "https://localhost:8080/"} id="2tnwVtNl56uZ" executionInfo={"status": "ok", "timestamp": 1625404251771, "user_tz": 240, "elapsed": 150, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="0dfcb917-0293-4466-9d8a-895b035bf7c4"
x = tf.range(10)
dataset = tf.data.Dataset.from_tensor_slices(x) # basically slices x up so that each of x's elements is individually in dataset.
print_(dataset)
# + colab={"base_uri": "https://localhost:8080/"} id="7z6Wdxxv6bb-" executionInfo={"status": "ok", "timestamp": 1625404272682, "user_tz": 240, "elapsed": 162, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="0bb69267-906b-44ae-e04c-1dbd8c44463f"
dataset = dataset.repeat(3).batch(7, drop_remainder=True)
# dataset.repeat(3) => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, . . .9]
# dataset.repeat(3).batch(7) => ||
# \/
print_(dataset)
# + [markdown] id="EVmLOU1U5xrF"
# # Use lambda as a function
# + colab={"base_uri": "https://localhost:8080/"} id="enChsLl5-k6c" executionInfo={"status": "ok", "timestamp": 1625404278968, "user_tz": 240, "elapsed": 167, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="66694981-bf45-4b53-da8b-f6feebd9cec5"
x = tf.range(6)
dataset = tf.data.Dataset.from_tensor_slices(x)
dataset = dataset.map(lambda x: x*2)
print_(dataset)
# + [markdown] id="Ceu3tv2s_qli"
# ## Filter function
# + colab={"base_uri": "https://localhost:8080/"} id="vnLKMtM__tM7" executionInfo={"status": "ok", "timestamp": 1625404287542, "user_tz": 240, "elapsed": 180, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="c141dcc7-d707-4d4b-f307-6f01a9596768"
dataset = dataset.filter(lambda x: x < 10) # Filters everything that is not x < 10
print_(dataset)
# + [markdown] id="hd9W6n9VorQG"
# ## Shuffling data
# + colab={"base_uri": "https://localhost:8080/"} id="__55BXNzotED" executionInfo={"status": "ok", "timestamp": 1625404389358, "user_tz": 240, "elapsed": 164, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="78be9232-dbc4-4bbe-b031-057038bf18ff"
dataset = tf.data.Dataset.range(10).repeat(3)
dataset = dataset.shuffle(buffer_size=5, seed=42).batch(7)
print_(dataset)
# + [markdown] id="o8Tft_bCucEH"
# # California stuffs
# + [markdown] id="vP-Y9Da6wV-J"
# ## Preprocessing pg 292-294
# + [markdown] id="dgxEjtfTwZpc"
# ### Import stuffs
# + colab={"base_uri": "https://localhost:8080/"} id="h0_hifjewYIF" executionInfo={"status": "ok", "timestamp": 1625406169777, "user_tz": 240, "elapsed": 3177, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="4916d47c-2ba3-4793-8187-ac779079b4e9"
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
housing.data, housing.target.reshape(-1, 1), random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
scaler.fit(X_train)
X_mean = scaler.mean_
X_std = scaler.scale_
# + [markdown] id="mOJgjWAmwhlY"
# ### Split up the training into smaller csv files for tensorflow to read because if the files are initially huge, it's a good thing to do
# + id="lIxWqINAwq3O" executionInfo={"status": "ok", "timestamp": 1625406248065, "user_tz": 240, "elapsed": 166, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
def save_to_multiple_csv_files(data, name_prefix, header=None, n_parts=10):
    """Split *data* row-wise into *n_parts* CSV files under datasets/housing.

    Each file is named ``my_<name_prefix>_<NN>.csv``; *header* (if given)
    becomes the first line of every file.  Returns the list of file paths
    in part order.
    """
    out_dir = os.path.join("datasets", "housing")
    os.makedirs(out_dir, exist_ok=True)
    name_template = os.path.join(out_dir, "my_{}_{:02d}.csv")
    filepaths = []
    # Partition the row indices into n_parts (nearly) equal chunks.
    row_splits = np.array_split(np.arange(len(data)), n_parts)
    for part_idx, row_indices in enumerate(row_splits):
        csv_path = name_template.format(name_prefix, part_idx)
        filepaths.append(csv_path)
        with open(csv_path, "wt", encoding="utf-8") as out:
            if header is not None:
                out.write(header)
                out.write("\n")
            for row_idx in row_indices:
                out.write(",".join(repr(col) for col in data[row_idx]))
                out.write("\n")
    return filepaths
# + id="x_lzZQTywsp2" executionInfo={"status": "ok", "timestamp": 1625406280071, "user_tz": 240, "elapsed": 339, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
train_data = np.c_[X_train, y_train]
valid_data = np.c_[X_valid, y_valid]
test_data = np.c_[X_test, y_test]
header_cols = housing.feature_names + ["MedianHouseValue"]
header = ",".join(header_cols)
train_filepaths = save_to_multiple_csv_files(train_data, "train", header, n_parts=20)
valid_filepaths = save_to_multiple_csv_files(valid_data, "valid", header, n_parts=10)
test_filepaths = save_to_multiple_csv_files(test_data, "test", header, n_parts=10)
# + [markdown] id="6kqJdWizw6Ii"
# ### Preprocessing
# + id="eJqP6POCw78y" executionInfo={"status": "ok", "timestamp": 1625406328426, "user_tz": 240, "elapsed": 161, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
n_inputs = 8 # X_train.shape[-1]
@tf.function
def preprocess(line):
    """Parse one CSV line into a standardized feature tensor and target.

    The first n_inputs fields default to 0.0 when missing; the final
    field (the target) has an empty-tensor default, making it required.
    Relies on the module-level X_mean / X_std computed by StandardScaler
    above.  Returns ((x - X_mean) / X_std, y).
    """
    defs = [0.] * n_inputs + [tf.constant([], dtype=tf.float32)]
    fields = tf.io.decode_csv(line, record_defaults=defs)
    x = tf.stack(fields[:-1])
    y = tf.stack(fields[-1:])
    return (x - X_mean) / X_std, y
# + id="jPwqYQB-xEUT" executionInfo={"status": "ok", "timestamp": 1625406329665, "user_tz": 240, "elapsed": 226, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
def csv_reader_dataset(filepaths, repeat=1, n_readers=5,
                       n_read_threads=None, shuffle_buffer_size=10000,
                       n_parse_threads=5, batch_size=32):
    """Build a shuffled, batched, prefetching tf.data pipeline over CSV shards.

    Interleaves lines from n_readers files at a time (skipping each
    file's header row), shuffles with a buffer, parses rows with
    ``preprocess``, batches, and prefetches one batch so parsing
    overlaps training.
    """
    dataset = tf.data.Dataset.list_files(filepaths).repeat(repeat)
    dataset = dataset.interleave(
        lambda filepath: tf.data.TextLineDataset(filepath).skip(1),
        cycle_length=n_readers, num_parallel_calls=n_read_threads)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)
    return dataset.prefetch(1)
# + [markdown] id="hjEwxzDF2LOt"
# # TFRecord and Proto(pg 297-300)
# + [markdown] id="Da8tEkXG2TXs"
# # For standardization you can either use:
# 1. StandardScaler
# 2. tf.keras.layers.Normalization
# - Have to first create a normalization layer, pass in stuff to its .adapt() function, and then you can add it to your model
# 3. Create your own(pg 301)
# + [markdown] id="sT_4g2iZ2xVD"
#
# + [markdown] id="KyHq0bVj3MF0"
# # One-hot encoding with Cali ocean_proximity
# + id="xmBQs9lE3Q-_" executionInfo={"status": "ok", "timestamp": 1625408187185, "user_tz": 240, "elapsed": 152, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
# Because there weren't that many categroies in ocean_proximity, we can just make a map-dictionary
vocab = ["<1H OCEAN", "INLAND", "NEAR OCEAN", "NEAR BAY", "ISLAND"]
indices = tf.range(len(vocab), dtype=tf.int64)
table_init = tf.lookup.KeyValueTensorInitializer(vocab, indices) # If in a text file, use TextFileInitializer
num_oov_buckets = 2
table = tf.lookup.StaticVocabularyTable(table_init, num_oov_buckets=num_oov_buckets)
# + colab={"base_uri": "https://localhost:8080/"} id="YR5G2bzD5u_n" executionInfo={"status": "ok", "timestamp": 1625408729891, "user_tz": 240, "elapsed": 174, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="c4c642ff-fe59-4002-fc89-82c43e7ad2d1"
categories = tf.constant(["NEAR BAY","DESERT", "INLAND", "INLAND"])
indices = table.lookup(categories)
print(indices)
cat_1hot = tf.one_hot(indices, len(vocab) + num_oov_buckets)
print(cat_1hot)
# What the oov does is that it will create an extra category for every word that was not found in the vocab list
# However, if the the amount of words not found within the vocab list exceeds the oov, then collisions.
# If the word was not found, then its category starts at the len(vocab) => 5 in this case
# + [markdown] id="RYq6ToWw6xe7"
# # Embeddings
# - Use when there are a bunch of categories that you need to one_hot encode but it would be too much to do with one_hot encoding.
# - If the number of categories is less than 10, do one_hot.
# - If 10<x<50, if you think you can do it manually, then try both
# -x>50, use embeddings
# + [markdown] id="ChFahcxV7_gY"
# ## Notes on Embeddings and what they do
# - Form of representation learning.
# - ***Categories in embeddings are trainable dense vectors which means they can learn similarities between one another.***
# - Initially, categories are spread out randomly in the embedding space.
# - As the model learns, it sees that some categories are similar to each other while others are just completely different.
# - "Rain" and "Water" would be pushed closer together while "Fire" would be pushed farther away.
# + [markdown] id="eBfNGHWi9-jL"
# ## Manual Embeddings
# + colab={"base_uri": "https://localhost:8080/"} id="rpg0IuGS99WG" executionInfo={"status": "ok", "timestamp": 1625410022029, "user_tz": 240, "elapsed": 146, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="cc5034c2-36b7-42eb-8b97-cdef71515300"
embedding_dim = 2
embed_init = tf.random.uniform([len(vocab) + num_oov_buckets, embedding_dim])
embedding_matrix = tf.Variable(embed_init)
embedding_matrix, len(vocab) + num_oov_buckets # Creates an embedding matrix--where each categorical word is at in the 2D embedding space
# + colab={"base_uri": "https://localhost:8080/"} id="uBNTDq1b_AFb" executionInfo={"status": "ok", "timestamp": 1625410029213, "user_tz": 240, "elapsed": 195, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="ebe4d096-5c8e-4a7b-b8fb-8934fac1a7cc"
tf.nn.embedding_lookup(embedding_matrix, indices)
# Basically just looks up the positions at the given indices.
# First was 3 so what is at index=3 in the embedding_matrix
# + [markdown] id="A6hV3y5N_nes"
# ## Keras Embeddings
# + colab={"base_uri": "https://localhost:8080/"} id="jEkDc3WZAh08" executionInfo={"status": "ok", "timestamp": 1625410381074, "user_tz": 240, "elapsed": 171, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="dffbe4dd-2072-478b-b1dc-c808b19abf7f"
embedding = tf.keras.layers.Embedding(input_dim=len(vocab) + num_oov_buckets,
output_dim=2) # Basically just does whatever we did above.
embedding(indices)
# + [markdown] id="6Oz-B0sfAyE_"
# ## Full model with embeddings
# + id="sbHq-jVyAz71" executionInfo={"status": "ok", "timestamp": 1625410774017, "user_tz": 240, "elapsed": 218, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
numerical_inputs = tf.keras.layers.Input(shape=[8])
categories = tf.keras.layers.Input(shape=[], dtype=tf.string)
indices = tf.keras.layers.Lambda(lambda cats: table.lookup(cats))(categories)
cat_embed = embedding(indices)
encoded_inputs = tf.keras.layers.concatenate([numerical_inputs, cat_embed])
outputs = tf.keras.layers.Dense(1)(encoded_inputs)
model = tf.keras.Model(inputs=[numerical_inputs, categories],
outputs=[outputs])
# If you don't want to manually take in the inputs, use TextVectorization
# Call its method to make it extract the vocabulary from a data sample (it will take care of creating the lookup table for you).
# Then you can add it to your model, and it will perform the index lookup (replacing the layer in the previous code example).
# + [markdown] id="wSUS2XarQpp7"
# # More preprocessing using Keras
# + id="mw90lk0GQr8e" executionInfo={"status": "ok", "timestamp": 1625415005509, "user_tz": 240, "elapsed": 220, "user": {"displayName": "ThatOneGuy 4", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}}
from keras.layers.experimental.preprocessing import Normalization, Discretization, PreprocessingLayer
normalization = Normalization()   # learns mean/variance for standardisation
discretization = Discretization() # learns bin boundaries for bucketising
# NOTE(review): PreprocessingLayer is the abstract base class of the Keras
# preprocessing layers and is not documented as accepting a list of layers;
# the book example this follows used a pipeline/stage class instead.
# Confirm this cell actually runs on the installed Keras version.
pipeline = PreprocessingLayer([normalization, discretization])
pipeline.adapt(data_sample)  # fit the preprocessing statistics on a sample
# Word stuff with text vectorization on pg 306
|
(13)Play_With_Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Whale off the Port(folio)
# ---
#
# In this assignment, you'll get to use what you've learned this week to evaluate the performance among various algorithmic, hedge, and mutual fund portfolios and compare them against the S&P 500 Index.
# +
# Initial imports
import pandas as pd
import numpy as np
import datetime as dt
from pathlib import Path
# %matplotlib inline
# -
# # Data Cleaning
#
# In this section, you will need to read the CSV files into DataFrames and perform any necessary data cleaning steps. After cleaning, combine all DataFrames into a single DataFrame.
#
# Files:
#
# * `whale_returns.csv`: Contains returns of some famous "whale" investors' portfolios.
#
# * `algo_returns.csv`: Contains returns from the in-house trading algorithms from Harold's company.
#
# * `sp500_history.csv`: Contains historical closing prices of the S&P 500 Index.
# ## Whale Returns
#
# Read the Whale Portfolio daily returns and clean the data
# Reading whale returns
# Count nulls
# Drop nulls
# ## Algorithmic Daily Returns
#
# Read the algorithmic daily returns and clean the data
# Reading algorithmic returns
# Count nulls
# Drop nulls
# ## S&P 500 Returns
#
# Read the S&P 500 historic closing prices and create a new daily returns DataFrame from the data.
# Reading S&P 500 Closing Prices
# Check Data Types
# Fix Data Types
# Calculate Daily Returns
# Drop nulls
# Rename `Close` Column to be specific to this portfolio.
# ## Combine Whale, Algorithmic, and S&P 500 Returns
# Join Whale Returns, Algorithmic Returns, and the S&P 500 Returns into a single DataFrame with columns for each portfolio's returns.
# ---
# # Conduct Quantitative Analysis
#
# In this section, you will calculate and visualize performance and risk metrics for the portfolios.
# ## Performance Analysis
#
# #### Calculate and Plot the daily returns.
# Plot daily returns of all portfolios
# #### Calculate and Plot cumulative returns.
# +
# Calculate cumulative returns of all portfolios
# Plot cumulative returns
# -
# ---
# ## Risk Analysis
#
# Determine the _risk_ of each portfolio:
#
# 1. Create a box plot for each portfolio.
# 2. Calculate the standard deviation for all portfolios
# 3. Determine which portfolios are riskier than the S&P 500
# 4. Calculate the Annualized Standard Deviation
# ### Create a box plot for each portfolio
#
# Box plot to visually show risk
# ### Calculate Standard Deviations
# Calculate the daily standard deviations of all portfolios
# ### Determine which portfolios are riskier than the S&P 500
# +
# Calculate the daily standard deviation of S&P 500
# Determine which portfolios are riskier than the S&P 500
# -
# ### Calculate the Annualized Standard Deviation
# Calculate the annualized standard deviation (252 trading days)
# ---
# ## Rolling Statistics
#
# Risk changes over time. Analyze the rolling statistics for Risk and Beta.
#
# 1. Calculate and plot the rolling standard deviation for all portfolios using a 21-day window
# 2. Calculate the correlation between each stock to determine which portfolios may mimick the S&P 500
# 3. Choose one portfolio, then calculate and plot the 60-day rolling beta between it and the S&P 500
# ### Calculate and plot rolling `std` for all portfolios with 21-day window
# +
# Calculate the rolling standard deviation for all portfolios using a 21-day window
# Plot the rolling standard deviation
# -
# ### Calculate and plot the correlation
# +
# Calculate the correlation
# Display the correlation matrix
# -
# ### Calculate and Plot Beta for a chosen portfolio and the S&P 500
# +
# Calculate covariance of a single portfolio
# Calculate variance of S&P 500
# Computing beta
# Plot beta trend
# -
# ## Rolling Statistics Challenge: Exponentially Weighted Average
#
# An alternative way to calculate a rolling window is to take the exponentially weighted moving average. This is like a moving window average, but it assigns greater importance to more recent observations. Try calculating the [`ewm`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html) with a 21-day half-life.
# Use `ewm` to calculate the rolling window
# ---
# # Sharpe Ratios
# In reality, investment managers and their institutional investors look at the ratio of return-to-risk, and not just returns alone. After all, if you could invest in one of two portfolios, and each offered the same 10% return, yet one offered lower risk, you'd take that one, right?
#
# ### Using the daily returns, calculate and visualize the Sharpe ratios using a bar plot
# Annualized Sharpe Ratios
# Visualize the sharpe ratios as a bar plot
# ### Determine whether the algorithmic strategies outperform both the market (S&P 500) and the whales portfolios.
#
# Write your answer here!
# ---
# # Create Custom Portfolio
#
# In this section, you will build your own portfolio of stocks, calculate the returns, and compare the results to the Whale Portfolios and the S&P 500.
#
# 1. Choose 3-5 custom stocks with at least 1 year's worth of historic prices and create a DataFrame of the closing prices and dates for each stock.
# 2. Calculate the weighted returns for the portfolio assuming an equal number of shares for each stock
# 3. Join your portfolio returns to the DataFrame that contains all of the portfolio returns
# 4. Re-run the performance and risk analysis with your portfolio to see how it compares to the others
# 5. Include correlation analysis to determine which stocks (if any) are correlated
# ## Choose 3-5 custom stocks with at least 1 year's worth of historic prices and create a DataFrame of the closing prices and dates for each stock.
#
# For this demo solution, we fetch data from three companies listed in the S&P 500 index.
#
# * `GOOG` - [Google, LLC](https://en.wikipedia.org/wiki/Google)
#
# * `AAPL` - [Apple Inc.](https://en.wikipedia.org/wiki/Apple_Inc.)
#
# * `COST` - [Costco Wholesale Corporation](https://en.wikipedia.org/wiki/Costco)
# Reading data from 1st stock
# Reading data from 2nd stock
# Reading data from 3rd stock
# Combine all stocks in a single DataFrame
# Reset Date index
# Reorganize portfolio data by having a column per symbol
# +
# Calculate daily returns
# Drop NAs
# Display sample data
# -
# ## Calculate the weighted returns for the portfolio assuming an equal number of shares for each stock
# +
# Set weights
weights = [1/3, 1/3, 1/3]  # equal weighting across the three custom stocks
# Calculate portfolio return
# Display sample data
# -
# ## Join your portfolio returns to the DataFrame that contains all of the portfolio returns
# Join your returns DataFrame to the original returns DataFrame
# Only compare dates where return data exists for all the stocks (drop NaNs)
# ## Re-run the risk analysis with your portfolio to see how it compares to the others
# ### Calculate the Annualized Standard Deviation
# Calculate the annualized `std`
# ### Calculate and plot rolling `std` with 21-day window
# +
# Calculate rolling standard deviation
# Plot rolling standard deviation
# -
# ### Calculate and plot the correlation
# Calculate and plot the correlation
# ### Calculate and Plot Rolling 60-day Beta for Your Portfolio compared to the S&P 500
# Calculate and plot Beta
# ### Using the daily returns, calculate and visualize the Sharpe ratios using a bar plot
# Calculate Annualized Sharpe Ratios
# Visualize the sharpe ratios as a bar plot
# ### How does your portfolio do?
#
# Write your answer here!
|
whale_analysis_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import myfitnesspal
import datetime
import ast
import json
from flask import g, Flask, render_template, request, session, redirect, url_for, jsonify
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
# -
# # MyFitnessPal
client = myfitnesspal.Client('groupb3', password='<PASSWORD>')
# +
# Fetch today's MyFitnessPal diary entry and extract the calories burned
# from exercise.  `calories` drives the route choice in the Flask views below.
date = datetime.date.today()
day = client.get_date(date)
print(day)

# Hoist the exercise list: the original called get_as_list() three times.
exercise_entries = day.exercises[0].get_as_list()
print(exercise_entries)
if not exercise_entries:
    # No cardio logged today.
    calories = 0
else:
    calories = exercise_entries[0]['nutrition_information']['calories burned']
print('You burned ' + str(calories) + ' calories today')
# -
# Weight measurement for a fixed date (2022-03-01) from MyFitnessPal.
# NOTE(review): the date is hard-coded — this raises KeyError once that
# measurement ages out; consider taking the latest key instead.
current_weight = client.get_measurements(upper_bound = datetime.date.today())[datetime.date(2022, 3, 1)]
print('Your current weight is: ' + str(current_weight) + 'kg')
# +
# calories = 0
# -
# # WebApp
## Stores (prefer get from DB ofcourse later on)
#Create list or dict from multiple stores
#52.351476166967736, 4.93562849866492
# Each store is a map marker rendered by the templates: lat/lng + display text.
stores = [{'lat' : 52.356, 'lng' : 4.953, 'name' : "Science park", 'description' : 'Campus of Science Park'},
{'lat' : 52.351476166967736, 'lng' : 4.93562849866492, 'name' : "<NAME>", 'description' : 'Snackbar at the middenweg'}]
# lat = 52.356
# lng = 4.953
# loc = "Science park"
# content_string = 'DESCRIPTION OF THIS STORE'
# +
## Hotspots
# Create list or dict for multiple hotspots.  Each hotspot is a map marker
# with coordinates and display text.
# Reference coordinates (was a stray bare expression; kept as a comment):
# 52.353580934267285, 4.955375250247711
hotspots = [{'lat' : 52.353580934267285, 'lng' : 4.955375250247711, 'name' : "<NAME>", 'description' : 'Een onverwacht groen paradijsje midden op het Science Park.'},
            {'lat' : 52.349141869498425, 'lng' : 4.944225460426125, 'name' : "<NAME>", 'description' : 'Lekker schaatsen, erg koud wel'}]
#hotspots = [{'lat' : 52.353580934267285, 'lng' : 4.955375250247711}]
# Default map focus (first hotspot) and a secondary marker (second hotspot).
lat = 52.353580934267285
lng = 4.955375250247711
lat2 = 52.349141869498425
lng2 = 4.944225460426125
loc = "Science park"
content_string = 'DESCRIPTION OF THIS HOTSPOT'
# { lat: {{lat}}, lng: {{lng}} }
# [{ location: {'lat' : 52.353580934267285, 'lng' : 4.4.955375250247711}}]
# -
app = Flask(__name__)
# NOTE(review): the session secret is hard-coded and trivially guessable;
# anything beyond a demo should load a random value from the environment.
app.secret_key = "abc"
# +
# @app.route("/")
# def index(name=None):
# return render_template('map.html', lng = lng, lat = lat, content_string = content_string)
@app.route("/", methods=['POST', 'GET'])
def index(name=None):
if request.method == 'POST':
#print(request.form['storelist'])
if request.form['storelist']:
cur_store = request.form['storelist']
print(cur_store)
json_acceptable_string = cur_store.replace("'", "\"")
dict_store = json.loads(json_acceptable_string)
if calories < 300:
session['points'] = 0
print(session['points'])
session['distance'] = 0
session['calories_burned'] = calories
session['duration'] = 0
session['steps'] = 0
return render_template('direction_scenic.html', store_list = dict_store, hotspots = hotspots, lng = lng, lat = lat, lng2 = lng2, lat2 = lat2, content_string = content_string, loc = loc)
if calories > 300:
print(session['points'])
return render_template('direction_short.html', store_list = dict_store, hotspots = hotspots, lng = lng, lat = lat, lng2 = lng2, lat2 = lat2, content_string = content_string, loc = loc)
# elif request.form['storelist1']:
# cur_store = request.form['storelist1']
# print(cur_store)
# json_acceptable_string = cur_store.replace("'", "\"")
# dict_store = json.loads(json_acceptable_string)
# return render_template('direction_short.html', store_list = dict_store, hotspots = hotspots, lng = lng, lat = lat, lng2 = lng2, lat2 = lat2, content_string = content_string, loc = loc)
else:
if calories < 300:
session['points'] = 0
print(session['points'])
session['distance'] = 0
session['calories_burned'] = calories
session['duration'] = 0
session['steps'] = 0
return render_template('index.html', stores = stores, name = name)
# -
@app.route("/map")
def maps():
return render_template('map.html', lng = lng, lat = lat, content_string = content_string)
@app.route("/direction_scenic")
def direction_scenic():
session['distance'] = 0
return render_template('direction_scenic.html', hotspots = hotspots, lng = lng, lat = lat, content_string = content_string, loc = loc)
@app.route("/direction_short")
def direction_short():
return render_template('direction_short.html', hotspots = hotspots, lng = lng, lat = lat, content_string = content_string, loc = loc)
@app.route("/profile", methods = ['POST', 'GET'])
def profile(calories = calories, current_weight = current_weight):
user_name = request.form.get('exampleInputEmail1')
print(session['distance'])
return render_template('profile.html', steps = session['steps'], points = session['points'], distance = session['distance'], calories_burned = session['calories_burned'], duration = session['duration'], calories = calories, current_weight = current_weight)
@app.route("/finish_page", methods=['POST', 'GET'])
def finish_page():
if request.method == 'POST':
if request.form['save']:
new_stats = request.form['save']
# print(new_stats)
# print(type(new_stats))
new_stats2 = ast.literal_eval(new_stats)
# print(new_stats2)
# print(type(new_stats2))
#session['distance'] = new_stats[1]
points = new_stats2[0]
distance = new_stats2[1]
calories_burned = new_stats2[2]
duration = new_stats2[3]
steps = new_stats2[4]
session['points'] += new_stats2[0]
print(session['points'])
session['distance'] += new_stats2[1]
session['calories_burned'] += new_stats2[2]
session['duration'] += new_stats2[3]
session['steps'] += new_stats2[4]
return render_template('profile.html', steps = session['steps'], points = session['points'], distance = session['distance'], calories_burned = session['calories_burned'], duration = session['duration'], calories = calories, current_weight = current_weight, new_stats = new_stats2)
else:
distance = session.get('distance')
return render_template('finish_page.html', hotspots = len(hotspots), distance = distance, lng = lng, lat = lat, content_string = content_string, loc = loc)
run_simple('localhost', 9000, app)
|
WebApp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:action-models]
# language: python
# name: conda-env-action-models-py
# ---
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.insert(0, '../src')
# +
import logging
import pandas as pd
import numpy as np
import torch
from gulpio import GulpDirectory
from torch.utils.data import Subset
from tqdm import tqdm
from torchvideo.samplers import frame_idx_to_list
from config.jsonnet import load_jsonnet
from config.application import FeatureConfig
from attribution.online_shapley_value_attributor import OnlineShapleyAttributor
from subset_samplers import ConstructiveRandomSampler, ExhaustiveSubsetSampler
from ipython_media import display_video
# -
# Load the experiment configuration (jsonnet) and build the typed config.
config_dict = load_jsonnet("../configs/feature_multiscale_trn.jsonnet")
config = FeatureConfig(**config_dict)
device = torch.device('cuda:0')
# Frame samplers decide which frames of each clip feed the model.
train_sampler = config.frame_samplers.train.instantiate()
test_sampler = config.frame_samplers.test.instantiate()
dataset = config.dataset.instantiate()
val = dataset.validation_dataset(sampler=test_sampler)
model = config.get_model().to(device).eval()
class2str = dataset.class2str()  # class id -> human-readable label
# Raw validation videos (gulp format) for visualisation next to the features.
gulp_dir = GulpDirectory('../datasets/ssv2/gulp/validation/')
# Per-class prior probabilities, sorted by class id to align with the logits.
class_priors = pd.read_csv('../datasets/ssv2/class-priors.csv', index_col='class', squeeze=True).sort_index().values
class_priors.shape
# Exhaustive sampling enumerates every frame subset (exact Shapley values);
# the commented alternative is the cheaper random approximation.
subset_sampler = ExhaustiveSubsetSampler(device=device)
#subset_sampler = ConstructiveRandomSampler(max_samples=128, device=device)
attributor = OnlineShapleyAttributor(
    list(model.single_scale_models),
    priors=class_priors,
    n_classes=config.dataset.class_count,
    device=device,
    subset_sampler=subset_sampler,
)
# +
# Pull one validation clip: pre-extracted frame features plus metadata.
frame_features, label_dict = val[8]
uid = label_dict['uid']
gt = label_dict['action']  # ground-truth class id
video = np.stack(gulp_dir[uid][0])
# Indices of the frames the test sampler picks from the full video.
sampled_frame_idxs = frame_idx_to_list(test_sampler.sample(len(video)))
print(frame_features.shape)
print(label_dict)
# -
display_video(video, fps=14)                       # full clip
display_video(video[sampled_frame_idxs], fps=2)    # only the sampled frames
# Forward pass (no gradients needed) to get the model's prediction.
with torch.no_grad():
    logits = model(torch.from_numpy(frame_features).to(device))
    probs = torch.softmax(logits, -1)
    pred = logits.argmax().item()
pred, logits[pred].item(), probs[pred].item()
class2str[label_dict['action']], class2str[pred]
frame_features.shape
# Element Shapley values: per-frame attribution for every class score.
esvs, scores = attributor.explain(torch.from_numpy(frame_features).to(device))
esvs.shape
esvs[:, label_dict['action']].cpu().numpy()  # attributions for the GT class
|
notebooks/multiscale-trn-shapley-values.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `git`, `GitHub`, `GitKraken`
#
# <img style="float: left; margin: 15px 15px 15px 15px;" src="http://conociendogithub.readthedocs.io/en/latest/_images/Git.png" width="180" height="50" />
# <img style="float: left; margin: 15px 15px 15px 15px;" src="https://c1.staticflickr.com/3/2238/13158675193_2892abac95_z.jpg" title="github" width="180" height="50" />
# <img style="float: left; margin: 15px 15px 15px 15px;" src="https://www.gitkraken.com/downloads/brand-assets/gitkraken-keif-teal-sq.png" title="gitkraken" width="180" height="50" />
# ___
# ## ¿Qué es `git`?
# <img style="float: right; margin: 15px 15px 15px 15px;" src="http://conociendogithub.readthedocs.io/en/latest/_images/Git.png" title="git" width="180" height="50">
# ### Control de versiones
#
# <p align = "justify"> `git` es un software (de código abierto) de control de versiones diseñado por *<NAME>*, pensando en la eficiencia y la confiabilidad del mantenimiento de versiones de aplicaciones cuando éstas tienen un gran número de archivos de código fuente.</p>
#
# <p align = "justify"> Se llama control de versiones a la gestión de los diversos cambios que se realizan sobre los elementos de algún producto o una configuración del mismo. Una versión, revisión o edición de un producto, es el estado en el que se encuentra el mismo en un momento dado de su desarrollo o modificación.</p>
#
# <p align = "justify"> `git` es independiente de `GitHub`, y es el programa que se usa para gestionar todos los cambios en las versiones de un proyecto (individual o colaborativo, local o de acceso remoto).
# **Descargar `git`**
#
# https://git-scm.com/downloads
# Referencias:
# - Documentación oficial de `git`: https://git-scm.com/doc
# - Control de versiones wiki: https://es.wikipedia.org/wiki/Control_de_versiones
# - `git` wiki: https://es.wikipedia.org/wiki/Git
# - Tutorial git: https://try.github.io/
# - Tutorial branching en `git`: http://learngitbranching.js.org
# **¿Cómo usarlo?**
#
# Mostrar que `git` se puede usar desde la ventana de comandos, **nada más para asustarlos**.
# ___
# ## ¿Qué es `GitHub`?
# <img style="float: right; margin: 15px 15px 15px 15px;" src="https://c1.staticflickr.com/3/2238/13158675193_2892abac95_z.jpg" title="git" width="180" height="50">
# <p align = "justify"> `GitHub` es una compañía que ofrece servicios de *hosting* para repositorios de `git`, y es la forma más popular para trabajar con el control de versiones que brinda `git` de forma colaborativa. En pocas palabras, permite que tanto tú como otras personas del mundo trabajen juntos en proyectos, haciendo control de versiones con `git`.
#
# <p align = "justify"> De forma que usando `git` y `GitHub` en conjunto se puede trabajar tanto:
# - De forma local: repositorios en tu equipo, sin necesidad de conexión a internet (usando únicamente `git`).
# - De forma remota: repositorios remotos (compartidos) que no están necesariamente en tu máquina.
# Referencias:
# - Página oficial de `GitHub`: https://github.com
# - http://conociendogithub.readthedocs.io/en/latest/data/introduccion/
# **¿Cómo usarlo?**
#
# <p align = "justify">Vamos a seguir la guía oficial para aprender a manejar `GitHub`.
#
# <p align = "justify">Para esto, primero nos pedirá crear una cuenta de `GitHub`.
#
# <p align = "justify">**Importante**: se recomienda crear una cuenta *seria*, no solo porque la vamos a utilizar continuamente en el curso, sino porque probablemente sea parte importante de su carrera profesional.
from IPython.display import HTML
HTML('<iframe src=https://guides.github.com/activities/hello-world/ width=700 height=350></iframe>')
# ___
# ## ¿Qué es `GitKraken`?
# <img style="float: right; margin: 15px 15px 15px 15px;" src="https://www.gitkraken.com/downloads/brand-assets/gitkraken-keif-teal-sq.png" title="git" width="180" height="50">
# ### Usamos `git` desde una interfaz gráfica :)
#
# Ya vimos que todos los procedimientos con `git` se pueden correr desde la *consola*. Sin embargo, este modo de utilizar `git` puede ser tedioso para algunos (esto no solo ocurre con `git`).
#
# Por ello, `git` viene con algunas herramientas *GUI* (Graphical User Interface) por defecto con su instalación. Aún así, existen varias GUI desarrolladas por otras compañías para brindar una experiencia más cómoda según el sistema operativo.
#
# `GitKraken` es entonces una de estas GUI, **gratuita para el uso no comercial**, que tiene versiones para los sistemas operativos *Windows*, *Mac* y *Linux*. La estaremos usando en el curso por su versatilidad.
# **Descargar `GitKraken`**
#
# https://www.gitkraken.com/
# Referencias:
# - https://git-scm.com/downloads/guis
# - https://support.gitkraken.com/getting-started/guide
# **¿Cómo usarlo?**
#
# Se explicará como usar directamente `GitKraken` con `GitHub`. Para ello **ya deben tener la cuenta de `GitHub` creada, junto con el repositorio `hello-world`**
#
# Explicación de clase basada en el siguiente *webinar* de `GitKraken` en `YouTube`.
from IPython.display import YouTubeVideo
YouTubeVideo('f0y_xCeM1Rk')
# ### Receta
#
# 1. Descargar e instalar `GitKraken`.
# 2. Abrir `GitKraken`. Cuando nos pida autenticación, nos autenticamos con la cuenta existente de `GitHub`.
# 3. Ir a la pestaña superior derecha de *Preferences* para ver *Profiles* (por defecto el de GitHub) y *Authentication*.
# - En *Authentication*, como estamos autenticados con `GitHub` ya deberíamos estar conectados.
# - Picarle en *Generate SSH key and add to GitHub*. Esto nos ahorrará estarnos autenticando en `GitHub` para cuando queramos actualizar el repositorio remoto.
# - En la parte superior izquierda picarle en *Exit Preferences*.
# 4. Picarle a la carpeta en la esquina superior izquierda. Tres opciones:
# - *Open*: abrir un repositorio local ya existente.
# - ** *Clone*: clonar un repositorio remoto.**
# - *Init*: empezar a hacer control de versiones en un proyecto desde cero.
# 5. Picarle en *Clone*. Como ya estamos autenticados en `GitHub`, aparecerán todos los repositorios disponibles en nuestra cuenta de `GitHub` (en este caso solo tendremos `hello-world`).
# - En *Browse*, buscar el directorio/carpeta donde quieren que el repositorio sea clonado.
# - Picar en `hello-world`.
# - Picar en *Clone the repo*.
# 6. Abrir el directorio `hello-world`, modificar el archivo `README.md` y guardar los cambios.
# - Cuando volvamos a `GitKraken`, veremos que se ha actualizado la modificación.
# - Picamos en el paso de la actualización y en la derecha veremos el archivo modificado.
# - Picamos en `README.md` y podremos ver la modificación que hicimos en el archivo.
# - Antes de llevar a cabo los cambios, ir al repositorio remoto en `GitHub` y ver que el archivo `README.md` no se ha modificado. Solo clonamos los archivos en el repositorio local.
# - Volvemos a `GitKraken`, picamos en *Stage file* y comentamos los cambios.
# - Picar en *Commit*.
# 7. Revisar el arbol de cambios. Hay un ícono que revela hasta donde están los cambios en `GitHub` y el otro revela que vamos un paso más adelante en el repositorio local. Este cambio aún no se hace en el repositorio remoto en `GitHub`.
# 8. Para sincronizar los cambios en el repositorio de `GitHub` debemos picar en *Push* en la parte superior. Los íconos deberían juntarse.
# 9. Ir a `GitHub` y ver que el cambio en efecto se realizó.
# <img src="https://raw.githubusercontent.com/louim/in-case-of-fire/master/in_case_of_fire.png" title="In case of fire (https://github.com/louim/in-case-of-fire)" width="200" height="50" align="center">
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
|
Modulo1/Clase2_GitTutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculating pi using Monte Carlo methods
# ## Relevant formulas
#
# - square area: $s = (2 r)^2$
# - circle area: $c = \pi r^2$
# - $c/s = (\pi r^2) / (4 r^2) = \pi / 4$
# - $\pi = 4 * c/s$
# ## Image to visualize the concept
#
# 
#
# +
# importing modules that we will need
import random
import matplotlib.pyplot as plt
# +
# initializing the number of "throws"
num_points = 50000
# +
# here we "throw darts" and count the number of hits
# Throw `num_points` random darts at the unit square, recording each landing
# point with its colour: orange = inside the quarter circle (a hit),
# blue = outside.  Random calls happen in the same order as before, so the
# results are identical for any fixed random state.
points = []
hits = 0
for _ in range(num_points):
    x, y = random.random(), random.random()
    inside = x * x + y * y < 1.0
    if inside:
        hits += 1
    points.append((x, y, "orange" if inside else "blue"))
# +
# unzip points into 3 lists
x, y, colors = zip(*points)
# define figure dimensions
fig, ax = plt.subplots()
fig.set_size_inches(6.0, 6.0)
# plot results
ax.scatter(x, y, c=colors)
# +
# compute and print the estimate
# hits/throws approximates (quarter-circle area)/(square area) = pi/4,
# so multiplying by 4 recovers an estimate of pi.
fraction = hits / num_points
4 * fraction
# -
num_points = 250
|
darts.ipynb
|
# # Importance of decision tree hyperparameters on generalization
#
# In this notebook, we will illustrate the importance of some key
# hyperparameters on the decision tree; we will demonstrate their effects on
# the classification and regression problems we saw previously.
#
# First, we will load the classification and regression datasets.
# +
import pandas as pd
data_clf_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_clf_column = "Species"
data_clf = pd.read_csv("../datasets/penguins_classification.csv")
# -
data_reg_columns = ["Flipper Length (mm)"]
target_reg_column = "Body Mass (g)"
data_reg = pd.read_csv("../datasets/penguins_regression.csv")
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# ## Create helper functions
#
# We will create some helper functions to plot the data samples as well as the
# decision boundary for classification and the regression line for regression.
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from helpers.plotting import DecisionBoundaryDisplay
def fit_and_plot_classification(model, data, feature_names, target_names):
    """Fit `model` on the given feature/target columns, then draw its
    decision boundary with a scatter plot of the samples on top."""
    features, target = data[feature_names], data[target_names]
    model.fit(features, target)
    # Two-class problems use red/blue; a third class gets black.
    palette = (["tab:red", "tab:blue"]
               if target.nunique() == 2
               else ["tab:red", "tab:blue", "black"])
    DecisionBoundaryDisplay.from_estimator(
        model, features, response_method="predict", cmap="RdBu", alpha=0.5
    )
    sns.scatterplot(data=data, x=feature_names[0], y=feature_names[1],
                    hue=target_names, palette=palette)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
def fit_and_plot_regression(model, data, feature_names, target_names):
    """Fit `model` on the given feature/target columns, then plot the fitted
    regression curve over a scatter of the raw samples."""
    model.fit(data[feature_names], data[target_names])
    # Integer-step grid spanning the observed feature range, wrapped in a
    # DataFrame so the model sees the same column names it was fitted with.
    first_col = data.iloc[:, 0]
    grid = np.arange(first_col.min(), first_col.max())
    data_test = pd.DataFrame(grid, columns=data[feature_names].columns)
    predictions = model.predict(data_test)
    sns.scatterplot(
        x=first_col, y=data[target_names], color="black", alpha=0.5)
    plt.plot(data_test.iloc[:, 0], predictions, linewidth=4)
# -
# ## Effect of the `max_depth` parameter
#
# The hyperparameter `max_depth` controls the overall complexity of a decision
# tree. This hyperparameter allows to get a trade-off between an under-fitted
# and over-fitted decision tree. Let's build a shallow tree and then a deeper
# tree, for both classification and regression, to understand the impact of the
# parameter.
#
# We can first set the `max_depth` parameter value to a very low value.
# +
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
max_depth = 2
tree_clf = DecisionTreeClassifier(max_depth=max_depth)
tree_reg = DecisionTreeRegressor(max_depth=max_depth)
# -
fit_and_plot_classification(
tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(f"Shallow classification tree with max-depth of {max_depth}")
fit_and_plot_regression(
tree_reg, data_reg, data_reg_columns, target_reg_column)
_ = plt.title(f"Shallow regression tree with max-depth of {max_depth}")
# Now, let's increase the `max_depth` parameter value to check the difference
# by observing the decision function.
max_depth = 30
tree_clf = DecisionTreeClassifier(max_depth=max_depth)
tree_reg = DecisionTreeRegressor(max_depth=max_depth)
fit_and_plot_classification(
tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(f"Deep classification tree with max-depth of {max_depth}")
fit_and_plot_regression(
tree_reg, data_reg, data_reg_columns, target_reg_column)
_ = plt.title(f"Deep regression tree with max-depth of {max_depth}")
# For both classification and regression setting, we observe that
# increasing the depth will make the tree model more expressive. However, a
# tree that is too deep will overfit the training data, creating partitions
# which are only correct for "outliers" (noisy samples). The `max_depth` is one
# of the hyperparameters that one should optimize via cross-validation and
# grid-search.
# +
from sklearn.model_selection import GridSearchCV
# Cross-validated search over max_depth in [2, 9].  GridSearchCV refits the
# best estimator, so the fitted search objects can be passed straight to the
# plotting helpers above.
param_grid = {"max_depth": np.arange(2, 10, 1)}
tree_clf = GridSearchCV(DecisionTreeClassifier(), param_grid=param_grid)
tree_reg = GridSearchCV(DecisionTreeRegressor(), param_grid=param_grid)
# -
fit_and_plot_classification(
tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(f"Optimal depth found via CV: "
f"{tree_clf.best_params_['max_depth']}")
fit_and_plot_regression(
tree_reg, data_reg, data_reg_columns, target_reg_column)
_ = plt.title(f"Optimal depth found via CV: "
f"{tree_reg.best_params_['max_depth']}")
# With this example, we see that there is not a single value that is optimal
# for any dataset. Thus, this parameter is required to be optimized for each
# application.
#
# ## Other hyperparameters in decision trees
#
# The `max_depth` hyperparameter controls the overall complexity of the tree.
# This parameter is adequate under the assumption that a tree is built
# symmetrically. However, there is no guarantee that a tree will be symmetric.
# Indeed, optimal generalization performance could be reached by growing some of
# the branches deeper than some others.
#
# We will build a dataset where we will illustrate this asymmetry. We will
# generate a dataset composed of 2 subsets: one subset where a clear separation
# should be found by the tree and another subset where samples from both
# classes will be mixed. It implies that a decision tree will need more splits
# to classify properly samples from the second subset than from the first
# subset.
# +
from sklearn.datasets import make_blobs
data_clf_columns = ["Feature #0", "Feature #1"]
target_clf_column = "Class"
# First subset: two overlapping blobs that are hard to separate.
feat_mixed, lab_mixed = make_blobs(
    n_samples=300, centers=[[0, 0], [-1, -1]], random_state=0)
# Second subset: two well-separated blobs that are easy to split.
feat_easy, lab_easy = make_blobs(
    n_samples=300, centers=[[3, 6], [7, 0]], random_state=0)
features = np.concatenate([feat_mixed, feat_easy], axis=0)
labels = np.concatenate([lab_mixed, lab_easy])
# Assemble a single dataframe with the two features and the class label.
data_clf = pd.DataFrame(
    np.concatenate([features, labels[:, np.newaxis]], axis=1),
    columns=data_clf_columns + [target_clf_column])
data_clf[target_clf_column] = data_clf[target_clf_column].astype(np.int32)
# -
sns.scatterplot(data=data_clf, x=data_clf_columns[0], y=data_clf_columns[1],
                hue=target_clf_column, palette=["tab:red", "tab:blue"])
_ = plt.title("Synthetic dataset")
# We will first train a shallow decision tree with `max_depth=2`. We would
# expect this depth to be enough to separate the blobs that are easy to
# separate.
# Depth 2 should suffice for the easy blobs but not for the mixed region.
max_depth = 2
tree_clf = DecisionTreeClassifier(max_depth=max_depth)
fit_and_plot_classification(
    tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(f"Decision tree with max-depth of {max_depth}")
# As expected, we see that the blue blob on the right and the red blob on the
# top are easily separated. However, more splits will be required to better
# split the blob where both blue and red data points are mixed.
#
# Indeed, we see that the red blob on the top and the blue blob on the right of
# the plot are perfectly separated. However, the tree is still making mistakes
# in the area where the blobs are mixed together. Let's check the tree
# representation.
# +
from sklearn.tree import plot_tree
_, ax = plt.subplots(figsize=(10, 10))
_ = plot_tree(tree_clf, ax=ax, feature_names=data_clf_columns)
# -
# We see that the right branch achieves perfect classification. Now, we
# increase the depth to check how the tree will grow.
max_depth = 6
tree_clf = DecisionTreeClassifier(max_depth=max_depth)
fit_and_plot_classification(
    tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(f"Decision tree with max-depth of {max_depth}")
_, ax = plt.subplots(figsize=(11, 7))
_ = plot_tree(tree_clf, ax=ax, feature_names=data_clf_columns)
# As expected, the left branch of the tree continues to grow while no further
# splits were made on the right branch. Fixing the `max_depth` parameter would
# cut the tree horizontally at a specific level, whether or not it would
# be more beneficial for a branch to continue growing.
#
# The hyperparameters `min_samples_leaf`, `min_samples_split`,
# `max_leaf_nodes`, and `min_impurity_decrease` allow growing asymmetric trees
# and applying a constraint at the leaf or node level. We will check the effect
# of `min_samples_leaf`.
# Instead of limiting depth, require each leaf to hold at least 60 samples;
# each branch stops splitting independently once its leaves are small enough.
min_samples_leaf = 60
tree_clf = DecisionTreeClassifier(min_samples_leaf=min_samples_leaf)
fit_and_plot_classification(
    tree_clf, data_clf, data_clf_columns, target_clf_column)
_ = plt.title(
    f"Decision tree with leaf having at least {min_samples_leaf} samples")
_, ax = plt.subplots(figsize=(10, 7))
_ = plot_tree(tree_clf, ax=ax, feature_names=data_clf_columns)
# This hyperparameter enforces leaves with a minimum number of samples,
# and no further splits will be searched otherwise. Therefore, these
# hyperparameters could be an alternative to fixing the `max_depth`
# hyperparameter.
|
notebooks/trees_hyperparameters.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="OCrpI0flVW78" colab_type="text"
# # Set up run
# + id="PCYcJAdm5t29" colab_type="code" outputId="b0e99f63-c948-4ec4-8bc8-4dd2492f49cb" executionInfo={"status": "ok", "timestamp": 1583747658506, "user_tz": 420, "elapsed": 5656, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
# !pip install rasterio
# + id="k78IROuVLJ8g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="787778df-d2c2-46ba-cfc0-648bb45c2a27" executionInfo={"status": "ok", "timestamp": 1583747658507, "user_tz": 420, "elapsed": 5649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}}
#-- Set up configurations / parameters
retrain = False  # retrain a previously saved model instead of loading it
ndown = 4  # number of 'down' (encoder) steps
ninit = 32  # number of channels to start with
dropout_frac = 0.2  # dropout fraction
n_batch = 10  # training batch size
n_epochs = 60  # number of training epochs
dilation_kernel = 3  #-- kernel size for increasing thickness of training labels
dilate = False  # thicken training labels via binary dilation
augment = False  # augment training data with flipped copies
#-- build a suffix encoding the preprocessing choices, used in file names
suffix = ''
if dilate:
    suffix += '_dilated'
if augment:
    suffix += '_augmented'
    aug_num = 4  # original image plus 3 flipped copies
else:
    aug_num = 1  # no augmentation: one copy per image
mod_lbl = 'atrous'  # 'unet'
#-- model identifier used for checkpoint / history / output directories
if mod_lbl == 'unet':
    mod_str = f'{mod_lbl}_{ninit}init_{ndown}down_drop{dropout_frac:.1f}{suffix}'
elif mod_lbl == 'atrous':
    mod_str = f'{mod_lbl}_{ninit}init_drop{dropout_frac:.1f}{suffix}'
else:
    print('model label not matching.')
print(mod_str)
# + id="k0iqhtdnvws0" colab_type="code" outputId="998ac351-0a24-4085-863d-e8daec1102ad" executionInfo={"status": "ok", "timestamp": 1583747660626, "user_tz": 420, "elapsed": 7763, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 80}
#-- Import Modules
import os
import imp
# import cv2
import numpy as np
import rasterio
# import geopandas as gpd
import matplotlib.pyplot as plt
from google.colab import drive
import keras
from keras import backend as K
from keras.preprocessing import image
from tensorflow.python.client import device_lib
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from sklearn.utils import class_weight
from skimage.morphology import binary_dilation
# + id="1w5mQR80QP0j" colab_type="code" outputId="506d1828-7b28-4f97-a9ee-c8c4cfdd98c8" executionInfo={"status": "ok", "timestamp": 1583747660627, "user_tz": 420, "elapsed": 7758, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 717}
#-- Print backend information
#-- (lists local devices and available GPUs; confirms the runtime has a GPU)
print(device_lib.list_local_devices())
print(K.tensorflow_backend._get_available_gpus())
# + id="2sR2mHxpv5NU" colab_type="code" outputId="c1e45510-be1e-4369-a5f2-6551b62d075a" executionInfo={"status": "ok", "timestamp": 1583747660628, "user_tz": 420, "elapsed": 7753, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
#-- Mount Google Drive
#-- (interactive: prompts for an authorization code on first run)
drive.mount('/content/gdrive')
# + [markdown] id="PjMd5aaWVaZS" colab_type="text"
# # Train on data slant range imagery
# + id="5VEY8FLTI-WM" colab_type="code" colab={}
#-- Directory setup
#-- colabdir holds notebooks/checkpoints; ddir is the shared team data drive
colabdir = '/content/gdrive/My Drive/Colab Notebooks/'
ddir = '/content/gdrive/Shared drives/GROUNDING_LINE_TEAM_DRIVE/ML_Yara/'
train_dir = os.path.join(ddir,'Slant_Imagery','training_data')
test_dir = os.path.join(ddir,'Slant_Imagery','testing_data')
# + id="pJkQl4knwzvp" colab_type="code" colab={}
#-- Get list of images
#-- input scenes end in '_coco.tif'; matching label masks end in '_mask.tif'
fileList = os.listdir(train_dir)
train_list = [f for f in fileList if f.endswith('_coco.tif')]
fileList = os.listdir(test_dir)
test_list = [f for f in fileList if f.endswith('_coco.tif')]
# + id="D7qXz33n4ppm" colab_type="code" outputId="b421a9ae-f982-4e5f-928b-ec4e020c1dbc" executionInfo={"status": "ok", "timestamp": 1583747660629, "user_tz": 420, "elapsed": 7737, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
#-- Read sample image to get dimensions
#-- (assumes all tiles share the same shape and band count -- TODO confirm)
raster = rasterio.open(os.path.join(train_dir,train_list[15]))
im = raster.read(1)
channels = raster.count
raster.meta
# + id="yBEejZDx42UN" colab_type="code" colab={}
if dilate:
    #-- Experiment with dilation training labels
    #-- (visual check of how binary dilation thickens a label mask)
    raster = rasterio.open(os.path.join(train_dir,train_list[15].replace('_coco.tif','_mask.tif')))
    tr1 = raster.read(1)
    #kernel = np.ones((dilation_kernel,dilation_kernel),np.uint8)
    #tr2 = cv2.dilate(tr1,kernel,iterations = 1)
    tr2 = binary_dilation(tr1)
    plt.figure(1,figsize=(8,8))
    plt.imshow(tr1)
    plt.figure(2,figsize=(8,8))
    plt.imshow(tr2)
    plt.show()
# + id="_1SZivgc6o2Q" colab_type="code" colab={}
#-- Read training images
#-- Read training data with augmentation
#-- arrays are sized for aug_num copies per scene (1, or 4 when augmenting)
train_imgs = np.ones((len(train_list)*aug_num,im.shape[0],im.shape[1],channels))
train_lbls = np.ones((len(train_list)*aug_num,im.shape[0],im.shape[1],1))
i = 0
for f in train_list:
    #-- read image
    raster = rasterio.open(os.path.join(train_dir,f))
    for c in range(channels):
        train_imgs[i,:,:,c] = raster.read(c+1)
        if augment:
            #-- augmented copies: left-right flip, up-down flip, both
            train_imgs[i+1,:,:,c] = np.fliplr(raster.read(c+1))
            train_imgs[i+2,:,:,c] = np.flipud(raster.read(c+1))
            train_imgs[i+3,:,:,c] = np.fliplr(np.flipud(raster.read(c+1)))
    #-- read label (same flips applied; optionally thickened by dilation)
    raster = rasterio.open(os.path.join(train_dir,f.replace('coco.tif','mask.tif')))
    if dilate:
        train_lbls[i,:,:,0] = binary_dilation(raster.read(1))
        if augment:
            train_lbls[i+1,:,:,0] = binary_dilation(np.fliplr(raster.read(1)))
            train_lbls[i+2,:,:,0] = binary_dilation(np.flipud(raster.read(1)))
            train_lbls[i+3,:,:,0] = binary_dilation(np.fliplr(np.flipud(raster.read(1))))
    else:
        train_lbls[i,:,:,0] = raster.read(1)
        if augment:
            train_lbls[i+1,:,:,0] = np.fliplr(raster.read(1))
            train_lbls[i+2,:,:,0] = np.flipud(raster.read(1))
            train_lbls[i+3,:,:,0] = np.fliplr(np.flipud(raster.read(1)))
    i += aug_num
#-- Read test data (labels commented out: not used for prediction below)
test_imgs = np.ones((len(test_list),im.shape[0],im.shape[1],channels))
# test_lbls = np.ones((len(test_list),im.shape[0],im.shape[1],1))
for i,f in enumerate(test_list):
    #-- read image
    raster = rasterio.open(os.path.join(test_dir,f))
    for c in range(channels):
        test_imgs[i,:,:,c] = raster.read(c+1) #.transpose()
    #-- read label
    # raster = rasterio.open(os.path.join(test_dir,f.replace('coco.tif','mask.tif')))
    # test_lbls[i,:,:,0] = binary_dilation(raster.read(1))
# + id="T51_v8lmigEl" colab_type="code" outputId="f5bbe108-d445-4398-bf15-70b2051b8f45" executionInfo={"status": "ok", "timestamp": 1583747667269, "user_tz": 420, "elapsed": 14361, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 369}
#-- Preview a few training scenes (top row) with their labels (bottom row)
ncols = 4
fig,ax = plt.subplots(2, ncols,figsize=(16,5))
for k,j in zip(range(ncols),[80,81,82,83]): #,np.arange(0,10*ncols,10)):
    ax[0,k].imshow(train_imgs[j])
    ax[1,k].imshow(train_lbls[j].reshape((train_lbls.shape[1],train_lbls.shape[2])))
plt.tight_layout()
plt.show()
# + id="HljXLjBsxSIa" colab_type="code" outputId="3a260eea-ac36-4a26-f5e9-d669e2a1d4e1" executionInfo={"status": "ok", "timestamp": 1583747667269, "user_tz": 420, "elapsed": 14350, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
#-- get ratio of GL to non-GL pixels to use as weight in training
#-- (grounding-line pixels are rare, so they are upweighted by this ratio)
#-- First flatten labels for future use
n,h,wi,nch = train_imgs.shape
print(train_imgs.shape)
train_lbls = train_lbls.reshape((n,h*wi,1))
#-- count the proportion of white (GL) pixels to black (background) pixels;
#-- a single vectorized count replaces the per-image Python loop
GL_tot = int(np.count_nonzero(train_lbls))
non_tot = train_lbls.size - GL_tot
#-- np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
#-- float is the drop-in replacement
ratio = float(non_tot)/float(GL_tot)
print(ratio)
# + id="sIZXTauiw7aS" colab_type="code" outputId="c3d9cca7-58ed-4d00-b9c1-ebc559089c6e" executionInfo={"status": "ok", "timestamp": 1583747668542, "user_tz": 420, "elapsed": 15616, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
#-- Set weights
#-- per-pixel sample weights: grounding-line pixels get weight `ratio`,
#-- background pixels keep weight 1
#-- get rid of last dimension of training labels
train_lbls = train_lbls.reshape((n,h*wi))
#-- initialize weights
w = np.ones(train_lbls.shape)
#-- loop through images
for i in range(n):
    #-- flatten out image and get indices of boundaries
    ind = np.nonzero(train_lbls[i])
    w[i][ind] *= ratio
print(w.shape)
#-- convert Y_train back to original shape
train_lbls = train_lbls.reshape((train_lbls.shape[0],train_lbls.shape[1],1))
print(train_lbls.shape)
# + id="1jpPSdhI-MPl" colab_type="code" outputId="3fbd9c48-a499-4239-f350-563d42c912ba" executionInfo={"status": "ok", "timestamp": 1583747671858, "user_tz": 420, "elapsed": 18924, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
#-- Upgrade U-Net model from TF1 to TF2
# !tf_upgrade_v2 \
# --infile /content/gdrive/My\ Drive/Colab\ Notebooks/unet_model.py \
# --outfile /content/gdrive/My\ Drive/Colab\ Notebooks/unet_model_v2.py
# + id="tL1rcBCZw_9S" colab_type="code" outputId="904272f9-8ef6-4039-e6c5-45d0fdc905ed" executionInfo={"status": "ok", "timestamp": 1583747673250, "user_tz": 420, "elapsed": 20308, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 309}
#-- Import model
#-- NOTE(review): the `imp` module is deprecated since Python 3.4; importlib
#-- would be the modern replacement.
mod_module = imp.load_source('unet_model',os.path.join(colabdir,'unet_model.py'))
#-- set up model (architecture selected by mod_lbl from the config cell)
if mod_lbl == 'unet':
    print('loading unet model')
    model = mod_module.unet_model_double_dropout(height=h,width=wi,channels=channels,
                                                 n_init=ninit,n_layers=ndown,
                                                 drop=dropout_frac)
elif mod_lbl == 'atrous':
    print("loading atrous model")
    model = mod_module.unet_model_atrous_double_dropout(height=h,width=wi,
                                                        channels=channels,
                                                        n_filts=ninit,
                                                        drop=dropout_frac)
else:
    print('Model label not correct.')
# + id="WmT7qCY39VzM" colab_type="code" outputId="28cbc4c4-8e58-4c73-cf0d-31ad26bdc767" executionInfo={"status": "ok", "timestamp": 1583747673251, "user_tz": 420, "elapsed": 20303, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 156}
#-- compile imported model
#-- sample_weight_mode="temporal" enables the per-pixel weight array `w`
model.compile(loss='binary_crossentropy',optimizer='adam',
              metrics=['accuracy'],sample_weight_mode="temporal")
# + id="BHk2_b1cIbIf" colab_type="code" outputId="fb3470d3-9598-4724-979d-bb8179b9963f" executionInfo={"status": "ok", "timestamp": 1583747673251, "user_tz": 420, "elapsed": 20294, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
#-- checkpoint file
chk_file = os.path.join(colabdir,'{0}_weights.h5'.format(mod_str))
#-- if file exists, read model from file
if os.path.isfile(chk_file):
    print('Check point exists; loading model from file.')
    #-- load weights
    model.load_weights(chk_file)
else:
    print('Model does not previously exist.')
# + id="QN7H0GfWKymx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="51461943-6448-48cb-f581-d487d37141e5" executionInfo={"status": "ok", "timestamp": 1583748085316, "user_tz": 420, "elapsed": 432349, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}}
#-- Train the model
#-- (skipped entirely when a checkpoint exists and retrain is False)
if (retrain) or (not os.path.isfile(chk_file)):
    print('Training model...')
    #-- create checkpoint (saves best weights by training loss)
    model_checkpoint = keras.callbacks.ModelCheckpoint(chk_file, monitor='loss',
                                                       verbose=1,
                                                       save_best_only=True)
    #-- halve the learning rate when training accuracy plateaus
    lr_callback = ReduceLROnPlateau(monitor='acc', factor=0.5, patience=5,
                                    verbose=1, mode='auto', min_delta=0.0001,
                                    cooldown=0, min_lr=0)
    # es_callback = EarlyStopping(monitor='val_loss',min_delta=0.0001, patience=5,
    #                             verbose=1, mode='auto')
    #-- now fit the model
    history = model.fit(train_imgs, train_lbls, batch_size=n_batch, epochs=n_epochs,
                        verbose=1, validation_split=0.1, shuffle=True,
                        sample_weight=w,
                        callbacks=[lr_callback,model_checkpoint])
    #callbacks=[lr_callback,es_callback,model_checkpoint])
    #-- save history to file (tab-separated per-epoch metrics)
    outfile = open(os.path.join(colabdir,
                                '{0}_history.txt'
                                .format(mod_str)),'w')
    outfile.write('Epoch loss\tval_loss\tacc\tval_acc\n')
    for i in range(len(history.history['loss'])):
        outfile.write('%i\t%f\t%f\t%f\t%f\n'%(i,history.history['loss'][i],history.history['val_loss'][i],\
            history.history['acc'][i],history.history['val_acc'][i]))
    outfile.close()
    #-- Make plots for training history
    for item,name in zip(['acc','loss'],['Accuracy','Loss']):
        fig = plt.figure(1,figsize=(8,6))
        plt.plot(history.history[item])
        plt.plot(history.history['val_%s'%item])
        plt.title('Model %s'%name)
        plt.ylabel(name)
        plt.xlabel('Epochs')
        plt.legend(['Training', 'Validation'], loc='upper left')
        plt.savefig(os.path.join(colabdir,
                                 '{0}_history.pdf'
                                 .format(mod_str)),format='pdf')
        plt.close(fig)
# + id="hsbiHUo2Texy" colab_type="code" outputId="84266e1f-ad2d-403e-b5ad-d3f7ffa68670" executionInfo={"status": "ok", "timestamp": 1583748091246, "user_tz": 420, "elapsed": 438264, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 68}
print('Model is trained. Running on Train data...')
#-------------------------------
#-- First run on training data
#-------------------------------
out_imgs = model.predict(train_imgs, batch_size=1, verbose=1)
#-- restore the (height, width) image shape from the flattened prediction
out_imgs = out_imgs.reshape(out_imgs.shape[0],h,wi,out_imgs.shape[2])
print(out_imgs.shape)
#-- make output directory; os.makedirs also creates the missing parent
#-- 'Train_predictions.dir' (a bare os.mkdir fails when the parent is absent)
out_dir = os.path.join(colabdir,'Train_predictions.dir',
                       '{0}.dir'.format(mod_str))
os.makedirs(out_dir, exist_ok=True)
#-- save output images (every aug_num-th image; skip augmented copies)
for i,f in enumerate(train_list):
    im = image.array_to_img(out_imgs[aug_num*i])
    im.save(os.path.join(out_dir,f.replace('coco.tif','pred.png')))
# + id="KTBZrMRlCN-U" colab_type="code" outputId="db94e478-b7c4-48f0-fbca-3e9f200a6df0" executionInfo={"status": "ok", "timestamp": 1583748091247, "user_tz": 420, "elapsed": 438240, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
#-- sanity check on predicted values (presumably probabilities in [0, 1] --
#-- confirm against the model's final activation)
print(np.max(out_imgs))
print(np.min(out_imgs))
# + id="ZciosNGdHKQs" colab_type="code" outputId="33daa18e-16de-436c-a335-3f1ecea4db4d" executionInfo={"status": "ok", "timestamp": 1583748091593, "user_tz": 420, "elapsed": 438575, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
#-------------------------------
#-- Run on test data
#-------------------------------
out_imgs = model.predict(test_imgs, batch_size=1, verbose=1)
#-- restore the (height, width) image shape from the flattened prediction
out_imgs = out_imgs.reshape(out_imgs.shape[0],h,wi,out_imgs.shape[2])
print(out_imgs.shape)
#-- make output directory; os.makedirs also creates the missing parent
#-- 'Test_predictions.dir' (a bare os.mkdir fails when the parent is absent)
out_dir = os.path.join(colabdir,'Test_predictions.dir',
                       '{0}.dir'.format(mod_str))
os.makedirs(out_dir, exist_ok=True)
#-- save output images
for i,f in enumerate(test_list):
    im = image.array_to_img(out_imgs[i])
    im.save(os.path.join(out_dir,f.replace('coco.tif','pred.png')))
# + [markdown] id="QUnT_-LrVK1g" colab_type="text"
# # Train on geocoded data
# + id="S3q2FGu3Vlph" colab_type="code" outputId="74499635-c7d3-4128-e5d5-f8f1ae1db30b" executionInfo={"status": "ok", "timestamp": 1583748091594, "user_tz": 420, "elapsed": 438564, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjF0xZxL13V9NKJk2UJjDP9O_Dc7E-7xcNP8LaDWQ=s64", "userId": "17138238406906610166"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
#-- NOTE(review): disabled draft of a geocoded-data loader, kept as an inert
#-- string literal; it is never executed.
"""
dat_dir = '/content/gdrive/My Drive/GL_learning/GL_learning_data/Test_Training/2018_Track069'
#-- archives to read
fileList = []
for i in range(1,7):
fileList += os.listdir(os.path.join(dat_dir,'Archive_%02i'%i))
tif_list = [f for f in fileList if f.endswith('.tif')]
"""
# + id="8u9H27mHWPQl" colab_type="code" colab={}
|
obsolete_scripts.dir/GL_delineation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
a = np.array([1,2])
b = np.array([3,4])
# np.append on 1-D arrays only has axis 0; axis=1 raised an AxisError here.
print(np.append(a,b,axis=0))
# +
# Differentiable rollout sketch. NOTE(review): relies on names defined in
# another notebook/session (Unicycle2D, dt, FoV, max_D, min_D, N, policy,
# agentT, torch_dynamics, compute_reward, robotF), so this cell does not run
# standalone.
# initial state as tensor
agentF = Unicycle2D(np.array([0,0.2,0]),dt,3,FoV,max_D,min_D)
agentF.X0 = torch.tensor(np.array([0,0.2,0]),dtype=torch.float, requires_grad=True)
#create layers
dynam = torch_dynamics.apply
state_tensors = [torch.tensor(agentF.X,dtype=torch.float,requires_grad=True)]
input_tensors = []
for i in range(N): # horizon
    u = policy(agentF,agentT) # tensor
    x = state_tensors[-1]
    x_ = dynam(x,u)
    # compute reward: should be a reward
    r = compute_reward(x_)
    # Store state and input tensors
    state_tensors.append(x_)
    input_tensors.append(u)
    # visualize
    robotF.step(u.detach().numpy())
# form MDP
# -
print(np.eye(3))
import torch
# Elementwise arithmetic on small tensors.
a = torch.tensor([ [0],[0] ])
b = torch.tensor([ [2],[3] ] )
print(a-b)
a = torch.tensor([[0]])
b = torch.tensor(3)
# Broadcasting a 0-d tensor against a (1, 1) tensor.
print(a+b)
a + 1
# Autograd scratch: gradients of simple expressions through backward().
x1 = torch.ones(1, requires_grad=True)
x2 = torch.tensor(2, requires_grad=True, dtype=torch.float)
y = x1 + x2
z = x1 - x2
y.backward(retain_graph=True)
print(x2.grad)
print(x1.grad)
print(x1.grad)
x2 = torch.tensor(2, requires_grad=True, dtype=torch.float)
y = x2
y.backward()
print(x2.grad)
# Plain Python list slicing reminder (last two elements).
a = [[1],[2],[3],[4],[5]]
a[-2:]
x1
x2
# retain_grad() keeps gradients on non-leaf tensors after backward().
x1 = torch.ones(1, requires_grad=True)
y = x1**2
y.retain_grad()
z = (y**3)
z.retain_grad()
print(z)
z.backward()
y.grad
x1.grad
y.grad
# Gradient of cos(x) + 2x with respect to x, checked numerically below.
x1 = torch.ones(1, requires_grad=True)
y = torch.cos(x1)
y.retain_grad()
z = y + 2*x1
# z.retain_grad()
z.backward()
print(z)
print(y.grad)
x1.grad
2 - np.sin(1)
def square(x):
    """Scratch helper: despite its name, returns sin(x) elementwise."""
    return torch.sin(x)  # torch.square(torch.sin(x))
x1 = torch.ones(1, requires_grad=True)
# NOTE(review): building a tensor from existing tensors copies their values
# and detaches them from the graph, so x1.grad stays None after backward().
y = torch.tensor([x1,x2],requires_grad=True)
# y = torch.square(torch.sin(x1))
# y.retain_grad()
# z = y + 2*x1
z = y.sum()
z.backward()
# y.backward()
x1.grad
x1.grad
|
QPpolicy/.ipynb_checkpoints/Rough Work-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Зависимости
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
# +
# Seed all known random number generators for reproducibility
my_code = "Суминов"
seed_limit = 2 ** 32
# Derive a per-student seed from the name; numpy seeds must be < 2**32
my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit
os.environ['PYTHONHASHSEED']=str(my_seed)
random.seed(my_seed)
np.random.seed(my_seed)
tf.compat.v1.set_random_seed(my_seed)
# Single-threaded TF session for deterministic execution
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# -
# Read the training data from file
train_data = pd.read_csv("datasets/iris_train.csv")
train_data.head()
# Size of the validation split (20% of the training data)
val_size = round(0.2*len(train_data))
print(val_size)
# Create the training and validation subsets
random_state = my_seed
train, val = train_test_split(train_data, test_size=val_size, random_state=random_state)
print(len(train), len(val))
# +
# Scale the numeric columns to the [0, 1] interval.
# The scaler is fitted on the training subset only (avoids data leakage).
num_columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
ord_columns = ['species']
ct = ColumnTransformer(transformers=[('numerical', MinMaxScaler(), num_columns)], remainder='passthrough')
ct.fit(train)
# -
# Transform the values and convert the result back to a DataFrame
sc_train = pd.DataFrame(ct.transform(train))
sc_val = pd.DataFrame(ct.transform(val))
# Restore the column names
column_names = num_columns + ord_columns
sc_train.columns = column_names
sc_val.columns = column_names
sc_train
# +
# Select the feature columns and flatten the target column
x_train = sc_train[num_columns]
x_val = sc_val[num_columns]
y_train = (sc_train[ord_columns].values).flatten()
y_val = (sc_val[ord_columns].values).flatten()
# -
model = KNeighborsClassifier()
# Fit the classifier
model.fit(x_train, y_train)
# Evaluate the fitted classifier on the validation set (weighted F1)
pred_val = model.predict(x_val)
f1 = f1_score(y_val, pred_val, average='weighted')
print(f1)
# Load the test set; its species column is filled in by the model below
test = pd.read_csv("datasets/iris_test.csv")
test['species'] = ''
test.head()
# Apply the same scaler fitted on the training data
sc_test = pd.DataFrame(ct.transform(test))
sc_test.columns = column_names
x_test = sc_test[num_columns]
test['species'] = model.predict(x_test)
test.head()
test.to_csv('Суминов.csv', index=False)
|
2021 Весенний семестр/Практическое задание 6/Суминов - задание 6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise List 5
# Utilizando o Apache Spark e demais ferramentas correlatas, implemente os seguintes passos:
#
# 1. Selecione um estado brasileiro e dez de suas cidades.
# - Crie um CSV para armazenar as cidades, com: `id` (nome da cidade), `latitude`, `longitude` e `população`.
# - Crie outro CSV para armazenar a distância entre essas cidades, com: `src`, `dst` e `relationship` como campos.
# - adicione pelo menos 30 registros nesse arquivo.
# 2. Utilizando as bibliotecas do Spark, crie um objeto `GraphFrame` a partir desses dois CSVs.
# 3. Utilizando o método `bfs` (Breadth First Search), execute **5** filtragens a sua escolha.
# 4. Execute 2 consultas utilizando o método `find`.
# 5. Execute 2 consultas utilizando o método `filterVertices`.
# 6. Implemente uma rotina que, recebendo como entrada um objeto `GraphFrame`, percorra todos os vértices do grafo com o algoritmo da busca em profundidade.
# **Setup**
# +
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
# Create (or reuse) a local Spark context and session
conf = SparkConf().setAppName('appName').setMaster('local')
sc = SparkContext.getOrCreate(conf)
spark = SparkSession(sc)
# -
# ### 1. Select a Brazilian state and ten of its cities
# **Create a CSV storing the cities, with: `id` (city name), `latitude`, `longitude` and `population`**
cidades = spark.read.format("csv").option("header", "true").load("data/transport/transport-nodes.csv")
cidades.show()
# **Create another CSV storing the distances between these cities, with `src`, `dst` and `relationship` as fields**
distancias = spark.read.format("csv").option("header", "true").load("data/transport/transport-relationships.csv")
distancias.show(30)
# ### 2. Using the Spark libraries, build a GraphFrame object from these two CSVs.
# +
from graphframes import *
g = GraphFrame(cidades, distancias)
# -
g.vertices.show()
g.edges.show()
# ### 3. Utilizando o método `bfs` (Breadth First Search), execute **5** filtragens a sua escolha.
# **Decent and updated [graphframes documentation](https://docs.databricks.com/_static/notebooks/graphframes-user-guide-py.html)**
g.bfs(fromExpr = "id = 'Araripina'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
g.bfs(fromExpr = "id = 'Surubim'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
g.bfs(fromExpr = "id = 'Salgueiro'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
g.bfs(fromExpr = "id = 'Carpina'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
g.bfs(fromExpr = "id = 'Cabrobo'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
g.bfs(fromExpr = "id = 'Igarassu'",
toExpr = "id = 'Recife'",
edgeFilter = "relationship = 'EROAD'").show()
# ### 4. Execute 2 consultas utilizando o método `find`.
# find vertices with edges in both directions between them
g.find("(a)-[e]->(b); (b)-[e2]->(a)").show()
# find vertices in chains of three
g.find("(a)-[ab]->(b); (b)-[bc]->(c)").show()
# ### 5. Execute 2 consultas utilizando o método `filterVertices`.
# **Filter 1: g2**
g2 = g.filterEdges("cost < 500").filterVertices("population > 100000 or latitude < 8")
g2.vertices.show()
g2.edges.show()
# **Filter 2: g3**
g3 = g.filterEdges("cost > 500").filterVertices("latitude > 7 and population > 100000")
g3.vertices.show()
g3.edges.show()
# ### 6. Implemente uma rotina que, recebendo como entrada um objeto `GraphFrame`, percorra todos os vértices do grafo com o algoritmo da busca em profundidade.
|
pyspark/exercise_list_05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="yoiTUC_zXAb9"
# # 実践編: ディープラーニングを使った配列解析
#
# 近年,次世代シーケンサ(NGS; Next Generation Sequencer)の発展により,遺伝子の塩基配列が高速,大量,安価に読み取られるようになってきました.
#
# ここではディープラーニングを用いて,DNA配列からエピジェネティックな影響や転写制御を予測する問題に取り組みます.ディープラーニングは複雑なモデルを表現でき,遠距離の影響も考慮することができ,より高い精度で予測することが期待できます.
#
#
#
#
#
#
#
# + [markdown] colab_type="text" id="CkqRZHc8crS4"
# ## 環境構築
#
# ここで用いるライブラリは
#
#
# * Chainer
# * Cupy
# * matplotlib
#
# です.Google Colab上では,これらはあらかじめインストールされています.
#
# + [markdown] colab_type="text" id="mEPaj4MfdEyh"
# 以下のセルを実行して,各ライブラリのバージョンを確認してください.
#
#
# + colab_type="code" id="p4X-dmKrdDhd" outputId="465abf75-5b3c-4305-b70e-ec577974801f" colab={"base_uri": "https://localhost:8080/", "height": 300}
import chainer
import cupy
import matplotlib
# Print the Chainer/CuPy/CUDA runtime configuration to verify the environment
chainer.print_runtime_info()
print('matplotlib:', matplotlib.__version__)
# + [markdown] colab_type="text" id="0nsNNeFEkjB_"
# 期待される実行結果例
# ```
# Platform: Linux-4.14.137+-x86_64-with-Ubuntu-18.04-bionic
# Chainer: 6.5.0
# ChainerX: Not Available
# NumPy: 1.17.3
# CuPy:
# CuPy Version : 6.5.0
# CUDA Root : /usr/local/cuda
# CUDA Build Version : 10000
# CUDA Driver Version : 10010
# CUDA Runtime Version : 10000
# cuDNN Build Version : 7603
# cuDNN Version : 7603
# NCCL Build Version : 2402
# NCCL Runtime Version : 2402
# iDeep: 2.0.0.post3
# matplotlib: 3.1.1
# ```
# + [markdown] colab_type="text" id="MzdDwd7aeYmT"
# ## 配列解析について
#
# 次世代シーケンサの発展・普及とともに,大量の遺伝子配列が読み取られるようになりました.そうした中で,塩基配列で表現された遺伝子型と病気や形態などの表現型との関係を推定するようなGWAS(Genome Wide Association Study; ゲノムワイド関連解析)が広がってきました.しかし,遺伝子の変異だけでは全ての表現型の変化を説明できないことがわかってきました.特に,非翻訳領域が遺伝子発現に影響を与え,表現型の変化を生じさせていることが様々な実験結果からわかってきています.遺伝子発現時に周辺領域がどのように影響を与えているのかを調べるために様々な手法が提案されています.
#
# 
#
# 例えば,ChIP-seq(クロマチン免疫沈降シーケンス)は,ChIP(クロマチン免疫沈降)と高速DNAシーケンスを組み合わせることで,ヒストン修飾状況や転写調節因子の結合部位を網羅的(ゲノムワイド)に同定する手法です.これにより,転写調節機能を司るヒストン修飾やDNA結合タンパクの結合部位をゲノム全体で同定することができ,遺伝子変異だけでは説明しきれない細胞の表現型に関与する膨大な情報の取得が可能になります.
#
# そこで本節では,ChIP-seqにより得られた転写調節因子の結合部位に当たるDNA塩基配列のパターンを深層学習により学習することで,任意のDNA塩基配列に対して特定の転写調節因子との結合可能性の予測を行います.このアプローチはゲノム全体のヒストン修飾部位の予測やオープンクロマチン領域の予測など幅広い生命現象を統一的に取り扱うことを可能とします。
#
# この課題を機械学習で取り扱う際の技術的な難しさの一つが,DNA塩基配列の長距離相互作用と呼ばれる現象です.これは,核内のDNAは複雑に折り畳まれた様式で存在しており,塩基配列上の並びとしては遠く離れた2つの領域が空間的には近い距離に位置し,転写調節因子の結合に影響を及ぼすことがあるということです.例えば,今回対象とする問題では10万bp (ベース・ペア:DNAを構成する塩基を数える単位) 超の長さのDNA塩基配列を入力として受け取り,DNA塩基配列中のある領域が転写調節因子の結合部位になり得るかを予測します。このような長距離相互作用を考慮しても効率的に学習可能なモデルを構築してきます‥
#
# 今回は,数百種類の人の細胞型から得られた数千のChIP-seq,DNase-seq(オープンクロマチン領域の網羅的解析の一手法)のデータセットから得られたDNA塩基配列を入力として,CAGE(Cap Analysis of Gene Expression)の結果計測されたmRNAの発現量を推定する問題を考えます[1].
# + [markdown] colab_type="text" id="db3ngEYHgSmd"
# ## データセット
#
# ここでは,Basenji[1]で使われた実験データセットの一部を利用します.これらはCAGEなどの配列解析処理を行って得られたデータセットです.
#
# 下のセルを実行してデータをダウンロードしてください.
#
# この配列はそれぞれが長さ131072bpからなり,128bp毎に対しそのカバレッジ値が記録されています.このカバレッジ値の配列の長さは131072/128=1024です.
#
# この問題の目標は長さ131072bpの配列を入力として受け取った時に,この128bp毎のカバレッジ値を推定することが目標です.
#
# 今回は10種類の異なる実験のカバレッジ値を同時に予測する問題を扱います.
# + colab_type="code" id="LjxWi_2chwkX" outputId="69c63cd9-0c1e-4e16-e04e-20bdb5fde838" colab={"base_uri": "https://localhost:8080/", "height": 325}
# !wget https://github.com/japan-medical-ai/medical-ai-course-materials/releases/download/v0.1/seq.h5
# + colab_type="code" id="9yuEHGl_XC2B" outputId="6e0e3ee2-0dc3-4b22-cb0a-f44bf829a950" colab={"base_uri": "https://localhost:8080/", "height": 71}
# !ls -lh
# + [markdown] colab_type="text" id="l8NsMudgiHBI"
# seq.h5というファイルが正しくダウンロードされているかを確認してください.サイズは567MBです.
#
# seq.h5はHDF5形式でデータを格納したファイルです.HDF5ファイルは,ファイルシステムと同様に,階層的にデータを格納することができ,行列やテンソルデータをそれぞれの位置で名前付きで格納することができます.
#
# HDF5形式のファイルを操作するためにh5pyというライブラリがあります.h5pyのFile()関数でファイルを開き,keys()関数でその中に含まれているキーを列挙します.また取得したキーを'[]'内で指定することでそのキーに紐付けられて格納されている各データを参照することができます.
#
# テンソルデータはnumpyと同様にshapeという属性でそのサイズを取得することができます.
#
# 以下のセルを実行して格納されているデータを確認してください.
#
# 各データの名前にtrain(学習),validate(検証),test(テスト)の接頭辞がつけられ,inが入力の塩基配列,outが出力のカバレッジ値に対応します.
#
# 例えば,'train_in'は学習用の入力データであり(5000, 131072, 4)というサイズを持ちます.これは長さが130172からなる配列が5000個あり,それぞれA, T, C, Gの対応する次元の値が1, それ以外は0であるような配列です.
#
# また,'train_out'は学習用の出力データであり,('5000, 1024, 10')というサイズを持ちます.これは長さが1024からなる配列が5000個あり,それぞれが10種類の異なるChIP-seqの結果のカバレッジ値が格納されています.
# + colab_type="code" id="bBQVPyKxi-uE" outputId="7c950b79-5422-4dea-9889-f9d15294ca31" colab={"base_uri": "https://localhost:8080/", "height": 143}
import h5py
import numpy as np
# List every dataset stored in seq.h5 with its shape and dtype
# (train/valid/test inputs and outputs, plus target labels).
with h5py.File('seq.h5', 'r') as hf:
    for key in hf.keys():
        print(key, hf[key].shape, hf[key].dtype)
# + [markdown] colab_type="text" id="T7OkqzE-jXlq"
#
#
# ```
# (u'target_labels', (10,), dtype('S29'))
# (u'test_in', (500, 131072, 4), dtype('bool'))
# (u'test_out', (500, 1024, 10), dtype('<f2'))
# (u'train_in', (5000, 131072, 4), dtype('bool'))
# (u'train_out', (5000, 1024, 10), dtype('<f2'))
# (u'valid_in', (500, 131072, 4), dtype('bool'))
# (u'valid_out', (500, 1024, 10), dtype('<f2'))
# ```
#
#
# + [markdown] colab_type="text" id="NkAYTB2ZkEXr"
# h5py形式のファイルをnumpyデータとして扱うには,コピーする必要があります.以下のコードは'train_in'というキーに対応するテンソルデータをnumpyデータとして読み出し,そのデータの一部を表示します.
#
# 試しに最初のデータを取り出して,それの出力の値を表示してみます.
#
# 下のセルを実行してみてください.最初のデータの出力の三つの値を線グラフで出力します.(ここまでのセルを実行していてください).
# + colab_type="code" id="lik6qHPD4m9V" outputId="10cf0762-52a0-45da-ff84-0b4dfe32043a" colab={"base_uri": "https://localhost:8080/", "height": 340}
# %matplotlib inline
import matplotlib.pyplot as plt
# Load the first 100 training targets and plot the first sample's first
# three coverage tracks (1024 bins each) as bar charts.
with h5py.File('seq.h5') as hf:
    y = hf['train_out'][:100]
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 20
fig_size[1] = 5
for i in range(3):
    plt.bar(range(y.shape[1]), y[0,:,i])
# + [markdown] colab_type="text" id="9dB69Y47k-y_"
# ## Dilated Convolutionを用いた解析
#
# ### 配列解析の戦略
#
# 今回は配列データが入力であるような問題です.
#
# 配列データを扱うためには大きく3つの戦略があります.
#
# 一つ目は,配列中の順序情報は捨てて,配列をその特徴の集合とみなすことです.これはBag of Words(BoW)表現とよびます.このBoW表現は特徴に十分情報が含まれていれば強力な手法ですがDNA配列のような4種類の文字からなる配列やその部分配列だけではその特徴を捉えることは困難です.
#
# 二つ目は配列中の要素を左から右に順に読み込んでいき計算していく手法です.これは4章でも少し触れたRNNを用いて解析します.RNNは時刻毎に入力を一つずつ読み取り内部状態を更新していきます.RNNの問題点はその計算が逐次的であり計算量が配列長に比例するという点です.現在の計算機は計算を並列化することで高速化を達成していますがRNNは計算を並列化することが困難です.もう一つの問題は遠距離間の関係を捉えることが難しいという点です.RNNはその計算方式から,計算の途中結果を全て固定長の内部状態ベクトルに格納する必要があります.遠距離間の関係を捉えようとすると,多くの情報を覚えておかなければなりませんが状態ベクトルサイズは有限なので,遠距離間の関係を捉えることが困難となっていきます.
#
# 三つ目は配列データを1次元の画像とみなし,画像処理の時と同様にCNNを用いて解析する手法です.CNNはRNNの場合と違って各位置の処理を独立に実行できるため並列に処理することができます.
#
# 今回はこの3つ目の戦略,CNNを用いて解析する手法を採用します.また,Dilated Convolutionを使うことで各位置の処理は遠距離にある情報を直接読み取ることができます.次の章でDilated Convolutionについて詳しくみていきます.
#
#
#
#
# + [markdown] colab_type="text" id="RWl4yYm_nqg7"
# ### Dilated Convolution
#
# 従来の畳み込み層を使って配列解析をする場合を考えてみます.
# 以下の図のようにある位置の入力の情報は各層で隣接する位置からしか読み込まれません.どのくらい離れた位置から情報を取得するかはカーネルサイズによって決定され,カーネルサイズがKの時,Dだけ離れた距離にある情報を取得するためにはD/K層必要となります.今回の問題の場合Dは数百から数万,Kは3や5といった値ですので必要な層数も百から万といった数になってしまい現実的ではありません.
# + [markdown] colab_type="text" id="gJ2MdbaHneLk"
#
# 
#
# [WaveNet: A Generative Model for Raw Audio](https://deepmind.com/blog/wavenet-generative-model-raw-audio/)より引用
# + [markdown] colab_type="text" id="Gazys1FUoV4m"
# それに対し,Dilated Convolution(atrous convolutionやconvolution with holesともよばれます)は読み取る場所をずらしたところからうけとります.例えばDilation=4の場合,4だけ離れた位置から情報を受け取ります.このDilationを倍々にしていき,カーネルサイズを2とした場合,Dだけ離れた位置の情報を受取るには $\log_2 D$層だけ必要になります.今回のDが数百から数万の場合,10から20層程度あれば済むことになります.
#
# 今回はこのDilated Convolutionを使うことで遠距離にある情報を考慮できるモデルを作成します.
# + [markdown] colab_type="text" id="Vl5f4eonQGU9"
#
# 
#
# [WAVENET: A GENERATIVE MODEL FOR RAW AUDIO, blog](https://deepmind.com/blog/wavenet-generative-model-raw-audio/)より
#
# + [markdown] colab_type="text" id="p0bcCznko_wD"
# ### ブロック
#
# それでは最初に,ネットワークの全体を設計します.
# このネットワークは二つのブロックから構成されます.
#
# 1つ目のブロックは長さが$2^{17}$の配列を入力として長さが$2^{10}$のベクトルを出力とします.これにより入力の128 ($=2^{17}/2^{10}$)bpが出力の1つの位置に対応するようになります.これを実現しているのが,SqueezeBlockです.すなわち,SqueezeBlockは長さ131072bpからなるDNAの塩基配列を入力として受け取り,各フラグメントの長さに相当する128bp毎の情報が一つの値となるような畳込み処理を行います.結果として131072/128=1024の長さのベクトル列が出力されます.このベクトル列はフラグメント毎の特徴が一つのベクトルに圧縮されたものとみなすことができます.
#
# 二つ目のブロックは遠距離にある情報を考慮して各ベクトルの値を計算していく部分であり,DilatedBlockが担当します.DilatedBlockは,SqueezeBlockから出力された1024の長さのベクトル列を受け取り,Dilated Convolutionの仕組みを使うことで互いに離れた位置の情報を効率的に考慮した上で処理していき,入力と同じ1024の長さの出力を返します.この出力が,フラグメント毎に与えられたDNA関連タンパク質の結合可能性を表す数値(カバレッジ値)と一致するように学習を進めます.
#
# それでは,以下のコードを実行してみましょう.
#
# + colab_type="code" id="5M6BDmVdpLkE" colab={}
import chainer
import chainer.functions as F
import chainer.links as L
import cupy as cp
bc = 24 # base channel
# SqueezeBlock configuration: each row reduces the per-bin sequence length
# (128 -> 64 -> 16 -> 4 -> 1) while widening the channels.
default_squeeze_params = [
    # out_ch, kernel, stride, dropout
    [bc*2, 21, 2, 0], #1 128 -> 64
    [int(bc*2.5), 7, 4, 0.05], #2 64 -> 16
    [int(bc*3.2), 7, 4, 0.05], #3 16 -> 4
    [bc*4, 7, 4, 0.05] #4 4 -> 1
]
# DilatedBlock configuration: dilation doubles each layer (1..64) so the
# receptive field grows exponentially with depth.
default_dilated_params = [
    # out_ch, kernel, dilated, dropout
    [bc, 3, 1, 0.1],
    [bc, 3, 2, 0.1],
    [bc, 3, 4, 0.1],
    [bc, 3, 8, 0.1],
    [bc, 3, 16, 0.1],
    [bc, 3, 32, 0.1],
    [bc, 3, 64, 0.1]
]
class Net(chainer.Chain):
    """Coverage-prediction network.

    A stack of SqueezeBlocks compresses a one-hot DNA sequence of shape
    (B, 131072, 4) down to length 1024 (one position per 128 bp), then a
    stack of densely connected DilatedBlocks mixes long-range context, and
    a final 1x1 convolution emits ``n_targets`` coverage tracks per bin.
    """
    def __init__(self, squeeze_params=default_squeeze_params, dilated_params=default_dilated_params, n_targets=10):
        super(Net, self).__init__()
        self._n_squeeze = len(squeeze_params)
        self._n_dilated = len(dilated_params)
        with self.init_scope():
            in_ch = 4  # one-hot A/T/C/G input channels
            # Register squeeze blocks as s_0, s_1, ... so forward() can
            # look them up by name.
            for i, param in enumerate(squeeze_params):
                out_ch, kernel, stride, do_rate = param
                setattr(self, "s_{}".format(i), SqueezeBlock(in_ch, out_ch, kernel, stride, do_rate))
                in_ch = out_ch
            # Dilated blocks are densely connected: each consumes the
            # concatenation of all previous outputs, so in_ch accumulates.
            for i, param in enumerate(dilated_params):
                out_ch, kernel, dilated, do_rate = param
                setattr(self, "d_{}".format(i), DilatedBlock(in_ch, out_ch, kernel, dilated, do_rate))
                in_ch += out_ch
            # 1x1 convolution mapping the concatenated features to the
            # n_targets output tracks.
            self.l = L.ConvolutionND(1, None, n_targets, 1)
    def forward(self, x):
        # x : (B, X, 4)
        xp = cp.get_array_module(x)
        # Convolution layers expect channels-first: (B, 4, X).
        h = xp.transpose(x, (0, 2, 1))
        h = h.astype(xp.float32)
        for i in range(self._n_squeeze):
            h = self["s_{}".format(i)](h)
        # DenseNet-style: keep every intermediate output and feed the full
        # list to each dilated block.
        hs = [h]
        for i in range(self._n_dilated):
            h = self["d_{}".format(i)](hs)
            hs.append(h)
        h = self.l(F.concat(hs, axis=1))
        # Back to channels-last (B, 1024, n_targets).
        # NOTE(review): h is a chainer Variable here; xp.transpose appears
        # to dispatch to Variable.transpose -- confirm on current versions.
        h = xp.transpose(h, (0, 2, 1))
        return h
# + [markdown] colab_type="text" id="Kc3RNwK_qHzS"
# このネットワークは初期化時の引数としてSqueezeBlockに関するパラメータと,DilatedBlockに関するパラメータを受け取ります.
#
# それぞれ,出力チャンネル,カーネルサイズ,プーリング,ドロップアウト率の四つ組からなるリストと,出力チャンネル,カーネルサイズ,dilatedサイズ・ドロップアウト率の四つ組からなるリストを受け取ります.
# + [markdown] colab_type="text" id="s3T5pRubrlba"
# 次に,ブロックの定義をします.
# + colab_type="code" id="shOuWcBkrpOE" colab={}
import chainer
import chainer.functions as F
import chainer.links as L
import cupy as cp
class WNConvolutionND(L.ConvolutionND):
    """N-dimensional convolution with Weight Normalization.

    The weight is reparameterized as ``g * W / ||W||`` (Salimans & Kingma,
    2016): ``g`` holds a learnable per-output-channel scale and the
    direction comes from the normalized ``W``.
    """
    def __init__(self, *args, **kwargs):
        super(WNConvolutionND, self).__init__(*args, **kwargs)
        # One scale parameter per output channel.
        self.add_param('g', self.W.data.shape[0])
        # Initialize g to the current norm of each filter so the layer
        # initially behaves like the unnormalized convolution.
        norm = np.linalg.norm(self.W.data.reshape(
            self.W.data.shape[0], -1), axis=1)
        self.g.data[...] = norm
    def __call__(self, x):
        # Per-channel L2 norm of W, computed inside the graph so gradients
        # flow through the normalization.
        norm = F.batch_l2_norm_squared(self.W) ** 0.5
        channel_size = self.W.data.shape[0]
        norm_broadcasted = F.broadcast_to(
            F.reshape(norm, (channel_size, 1, 1)), self.W.data.shape)
        g_broadcasted = F.broadcast_to(
            F.reshape(self.g, (channel_size, 1, 1)), self.W.data.shape)
        # Convolve with the reparameterized weight g * W / ||W||.
        return F.convolution_nd(
            x, g_broadcasted * self.W / norm_broadcasted, self.b, self.stride,
            self.pad, self.cover_all, self.dilate)
class SqueezeBlock(chainer.Chain):
    """Strided weight-normalized convolution with a Gated Linear Unit.

    Used to shrink the sequence length (via ``stride``) while widening the
    channels. The convolution emits 2*out_ch channels which are split into
    a value half and a gate half: output = value * sigmoid(gate).
    """
    def __init__(self, in_ch, out_ch, kernel, stride, do_rate):
        super(SqueezeBlock, self).__init__()
        self.do_rate = do_rate
        with self.init_scope():
            # 'same'-style padding for an odd kernel.
            pad = kernel // 2
            # Double output channels: one half for values, one for gates.
            self.conv = WNConvolutionND(1, in_ch, out_ch*2, kernel, pad=pad, stride=stride)
    def forward(self, x):
        h = self.conv(x)
        # Split along the channel axis into (value, gate).
        h, g = F.split_axis(h, 2, 1)
        h = F.dropout(h * F.sigmoid(g), self.do_rate)
        return h
class DilatedBlock(chainer.Chain):
    """Dilated weight-normalized convolution with a Gated Linear Unit.

    Receives the list of ALL previous block outputs (DenseNet-style) and
    concatenates them along the channel axis before convolving. Setting
    ``pad=dilate`` keeps the output length equal to the input length for
    kernel size 3.
    """
    def __init__(self, in_ch, out_ch, kernel, dilate, do_rate):
        super(DilatedBlock, self).__init__()
        self.do_rate = do_rate
        with self.init_scope():
            self.conv = WNConvolutionND(1, in_ch, out_ch*2, kernel, pad=dilate, dilate=dilate)
    def forward(self, xs):
        # xs is a list of feature maps from earlier blocks.
        x = F.concat(xs, axis=1)
        h = self.conv(x)
        # Gated Linear Unit: value * sigmoid(gate).
        h, g = F.split_axis(h, 2, 1)
        h = F.dropout(h * F.sigmoid(g), self.do_rate)
        return h
# + [markdown] colab_type="text" id="fHZuRr36bxHv"
# 
# + [markdown] colab_type="text" id="RrTAARyW2AYQ"
# WeightNormalization[2]はパラメータの表現を長さと向きに分解して表現する手法で,今回の系列問題のような場合に使われる正規化法です.コード中ではWeightNormalizationが適用された畳み込み層である`WNConvolutionND`が定義されています.
#
# SqueezeBlockは配列を縮めていき,長さが$2^{17}$の配列を$2^{10}$に縮めるためのブロックです(上図).
# 1次元配列を扱うためWNConvolutionNDを使い,最初の引数で1次元配列であることを示す`1`を指定しています.
# また,活性化関数では$h = Wx * sigmoid(Ux)$と表されるGated Linear Unit[3]を利用しています.計算では効率化のため,WxとUxを別々に計算するのではなく2倍の出力チャンネル数を持つConvolutionを適用した後に出力結果をチャンネル方向に2つに分割し$(Wx, Ux)$,片方にsigmoid関数を適用した後,それらを要素毎にかけ合わせます.
#
# DilatedBlockはすでに長さ1024の長さになった配列に対し,Dilated Convolutionを使って遠距離にある情報も使って計算していくブロックです(上図).引数としてdilatedを受け取ります.Dilated Convolutionを使う場合は通常のConvolution層(今回はConvolutionNDだが,Convolution2Dも同様)の引数にdilatedを加えるだけで計算できます.
#
# また,DilatedBlockではDenseNet[4]と呼ばれる,以前の途中結果が全て次の層の入力として使われる手法を採用します(DilatedBlock内 forward()内の`concat`がそれに対応).これはニューラルネットワークで多くのスキップ接続を作ることで,層が増えても勾配が減衰せず,学習がしやすくなることを利用したものです.
#
#
# + [markdown] colab_type="text" id="y-ZdRuhSq2Rq"
# それでは,試しにネットワークを構築して,そこにサンプルデータを流してみましょう.
#
#
# + colab_type="code" id="DARrKIMurGiH" outputId="5435968a-95a9-4fc6-d1c6-5c5ee39a25dd" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Smoke-test the network: build an untrained Net and push one dummy batch
# through it to confirm the expected output shape (B, 1024, n_targets).
import numpy as np

n = Net()
size = 131072 # 128 * 1024
batchsize = 4
# One-hot style boolean input of shape (B, L, 4). Values are left
# uninitialized because only the output shape is checked here.
# Fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin ``bool`` is the supported spelling.
x = np.empty((batchsize, size, 4), dtype=bool)
y = n.forward(x)
print(y.shape)  # expected: (4, 1024, 10)
# + [markdown] colab_type="text" id="ydR6gwYCsATQ"
# ```
# (4, 1024, 10)
# ```
#
#
# + [markdown] colab_type="text" id="OyJ8lu_psGlk"
# ここで,もともとバッチサイズ(B)=4, 入力長(L)=131072, 入力チャンネル数(C)=4だった配列が計算後はB=4, L=1024, C=10の配列となりました.
# + [markdown] colab_type="text" id="vLNgEVh0vjOt"
# 今回予測するカバレッジ値は,フラグメント毎にDNA関連タンパク質がどの程度の頻度で結合したかを表すカウントデータであるとみなせます.そこで学習ではカウントデータに対する損失関数である対数ポアソン損失関数を利用します.
#
# 対数ポアソン損失関数を使う場合,モデルはポアソン分布の唯一のパラメータである平均を予測し,その予測された平均をもったポアソン分布を使った場合の学習データの尤度を計算します.そしてその尤度の最大化,それと同じである負の対数尤度の最小化を行います.この際,プログラム上では学習対象パラメータが含まれない項を無視しています.
# なお,この関数の最小値はそのままだと$0$にはならないので,最小値である$t \log t$をあらかじめひいておき,損失関数の最小値が$0$となるようにします.
# + colab_type="code" id="rgQmu0Pgvh0P" colab={}
import chainer.functions as F
import math
import sklearn
import numpy as np
def log_poisson_loss(log_x, t):
    """Mean log-Poisson loss between predicted log-rates and count targets.

    ``log_x`` is the model output interpreted as log of the Poisson mean.
    The negative log-likelihood term is ``exp(log_x) - t * log_x`` (terms
    without trainable parameters dropped). The constant ``t - t*log(t)``
    is subtracted so the minimum of the loss is exactly 0; ``np.ma.log``
    masks the log(0) entries where t == 0.
    """
    loss = F.mean(F.exp(log_x) - t * log_x)
    # Offset is computed on the CPU with masked log, then moved back to GPU.
    t = chainer.cuda.to_cpu(t.astype(np.float32))
    offset = F.mean(cp.array(t - t * np.ma.log(t)))
    return loss - offset
def log_r2_score(log_x, t):
    """Return the R^2 score of the predictions against targets ``t``.

    The model predicts in log space, so predictions are exponentiated back
    to the original count scale before scoring.
    """
    predicted_counts = F.exp(log_x)
    return F.r2_score(predicted_counts, t)
# + [markdown] colab_type="text" id="40kTUr3O2lu5"
# また,学習率の調整にCosineSchedulerを使います.ニューラルネットワークの学習では,徐々に学習率を小さくしていくと,より汎化性能の高い解を見つけられることがわかっています.ニューラルネットワークの学習の目的関数は多くの性能の悪い局所解があるため,最初は学習率を高くして局所解にはまらないようにして全体の中での良い解を探し,後半は徐々に学習率を0に近づけていき収束させるというものです.
# CosineSchedulerはCosine関数の0度から90度までの変化のように学習率を変化させます.また学習は初期が不安定なので最初のn_warmup回,学習率を0から初期学習率まで線形に増やすことも一般的です.今回は学習率が低めで学習も安定しているのでn_warmupは0としてあります.
#
# + colab_type="code" id="QvjM_C-z2o8m" colab={}
from chainer import training
import numpy as np
import math
class CosineScheduler(training.Extension):
    """Trainer extension that anneals an optimizer attribute (e.g. the
    learning rate) along a half-cosine curve after an optional linear
    warm-up phase.

    During the first ``n_warmups`` epochs the value grows linearly toward
    ``init_val``; afterwards it follows ``0.5 * init_val * (1 + cos(pi*p))``
    where ``p`` is the decay progress in [0, 1].
    """

    def __init__(self, attr='lr', init_val=0.0001, n_decays=200, n_warmups=3, target=None, optimizer=None):
        self._attr = attr
        self._target = target
        self._optimizer = optimizer
        self._min_loss = None
        self._last_value = None
        self._init_val = init_val
        # Warm-up epochs are carved out of the total decay budget.
        self._n_decays = n_decays - n_warmups
        self._decay_count = 0
        self._n_warmups = n_warmups

    def __call__(self, trainer):
        """Set the scheduled value for the current epoch on the optimizer."""
        epoch = trainer.updater.epoch
        optimizer = self._get_optimizer(trainer)
        self._update_value(optimizer, self._value_at(epoch))

    def _value_at(self, epoch):
        # Warm-up: linear ramp from init_val/(n_warmups+1) up to init_val.
        if epoch < self._n_warmups:
            return self._init_val / (self._n_warmups + 1) * (epoch + 1)
        # Decay: half-cosine from init_val down toward zero.
        progress = (epoch - self._n_warmups) / self._n_decays
        return 0.5 * self._init_val * (1 + math.cos(math.pi * progress))

    def _get_optimizer(self, trainer):
        # Fall back to the trainer's 'main' optimizer when none was given.
        return self._optimizer or trainer.updater.get_optimizer('main')

    def _update_value(self, optimizer, value):
        setattr(optimizer, self._attr, value)
        self._last_value = value
# + [markdown] colab_type="text" id="56jZaaD82p-X"
# 最後に学習中に訓練データに意味を変えない変化を加えるData Augmentationを適用します.これは画像において回転させたり,平行移動させたりする場合と同じです.
# 今回は128bp毎にカバレッジ値を予測していますが,数塩基(例えば4~8など)移動したとしてもカバレッジ値は同じ程度になることが期待されます.そこで最大max_shift分だけ配列を前後にシフトします(完全にランダムな塩基配列を余った部分に入れると実際の塩基配列の分布と変わる可能性があるのでここではroll()関数を巡回シフトしています).
# + colab_type="code" id="UX2NE83o274Y" colab={}
import chainer
import random
class PreprocessedDataset(chainer.dataset.DatasetMixin):
    """Dataset wrapper applying random-shift data augmentation.

    Each input sequence is circularly shifted along axis 0 by a random
    offset drawn uniformly from [-max_shift, max_shift]; the target is
    returned unchanged.
    """

    def __init__(self, xs, ys, max_shift):
        self.xs = xs
        self.ys = ys
        self.max_shift = max_shift

    def __len__(self):
        return len(self.xs)

    def get_example(self, i):
        """Return the i-th (input, target) pair with shift augmentation."""
        shift = random.randint(-self.max_shift, self.max_shift)
        # Circular shift keeps the base distribution realistic (no random
        # filler bases are introduced at the edges).
        augmented = np.roll(self.xs[i], shift, axis=0)
        return augmented, self.ys[i]
# + [markdown] colab_type="text" id="9RCVvBw0v9i-"
# これで全部準備ができました.残りはChainerのTrainerを改造して学習するだけです.以下のコードを実行してください.
#
# 元々のデータ全体では学習に時間がかかるので,データ/`ratio`分だけを学習,検証用データとして利用します.今回`ratio`は1に設定されています.この場合30分程度で学習が完了します.短い時間で試したい方はratio=1をratio=10やratio=20として実験してみてください.
#
# + colab_type="code" id="b1_e0bE7wB48" outputId="dc2ce3b1-a66f-4c42-935c-223dab2c9685" colab={"base_uri": "https://localhost:8080/", "height": 917}
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from chainer.training import extensions
from chainer import training
import h5py
# Open the HDF5 dataset; the h5py Dataset objects are sliced lazily below.
ml_h5 = h5py.File('seq.h5')
train_x = ml_h5['train_in']
train_y = ml_h5['train_out']
valid_x = ml_h5['valid_in']
valid_y = ml_h5['valid_out']
test_x = ml_h5['test_in']
test_y = ml_h5['test_out']
# Use only 1/ratio of the data; raise ratio (e.g. 10 or 20) for a quicker run.
ratio = 1
train_x = train_x[:len(train_x)//ratio]
train_y = train_y[:len(train_y)//ratio]
valid_x = valid_x[:len(valid_x)//ratio]
valid_y = valid_y[:len(valid_y)//ratio]
# Augmentation: random circular shift of up to +/-5 bases per sample.
max_shift_for_data_augmentation = 5
train = PreprocessedDataset(train_x, train_y, max_shift_for_data_augmentation)
val = chainer.datasets.TupleDataset(valid_x, valid_y)
batchsize = 8
train_iter = chainer.iterators.SerialIterator(train, batchsize)
val_iter = chainer.iterators.SerialIterator(val, batchsize, repeat=False, shuffle=False)
# Classifier wrapper with the log-Poisson loss and R^2 as the "accuracy".
model = L.Classifier(Net(), lossfun=log_poisson_loss, accfun=log_r2_score)
lr = 0.001
optimizer = chainer.optimizers.Adam(alpha=lr, beta1=0.97, beta2=0.98)
optimizer.setup(model)
# Clip gradients to stabilize training.
optimizer.add_hook(chainer.optimizer_hooks.GradientClipping(threshold=0.01))
updater = training.updaters.StandardUpdater(
    train_iter, optimizer, device=0)  # device=0 -> first GPU
n_epochs = 10
n_warmups = 0  # training is stable here, so no warm-up epochs
out = "out"
trainer = training.Trainer(updater, (n_epochs, 'epoch'), out=out)
# Cosine-anneal Adam's alpha once per epoch.
trainer.extend(CosineScheduler(attr='alpha', init_val=lr, n_decays=n_epochs, n_warmups=n_warmups), trigger=(1, 'epoch'))
trainer.extend(extensions.Evaluator(val_iter, model, device = 0))
trainer.extend(extensions.LogReport(trigger=(0.2, 'epoch')))
# Snapshot the model each epoch as out/model_epoch_N.
trainer.extend(extensions.snapshot_object(model, 'model_epoch_{.updater.epoch}'), trigger=(1, 'epoch'))
trainer.extend(extensions.PrintReport(
    ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']), trigger = (0.1, 'epoch'))
# trainer.extend(extensions.ProgressBar())
trainer.run()
# + [markdown] colab_type="text" id="mN4nDWQW3Dki"
# 学習が成功したならば,ディレクトリのout以下に学習されたモデルが出力されているはずです.実際にモデルが出力されているのかを確認しましょう.
# + colab_type="code" id="hfT1yyTl3C9X" outputId="3e2fc02b-8924-4703-dd64-ae99fd04073e" colab={"base_uri": "https://localhost:8080/", "height": 233}
# !ls -l out/
# + [markdown] colab_type="text" id="L4eQCDXG3L6e"
# 次に,学習したモデルを用いてテストデータに対しても予測してみます.次のようにして学習が終わったモデルを読み込み,テストデータに対してモデルを適用してみましょう.
# + colab_type="code" id="UfJ7ZEQX3UQS" outputId="17722083-b2b4-4114-f4c0-138e7ba82987" colab={"base_uri": "https://localhost:8080/", "height": 89}
import chainer
import chainer.links as L
# %matplotlib inline
import matplotlib.pyplot as plt
# Reload the snapshot saved after the final training epoch.
model_n_epoch = 10
out_dir = 'out'
model = L.Classifier(Net())
chainer.serializers.load_npz('{}/model_epoch_{}'.format(out_dir, model_n_epoch), model)
predictor = model.predictor
print(len(test_x))
# Predict on the first test sample only; exp() converts the log-rate
# output back to the count scale.
with chainer.no_backprop_mode():
    test_y_estimated = F.exp(predictor(test_x[:1]))
    test_y = test_y[:1]
print(test_y_estimated.shape)
print(test_y_estimated[0,:,0])
# + [markdown] colab_type="text" id="dlA0DLxY3atL"
# 結果を抜粋して表示してみましょう.ここでは1つ目(i=0)の出力について正解と推定結果を出力しています.今回の場合でも,学習データを絞り(クラス数を10とした),学習回数も少ないですが,ピークを捉えられていることがわかると思います.
# + colab_type="code" id="nN4rkeuU7rjV" outputId="3e7300c6-abab-4729-bf04-3c56233a1288" colab={"base_uri": "https://localhost:8080/", "height": 630}
# Overlay estimated vs. observed coverage for the first target track (i=0)
# of the first test sample.
y = test_y_estimated.data
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 20
fig_size[1] = 10
i = 0
b1 = plt.bar(range(y.shape[1]), y[0,:,i])
b2 = plt.bar(range(y.shape[1]), test_y[0,:,i])
plt.legend((b1, b2), ('estimated', 'observed'))
# + [markdown] colab_type="text" id="pmaz68ZrsFOb"
# 時間に余裕があれば学習のn_epochsを10から30~50程度に増やしたり,層数を増やしたり,チャンネル数を増やしたりして,より高精度なモデルが学習できるのかを調べてみましょう.
#
# + [markdown] colab_type="text" id="7ThNVkbDGWrN"
#
#
# * [1] "Sequential regulatory activity prediction across chromosomes with convolutional neural networks", <NAME> and et al., Genome Res. 2018. 28: 739-750
# * [2] "Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks", T. Salimans and et al., arXiv:1602.07868
# * [3] "Language Modeling with Gated Convolutional Networks", <NAME> and et al., arXiv:1612.08083
# * [4] "Densely Connected Convolutional Networks", <NAME>, and et al., CVPR 2017
|
notebooks/07_DNA_Sequence_Data_Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Writing Custom Dataset Exporters
#
# This recipe demonstrates how to write a [custom DatasetExporter](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#custom-formats) and use it to export a FiftyOne dataset to disk in your custom format.
# ## Setup
#
# If you haven't already, install FiftyOne:
# !pip install fiftyone
# In this recipe we'll use the [FiftyOne Dataset Zoo](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/zoo_datasets.html) to download the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) to use as sample data to feed our custom exporter.
#
# Behind the scenes, FiftyOne uses either the
# [TensorFlow Datasets](https://www.tensorflow.org/datasets) or
# [TorchVision Datasets](https://pytorch.org/docs/stable/torchvision/datasets.html) libraries to wrangle the datasets, depending on which ML library you have installed.
#
# You can, for example, install PyTorch as follows:
# !pip install torch torchvision
# ## Writing a DatasetExporter
#
# FiftyOne provides a [DatasetExporter](https://voxel51.com/docs/fiftyone/api/fiftyone.utils.data.html#fiftyone.utils.data.exporters.DatasetExporter) interface that defines how it exports datasets to disk when methods such as [Dataset.export()](https://voxel51.com/docs/fiftyone/api/fiftyone.core.html#fiftyone.core.dataset.Dataset.export) are used.
#
# `DatasetExporter` itself is an abstract interface; the concrete interface that you should implement is determined by the type of dataset that you are exporting. See [writing a custom DatasetExporter](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#custom-formats) for full details.
#
# In this recipe, we'll write a custom [LabeledImageDatasetExporter](https://voxel51.com/docs/fiftyone/api/fiftyone.utils.data.html#fiftyone.utils.data.exporters.LabeledImageDatasetExporter) that can export an image classification dataset to disk in the following format:
#
# ```
# <dataset_dir>/
# data/
# <filename1>.<ext>
# <filename2>.<ext>
# ...
# labels.csv
# ```
#
# where `labels.csv` is a CSV file that contains the image metadata and associated labels in the following format:
#
# ```
# filepath,size_bytes,mime_type,width,height,num_channels,label
# <filepath>,<size_bytes>,<mime_type>,<width>,<height>,<num_channels>,<label>
# <filepath>,<size_bytes>,<mime_type>,<width>,<height>,<num_channels>,<label>
# ...
# ```
# Here's the complete definition of the `DatasetExporter`:
# +
import csv
import os
import fiftyone as fo
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
class CSVImageClassificationDatasetExporter(foud.LabeledImageDatasetExporter):
    """Exporter for image classification datasets whose labels and image
    metadata are stored on disk in a CSV file.

    Datasets of this type are exported in the following format:

        <dataset_dir>/
            data/
                <filename1>.<ext>
                <filename2>.<ext>
                ...
            labels.csv

    where ``labels.csv`` is a CSV file in the following format::

        filepath,size_bytes,mime_type,width,height,num_channels,label
        <filepath>,<size_bytes>,<mime_type>,<width>,<height>,<num_channels>,<label>
        <filepath>,<size_bytes>,<mime_type>,<width>,<height>,<num_channels>,<label>
        ...

    Args:
        export_dir: the directory to write the export
    """

    def __init__(self, export_dir):
        super().__init__(export_dir)
        self._data_dir = None
        self._labels_path = None
        self._filename_maker = None
        self._labels = None

    @property
    def requires_image_metadata(self):
        """Whether this exporter requires
        :class:`fiftyone.core.metadata.ImageMetadata` instances for each sample
        being exported.
        """
        return True

    @property
    def label_cls(self):
        """The :class:`fiftyone.core.labels.Label` class(es) exported by this
        exporter.

        This can be any of the following:

        -   a :class:`fiftyone.core.labels.Label` class. In this case, the
            exporter directly exports labels of this type
        -   a dict mapping keys to :class:`fiftyone.core.labels.Label` classes.
            In this case, the exporter can handle label dictionaries with
            value-types specified by this dictionary. Not all keys need be
            present in the exported label dicts
        -   ``None``. In this case, the exporter makes no guarantees about the
            labels that it can export
        """
        return fo.Classification

    def setup(self):
        """Performs any necessary setup before exporting the first sample in
        the dataset.

        This method is called when the exporter's context manager interface is
        entered, :func:`DatasetExporter.__enter__`.
        """
        self._data_dir = os.path.join(self.export_dir, "data")
        self._labels_path = os.path.join(self.export_dir, "labels.csv")
        self._labels = []
        self._filename_maker = fou.UniqueFilenameMaker(
            output_dir=self._data_dir, default_ext=".jpg"
        )

    def export_sample(self, image_or_path, label, metadata):
        """Exports the given sample to the dataset.

        Args:
            image_or_path: an image or the path to the image on disk
            label: an instance of :meth:`label_cls`, or a dictionary mapping
                field names to :class:`fiftyone.core.labels.Label` instances,
                or ``None`` if the sample is unlabeled
            metadata (None): a :class:`fiftyone.core.metadata.ImageMetadata`
                instance for the sample. Only required when
                :meth:`requires_image_metadata` is ``True``
        """
        #
        # The `_export_image_or_path()` utility is provided by the base
        # `LabeledImageDatasetExporter` class. Its function is as follows:
        #
        # - If `image_or_path` is an image, it is written to `export_dir` using
        #   the provided `UniqueFilenameMaker` to generate a unique filename
        #
        # - If `image_or_path` is the path to an image, it is directly copied
        #   into `export_dir` with the same filename, unless a conflict would
        #   occur, in which case an index of the form `-%d` is added to the
        #   filename
        #
        out_image_path = self._export_image_or_path(
            image_or_path, self._filename_maker
        )
        self._labels.append((
            out_image_path,
            metadata.size_bytes,
            metadata.mime_type,
            metadata.width,
            metadata.height,
            metadata.num_channels,
            label.label,  # here, `label` is a `Classification` instance
        ))

    def close(self, *args):
        """Performs any necessary actions after the last sample has been
        exported.

        This method is called when the exporter's context manager interface is
        exited, :func:`DatasetExporter.__exit__`.

        Args:
            *args: the arguments to :func:`DatasetExporter.__exit__`
        """
        # Ensure the base output directory exists
        basedir = os.path.dirname(self._labels_path)
        if basedir and not os.path.isdir(basedir):
            os.makedirs(basedir)

        # Write the labels CSV file.
        # Fix: per the `csv` module docs, files passed to `csv.writer` must
        # be opened with newline="" to avoid extra blank rows on Windows.
        with open(self._labels_path, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow([
                "filepath",
                "size_bytes",
                "mime_type",
                "width",
                "height",
                "num_channels",
                "label",
            ])
            for row in self._labels:
                writer.writerow(row)
# -
# ## Generating a sample dataset
# In order to use `CSVImageClassificationDatasetExporter`, we need some labeled image samples to work with.
#
# Let's use some samples from the test split of CIFAR-10:
# +
import fiftyone.zoo as foz
# Number of CIFAR-10 test samples to export in this recipe.
num_samples = 1000
#
# Load `num_samples` from CIFAR-10
#
# This command will download the test split of CIFAR-10 from the web the first
# time it is executed, if necessary
#
cifar10_test = foz.load_zoo_dataset("cifar10", split="test")
# View restricted to the first `num_samples` samples.
samples = cifar10_test.limit(num_samples)
# -
# Print summary information about the samples (dataset name, fields, size)
print(samples)
# Print a single sample to see its fields and label
print(samples.first())
# ## Exporting a dataset
# With our samples and `DatasetExporter` in-hand, exporting the samples to disk in our custom format is as simple as follows:
# +
# Destination directory for the custom-format export.
export_dir = "/tmp/fiftyone/custom-dataset-exporter"
# Export the dataset using the custom exporter defined above
print("Exporting %d samples to '%s'" % (len(samples), export_dir))
exporter = CSVImageClassificationDatasetExporter(export_dir)
samples.export(dataset_exporter=exporter)
# -
# Let's inspect the contents of the exported dataset to verify that it was written in the correct format:
# !ls -lah /tmp/fiftyone/custom-dataset-exporter
# !ls -lah /tmp/fiftyone/custom-dataset-exporter/data | head -n 10
# !head -n 10 /tmp/fiftyone/custom-dataset-exporter/labels.csv
# ## Cleanup
#
# You can cleanup the files generated by this recipe by running:
# !rm -rf /tmp/fiftyone
|
docs/source/recipes/custom_exporter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module 1: Introduction to Python
#
# Python is a high-level programming language with extensive libraries available to perform various data analysis tasks.
# The following tutorial contains examples of using various data types, functions, and library modules available in the standard Python library. The notebook can be downloaded from http://www.cse.msu.edu/~ptan/dmbook/tutorials/tutorial1/tutorial1.ipynb. Read the step-by-step instructions below carefully. To execute the code, click on each cell below and press the SHIFT-ENTER keys simultaneously.
#
# We begin with some basic information about Python:
# 1. Python is an interpreted language, unlike other high-level programming languages such as C or C++. You only need to submit your Python program to an interpreter for execution, without having to explicitly compile and link the code first.
#
# 2. Python is a dynamically typed language, which means variable names are bound to their respective types during execution time. You do not have to explicitly declare the type of a variable before using it in the code unlike Java, C++, and other statically-typed languages.
#
# 3. Instead of using braces '{' and '}', Python uses whitespace indentation to group together related statements in loops or other control-flow statements.
#
# 4. Python uses the hash character ('#') to precede single-line comments. Triple-quoted strings (''') are commonly used to denote multi-line comments (even though it is not part of the standard Python language) or docstring of functions.
#
# 5. Python uses pass by reference (instead of pass by value) when assigning a variable to another (e.g., a = b) or when passing an object as input argument to a function. Thus, any modification to the assigned variable or to the input argument within the function will affect the original object.
#
# 6. Python uses `None` to denote a null object (e.g., a = None). You do not have to terminate each statement with a terminating character (such as a semicolon) unlike other languages.
#
# 7. You may access the variables or functions defined in another Python program file using the `import` command. This is analogous to the `import` command in Java or the `#include` command in C or C++.
# ## 1.1 Elementary Data Types
#
# The standard Python library provides support for various elementary data types, including integers, booleans, floating points, and strings. A summary of the data types is shown in the table below.
#
# | | Data Type | Example |
# |:--------|:-----------------:|:------------|
# |Number | Integer | x = 4 |
# | | Long integer | x = 15L |
# | | Floating point | x = 3.142 |
# | | Boolean | x = True |
# |Text | Character | x = 'c' |
# | | String | x = "this" or x = 'this' |
# +
# Demonstrate Python's elementary data types and the type() builtin.
x = 4 # integer
print(x, type(x))
y = True # boolean (True, False)
print(y, type(y))
z = 3.7 # floating point
print(z, type(z))
s = "This is a string" # string
print(s, type(s))
# -
# The following are some of the arithmetic operations available for manipulating integers and floating point numbers
# +
# Arithmetic operators on integers and floats, including the augmented
# assignment forms (+=, *=).
x = 4 # integer
x1 = x + 4 # addition
x2 = x * 3 # multiplication
x += 2 # equivalent to x = x + 2
x3 = x
x *= 3 # equivalent to x = x * 3
x4 = x
x5 = x % 4 # modulo (remainder) operator
z = 3.7 # floating point number
z1 = z - 2 # subtraction
z2 = z / 3 # division
z3 = z // 3 # integer division
z4 = z ** 2 # square of z
z5 = z4 ** 0.5 # square root
z6 = pow(z,2) # equivalent to square of z
z7 = round(z) # rounding z to its nearest integer
z8 = int(z) # type casting float to int
print(x,x1,x2,x3,x4,x5)
print(z,z1,z2,z3,z4)
print(z5,z6,z7,z8)
# -
# The following are some of the functions provided by the math module for integers and floating point numbers
# +
import math
x = 4
print(math.sqrt(x)) # sqrt(4) = 2
print(math.pow(x,2)) # 4**2 = 16 (returns a float)
print(math.exp(x)) # exp(4) = 54.6
print(math.log(x,2)) # log based 2 (default is natural logarithm)
print(math.fabs(-4)) # absolute value
print(math.factorial(x)) # 4! = 4 x 3 x 2 x 1 = 24
z = 0.2
print(math.ceil(z)) # ceiling function
print(math.floor(z)) # floor function
print(math.trunc(z)) # truncate function
z = 3*math.pi # math.pi = 3.141592653589793
print(math.sin(z)) # sine function (sin(3*pi) is ~0 up to floating point error)
print(math.tanh(z)) # hyperbolic tangent function
x = math.nan # not a number
print(math.isnan(x))
x = math.inf # infinity
print(math.isinf(x))
# -
# The following are some of the logical operations available for booleans
# +
y1 = True
y2 = False
print(y1 and y2) # logical AND
print(y1 or y2) # logical OR
print(y1 and not y2) # logical NOT
# -
# The following are some of the operations and functions for manipulating strings
# +
s1 = "This"
print(s1[1:]) # print all characters after the first (the last three here)
print(len(s1)) # get the string length
print("Length of string is " + str(len(s1))) # type casting int to str
print(s1.upper()) # convert to upper case
print(s1.lower()) # convert to lower case
s2 = "This is a string"
words = s2.split(' ') # split the string into words
print(words[0])
print(s2.replace('a','another')) # replace "a" with "another"
print(s2.replace('is','at')) # replace "is" with "at" (note: also matches the "is" inside "This")
print(s2.find("a")) # find the position of "a" in s2
print(s1 in s2) # check if s1 is a substring of s2
print(s1 == 'This') # equality comparison
print(s1 < 'That') # inequality comparison (lexicographic order)
print(s2 + " too") # string concatenation
print((s1 + " ")* 3) # replicate the string 3 times
# -
# ## 1.2 Compound Data Types
#
# The following examples show how to create and manipulate a list object
# +
intlist = [1, 3, 5, 7, 9]
print(type(intlist))
print(intlist)
intlist2 = list(range(0,10,2)) # range[startvalue, endvalue, stepsize]; the end value is exclusive
print(intlist2)
print(intlist[2]) # get the third element of the list
print(intlist[:2]) # get the first two elements
print(intlist[2:]) # get the last three elements of the list
print(len(intlist)) # get the number of elements in the list
print(sum(intlist)) # sums up elements of the list
intlist.append(11) # insert 11 to end of the list
print(intlist)
print(intlist.pop()) # remove and return the last element of the list
print(intlist)
print(intlist + [11,13,15]) # concatenate two lists (creates a new list)
print(intlist * 3) # replicate the list
intlist.insert(2,4) # insert item 4 at index 2
print(intlist)
intlist.sort(reverse=True) # sort elements in descending order (in place)
print(intlist)
# +
mylist = ['this', 'is', 'a', 'list']
print(mylist)
print(type(mylist))
print("list" in mylist) # check whether "list" is in mylist
print(mylist[2]) # show the 3rd element of the list
print(mylist[:2]) # show the first two elements of the list
print(mylist[2:]) # show the last two elements of the list
mylist.append("too") # insert element to end of the list
separator = " "
print(separator.join(mylist)) # merge all elements of the list into a string
mylist.remove("is") # remove the first occurrence of the element from the list
print(mylist)
# -
# The following examples show how to create and manipulate a dictionary object
# +
abbrev = {}
abbrev['MI'] = "Michigan"
abbrev['MN'] = "Minnesota"
abbrev['TX'] = "Texas"
abbrev['CA'] = "California"
print(abbrev)
print(abbrev.keys()) # get the keys of the dictionary
print(abbrev.values()) # get the values of the dictionary
print(len(abbrev)) # get number of key-value pairs
print(abbrev.get('MI')) # get() returns None (or a supplied default) for missing keys
print("FL" in abbrev)
print("CA" in abbrev)
keys = ['apples', 'oranges', 'bananas', 'cherries']
values = [3, 4, 2, 10]
fruits = dict(zip(keys, values)) # pair up the two lists into key-value pairs
print(fruits)
print(sorted(fruits)) # sort keys of dictionary
from operator import itemgetter
print(sorted(fruits.items(), key=itemgetter(0))) # sort by key of dictionary
print(sorted(fruits.items(), key=itemgetter(1))) # sort by value of dictionary
# -
# The following examples show how to create and manipulate a tuple object. Unlike a list, a tuple object is immutable, i.e., they cannot be modified after creation.
# +
MItuple = ('MI', 'Michigan', 'Lansing')
CAtuple = ('CA', 'California', 'Sacramento')
TXtuple = ('TX', 'Texas', 'Austin')
print(MItuple)
print(MItuple[1:])
states = [MItuple, CAtuple, TXtuple] # this will create a list of tuples
print(states)
print(states[2])
print(states[2][:])
print(states[2][1:])
states.sort(key=lambda state: state[2]) # sort the states by their capital cities
print(states)
# -
# ## 1.3 Control Flow Statements
#
# Similar to other programming languages, the control flow statements in Python include if, for, and while statements. Examples on how to use these statements are shown below.
# +
# using if-else statement
x = 10
if x % 2 == 0:
    print("x =", x, "is even")
else:
    print("x =", x, "is odd")
if x > 0:
    print("x =", x, "is positive")
elif x < 0:
    print("x =", x, "is negative")
else:
    print("x =", x, "is neither positive nor negative")
# +
# using for loop with a list
mylist = ['this', 'is', 'a', 'list']
for word in mylist:
    print(word.replace("is", "at"))
mylist2 = [len(word) for word in mylist] # number of characters in each word
print(mylist2)
# using for loop with list of tuples
states = [('MI', 'Michigan', 'Lansing'),('CA', 'California', 'Sacramento'),
          ('TX', 'Texas', 'Austin')]
sorted_capitals = [state[2] for state in states]
sorted_capitals.sort()
print(sorted_capitals)
# using for loop with dictionary
fruits = {'apples': 3, 'oranges': 4, 'bananas': 2, 'cherries': 10}
fruitnames = [k for (k,v) in fruits.items()]
print(fruitnames)
# +
# using while loop
mylist = list(range(-10,10))
print(mylist)
i = 0
while (mylist[i] < 0): # assumes at least one non-negative element exists
    i = i + 1
print("First non-negative number:", mylist[i])
# -
# ## 1.4 User-Defined Functions
#
# You can create your own functions in Python, which can be named or unnamed. Unnamed functions are defined using the lambda keyword as shown in the previous example for sorting a list of tuples.
# +
myfunc = lambda x: 3*x**2 - 2*x + 3 # example of an unnamed quadratic function
print(myfunc(2)) # 3*4 - 2*2 + 3 = 11
# +
import math
# The following function will discard missing values from a list
def discard(inlist, sortFlag=False):
    """Return the entries of inlist that are not NaN.

    If sortFlag is True (default False), the surviving entries are
    returned in ascending order; otherwise input order is preserved.
    """
    kept = [value for value in inlist if not math.isnan(value)]
    if sortFlag:
        kept.sort()
    return kept
mylist = [12, math.nan, 23, -11, 45, math.nan, 71]
print(discard(mylist,True))
# -
# ## 1.5 File I/O
#
# You can read and write data from a list or other objects to a file.
# +
states = [('MI', 'Michigan', 'Lansing'),('CA', 'California', 'Sacramento'),
          ('TX', 'Texas', 'Austin'), ('MN', 'Minnesota', 'St Paul')]
# Write one comma-separated line per state; the with-block closes the file automatically
with open('states.txt', 'w') as f:
    f.write('\n'.join('%s,%s,%s' % state for state in states))
# Read the file back and parse each line
with open('states.txt', 'r') as f:
    for line in f:
        fields = line.split(sep=',') # split each line into its respective fields
        print('State=',fields[1],'(',fields[0],')','Capital:', fields[2])
# -
# ## 1.6 Common Libraries
#
# set_trace for debugging
# +
from pdb import set_trace
x = [1,2,3]
set_trace() # drops into the interactive pdb debugger at this point
x = [1,2]
set_trace()
print(x)
# -
# Counter
# +
from collections import Counter
x = [1,2,1,3,2,2,3]
print(Counter(x)) # tallies how many times each value appears
# -
# pandas dataframe
# +
import pandas as pd
dic = {"x":[1,2,3],"y":[2,4,6],"z":[5,2,1]}
df = pd.DataFrame(dic) # each dict key becomes a column
set_trace()
print(df.columns)
# NOTE(review): chained assignment below can trigger SettingWithCopyWarning;
# df.loc[1, 'y'] = 3 is the recommended form
df['y'][1] = 3
df2 = df.iloc[2] # select a single row by position (returns a Series)
df3 = df.iloc[0:2] # select the first two rows
df4 = df[["z","x"]] # select (and reorder) columns
df5 = df["y"] # select a single column as a Series
set_trace()
print(df)
# -
# numpy for math calculation
# +
import numpy as np
x = [[1,2,3],[2,3,6]]
y = np.array(x) # convert the nested list to a 2x3 array
y = y + 1 # elementwise addition (scalar is broadcast)
y = y * 2 # elementwise multiplication
print(y)
# -
# ### Exercise
# Code a sorting algorithm (ascending order)
# +
def my_sort(x):
    """Return a new list containing the elements of x in ascending order.

    Reference solution to the exercise: implements insertion sort by hand
    (the built-in sorted()/list.sort() are deliberately not used).
    The input list is left unmodified.
    """
    result = []
    for item in x:
        # Walk back from the end of the sorted prefix to find where
        # item belongs, then insert it there (stable for equal keys).
        pos = len(result)
        while pos > 0 and result[pos - 1] > item:
            pos -= 1
        result.insert(pos, item)
    return result
x = [4,2,1,3,6,4,3,9,1]
sorted_x = my_sort(x)
print(sorted_x)
# -
|
tutorial/tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## TVP-VAR, MCMC, and sparse simulation smoothing
# +
# %matplotlib inline
from importlib import reload
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.stats import invwishart, invgamma
# Get the macro dataset (quarterly US macroeconomic data, 1959Q1-2009Q3)
dta = sm.datasets.macrodata.load_pandas().data
dta.index = pd.date_range('1959Q1', '2009Q3', freq='QS')
# -
# ### Background
#
# Bayesian analysis of linear Gaussian state space models via Markov chain Monte Carlo (MCMC) methods has become both commonplace and relatively straightforward in recent years, due especially to advances in sampling from the joint posterior of the unobserved state vector conditional on the data and model parameters (see especially <NAME> (1994), <NAME> Shephard (1995), and <NAME> Koopman (2002)). This is particularly useful for Gibbs sampling MCMC approaches.
#
# While these procedures make use of the forward/backward application of the recursive Kalman filter and smoother, another recent line of research takes a different approach and constructs the posterior joint distribution of the entire vector of states at once - see in particular Chan and Jeliazkov (2009) for an econometric time series treatment and McCausland et al. (2011) for a more general survey. In particular, the posterior mean and precision matrix are constructed explicitly, with the latter a sparse band matrix. Advantage is then taken of efficient algorithms for Cholesky factorization of sparse band matrices; this reduces memory costs and can improve performance. Following McCausland et al. (2011), we refer to this method as the "Cholesky Factor Algorithm" (CFA) approach.
#
# The CFA-based simulation smoother has some advantages and some drawbacks compared to that based on the more typical Kalman filter and smoother (KFS).
#
# **Advantages of CFA**:
#
# - Derivation of the joint posterior distribution is relatively straightforward and easy to understand.
# - In some cases can be both faster and less memory-intensive than the KFS approach
# - In the Appendix at the end of this notebook, we briefly discuss the performance of the two simulation smoothers for the TVP-VAR model. In summary: simple tests on a single machine suggest that for the TVP-VAR model, the CFA and KFS implementations in Statsmodels have about the same runtimes, while both implementations are about twice as fast as the replication code, written in Matlab, provided by Chan and Jeliazkov (2009).
#
# **Drawbacks of CFA**:
#
# The main drawback is that this method has not (at least so far) reached the generality of the KFS approach. For example:
#
# - It can not be used with models that have reduced-rank error terms in the observation or state equations.
# - One implication of this is that the typical state space model trick of including identities in the state equation to accommodate, for example, higher-order lags in autoregressive models is not applicable. These models can still be handled by the CFA approach, but at the cost of requiring a slightly different implementation for each lag that is included.
# - As an example, standard ways of representing ARMA and VARMA processes in state space form do include identities in the observation and/or state equations, and so the basic formulas presented in Chan and Jeliazkov (2009) do not apply immediately to these models.
# - Less flexibility is available in the state initialization / prior.
# ### Implementation in Statsmodels
#
# A CFA simulation smoother along the lines of the basic formulas presented in Chan and Jeliazkov (2009) has been implemented in Statsmodels.
#
# **Notes**:
#
# - Therefore, the CFA simulation smoother in Statsmodels so-far only supports the case that the state transition is truly a first-order Markov process (i.e. it does not support a p-th order Markov process that has been stacked using identities into a first-order process).
# - By contrast, the KFS smoother in Statsmodels is fully general and can be used for any state space model, including those with stacked p-th order Markov processes or other identities in the observation and state equations.
#
# Either a KFS or the CFA simulation smoothers can be constructed from a state space model using the `simulation_smoother` method. To show the basic idea, we first consider a simple example.
# #### Local level model
#
# A local level model decomposes an observed series $y_t$ into a persistent trend $\mu_t$ and a transitory error component
#
# $$
# \begin{aligned}
# y_t & = \mu_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, \sigma_\text{irregular}^2) \\
# \mu_t & = \mu_{t-1} + \eta_t, \quad ~ \eta_t \sim N(0, \sigma_\text{level}^2)
# \end{aligned}
# $$
#
# This model satisfies the requirements of the CFA simulation smoother because both the observation error term $\varepsilon_t$ and the state innovation term $\eta_t$ are non-degenerate - that is, their covariance matrices are full rank.
#
# We apply this model to inflation, and consider simulating draws from the posterior of the joint state vector. That is, we are interested in sampling from
#
# $$p(\mu^t \mid y^t, \sigma_\text{irregular}^2, \sigma_\text{level}^2)$$
#
# where we define $\mu^t \equiv (\mu_1, \dots, \mu_T)'$ and $y^t \equiv (y_1, \dots, y_T)'$.
#
# In Statsmodels, the local level model falls into the more general class of "unobserved components" models, and can be constructed as follows:
# +
# Construct a local level model for inflation
mod = sm.tsa.UnobservedComponents(dta.infl, 'llevel')
# Fit the model's parameters (sigma2_varepsilon and sigma2_eta)
# via maximum likelihood
res = mod.fit()
print(res.params)
# Create simulation smoother objects; both are tied to `mod` and will
# reflect whatever parameters the model currently holds
sim_kfs = mod.simulation_smoother() # default method is KFS
sim_cfa = mod.simulation_smoother(method='cfa') # can specify CFA method
# -
# The simulation smoother objects `sim_kfs` and `sim_cfa` have `simulate` methods that perform simulation smoothing. Each time that `simulate` is called, the `simulated_state` attribute will be re-populated with a new simulated draw from the posterior.
#
# Below, we construct 20 simulated paths for the trend, using the KFS and CFA approaches, where the simulation is at the maximum likelihood parameter estimates.
# +
nsimulations = 20
# Pre-allocate one column per simulated trend path, indexed by date
simulated_state_kfs = pd.DataFrame(
    np.zeros((mod.nobs, nsimulations)), index=dta.index)
simulated_state_cfa = pd.DataFrame(
    np.zeros((mod.nobs, nsimulations)), index=dta.index)
for i in range(nsimulations):
    # Apply KFS simulation smoothing (each call yields a fresh posterior draw)
    sim_kfs.simulate()
    # Save the KFS simulated state
    simulated_state_kfs.iloc[:, i] = sim_kfs.simulated_state[0]
    # Apply CFA simulation smoothing
    sim_cfa.simulate()
    # Save the CFA simulated state
    simulated_state_cfa.iloc[:, i] = sim_cfa.simulated_state[0]
# -
# Plotting the observed data and the simulations created using each method below, it is not too hard to see that these two methods are doing the same thing.
# +
# Plot the inflation data along with simulated trends
fig, axes = plt.subplots(2, figsize=(15, 6))
# Plot data and KFS simulations
dta.infl.plot(ax=axes[0], color='k')
axes[0].set_title('Simulations based on KFS approach, MLE parameters')
simulated_state_kfs.plot(ax=axes[0], color='C0', alpha=0.25, legend=False)
# Plot data and CFA simulations
dta.infl.plot(ax=axes[1], color='k')
axes[1].set_title('Simulations based on CFA approach, MLE parameters')
simulated_state_cfa.plot(ax=axes[1], color='C0', alpha=0.25, legend=False)
# Add a legend, clean up layout
handles, labels = axes[0].get_legend_handles_labels()
axes[0].legend(handles[:2], ['Data', 'Simulated state'])
fig.tight_layout();
# -
# #### Updating the model's parameters
#
# The simulation smoothers are tied to the model instance, here the variable `mod`. Whenever the model instance is updated with new parameters, the simulation smoothers will take those new parameters into account in future calls to the `simulate` method.
#
# This is convenient for MCMC algorithms, which repeatedly (a) update the model's parameters, (b) draw a sample of the state vector, and then (c) draw new values for the model's parameters.
#
# Here we will change the model to a different parameterization that yields a smoother trend, and show how the simulated values change (for brevity we only show the simulations from the KFS approach, but simulations from the CFA approach would be the same).
# +
fig, ax = plt.subplots(figsize=(15, 3))
# Update the model's parameterization to one that attributes more
# variation in inflation to the observation error and so has less
# variation in the trend component
mod.update([4, 0.05])
# Plot simulations (each simulate() call now reflects the new parameters)
for i in range(nsimulations):
    sim_kfs.simulate()
    ax.plot(dta.index, sim_kfs.simulated_state[0],
            color='C0', alpha=0.25, label='Simulated state')
# Plot data
dta.infl.plot(ax=ax, color='k', label='Data', zorder=-1)
# Add title, legend, clean up layout
ax.set_title('Simulations with alternative parameterization yielding a smoother trend')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-2:], labels[-2:])
fig.tight_layout();
# -
# ### Application: Bayesian analysis of a TVP-VAR model by MCMC
#
# One of the applications that Chan and Jeliazkov (2009) consider is the time-varying parameters vector autoregression (TVP-VAR) model, estimated with Bayesian Gibbs sampling (MCMC) methods. They apply this to model the co-movements in four macroeconomic time series:
#
# - Real GDP growth
# - Inflation
# - Unemployment rate
# - Short-term interest rates
#
# We will replicate their example, using a very similar dataset that is included in Statsmodels.
# +
# Subset to the four variables of interest
y = dta[['realgdp', 'cpi', 'unemp', 'tbilrate']].copy()
y.columns = ['gdp', 'inf', 'unemp', 'int']
# Convert to real GDP growth and CPI inflation rates
# (100 times the log-difference approximates the percent growth rate)
y[['gdp', 'inf']] = np.log(y[['gdp', 'inf']]).diff() * 100
y = y.iloc[1:] # drop the first row, which is NaN after differencing
fig, ax = plt.subplots(figsize=(15, 5))
y.plot(ax=ax)
ax.set_title('Evolution of macroeconomic variables included in TVP-VAR exercise');
# -
# #### TVP-VAR model
#
# **Note**: this section is based on Chan and Jeliazkov (2009) section 3.1, which can be consulted for additional details.
#
# The usual (time-invariant) VAR(1) model is typically written:
#
# $$
# \begin{aligned}
# y_t & = \mu + \Phi y_{t-1} + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H)
# \end{aligned}
# $$
#
# where $y_t$ is a $p \times 1$ vector of variables observed at time $t$ and $H$ is a covariance matrix.
#
# The TVP-VAR(1) model generalizes this to allow the coefficients to vary over time according. Stacking all the parameters into a vector according to $\alpha_t = \text{vec}([\mu_t : \Phi_t])$, where $\text{vec}$ denotes the operation that stacks columns of a matrix into a vector, we model their evolution over time according to:
#
# $$\alpha_{i,t+1} = \alpha_{i, t} + \eta_{i,t}, \qquad \eta_{i, t} \sim N(0, \sigma_i^2)$$
#
# In other words, each parameter evolves independently according to a random walk.
#
# Note that there are $p$ coefficients in $\mu_t$ and $p^2$ coefficients in $\Phi_t$, so the full state vector $\alpha$ is shaped $p * (p + 1) \times 1$.
# Putting the TVP-VAR(1) model into state-space form is relatively straightforward, and in fact we just have to re-write the observation equation into SUR form:
#
# $$
# \begin{aligned}
# y_t & = Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H) \\
# \alpha_{t+1} & = \alpha_t + \eta_t, \qquad \eta_t \sim N(0, \text{diag}(\{\sigma_i^2\}))
# \end{aligned}
# $$
#
# where
#
# $$
# Z_t = \begin{bmatrix}
# 1 & y_{t-1}' & 0 & \dots & & 0 \\
# 0 & 0 & 1 & y_{t-1}' & & 0 \\
# \vdots & & & \ddots & \ddots & \vdots \\
# 0 & 0 & 0 & 0 & 1 & y_{t-1}' \\
# \end{bmatrix}
# $$
#
# As long as $H$ is full rank and each of the variances $\sigma_i^2$ is non-zero, the model satisfies the requirements of the CFA simulation smoother.
#
# We also need to specify the initialization / prior for the initial state, $\alpha_1$. Here we will follow Chan and Jeliazkov (2009) in using $\alpha_1 \sim N(0, 5 I)$, although we could also model it as diffuse.
# Aside from the time-varying coefficients $\alpha_t$, the other parameters that we will need to estimate are terms in the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
# #### TVP-VAR model in Statsmodels
#
# Constructing this model programmatically in Statsmodels is also relatively straightforward, since there are basically four steps:
#
# 1. Create a new `TVPVAR` class as a subclass of `sm.tsa.statespace.MLEModel`
# 2. Fill in the fixed values of the state space system matrices
# 3. Specify the initialization of $\alpha_1$
# 4. Create a method for updating the state space system matrices with new values of the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
#
# To do this, first note that the general state space representation used by Statsmodels is:
#
# $$
# \begin{aligned}
# y_t & = d_t + Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H_t) \\
# \alpha_{t+1} & = c_t + T_t \alpha_t + R_t \eta_t, \qquad \eta_t \sim N(0, Q_t) \\
# \end{aligned}
# $$
#
# Then the TVP-VAR(1) model implies the following specializations:
#
# - The intercept terms are zero, i.e. $c_t = d_t = 0$
# - The design matrix $Z_t$ is time-varying but its values are fixed as described above (i.e. its values contain ones and lags of $y_t$)
# - The observation covariance matrix is not time-varying, i.e. $H_t = H_{t+1} = H$
# - The transition matrix is not time-varying and is equal to the identity matrix, i.e. $T_t = T_{t+1} = I$
# - The selection matrix $R_t$ is not time-varying and is also equal to the identity matrix, i.e. $R_t = R_{t+1} = I$
# - The state covariance matrix $Q_t$ is not time-varying and is diagonal, i.e. $Q_t = Q_{t+1} = \text{diag}(\{\sigma_i^2\})$
# 1. Create a new TVPVAR class as a subclass of sm.tsa.statespace.MLEModel
class TVPVAR(sm.tsa.statespace.MLEModel):
    """Time-varying parameters VAR(1) model in state space form.

    Observation equation: y_t = Z_t alpha_t + eps_t, eps_t ~ N(0, H).
    State equation (random walk): alpha_{t+1} = alpha_t + eta_t, where
    alpha_t stacks the p intercepts and p^2 lag coefficients.
    """
    # Steps 2-3 are best done in the class "constructor", i.e. the __init__ method
    def __init__(self, y):
        """Build the fixed system matrices from a T x p dataset `y`."""
        # Create a matrix with [y_t' : y_{t-1}'] for t = 2, ..., T
        augmented = sm.tsa.lagmat(y, 1, trim='both', original='in', use_pandas=True)
        # Separate into y_t and z_t = [1 : y_{t-1}']
        p = y.shape[1]
        y_t = augmented.iloc[:, :p]
        z_t = sm.add_constant(augmented.iloc[:, p:])
        # Recall that the length of the state vector is p * (p + 1)
        k_states = p * (p + 1)
        super().__init__(y_t, exog=z_t, k_states=k_states)
        # Note that the state space system matrices default to contain zeros,
        # so we don't need to explicitly set c_t = d_t = 0.
        # Construct the design matrix Z_t
        # Notes:
        # -> self.k_endog = p is the dimension of the observed vector
        # -> self.k_states = p * (p + 1) is the dimension of the state vector
        # -> self.nobs = T is the number of observations in y_t
        self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
        for i in range(self.k_endog):
            # Row i of Z_t places [1 : y_{t-1}'] in the columns for equation i
            start = i * (self.k_endog + 1)
            end = start + self.k_endog + 1
            self['design', i, start:end, :] = z_t.T
        # Construct the transition matrix T = I
        self['transition'] = np.eye(k_states)
        # Construct the selection matrix R = I
        self['selection'] = np.eye(k_states)
        # Step 3: Initialize the state vector as alpha_1 ~ N(0, 5I)
        self.ssm.initialize('known', stationary_cov=5 * np.eye(self.k_states))

    # Step 4. Create a method that we can call to update H and Q
    def update_variances(self, obs_cov, state_cov_diag):
        """Fill in the observation covariance H and diagonal state covariance Q."""
        self['obs_cov'] = obs_cov
        self['state_cov'] = np.diag(state_cov_diag)

    # Finally, it can be convenient to define human-readable names for
    # each element of the state vector. These will be available in output
    @property
    def state_names(self):
        """Human-readable labels for each element of the state vector."""
        state_names = np.empty((self.k_endog, self.k_endog + 1), dtype=object)
        for i in range(self.k_endog):
            endog_name = self.endog_names[i]
            state_names[i] = (
                ['intercept.%s' % endog_name] +
                ['L1.%s->%s' % (other_name, endog_name) for other_name in self.endog_names])
        return state_names.ravel().tolist()
# The above class defined the state space model for any given dataset. Now we need to create a specific instance of it with the dataset that we created earlier containing real GDP growth, inflation, unemployment, and interest rates.
# Create an instance of our TVPVAR class with our observed dataset y
mod = TVPVAR(y)
# #### Preliminary investigation with ad-hoc parameters in H, Q
# In our analysis below, we will need to begin our MCMC iterations with some initial parameterization. Following Chan and Jeliazkov (2009) we will set $H$ to be the sample covariance matrix of our dataset, and we will set $\sigma_i^2 = 0.01$ for each $i$.
#
# Before discussing the MCMC scheme that will allow us to make inferences about the model, first we can consider the output of the model when simply plugging in these initial parameters. To fill in these parameters, we use the `update_variances` method that we defined earlier and then perform Kalman filtering and smoothing conditional on those parameters.
#
# **Warning: This exercise is just by way of explanation - we must wait for the output of the MCMC exercise to study the actual implications of the model in a meaningful way**.
# +
initial_obs_cov = np.cov(y.T) # H: sample covariance matrix of the data
initial_state_cov_diag = [0.01] * mod.k_states # sigma_i^2 = 0.01 for each i
# Update H and Q
mod.update_variances(initial_obs_cov, initial_state_cov_diag)
# Perform Kalman filtering and smoothing
# (the [] is just an empty list that in some models might contain
# additional parameters. Here, we don't have any additional parameters
# so we just pass an empty list)
initial_res = mod.smooth([])
# -
# The `initial_res` variable contains the output of Kalman filtering and smoothing, conditional on those initial parameters. In particular, we may be interested in the "smoothed states", which are $E[\alpha_t \mid y^t, H, \{\sigma_i^2\}]$.
#
# First, let's create a function that graphs the coefficients over time, with one panel for each equation of the observed variables.
def plot_coefficients_by_equation(states):
    """Plot the time-varying coefficients, one panel per observed variable.

    The state vector stacks five coefficients (intercept plus four lags)
    per equation, so consecutive groups of five columns of `states`
    belong to the same equation. Returns the last Axes drawn.
    """
    fig, axes = plt.subplots(2, 2, figsize=(15, 8))
    titles = ['GDP growth', 'Inflation rate',
              'Unemployment equation', 'Interest rate equation']
    for panel, (ax, title) in enumerate(zip(axes.flatten(), titles)):
        # Columns [5 * panel, 5 * panel + 5) hold this equation's coefficients
        states.iloc[:, 5 * panel:5 * (panel + 1)].plot(ax=ax)
        ax.set_title(title)
        ax.legend()
    return ax
# Now, we are interested in the smoothed states, which are available in the `states.smoothed` attribute of our results object `initial_res`.
#
# As the graph below shows, the initial parameterization implies substantial time-variation in some of the coefficients.
# +
# Here, for illustration purposes only, we plot the time-varying
# coefficients conditional on an ad-hoc parameterization
# Recall that `initial_res` contains the Kalman filtering and smoothing,
# and the `states.smoothed` attribute contains the smoothed states
plot_coefficients_by_equation(initial_res.states.smoothed);
# -
# #### Bayesian estimation via MCMC
#
# We will now implement the Gibbs sampler scheme described in Chan and Jeliazkov (2009), Algorithm 2.
#
#
# We use the following (conditionally conjugate) priors:
#
# $$
# \begin{aligned}
# H & \sim \mathcal{IW}(\nu_1^0, S_1^0) \\
# \sigma_i^2 & \sim \mathcal{IG} \left ( \frac{\nu_{i2}^0}{2}, \frac{S_{i2}^0}{2} \right )
# \end{aligned}
# $$
#
# where $\mathcal{IW}$ denotes the inverse-Wishart distribution and $\mathcal{IG}$ denotes the inverse-Gamma distribution. We set the prior hyperparameters as:
#
# $$
# \begin{aligned}
# v_1^0 = T + 3, & \quad S_1^0 = I \\
# v_{i2}^0 = 6, & \quad S_{i2}^0 = 0.01 \qquad \text{for each} ~ i\\
# \end{aligned}
# $$
# +
# Prior hyperparameters
# Prior for obs. cov. is inverse-Wishart(v_1^0=k + 3, S10=I)
v10 = mod.k_endog + 3
S10 = np.eye(mod.k_endog)
# Prior for state cov. variances is inverse-Gamma(v_{i2}^0 / 2 = 3, S_{i2}^0 / 2 = 0.005)
vi20 = 6
Si20 = 0.01
# -
# Before running the MCMC iterations, there are a couple of practical steps:
#
# 1. Create arrays to store the draws of our state vector, observation covariance matrix, and state error variances.
# 2. Put the initial values for H and Q (described above) into the storage vectors
# 3. Construct the simulation smoother object associated with our `TVPVAR` instance to make draws of the state vector
# +
# Gibbs sampler setup
niter = 11000 # total number of MCMC iterations
nburn = 1000 # initial draws discarded as burn-in
# 1. Create storage arrays (index 0 holds the initial values)
store_states = np.zeros((niter + 1, mod.nobs, mod.k_states))
store_obs_cov = np.zeros((niter + 1, mod.k_endog, mod.k_endog))
store_state_cov = np.zeros((niter + 1, mod.k_states))
# 2. Put in the initial values
store_obs_cov[0] = initial_obs_cov
store_state_cov[0] = initial_state_cov_diag
mod.update_variances(store_obs_cov[0], store_state_cov[0])
# 3. Construct posterior samplers
sim = mod.simulation_smoother(method='cfa')
# -
# As before, we could have used either the simulation smoother based on the Kalman filter and smoother or that based on the Cholesky Factor Algorithm.
# Gibbs sampling: each iteration draws (states | H, Q), then (H | states),
# then (Q | states), conditioning on the most recent draw of the others.
for i in range(niter):
    mod.update_variances(store_obs_cov[i], store_state_cov[i])
    sim.simulate()
    # 1. Sample states (one joint draw of the full state path)
    store_states[i + 1] = sim.simulated_state.T
    # 2. Simulate obs cov from its inverse-Wishart conditional posterior,
    #    based on the observation-equation residuals
    fitted = np.matmul(mod['design'].transpose(2, 0, 1), store_states[i + 1][..., None])[..., 0]
    resid = mod.endog - fitted
    store_obs_cov[i + 1] = invwishart.rvs(v10 + mod.nobs, S10 + resid.T @ resid)
    # 3. Simulate state cov variances from their inverse-Gamma conditional
    #    posteriors, based on the state-equation innovations
    resid = store_states[i + 1, 1:] - store_states[i + 1, :-1]
    sse = np.sum(resid**2, axis=0) # sum of squared innovations, per state element
    for j in range(mod.k_states):
        rv = invgamma.rvs((vi20 + mod.nobs - 1) / 2, scale=(Si20 + sse[j]) / 2)
        store_state_cov[i + 1, j] = rv
# After removing a number of initial draws, the remaining draws from the posterior allow us to conduct inference. Below, we plot the posterior mean of the time-varying regression coefficients.
#
# (**Note**: these plots are different from those in Figure 1 of the published version of Chan and Jeliazkov (2009), but they are very similar to those produced by the Matlab replication code available at http://joshuachan.org/code/code_TVPVAR.html)
# +
# Collect the posterior means of each time-varying coefficient
states_posterior_mean = pd.DataFrame(
np.mean(store_states[nburn + 1:], axis=0),
index=mod._index, columns=mod.state_names)
# Plot these means over time
plot_coefficients_by_equation(states_posterior_mean);
# -
# Python also has a number of libraries to assist with exploring Bayesian models. Here we'll just use the [arviz](https://arviz-devs.github.io/arviz/index.html) package to explore the credible intervals of each of the covariance and variance parameters, although it makes available a much wider set of tools for analysis.
# +
import arviz as az
# Collect the observation error covariance parameters
az_obs_cov = az.convert_to_inference_data({
('Var[%s]' % mod.endog_names[i] if i == j else
'Cov[%s, %s]' % (mod.endog_names[i], mod.endog_names[j])):
store_obs_cov[nburn + 1:, i, j]
for i in range(mod.k_endog) for j in range(i, mod.k_endog)})
# Plot the credible intervals
az.plot_forest(az_obs_cov, figsize=(8, 7));
# +
# Collect the state innovation variance parameters
az_state_cov = az.convert_to_inference_data({
r'$\sigma^2$[%s]' % mod.state_names[i]: store_state_cov[nburn + 1:, i]
for i in range(mod.k_states)})
# Plot the credible intervals
az.plot_forest(az_state_cov, figsize=(8, 7));
# -
# ### Appendix: performance
#
# Finally, we run a few simple tests to compare the performance of the KFS and CFA simulation smoothers by using the `%timeit` Jupyter notebook magic.
#
# One caveat is that the KFS simulation smoother can produce a variety of output beyond just simulations of the posterior state vector, and these additional computations could bias the results. To make the results comparable, we will tell the KFS simulation smoother to only compute simulations of the state by using the `simulation_output` argument.
# +
from statsmodels.tsa.statespace.simulation_smoother import SIMULATION_STATE
# Construct both simulation smoothers for the timing comparison; restrict the
# KFS version to simulating only the state vector so the comparison is fair
sim_cfa = mod.simulation_smoother(method='cfa')
sim_kfs = mod.simulation_smoother(simulation_output=SIMULATION_STATE)
# -
# Then we can use the following code to perform a basic timing exercise:
#
# ```python
# # %timeit -n 10000 -r 3 sim_cfa.simulate()
# # %timeit -n 10000 -r 3 sim_kfs.simulate()
# ```
#
# On the machine this was tested on, this resulted in the following:
#
# ```
# 2.06 ms ± 26.5 µs per loop (mean ± std. dev. of 3 runs, 10000 loops each)
# 2.02 ms ± 68.4 µs per loop (mean ± std. dev. of 3 runs, 10000 loops each)
# ```
# These results suggest that - at least for this model - there are not noticeable computational gains from the CFA approach relative to the KFS approach. However, this does not rule out the following:
#
# 1. The Statsmodels implementation of the CFA simulation smoother could possibly be further optimized
# 2. The CFA approach may only show improvement for certain models (for example with a large number of `endog` variables)
#
# One simple way to take a first pass at assessing the first possibility is to compare the runtime of the Statsmodels implementation of the CFA simulation smoother to the Matlab implementation in the replication codes of Chan and Jeliazkov (2009), available at http://joshuachan.org/code/code_TVPVAR.html.
#
# While the Statsmodels version of the CFA simulation smoother is written in Cython and compiled to C code, the Matlab version takes advantage of the Matlab's sparse matrix capabilities. As a result, even though it is not compiled code, we might expect it to have relatively good performance.
#
# On the machine this was tested on, the Matlab version typically ran the MCMC loop with 11,000 iterations in 70-75 seconds, while the MCMC loop in this notebook using the Statsmodels CFA simulation smoother (see above), also with 11,000 iterations, ran in 40-45 seconds. This is some evidence that the Statsmodels implementation of the CFA smoother already performs relatively well (although it does not rule out that there are additional gains possible).
# ### Bibliography
#
# Carter, <NAME>., and <NAME>. "On Gibbs sampling for state space models." Biometrika 81, no. 3 (1994): 541-553.
#
# Chan, <NAME>, and <NAME>. "Efficient simulation and integrated likelihood estimation in state space models." International Journal of Mathematical Modelling and Numerical Optimisation 1, no. 1-2 (2009): 101-120.
#
# <NAME>, Piet, and <NAME>. "The simulation smoother for time series models." Biometrika 82, no. 2 (1995): 339-350.
#
# <NAME>, and <NAME>. "A simple and efficient simulation smoother for state space time series analysis." Biometrika 89, no. 3 (2002): 603-616.
#
# McCausland, <NAME>., <NAME>, and <NAME>. "Simulation smoothing for state–space models: A computational efficiency analysis." Computational Statistics & Data Analysis 55, no. 1 (2011): 199-212.
|
examples/notebooks/statespace_tvpvar_mcmc_cfa.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/krakowiakpawel9/data-science-bootcamp/blob/master/02_analiza_danych/06_sql_porownanie.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="B2PJxkMoI22M" colab_type="text"
# * @author: <EMAIL>
# * @site: e-smartdata.org
# + [markdown] id="4bZ3mB0Uazid" colab_type="text"
# ### Pandas
# >Strona biblioteki: [https://pandas.pydata.org/](https://pandas.pydata.org/)
# >Dokumentacja: [https://pandas.pydata.org/pandas-docs/stable/](https://pandas.pydata.org/pandas-docs/stable/)
# >
# >Podstawowa biblioteka do analizy danych w języku Python.
# >
# >Aby zainstalować bibliotekę Pandas użyj polecenia poniżej:
# ```
# pip install pandas
# ```
# ### Spis treści:
# 1. [Import bibliotek](#a1)
# 2. [Wczytanie danych i preprocessing](#a2)
# 3. [Porównanie do języka SQL](#a3)
#
#
#
# + [markdown] id="puqmafbudLxD" colab_type="text"
# ### <a name='a1'></a> Import bibliotek
# + id="AEE5ZeP_mtQo" colab_type="code" outputId="40ab363c-1dec-4abd-fb1f-c64f77465a0a" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
import pandas as pd
# Display the installed pandas version (shown as the cell output)
pd.__version__
# + [markdown] id="DStcVKBtdRaf" colab_type="text"
# ### <a name='a2'></a> Wczytanie danych i preprocessing
# + id="GWwf431ccVzM" colab_type="code" outputId="cab96fc0-c8f8-462d-e40e-dfc126f8ff2e" colab={"base_uri": "https://localhost:8080/", "height": 288}
# Download the online-retail dataset (Excel file) and preview the first rows
url = ('https://storage.googleapis.com/esmartdata-courses-files/ds-bootcamp/online_retail.xlsx')
retail_raw = pd.read_excel(url)
retail_raw.head()
# + id="2BSzjIcMn979" colab_type="code" outputId="555a8614-298a-4059-ed03-73c7a004d551" colab={"base_uri": "https://localhost:8080/", "height": 240}
# Work on a copy so the raw download stays untouched
retail = retail_raw.copy()
retail.info()
# + id="FOVmj656oQ3m" colab_type="code" outputId="14d78654-3719-42ac-d6f9-61af91155738" colab={"base_uri": "https://localhost:8080/", "height": 295}
# Summary statistics of the numeric columns
retail.describe()
# + id="qJdiaTX6oY-L" colab_type="code" outputId="9eb04a95-f3c6-4bfb-b8ed-baff60c9b444" colab={"base_uri": "https://localhost:8080/", "height": 171}
# Count missing values per column
retail.isnull().sum()
# + id="KrXKk3c4ohDz" colab_type="code" outputId="5fceb3bd-87d0-4a89-b1b5-898a1eeadf0e" colab={"base_uri": "https://localhost:8080/", "height": 171}
# Drop rows with any missing values and confirm nothing is left missing
retail = retail.dropna()
retail.isnull().sum()
# + id="DT-xRXaZonjw" colab_type="code" outputId="04624377-e844-4219-ef0d-fb60b0c27060" colab={"base_uri": "https://localhost:8080/", "height": 240}
# Keep only non-negative quantities (negative rows are returns/cancellations)
retail = retail[retail.Quantity >= 0]
retail.info()
# + id="_KMwbyjUo83T" colab_type="code" outputId="db4fa98a-363d-41ae-9770-85911d663036" colab={"base_uri": "https://localhost:8080/", "height": 240}
# Convert CustomerID from float (as read from Excel) to a clean string id.
# Vectorized astype is equivalent to the original row-wise
# .apply(lambda x: str(int(x))) here (the column was already dropna'd) and is
# much faster; bracket assignment avoids the attribute-assignment pitfall.
retail['CustomerID'] = retail.CustomerID.astype(int).astype(str)
retail.info()
# + id="IA1o78VcpNaU" colab_type="code" outputId="df03a1a8-406f-419c-eb74-ad1207cfb5e7" colab={"base_uri": "https://localhost:8080/", "height": 288}
retail.head()
# + [markdown] id="7x5mJHF3damM" colab_type="text"
# ### <a name='a3'></a> Porównanie do języka SQL
# + id="0fADpCB9dE4T" colab_type="code" outputId="8210a1c7-aa96-439a-d557-5ccfced1464c" colab={"base_uri": "https://localhost:8080/", "height": 588}
# SELECT * FROM retail;
retail
# + id="9aIUYKJmfDBw" colab_type="code" outputId="e6838693-64e7-42d8-da42-1614ab903eea" colab={"base_uri": "https://localhost:8080/", "height": 417}
# SELECT Quantity, UnitPrice, CustomerID FROM retail;
retail[['Quantity', 'UnitPrice', 'CustomerID']]
# + id="Oqe7i3XKfJ9a" colab_type="code" outputId="c5b16645-62a5-46e7-c572-eba4de8dcbe6" colab={"base_uri": "https://localhost:8080/", "height": 357}
# SELECT Quantity, UnitPrice, CustomerID FROM retail LIMIT 10;
retail[['Quantity', 'UnitPrice', 'CustomerID']].head(10)
# + id="KEAger9Vqgkv" colab_type="code" outputId="9983340b-c42c-402f-cd0b-68262c0a60be" colab={"base_uri": "https://localhost:8080/", "height": 357}
# Equivalent alternative: positional slicing instead of .head(10)
retail[['Quantity', 'UnitPrice', 'CustomerID']][:10]
# + id="WMFGgvJtfSfE" colab_type="code" outputId="9693986a-8358-4172-fa5c-b776b94339ed" colab={"base_uri": "https://localhost:8080/", "height": 588}
# SELECT * FROM retail WHERE CustomerID='17850';
retail[retail.CustomerID == '17850']
# + id="ld7iAJNOq2qa" colab_type="code" outputId="5605131b-368f-42bc-9510-09b8697d5846" colab={"base_uri": "https://localhost:8080/", "height": 588}
# Equivalent alternative: .query with an expression string
retail.query('CustomerID == "17850"')
# + id="oiV38TuRff7O" colab_type="code" outputId="0705517d-0bf3-4181-c843-abac3beefc76" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# SELECT * FROM retail WHERE CustomerID='17850' and UnitPrice > 5;
retail[(retail.CustomerID == '17850') & (retail.UnitPrice > 5)]
# + id="U7p3tJ9Jf_IJ" colab_type="code" outputId="abcb4001-3c2f-4190-f53b-29b9176727e8" colab={"base_uri": "https://localhost:8080/", "height": 588}
# SELECT * FROM retail WHERE CustomerID='17850' or Country='France';
retail[(retail.CustomerID == '17850') | (retail.Country == 'France')]
# + id="efAp2MF2gMkd" colab_type="code" outputId="01a0c934-a007-47ea-a74d-290a152cd0ae" colab={"base_uri": "https://localhost:8080/", "height": 48}
# SELECT * FROM retail WHERE InvoiceNo is not null;
retail[retail.InvoiceNo.notnull()]
# + id="uv4UJhehgWwt" colab_type="code" outputId="84ff64c6-9642-4906-b856-128694d0b354" colab={"base_uri": "https://localhost:8080/", "height": 240}
# SELECT CustomerID, count(*) FROM retail GROUP BY CustomerID;
retail.groupby('CustomerID').size()
# + id="jvrHhR8svOhv" colab_type="code" outputId="47404483-6746-49f4-a289-35a573232752" colab={"base_uri": "https://localhost:8080/", "height": 96}
retail.head(1)
# + id="tccbs5BIts-p" colab_type="code" outputId="87d4f6d4-28a6-4ef8-f9c3-15155ac49a14" colab={"base_uri": "https://localhost:8080/", "height": 192}
# Add a derived column: revenue per row (price times quantity)
retail['Revenue'] = retail.Quantity * retail.UnitPrice
retail.head(3)
# + id="6YNqg2aEgvHd" colab_type="code" outputId="bcdd3248-02ba-4d4a-f51d-702eae8633dd" colab={"base_uri": "https://localhost:8080/", "height": 448}
# SELECT CustomerID, avg(Revenue), count(*) FROM retail GROUP BY CustomerID;
retail.groupby('CustomerID').aggregate({'Revenue': np.mean, 'CustomerID': np.size})
# + id="XmDr4yfBwfJK" colab_type="code" outputId="cde64871-0af6-43b0-e955-403330319417" colab={"base_uri": "https://localhost:8080/", "height": 448}
# Same aggregation, with SQL-alias-style column names via rename
retail.groupby('CustomerID').aggregate({'Revenue': np.mean, 'CustomerID': np.size}). \
    rename(columns={'Revenue': 'RevenueAverage', 'CustomerID': 'CustomerIDCount'})
# + id="gRSB88RZxX26" colab_type="code" outputId="4c2ace0a-2876-4a8e-decb-cd4ff5d2ac0a" colab={"base_uri": "https://localhost:8080/", "height": 144}
retail.head(2)
# + id="UDIw8SAWxdQQ" colab_type="code" outputId="d9f753c1-0bc7-4e52-c69b-4bd1b516c01a" colab={"base_uri": "https://localhost:8080/", "height": 162}
# Extract the day-of-month from the invoice timestamp via the .dt accessor
retail['InvoiceDateDay'] = retail.InvoiceDate.dt.day
retail.head(2)
# + id="jvRwnJByhIic" colab_type="code" outputId="d916328e-0963-4f45-998b-d5e363fc1ee0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# SELECT InvoiceDateDay, sum(Revenue) FROM retail GROUP BY InvoiceDateDay;
results = retail.groupby('InvoiceDateDay').aggregate({'Revenue': np.sum})
results
# + id="3gI7onk8x6-E" colab_type="code" outputId="977ddfcb-89c1-40be-9a61-093b5faa48d9" colab={"base_uri": "https://localhost:8080/", "height": 306}
# Bar chart of daily revenue (seaborn is imported only for its default style)
import seaborn as sns
sns.set()
results.plot(kind='bar')
# + id="63bVyDsIiIMQ" colab_type="code" outputId="30e41123-8639-49a7-9d9f-4e2f9871d3cf" colab={"base_uri": "https://localhost:8080/", "height": 357}
# SELECT * FROM retail ORDER BY Quantity DESC LIMIT 5;
retail.nlargest(n=5, columns='Quantity')
# + id="p0iGWQUsihKb" colab_type="code" outputId="f1566619-ff0b-4e9e-9576-f4b5c0e23be5" colab={"base_uri": "https://localhost:8080/", "height": 306}
# SELECT * FROM retail ORDER BY Quantity LIMIT 5;
retail.nsmallest(n=5, columns='Quantity')
# + id="Qbq7K1k-zWq1" colab_type="code" colab={}
|
02_analiza_danych/06_sql_porownanie.ipynb
|