code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# importing all required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm, skew
from scipy.special import boxcox
import sklearn
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, Ridge, BayesianRidge
from sklearn.svm import LinearSVR, SVR
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor , RandomForestRegressor, BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import r2_score
from xgboost import XGBRegressor
import warnings
warnings.filterwarnings("ignore")
# -
# ### Data Overview
df = pd.read_csv("CarPrice_Assignment.csv")
df
df.info()
df.shape
df.nunique()
# **The Car_ID here is just representing the data entry serial so we can drop this feature**
#
# **CarName shows the make followed by the model name, so let's separate the maker and the model**
df.drop(columns='car_ID', axis = 1, inplace = True)
df['Make'] = df.CarName.str.split(pat = " ", n = 1, expand=True)[0]
df['Model'] = df.CarName.str.split(pat = " ", n = 1, expand=True)[1]
df.drop(columns='CarName', axis = 1, inplace = True)
df.head()
# **Remove the Model column feature**
df.drop(columns='Model', axis = 1, inplace = True)
df.head()
# +
# getting the categorical and numerical features
categorical_features = list(df.select_dtypes(include = object).columns)
numerical_features = list((df.select_dtypes(include = ['float64', 'int64']).columns))
print(f'number of categorical features : {len(categorical_features)} and number of numerical features: {len(numerical_features)}' )
# -
print("Number of unique values in categorical_features are: ")
print(df[categorical_features].nunique(),"\n")
print("Number of unique values in numerical_features are:")
df[numerical_features].nunique()
for features in categorical_features:
print("Value count in ",features,": ")
print(df[features].value_counts(),"\n")
# #### Observations
#
# 1. There are typo errors in Makers names -
# * nissan misspelled as Nissan
# * toyota misspelled as toyouta
# * vokswagen misspelled as vw
# * mazda misspelled as maxda
# * porsche misspelled as porcshce
#
# 2. cylindernumber and doornumber can be expressed as numeric categories ex- change two to numeric 2 etc
#
# Will change these variables
#
df["Make"]=df["Make"].replace({"toyouta":"toyota",
"vokswagen":"volkswagen",
"vw":"volkswagen",
"maxda":"mazda",
"Nissan":"nissan",
"porcshce":"porsche"
})
# +
mapping = {
"two": 0,
"three": 1,
"four": 2,
"five": 3,
"six": 4,
"eight": 5,
"twelve": 4
}
columns_list = ['cylindernumber', 'doornumber']
for columns in columns_list:
df.loc[:, columns] = df[columns].map(mapping)
# -
df.head()
# ### Data Preprocessing and EDA
# +
# Funtion for feature plot
def feature_plot(data, feature):
    """Plot the distribution of ``feature`` with a fitted normal curve, plus a Q-Q plot.

    Parameters: data (pandas.DataFrame), feature (str) -- column to inspect.
    Displays two figures as a side effect; returns None (the value of plt.show()).
    """
    # NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 -- consider
    # migrating to sns.histplot/displot; kept here to preserve current output.
    sns.distplot(data[feature] , fit=norm);
    plt.ylabel('Frequency')
    plt.title(feature)
    # Second figure: probability (Q-Q) plot against the normal distribution.
    fig = plt.figure()
    res = stats.probplot(data[feature], plot=plt)
    return plt.show()
# -
feature_plot(df, 'price')
# **We Observed that Price data is skewed distribution**
# Lets check with log of price
df.price = np.log(df.price)
feature_plot(df, 'price')
# **This has improved the distribution of price**
#
# Lets Check distribution of other numerical features
for feature in numerical_features:
feature_plot(df, feature)
# **Symboling ranges from -3 to 3; let's shift it to non-negative values by adding 3**
df['symboling'] = df['symboling']+3
df['symboling'].value_counts()
skewed_features = df[numerical_features].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_features})
skewness.head(10)
# +
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
skewed_features = skewness.index
lam = 0.3
for feature in skewed_features:
df[feature] = boxcox(df[feature], lam)
# -
skewed_features = df[numerical_features].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_features})
skewness.head(10)
# **We Can see there is significant reduction in skewness**
#
# Lets check now Categorical features
#
df[categorical_features].head()
# As we have already converted doornumber and cylindernumber to numeric, let's drop them from the categorical list
categorical_features.remove('doornumber')
categorical_features.remove('cylindernumber')
df[categorical_features].head()
df = pd.get_dummies(df, columns=categorical_features)
df.head()
# ### 3. Model Building and Evaluation
# Split features/target, then standardize the numeric columns.
X = df.drop(['price'], axis=1)
y = df['price']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
numerical_features = X.select_dtypes(include=['int64', 'float64']).columns.tolist()
x_train[numerical_features].head(3)
# **Scaling the numerical features**
scaler = StandardScaler()
x_train[numerical_features] = scaler.fit_transform(x_train[numerical_features])
# BUG FIX: the test set must be transformed with the mean/std fitted on the
# training set; the original called fit_transform here, which re-fits the
# scaler on the test data (data leakage and an inconsistent feature space).
x_test[numerical_features] = scaler.transform(x_test[numerical_features])
# ### Training the model
def train_model(x_train, x_test, y_train, y_test):
    """Fit a suite of regressors and print test R^2 vs. cross-validated train R^2.

    Assumes y_train/y_test hold log-transformed prices: predictions are
    exponentiated before scoring so the test R^2 is measured on the original
    price scale. Prints one line per model; returns None.
    """
    models = []
    n_folds = 20
    models.append(('R', Ridge(random_state=42)))
    models.append(('BR', BayesianRidge(n_iter=1000)))
    models.append(('KNR', KNeighborsRegressor()))
    models.append(('DTR', DecisionTreeRegressor(random_state=42)))
    models.append(('SVR', SVR()))
    models.append(('ABR', AdaBoostRegressor(n_estimators=300, random_state=42)))
    # BUG FIX: BaggingRegressor previously reused the label 'BR' already taken
    # by BayesianRidge, making the printed results ambiguous; renamed to 'BGR'.
    models.append(('BGR', BaggingRegressor(n_estimators=300, random_state=42)))
    models.append(('GBR', GradientBoostingRegressor(n_estimators=300, random_state=42)))
    models.append(('XGB', XGBRegressor(n_estimators=300, random_state=42)))
    models.append(('RFR', RandomForestRegressor(n_estimators=300, random_state=42)))
    for name, model in models:
        # 20-fold CV on the training set gives the "train" score reported below.
        kf = KFold(n_folds, shuffle=True, random_state=42)
        cv_results = cross_val_score(model, x_train, y_train, cv=kf, scoring='r2')
        model = model.fit(x_train, y_train)
        m_predict = model.predict(x_test)
        # Undo the log transform before scoring on the test set.
        m_score = r2_score(np.exp(y_test), np.exp(m_predict))
        print("%s: r2_test = %.3f /// r2_train = %.3f" % (name, m_score, cv_results.mean()))
train_model(x_train, x_test, y_train, y_test)
| All_regression_comaprision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import numpy
import matplotlib.pyplot
# %matplotlib inline
data = numpy.loadtxt(fname='data/weather-01.csv', delimiter = ',')
# +
# create a figure to hold subplots
fig = matplotlib.pyplot.figure(figsize= (10.0, 3.0))
#create a placeholder for plots
subplot1 = fig.add_subplot (1,3,1)
subplot2 = fig.add_subplot (1,3,2)
subplot3 = fig.add_subplot (1,3,3)
subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis = 0))
subplot2.set_ylabel('min')
subplot2.plot(numpy.min(data, axis = 0))
subplot3.set_ylabel('max')
subplot3.plot(numpy.max(data, axis = 0))
fig.tight_layout()
matplotlib.pyplot.show()
# -
# ## Loops
word = 'notebook'
print (word[4])
for char in word:
print(char)
# ## Get a list of filenames from disk
import glob
print(glob.glob('data/weather*.csv'))
# ### Putting it all together
# +
filenames = sorted(glob.glob('data/weather*.csv'))
#filenames = filenames[0:3]
for f in filenames:
print(f)
data = numpy.loadtxt(fname=f, delimiter=',')
if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
print ("Suspicious looking maxima")
elif numpy.sum(numpy.min(data, axis=0)) == 0:
print("Minima equal zero!")
else:
print("Data looks ok")
# create a figure to hold subplots
fig = matplotlib.pyplot.figure(figsize= (10.0, 3.0))
#create a placeholder for plots
subplot1 = fig.add_subplot (1,3,1)
subplot2 = fig.add_subplot (1,3,2)
subplot3 = fig.add_subplot (1,3,3)
subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis = 0))
subplot2.set_ylabel('min')
subplot2.plot(numpy.min(data, axis = 0))
subplot3.set_ylabel('max')
subplot3.plot(numpy.max(data, axis = 0))
fig.tight_layout()
matplotlib.pyplot.show()
# -
# ## Making decisions
num = 107
if num > 100:
print ('Greater')
else:
print ('Not Greater')
print ('Done')
# +
num = 3
if num > 0:
print(num, "is positive")
elif num == 0:
print(num, "is zero")
else:
print(num, "is negative")
# -
# ## Functions
def fahr_to_kelvin(temp):
    """Convert a temperature from degrees Fahrenheit to kelvin."""
    scale = 5 / 9
    offset = 273.15
    return (temp - 32) * scale + offset
print(fahr_to_kelvin(110))
def analyse (filename):
    """ This function plots the average, minima and maxima for a weather station dataset.
    It takes a comma-delimited file and plots the per-column mean, min and max
    as three side-by-side subplots. Returns None; displays a figure.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    # create a figure to hold subplots
    fig = matplotlib.pyplot.figure(figsize= (10.0, 3.0))
    # create placeholders for the three plots (1 row x 3 columns)
    subplot1 = fig.add_subplot (1,3,1)
    subplot2 = fig.add_subplot (1,3,2)
    subplot3 = fig.add_subplot (1,3,3)
    # axis=0 aggregates over rows, giving one statistic per column of the file
    subplot1.set_ylabel('average')
    subplot1.plot(numpy.mean(data, axis = 0))
    subplot2.set_ylabel('min')
    subplot2.plot(numpy.min(data, axis = 0))
    subplot3.set_ylabel('max')
    subplot3.plot(numpy.max(data, axis = 0))
    fig.tight_layout()
    matplotlib.pyplot.show()
def detect_problems(filename):
    """Some of our temperature files have problems; check for these.

    This function reads a comma-delimited file and reports on odd maxima and minima that add up to zero.
    This seems to happen when the sensors break.
    The function does not return any data.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    # Known sensor-failure signature: column maxima ramp linearly
    # (0 in column 0 and 20 in column 20).
    if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
        print ("Suspicious looking maxima")
    # Second failure mode: every column's minimum is zero.
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
        print("Minima equal zero!")
    else:
        print("Data looks ok")
for f in filenames [0:5]:
print(f)
analyse(f)
detect_problems(f)
help(numpy.loadtxt)
help(detect_problems)
help(analyse)
| 02-plots-and-subplots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
print(f"Pandas Version: {pd.__version__}")
import numpy as np
print(f"NumPy Version: {np.__version__}")
from multiprocessing import Pool
import sys
print(f"System Version: {sys.version}")
df = pd.read_csv("County_Mortgage_Funding.csv")
df.drop("Unnamed: 0", axis=1, inplace=True)
def assign_to_centroid(centroid_dictionary, classification_dictionary, data):
    """
    Assign every point in ``data`` to its nearest centroid by Euclidean distance.

    ``centroid_dictionary`` maps cluster index -> centroid position, and
    ``classification_dictionary`` maps cluster index -> list of assigned points;
    the latter is mutated in place and also returned.
    This is the function that is executed in parallel.
    """
    for observation in data:
        # Distance to every centroid, in dict-insertion (cluster-index) order.
        distances = [np.linalg.norm(observation - centroid_dictionary[label])
                     for label in centroid_dictionary]
        # The position of the smallest distance is the cluster number.
        nearest = min(range(len(distances)), key=distances.__getitem__)
        classification_dictionary[nearest].append(observation)
    return classification_dictionary
def parallel_kmeans(k, data, processors,iterations):
    """
    This function will run the k-Means algorithm in parallel.
    Inputs:
        k = Number of Clusters
        data = numpy array (observations as rows)
        processors = number of cores to use
        iterations = total iterations to run if algo does not converge.
    Outputs:
        merged_classification_dict = A dictionary containing the classification for each point in n-dimensional space.
        centroid_dict = A dictionary contains the location of the k-centroids in n-dimensional space.
    """
    seed = np.random.RandomState(21)  # fixed seed so centroid init is reproducible
    # splitting the data according to number of processors requested.
    data_split = np.array_split(data, processors)
    # this dictionary will store our centroids and their locations in n-dimensional space.
    centroid_dict = {}
    all_indicies = np.arange(len(data))  # all possible indicies of the data matrix
    # pick k random starting indicies for centroids - no replacement, want unique indicies
    intitial_centroids_indicies = seed.choice(all_indicies, size=k, replace=False)
    # setting k-random rows as initial centroids.
    for i in range(k):
        row = data[intitial_centroids_indicies[i]]
        centroid_dict[i] = row
    # this outer loop will ensure we stop after a certain number of iterations if we do not converge.
    for iter_ in range(iterations):
        classification_dict = {}  # will store classifications of each n-dimensional point to a centroid
        for i in range(k):
            classification_dict[i] = []  # points to be stored in lists according to centroid.
        # multi-processing stage: each worker assigns one chunk of the data.
        with Pool(processes=processors) as pool:  # context manager closes the pool
            process_objects = [pool.apply_async(assign_to_centroid, args=(centroid_dict, classification_dict, data_chunk)) for data_chunk in data_split]
            # .get() blocks until each worker's result dictionary is available.
            process_results = [proc.get() for proc in process_objects]
        # take the first dictionary that contains centroid assignments for points
        merged_classification_dict = process_results.pop(0)
        # Merging all the other dictionaries with the above dictionary.
        for dictionary in process_results:
            for key in dictionary:
                merged_classification_dict[key].extend(dictionary[key])
        # NOTE(review): dict() is a *shallow* copy (the original comment said
        # "deep copy"); it is sufficient here because centroid entries are
        # reassigned below rather than mutated in place.
        old_centroids = dict(centroid_dict)
        # Here we are updating the centroid positions to the mean of their members.
        for class_ in merged_classification_dict:
            centroid_dict[class_] = np.mean(merged_classification_dict[class_], axis=0)
        convergence = True  # Assume we have converged!
        for centroid in centroid_dict:
            previous_position = old_centroids[centroid]
            new_position = centroid_dict[centroid]
            # Distortion = how far this centroid moved this iteration.
            distortion = np.linalg.norm(new_position-previous_position)
            # if distortion is not 0, then we have not converged to a minimum. We need to keep iterating.
            if distortion != 0:
                convergence = False
        if convergence:  # if we converge, break out of the iteration loop
            # print(f"Convergence on iteration {iter_}")
            break
    return merged_classification_dict, centroid_dict
# ## Benchmarks
#
# Note: In terms of performance, using `.apply_async` significantly outperformed `.apply`.
# ### K = 2
# %%timeit
# k=2, 1 processor, max of 1000 iterations
classifications, centroids = parallel_kmeans(2, df.values, 1, 1000)
# %%timeit
# k=2, 2 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(2, df.values, 2, 1000)
# %%timeit
# k=2, 3 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(2, df.values, 3, 1000)
# %%timeit
# k=2, 8 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(2, df.values, 8, 1000)
# ### K = 3
# %%timeit
# k=3, 1 processor, max of 1000 iterations
classifications, centroids = parallel_kmeans(3, df.values, 1, 1000)
# %%timeit
# k=3, 2 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(3, df.values, 2, 1000)
# %%timeit
# k=3, 3 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(3, df.values, 3, 1000)
# %%timeit
# k=3, 8 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(3, df.values, 8, 1000)
# ## Outputs
# k=2, 8 processors, max of 1000 iterations
classifications, centroids = parallel_kmeans(2, df.values, 8, 1000)
classifications.keys() # we have 2 centroids
(len(classifications[0]) + len(classifications[1])) == df.shape[0] # all rows classified to a centroid
centroids # The final centroid positions
| k-Means_Parallelized_GABR_IBRAHIM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 1 - Quantum Tools
#
# * Python and Pip
# * Jupyter
# * Google Colaboratory
# * Binder
# * Qiskit (and its composing parts)
# * Community and VSCode extension
# * IBMQ and IBMQ Experience
#
# # Exercises
# * Installing software
# * Creating IBMQ account
# * Local setup by running [this notebook](exercises/IBMQ_setup.ipynb) on your machine. (You can [clone](https://help.github.com/articles/cloning-a-repository/) or download this repo)
#
# ## Resources
# * [PDF slides](slides.pdf)
# * [slides src](latex/main.tex) Latex files and image resources used in the presentation (useful for PR on slide typos and such)
#
| community/awards/teach_me_quantum_2018/TeachMeQ/Week_1-Quantum_Tools/README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Geometry
# using the RealGeometry class
# +
import os
import sys
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
sys.path.append('..')
# %config InlineBackend.print_figure_kwargs={'bbox_inches':None}
# %load_ext autoreload
# %autoreload 2
# -
from forcing import Forcing
from real_geometry import RealGeometry, glaciers
from ideal_geometry import IdealGeometry, FavierTest
# ## ISOMIP geometry
ds1 = xr.open_dataset('../../data/isomip/Ocean1_input_geom_v1.01.nc')
ds2 = xr.open_dataset('../../data/isomip/Ocean2_input_geom_v1.01.nc')
ds3 = xr.open_dataset('../../data/isomip/Ocean3_input_geom_v1.01.nc')
ds4 = xr.open_dataset('../../data/isomip/Ocean4_input_geom_v1.01.nc')
ds1
plt.figure(figsize=(12,5))
plt.axhline(0,c='grey',lw=.5)
ds1.bedrockTopography.sel(y=40000,method='nearest').plot(c='k')
for i, ds in enumerate([ds1,ds2,ds3.isel(t=0),ds3.isel(t=-1),ds4.isel(t=0),ds4.isel(t=-1)]):
name = ['Ocean1','Ocean2','Ocean3 start','Ocean3 end','Ocean4 start','Ocean4 end'][i]
c = f'C{i}'
ds_ = ds.sel(y=40000,method='nearest')
(ds_.upperSurface+i*20).plot(c=c, label=name)
(ds_.lowerSurface+i*20).plot(c=c)
plt.title('ice shelf centre lines')
plt.text(700000,-150,'different offsets\nfor visual clarity')
plt.ylabel('z [m]')
plt.legend()
ds1.lowerSurface.sel(y=40000,method='nearest').plot()
ds2.lowerSurface.sel(y=40000,method='nearest').plot()
plt.xlim((6.38e5,6.43e5))
ds1.bedrockTopography.sel(x=500000, method='nearest').plot()
ds1_ = ds1.sel(x=slice(4.16e5,6.42e5)).dropna('x')
ds2_ = ds2.sel(x=slice(4.16e5,6.42e5)).dropna('x')
ds1_
ds2_.lowerSurface.plot()
# +
# BUG FIX: the original cell contained a stray, syntactically invalid line
# (`i:`) and wrapped os.path.exists in try/except -- but os.path.exists
# returns a bool and does not raise for a missing file, so the except branch
# could never fire as intended. Replaced with plain boolean checks.
if os.path.exists('../text.nc'):
    print('ss')
if os.path.exists('../Plume.py'):
    print('should exist')
else:
    print('file does not exist')
# -
os.path.exists('../text.nc')
ds = IdealGeometry('Ocean1').create()
ds
ds.box.plot()
ds.disf.plot()
ds.draft.plot()
ds.dgrl.plot()
# ## idealized test geometries
f, ax = plt.subplots(6,3, figsize=(8,8), sharex=True, sharey=True, constrained_layout=True)
for i in range(3):
if i==2:
cbargs = {}
else:
cbargs = dict(add_colorbar=False)
ax[-1,i].set_xlabel('x [km]')
tg = IdealGeometry(f'test{i+1}').create()
tg = tg.assign_coords({'x':tg.x/1e3, 'y':tg.y/1e3})
tg.draft .plot(ax=ax[0,i], vmin=-1000, vmax=-500 , **cbargs)
tg.mask .plot(ax=ax[1,i] , **cbargs)
tg.alpha .plot(ax=ax[2,i], vmin= 0, vmax=0.006, **cbargs)
tg.dgrl .plot(ax=ax[3,i] , **cbargs)
tg.grl_adv.plot(ax=ax[4,i], vmin=-1000, vmax=-500 , **cbargs)
tg.box .plot(ax=ax[5,i] , **cbargs)
for j in range(5):
ax[j,0].set_ylabel('y [km]')
tg
ds = Forcing(tg).tanh(ztcl=-700,Tdeep=0)
f, ax = plt.subplots(1,2,figsize=(10,3),constrained_layout=True)
ax[0].plot(ds.Tz, ds.z)
ax[0].set_ylim((-1500,0))
ds.Ta.plot(ax=ax[1])
ds_ = Forcing(tg).constant()
f, ax = plt.subplots(1,2,figsize=(10,3),constrained_layout=True)
ax[0].plot(ds_.Tz, ds_.z)
ax[0].set_ylim((-1500,0))
ds_.Ta.plot(ax=ax[1])
# ## geometries and forcing scenarios of Favier et al. (2019)
# +
iceshelves = ['fris', 'totten', 'thwaites', 'test', 'test2']
f, ax = plt.subplots(3, len(iceshelves), figsize=(10,6), sharex=True, sharey=True)
for i, iceshelf in enumerate(iceshelves):
if i==len(iceshelves)-1:
cbarargs = {}
else:
cbarargs = dict(add_colorbar=False)
ds = FavierTest(iceshelf, 'warm0')
ds.draft.plot(ax=ax[0,i], vmin=-1000, vmax=0 , **cbarargs)
ds.Ta .plot(ax=ax[1,i], vmin= -1.2, vmax=1.2 , **cbarargs)
ds.Sa .plot(ax=ax[2,i], vmin= 34 , vmax=34.8, **cbarargs)
ax[0,i].set_title(iceshelf)
# -
# ## realistic geometries
glaciers
ds = xr.open_dataset('../../results/PICOP/Totten_n3_geometry.nc')
# +
da = (ds.draft.rolling(x=5).mean()+ds.draft.rolling(y=5).mean())/2
dx, dy = da.x[1]-da.x[0], da.y[1]-da.y[0]
dxdy = abs((da.y-da.y.shift(y=1))*(da.x-da.x.shift(x=1)))
ip1 = da.shift(x=-1)
im1 = da.shift(x= 1)
jp1 = da.shift(y=-1)
jm1 = da.shift(y= 1)
n1 = np.array([-2*dy*(ip1-im1), -2*dx*(jp1-jm1), 4*dxdy])
n1_norm = np.linalg.norm(n1, axis=0)
F = (ds.grl_adv.rolling(x=5).mean()+ds.grl_adv.rolling(x=5).mean())/2
grad = np.gradient(F, dx.values)
dFdx = xr.DataArray(data=grad[1], dims=da.dims, coords=da.coords)
dFdy = xr.DataArray(data=grad[0], dims=da.dims, coords=da.coords)
n2 = np.array([-dFdy, dFdx, xr.zeros_like(dFdx)])
n2_norm = np.linalg.norm(n2, axis=0)
alpha = abs(np.rad2deg(np.arccos((-dFdy*n1[0]+dFdx*n1[1])/n1_norm/n2_norm))-90)
beta = np.rad2deg(np.arccos(4*dxdy/n1_norm))
# -
# BUG FIX: the original cell executed `del alpha` *before* plotting it, so
# `alpha.plot(...)`, `alpha.median()`, and the later `(beta-alpha)` plot all
# raised NameError. The premature `del` is removed (alpha is still used below).
alpha.plot(vmax=10)
np.deg2rad(alpha.median())
np.gradient(np.array([1,2,3,np.nan,np.nan,4,5,6]))
beta.plot(vmax=10)
(beta-alpha).plot()
# +
ddx = da.copy()
ddy = da.copy()
ddx.data = np.gradient(da)[1]
ddy.data = np.gradient(da)[0]
fig, ax = plt.subplots(1,3, figsize=(12,5))
da.plot(ax=ax[0])
ddx.plot(ax=ax[1])
ddy.plot(ax=ax[2])
# -
plt.imshow(np.gradient(da)[0])
# ## plots
for glacier in ['Amery', 'Totten', 'MoscowUniversity', 'Dotson', 'Thwaites', 'PineIsland']:
RealGeometry(name=glacier).plot_PICO()
for glacier in ['Amery', 'MoscowUniversity', 'Dotson', 'Thwaites', 'Totten', 'PineIsland']:
RealGeometry(name=glacier).plot_PICOP()
| src/notebooks/Geometry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from glob import glob
import os
import torch
import SimpleITK as sitk
from SUMNet_bn import SUMNet
from torchvision import transforms
import torch.nn.functional as F
import cv2
from tqdm import tqdm_notebook as tq
def load_itk_image(filename):
    """Read an ITK image file and return (voxel array, origin, spacing).

    The origin and spacing tuples are reversed from ITK's (x, y, z) ordering
    so they match the (z, y, x) axis order of the returned numpy array.
    """
    image = sitk.ReadImage(filename)
    voxels = sitk.GetArrayFromImage(image)
    origin = np.array(image.GetOrigin()[::-1])
    spacing = np.array(image.GetSpacing()[::-1])
    return voxels, origin, spacing
seg_model_loadPath = '/home/siplab/rachana/rak/Results/SUMNet_new/Adam_1e-4_ep100_CE+Lov/'
netS = SUMNet(in_ch=1,out_ch=2)
netS.load_state_dict(torch.load(seg_model_loadPath+'sumnet_best.pt'))
netS = netS.cuda()
apply_norm = transforms.Normalize([-460.466],[444.421])
# +
cand_path = "/home/siplab/rachana/rak/dataset/candidates.csv"
b_sz = 8
df_node = pd.read_csv(cand_path)
subset = ['3']#,'5']
running_correct = 0
count = 0
orig_list = []
pred_list = []
for s in subset:
print('Subset:',s)
luna_subset_path = '/home/siplab/rachana/rak/dataset/subset'+str(s)+'/'
all_files = os.listdir(luna_subset_path)
mhd_files = []
for f in all_files:
if '.mhd' in f:
mhd_files.append(f)
count = 0
for m in tq(mhd_files):
mini_df = df_node[df_node["seriesuid"]==m[:-4]]
itk_img = sitk.ReadImage(luna_subset_path+m)
img_array = sitk.GetArrayFromImage(itk_img)
origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
spacing = np.array(itk_img.GetSpacing())
slice_list = []
if len(mini_df)>0:
for i in range(len(mini_df)):
fName = mini_df['seriesuid'].values[i]
z_coord = mini_df['coordZ'].values[i]
orig_class = mini_df['class'].values[i]
pred = 0
v_center =np.rint((z_coord-origin[2])/spacing[2])
img_slice = img_array[int(v_center)]
mid_mean = img_slice[100:400,100:400].mean()
img_slice[img_slice==img_slice.min()] = mid_mean
img_slice[img_slice==img_slice.max()] = mid_mean
img_slice_tensor = torch.from_numpy(img_slice).unsqueeze(0).float()
img_slice_norm = apply_norm(img_slice_tensor).unsqueeze(0)
out = F.softmax(netS(img_slice_norm.cuda()),dim=1)
out_np = np.asarray(out[0,1].squeeze(0).detach().cpu().numpy()*255,dtype=np.uint8)
ret, thresh = cv2.threshold(out_np,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
connectivity = 4
output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)
stats = output[2]
temp = stats[1:, cv2.CC_STAT_AREA]
if len(temp)>0:
largest_label = 1 + np.argmax(temp)
areas = stats[1:, cv2.CC_STAT_AREA]
max_area = np.max(areas)
if max_area>150:
pred = 1
if pred == orig_class:
running_correct += 1
pred_list.append(pred)
orig_list.append(orig_class)
count += 1
# -
print('Accuarcy:',(running_correct/count)*100)
from sklearn.metrics import confusion_matrix
cf = confusion_matrix(orig_list, pred_list)
tn, fp, fn, tp = cf.ravel()
cf
sensitivity = tp/(tp+fn)
print('Sensitivity:',sensitivity)
specificity = tn/(tn+fp)
print('Specificity:',specificity)
precision = tp/(tp+fp)
print('Precision:',precision)
| Plots/find_acc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# %load_ext autoreload
# %autoreload 2
# +
# %%time
"""
Creates a corpus from Wikipedia dump file.
Inspired by:
https://github.com/panyang/Wikipedia_Word2vec/blob/master/v1/process_wiki.py
"""
import sys
from gensim.corpora import WikiCorpus
def make_corpus(in_f, out_f):
    """Convert a Wikipedia XML dump file to a plain-text corpus.

    Parameters:
        in_f: path to the .xml.bz2 Wikipedia dump.
        out_f: path of the text file to write, one article per line.
    """
    wiki = WikiCorpus(in_f)
    # FIX: use a context manager so the output file is closed even if
    # iteration raises partway through, and pass an explicit encoding instead
    # of relying on the platform default. The original's
    # bytes(...).decode('utf-8') round-trip was a no-op and has been removed.
    with open(out_f, 'w', encoding='utf-8') as output:
        for i, text in enumerate(wiki.get_texts(), start=1):
            output.write(' '.join(text) + '\n')
            if i % 10000 == 0:
                print('Processed ' + str(i) + ' articles')
    print('Processing complete!')
wiki_file = '/home/jvdzwaan/Downloads/nlwiki-20190201-pages-articles.xml.bz2'
out_file = '/home/jvdzwaan/data/tmp/nlwiki'
make_corpus(wiki_file, out_file)
# +
# %%time
import spacy
from collections import Counter
from nlppln.utils import get_files
in_dir = '/home/jvdzwaan/data/nlwiki-text-1000'
for in_file in get_files(in_dir):
print(in_file)
nlp = spacy.load('nl')
wiki_file = '/home/jvdzwaan/Downloads/nlwiki-20190201-pages-articles.xml.bz2'
wiki = WikiCorpus(wiki_file)
i = 0
for text in wiki.get_texts():
#doc = nlp(text)
i += 1
if (i % 10000 == 0):
print('Processed ' + str(i) + ' articles')
| notebooks/nlwiki_to_text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit
# name: python394jvsc74a57bd063fd5069d213b44bf678585dea6b12cceca9941eaf7f819626cde1f2670de90d
# ---
# Reorder items with sort() or sorted()
# ------------------------------------
avengers = ['ironman', '<NAME>', 'thor', 'captian marvel', 'black panther',]
print(sorted(avengers))
print(avengers)
avengers.sort()
print(avengers)
numbers = [ 7, 3,9,5,6, ]
numbers.sort()
print(numbers)
# ## changing the sort order
#
numbers = [ 7, 3,9,5,6, ]
numbers.sort(reverse=True)
print(numbers)
| May21/lists/listsdemoextended.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Convolutional Dictionary Learning
# =================================
#
# This example demonstrates the use of [dictlrn.cbpdndl.ConvBPDNDictLearn](http://sporco.rtfd.org/en/latest/modules/sporco.dictlrn.cbpdndl.html#sporco.dictlrn.cbpdndl.ConvBPDNDictLearn) for learning a 3D convolutional dictionary from video data. The dictionary learning algorithm is based on the ADMM consensus dictionary update.
# +
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from scipy.ndimage import zoom
import imageio
from sporco.dictlrn import cbpdndl
from sporco import util
from sporco import signal
from sporco import plot
plot.config_notebook_plotting()
# -
# Construct 3D training array from video data
reader = imageio.get_reader('imageio:cockatoo.mp4')
frmlst = []
for i, frm in enumerate(reader):
if i >= 250:
frm = zoom(signal.rgb2gray(frm.astype(np.float32)/255.0), 0.25)
frmlst.append(frm[20:-20, 70:-70])
vid = np.stack(frmlst, axis=2)
# Highpass filter video frames.
npd = 16
fltlmbd = 10
vl, vh = signal.tikhonov_filter(vid, fltlmbd, npd)
# Construct initial dictionary.
np.random.seed(12345)
D0 = np.random.randn(5, 5, 3, 25)
# Set regularization parameter and options for dictionary learning solver.
lmbda = 0.1
opt = cbpdndl.ConvBPDNDictLearn.Options({'Verbose': True, 'MaxMainIter': 200,
'CBPDN': {'rho': 5e1*lmbda, 'AutoRho': {'Enabled': True}},
'CCMOD': {'rho': 1e2, 'AutoRho': {'Enabled': True}}},
dmethod='cns')
# Create solver object and solve.
d = cbpdndl.ConvBPDNDictLearn(D0, vh, lmbda, opt, dimK=0, dimN=3)
D1 = d.solve()
print("ConvBPDNDictLearn solve time: %.2fs" % d.timer.elapsed('solve'))
# Display initial and final dictionaries: central temporal slice
D1 = D1.squeeze()
fig = plot.figure(figsize=(14,7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0[...,2,:]), fig=fig, title='D0')
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1[...,2,:]), fig=fig, title='D1')
fig.show()
# Display initial and final dictionaries: central spatial vertical slice
D1 = D1.squeeze()
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0[2]), fig=fig, title='D0')
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1[2]), fig=fig, title='D1')
fig.show()
# Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
its = d.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.XPrRsdl, its.XDlRsdl, its.DPrRsdl, its.DDlRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['X Primal', 'X Dual', 'D Primal', 'D Dual'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(np.vstack((its.XRho, its.DRho)).T, xlbl='Iterations',
ylbl='Penalty Parameter', ptyp='semilogy',
lgnd=['$\\rho_X$', '$\\rho_D$'], fig=fig)
fig.show()
| cdl/cbpdndl_video.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/morcellinus/Pytorch_NLP/blob/main/Colab%20Output.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="MG-W2Y7sJ1M6"
# # NLTK 패키지
# + colab={"base_uri": "https://localhost:8080/"} id="fL2280YMKgs5" outputId="3fdf33e4-1bfb-4a62-d93e-60af5ed6570f"
import spacy
nlp = spacy.load('en')
text = "Mary, don't slap the green witch"
print([str(token) for token in nlp(text.lower())])
# + colab={"base_uri": "https://localhost:8080/"} id="f3-DffnrK21y" outputId="e9dd4af9-cf75-42a2-c836-7f9842ef60bf"
from nltk.tokenize import TweetTokenizer
tweet = u"Snow White and the Seven Degrees #MakeAMovieCold@midnight:-)" #해쉬태그와 유저태그를 어떻게 토큰화 할 것인가 > TweetTokenizer에 내장된 알고리즘이 있는듯 하다.
tokenizer = TweetTokenizer()
print(tokenizer.tokenize(tweet.lower()))
# + [markdown] id="gYweCEcJLa-w"
# # 유니그램, 바이그램, 트라이그램, ... , n-그램
# + colab={"base_uri": "https://localhost:8080/"} id="Q4Nody58L6H7" outputId="1942894c-9997-463e-878d-fea331a185a0"
def n_grams(text, n):
    """Return all n-grams of *text* as length-n slices.

    Works on any sliceable sequence: a token list yields lists of
    tokens, a string yields substrings.
    """
    grams = []
    for start in range(len(text) - n + 1):
        grams.append(text[start:start + n])
    return grams
cleaned = ['mary', ',', "n't", 'slap', 'green', 'witch', '.']
print(n_grams(cleaned, 3))
# + [markdown] id="ep4wPC8DMtko"
# # Lemmatizing and Stemming
# + colab={"base_uri": "https://localhost:8080/"} id="UtCpZe_3NK7z" outputId="16ade04e-b99c-409e-a67c-010004b8c801"
# Lemmatization: map each token to its base (dictionary) form.
# spaCy v3 removed the 'en' shortcut; use the full pipeline package name.
nlp = spacy.load('en_core_web_sm')
doc = nlp("He was running late")
for token in doc:
    print("{} ---> {}".format(token, token.lemma_))
# + [markdown] id="kPwtM0vWNeVP"
# # POS tagging
# + colab={"base_uri": "https://localhost:8080/"} id="APWyLsBNNxQk" outputId="a81c4496-13cf-4912-e5bb-9ca9483ddb98"
# Part-of-speech tagging: print the coarse POS tag of every token.
# spaCy v3 removed the 'en' shortcut; use the full pipeline package name.
nlp = spacy.load('en_core_web_sm')
doc = nlp("Mary slapped the green witch.")
for token in doc:
    print("{} ---> {}".format(token, token.pos_))
# + [markdown] id="u0QeidR3OCeG"
# # Chunking
# + colab={"base_uri": "https://localhost:8080/"} id="OLM02rB6P1mY" outputId="a4e2e439-7ac5-4269-cd7f-1be5189c71c2"
# Chunking: assign a label to each phrase spanning multiple tokens,
# e.g. NP (noun phrase), VP (verb phrase).
# spaCy v3 removed the 'en' shortcut; use the full pipeline package name.
nlp = spacy.load('en_core_web_sm')
doc = nlp("Mary slapped the green witch.")
for chunk in doc.noun_chunks:
    print("{} ---> {}".format(chunk, chunk.label_))
| Pytorch3_NLP_Basic/Colab Output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Labeling: Excess Return Over Median
#
# 
# _*Fig. 1:*_ Distribution of excess over median return for 22 stock tickers from period between Jan 2019 and May 2020.
#
# ## Abstract
#
# In this notebook, we demonstrate labeling financial returns data according to excess over median. Using cross-sectional data on returns of many different stocks, each observation is labeled according to whether (or how much) its return exceeds the median return. Correlations can then be found between features and the likelihood that a stock will outperform the market.
#
# ## Introduction
# This technique is used in the following paper:
# ["The benefits of tree-based models for stock selection"](https://link.springer.com/article/10.1057/jam.2012.17) by _Zhu et al._ (2012).
#
# In that paper, independent composite features are constructed as weighted averages of various parameters in fundamental and quantitative analysis, such as PE ratio, corporate cash flows, debt etc. The composite features are applied as parameters in linear regression or a decision tree to predict whether a stock will outperform the market median return.
#
#
# ## How it works
#
# A dataframe containing forward total stock returns is calculated from close prices. The median return of all companies at time $t$ in the dataframe is used to represent the market return, and excess returns are calculated by subtracting the median return from each stock's return over the time period $t$ \[Zhu et al. 2012\]. The numerical returns over median can then be used as is (for regression analysis), or can be relabeled simply to its sign (for classification analysis).
#
# At time $t$:
#
# $$P_t = \{p_{t,0}, p_{t,1}, ..., p_{t,n}\}$$
# $$m_t = median(P_t)$$
# $$L(P_t) = \{p_{t,0} - m_t, p_{t,1} - m_t, ...,p_{t,n} - m_t\}$$
#
# If categorical rather than numerical labels are desired:
#
# $$
# \begin{equation}
# \begin{split}
# L(p_{t,n}) = \begin{cases}
# -1 &\ \text{if} \ \ p_{t,n} - m_t < 0\\
# 0 &\ \text{if} \ \ p_{t,n} - m_t = 0\\
# 1 &\ \text{if} \ \ p_{t,n} - m_t > 0\\
# \end{cases}
# \end{split}
# \end{equation}
# $$
#
# ---
# ## Examples of use
# +
import numpy as np
import pandas as pd
import yfinance as yf
from mlfinlab.labeling import excess_over_median
import matplotlib.pyplot as plt
# +
# Load price data for 22 stocks
tickers = "AAPL MSFT COST PFE SYY F GE BABA AMD CCL ZM FB WFC JPM NVDA CVX AAL UBER C UA VZ NOK"
# Download daily price data for all 22 tickers; group_by="ticker" yields a
# (ticker, field) MultiIndex on the columns.
data = yf.download(tickers, start="2019-01-20", end="2020-05-25",
                   group_by="ticker")
# Keep only the adjusted-close field for every ticker.
# NOTE(review): newer yfinance versions auto-adjust by default and may not
# expose an 'Adj Close' column — verify against the installed version.
data = data.loc[:, (slice(None), 'Adj Close')]
# Flatten the MultiIndex so columns are plain ticker names.
data.columns = data.columns.droplevel(1)
data.head()
# -
# We find the excess return over median for all tickers in the time period, calculate the mean and standard deviation of returns, and plot the distribution.
# Numerical labels: per-date excess of each stock's return over the
# cross-sectional median return.
excess1 = excess_over_median(data)
excess1.head()
# We can visualize the distribution as a histogram.
# The last row is dropped before flattening — presumably NaN forward
# returns at the final date; verify against the mlfinlab implementation.
s2 = pd.Series(excess1.iloc[:-1, :].values.flatten())
ax2 = s2.plot.hist(bins=50)
ax2.set_xlim(-0.2,0.2)
ax2.set_xlabel('Return Over Median')
ax2.set_title('Distribution of Return Over Median for 22 Stocks')
plt.savefig('distribution_over_median.png')
# Instead of returning the numerical value of excess return over median, we can also simply return the sign. Using categorical rather than numerical labels alleviates problems that can arise due to extreme outlier returns [Zhu et al. 2012].
excess2 = excess_over_median(data, binary=True)
excess2.head()
# We can verify that the number of positive labels matches the number of negative labels. Note: for larger data sets, there is increased chance that some tickers will have the exact same return for a given time index. If that return is also equal to the median, the number of positive labels may not match exactly with the number of negatives, but should be very close.
excess2.stack().value_counts()
# ---
# ## Conclusion
# This notebook presents the method to label data according to excess return over median. This method can return either numerical or categorical labels for observations. Zhu et al. utilize these labels to predict monthly stock returns using linear regression and decision trees based on composite features as independent variables. In this process:
# - Forward rates of return for assets are calculated for the entire selection of stocks indexed by time bars.
# - At each time index, the median rate of return for all stocks is calculated. The median is subtracted from each stock's return to find the excess return over median.
# - If categorical labels are desired, the excess returns are converted to their signs.
#
# This method is useful for labelling data used for training regression models and decision trees. Zhu et al. found that decision trees were slightly better at predicting outperformers than linear regression.
# ## References
# 1. <NAME>., <NAME>. and <NAME>., 2012. The benefits of tree-based models for stock selection. Journal of Asset Management, [online] 13(6), pp.437-448. Available at: <https://link.springer.com/article/10.1057/jam.2012.17>.
| jupyter-notebooks/hudson-and-thames-quant/Labelling/Labels Excess Over Median/excess_over_median.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reference
#
# This example is taken from the book [DL with Python](https://www.manning.com/books/deep-learning-with-python) by <NAME>.
#
# All the notebooks from the book are available for free on [Github](https://github.com/fchollet/deep-learning-with-python-notebooks)
#
# If you like to run the example locally follow the instructions provided on [Keras website](https://keras.io/#installation)
#
# ---
import keras
keras.__version__
# # One-hot encoding of words or characters
#
# This notebook contains the first code sample found in Chapter 6, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
#
# ----
#
# One-hot encoding is the most common, most basic way to turn a token into a vector. You already saw it in action in our initial IMDB and
# Reuters examples from chapter 3 (done with words, in our case). It consists in associating a unique integer index to every word, then
# turning this integer index i into a binary vector of size N, the size of the vocabulary, that would be all-zeros except for the i-th
# entry, which would be 1.
#
# Of course, one-hot encoding can be done at the character level as well. To unambiguously drive home what one-hot encoding is and how to
# implement it, here are two toy examples of one-hot encoding: one for words, the other for characters.
#
#
# Word level one-hot encoding (toy example):
# +
import numpy as np
# Initial data: one entry per "sample" (here a sentence, though it could
# just as well be an entire document).
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
# First pass: assign every distinct word a unique index starting at 1
# (index 0 is deliberately never used).
token_index = {}
for sample in samples:
    # Tokenize via a plain whitespace split; real code would also strip
    # punctuation and special characters.
    for word in sample.split():
        token_index.setdefault(word, len(token_index) + 1)
print(str(token_index)+'\n')
# Second pass: one-hot encode, keeping only the first `max_length` words
# of each sample.
max_length = 10
# Shape: (samples, positions, vocabulary size + 1 unused zero slot).
results = np.zeros((len(samples), max_length, max(token_index.values()) + 1))
print(str(results.shape)+'\n')
for sample_idx, sample in enumerate(samples):
    for pos, word in enumerate(sample.split()[:max_length]):
        results[sample_idx, pos, token_index[word]] = 1.
print(results)
# -
# Character level one-hot encoding (toy example)
# +
import string
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
# All printable ASCII characters, mapped to indices 1..len(characters);
# index 0 is left unused.
characters = string.printable
token_index = {ch: rank for rank, ch in enumerate(characters, start=1)}
print(str(token_index)+'\n')
# One-hot encode the first `max_length` characters of each sample.
max_length = 50
results = np.zeros((len(samples), max_length, max(token_index.values()) + 1))
for sample_idx, sample in enumerate(samples):
    for pos, ch in enumerate(sample[:max_length]):
        results[sample_idx, pos, token_index[ch]] = 1.
print(results)
# -
# Note that Keras has built-in utilities for doing one-hot encoding text at the word level or character level, starting from raw text data.
# This is what you should actually be using, as it will take care of a number of important features, such as stripping special characters
# from strings, or only taking into the top N most common words in your dataset (a common restriction to avoid dealing with very large input
# vector spaces).
# Using Keras for word-level one-hot encoding:
# +
from keras.preprocessing.text import Tokenizer
# NOTE(review): keras.preprocessing.text is deprecated in modern TF/Keras;
# tf.keras.layers.TextVectorization is the current replacement — confirm
# against the installed Keras version.
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
# We create a tokenizer, configured to only take
# into account the top-1000 most common words
tokenizer = Tokenizer(num_words=1000)
# This builds the word index
tokenizer.fit_on_texts(samples)
# This turns strings into lists of integer indices.
sequences = tokenizer.texts_to_sequences(samples)
# You could also directly get the one-hot binary representations.
# Note that other vectorization modes than one-hot encoding are supported!
one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')
# This is how you can recover the word index that was computed
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
# -
#
# A variant of one-hot encoding is the so-called "one-hot hashing trick", which can be used when the number of unique tokens in your
# vocabulary is too large to handle explicitly. Instead of explicitly assigning an index to each word and keeping a reference of these
# indices in a dictionary, one may hash words into vectors of fixed size. This is typically done with a very lightweight hashing function.
# The main advantage of this method is that it does away with maintaining an explicit word index, which
# saves memory and allows online encoding of the data (starting to generate token vectors right away, before having seen all of the available
# data). The one drawback of this method is that it is susceptible to "hash collisions": two different words may end up with the same hash,
# and subsequently any machine learning model looking at these hashes won't be able to tell the difference between these words. The likelihood
# of hash collisions decreases when the dimensionality of the hashing space is much larger than the total number of unique tokens being hashed.
# Word-level one-hot encoding with hashing trick (toy example):
# +
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
# Each word is hashed into one of `dimensionality` slots instead of being
# given an explicit vocabulary index. With close to 1000 distinct words
# (or more) hash collisions become frequent and degrade the encoding.
dimensionality = 1000
max_length = 10
results = np.zeros((len(samples), max_length, dimensionality))
for sample_idx, sample in enumerate(samples):
    for pos, word in enumerate(sample.split()[:max_length]):
        # Map the word to a pseudo-random slot in [0, dimensionality).
        slot = abs(hash(word)) % dimensionality
        results[sample_idx, pos, slot] = 1.
# -
| samples/12-one-hot-encoding-of-words-or-characters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/projjal1/Machine-Learning-Sklearn/blob/main/Movie_Recommender_System.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9L_9-Zd3zJHu"
# Working on Movie Recommender System Task.
#
# Dataset taken from https://www.kaggle.com/rounakbanik/the-movies-dataset?select=movies_metadata.csv
# + id="t8eSH7C0zWrU"
#Loading the modules
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import difflib
# + id="PsI6gpgF1VLR"
#Set dataset path
DATASET_PATH="https://raw.githubusercontent.com/shubham1710/Movie-Recommendation-System/master/moviedata.csv"
# + id="z2KLrCvR2U00"
#Load dataset
movie = pd.read_csv(DATASET_PATH)
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="MyLxeQl12eQa" outputId="1c8d5e2a-3953-4a4b-da09-435e57701052"
#Let's look at few rows
movie.head(4)
# + colab={"base_uri": "https://localhost:8080/"} id="mCl4-6a621-7" outputId="a9302dd0-9983-4454-ac3d-75c2ab1b87f0"
#Lets look at null columns numbers
movie.isnull().sum()
# + id="VYzsGAPm2kNa"
#Fill Null Attribute Columns
# Replace NaN with empty strings so downstream string concatenation of
# these text features never fails on missing values.
features = ['keywords','cast','genres','director','tagline']
for feature in features:
  movie[feature] = movie[feature].fillna('')
# + colab={"base_uri": "https://localhost:8080/"} id="IKtnaOJM2zeb" outputId="0ea8c422-d69e-4c7a-c2c7-e9e1a5b01883"
#Let's now look at null columns number
movie.isnull().sum()
# + id="ztKBVwjc1gmO"
#Combination of features
def combine_features(row):
    """Join the text features used for similarity matching into one string.
    Expects a mapping (e.g. a dataframe row) with 'keywords', 'cast',
    'genres', 'director' and 'tagline' entries; NaNs are filled with ''
    upstream. Returns the space-joined string, or None (after printing the
    offending row) when a key is missing or a value is not a string.
    """
    try:
        # Fixed order keeps output identical to the original concatenation.
        return " ".join(row[key] for key in
                        ('keywords', 'cast', 'genres', 'director', 'tagline'))
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` so unrelated bugs still surface.
        print ("Error:", row)
movie["combined_features"] = movie.apply(combine_features,axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="oOdFRwRF3OqD" outputId="7c3f9f32-805c-46b0-8f43-7f0af6c1114f"
#Look at dataset
movie.head(4)
# + id="uM_Y-laj3Z-z"
#Creating API calls to extract features
def title_from_index(index):
    """Look up the movie title stored at the given dataframe index."""
    matching_rows = movie[movie.index == index]
    return matching_rows["title"].values[0]
def index_from_title(title):
    """Fuzzy-match *title* against the catalogue and return its 'index' value.

    Raises IndexError when no close match exists (same as the original).
    """
    catalogue = movie['title'].tolist()
    best_match = difflib.get_close_matches(title, catalogue, 1)[0]
    return movie[movie.title == best_match]["index"].values[0]
# + colab={"base_uri": "https://localhost:8080/"} id="VdbhHM3Y3ktz" outputId="f0fbaba9-f403-4092-97bc-d10af10a8ceb"
#Using CountVectorizer to find similarity measures
# Bag-of-words vectors over the combined feature text; cosine similarity
# between every pair of movies gives an (n_movies x n_movies) matrix.
cv = CountVectorizer()
count_matrix = cv.fit_transform(movie["combined_features"])
cosine_sim = cosine_similarity(count_matrix)
user_movie = input("Enter movie of your choice:\t")
movie_index = index_from_title(user_movie)
# Pair each movie's positional index with its similarity to the query movie,
# then sort most-similar first (position 0 is the query movie itself).
similar_movies = list(enumerate(cosine_sim[movie_index]))
similar_movies_sorted = sorted(similar_movies,key=lambda x:x[1],reverse=True)
i=0
print("\nOther movies you might be interested in:-\n")
for rec_movie in similar_movies_sorted:
  # Skip the first entry (the query movie), then print up to 50 titles.
  if(i!=0):
    print (i,") ",title_from_index(rec_movie[0]),sep="")
  i=i+1
  if i>50:
    break
| Movie_Recommender_System.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="MJtspJSwiosZ"
# ## Connecting Your PostgreSQL to a SQL Table and Performing Some Queries
#
# Exploring two SQL Table for Analytics;
#
#
# 1. mailing_list
# 2. products1
#
#
# + id="m6_9DSK8hVxi"
Server [localhost]:
Database [postgres]:
Port [5432]:
Username [postgres]:
Password for user postgres:
psql (13.1)
postgres=# \c
You are now connected to database "postgres" as user "postgres".
#Connect to our working DB- reispartech
postgres=# \c reispartech (our DB)
You are now connected to database "reispartech" as user "postgres".
#import our SQL Table (mailing_list) into PostgreSQL for Analytics
reispartech=# \i C:/Users/Reispar_Tolulade/Downloads/mailing_list.sql
CREATE TABLE
INSERT 0 1
INSERT 0 1
INSERT 0 1....
reispartech=#
reispartech=# \d
List of relations
Schema | Name | Type | Owner
--------+---------------------+----------+----------
#import our SQL Table (products1) into PostgreSQL for Analytics
reispartech=# \i C:/Users/Reispar_Tolulade/Downloads/products1.sql
CREATE TABLE
INSERT 0 1
INSERT 0 1...
reispartech=#
reispartech=# \d
List of relations
Schema | Name | Type | Owner
--------+---------------------+----------+----------
(6 rows)
# Let's explore our imported SQL Table columns and Data Characters with constraints
reispartech=# \d products1
Table "public.products1"
Column | Type | Collation | Nullable | Default
--------------------+-----------------------+-----------+----------+---------------------------------------
id | bigint | | not null | nextval('products1_id_seq'::regclass)
product | character varying(50) | | not null |
manufacturing_cost | numeric(6,2) | | |
created_at | date | | |
Indexes:
"products1_pkey" PRIMARY KEY, btree (id)
#Using the SELECT command, Let's Query our SQL Table (products1)
reispartech=# SELECT*FROM products1;
id | product | manufacturing_cost | created_at
-----+------------------------------------+--------------------+------------
1 | Milk - Condensed | 266.83 | 2019-12-03
2 | Pasta - Shells, Medium, Dry | 841.98 | 2018-09-16
3 | Chips Potato Swt Chilli Sour | 930.60 | 2020-07-15
4 | Bread - Bistro Sour | 697.19 | 2018-05-17
5 | Wine - Valpolicella Masi | 981.62 | 2020-12-11
6 | Beans - Turtle, Black, Dry | 526.63 | 2019-01-10
7 | Sage Ground Wiberg | 733.09 | 2019-09-16
8 | Pepper - Roasted Red | 492.17 | 2021-01-22
9 | Bread Crumbs - Japanese Style | 149.15 | 2018-12-28
10 | Tortillas - Flour, 10 | 180.17 | 2019-07-23
11 | Wine - Rubyport | 415.51 | 2018-10-21
12 | Capon - Breast, Double, Wing On | 578.45 | 2019-02-20
13 | Otomegusa <NAME> | 594.92 | 2020-05-05
14 | Crab Brie In Phyllo | 232.80 | 2020-05-19
15 | Skirt - 24 Foot | 712.05 | 2018-12-22
16 | Wine - Champagne Brut Veuve | 214.41 | 2021-01-09
17 | Muffin Mix - Chocolate Chip | 619.62 | 2019-12-25
18 | Wine - Ruffino Chianti Classico | 387.55 | 2019-06-02
19 | Bagelers | 993.76 | 2018-06-19
20 | Chicken - Whole | 520.40 | 2020-03-29
21 | Scallops - U - 10 | 943.74 | 2019-10-30
22 | Wine - Baron De Rothschild | 658.67 | 2018-08-21
23 | Cake - Cake Sheet Macaroon | 770.26 | 2019-12-13
24 | Red Currants | 907.84 | 2021-05-07
25 | Egg - Salad Premix | 276.76 | 2019-12-30
26 | Beans - Soya Bean | 804.32 | 2020-05-12
27 | Wine - Cotes Du Rhone | 438.43 | 2019-02-22
28 | Foie Gras | 126.70 | 2018-07-01
29 | Cheese - Swiss Sliced | 925.18 | 2018-08-13
30 | Wine - Pinot Noir Pond Haddock | 820.50 | 2018-12-10
31 | Scallops - U - 10 | 494.50 | 2019-03-31
32 | Wine - Black Tower Qr | 696.51 | 2018-07-29
33 | Sugar - Brown | 418.89 | 2019-08-28
34 | Ice Cream - Fudge Bars | 999.12 | 2020-09-04
35 | Sauce - Soya, Light | 993.04 | 2019-04-19
36 | Oil - Canola | 709.64 | 2021-05-03
37 | Soup - Base Broth Chix | 161.24 | 2019-08-18
38 | Flour - Strong Pizza | 766.65 | 2021-02-27
39 | Island Oasis - Cappucino Mix | 548.95 | 2019-06-29
40 | Bagel - Everything Presliced | 284.41 | 2020-12-07
-- More --
# Narrow down on the unique locations in our SQL mailing_list Table column
reispartech=# SELECT distinct(location) from mailing_list;
location
------------------------
Burkina Faso
Bangladesh
Indonesia
Italy
Venezuela
Uruguay
Czech Republic
Sweden
United Kingdom
Uganda
Jordan
Dominican Republic
Cambodia
Germany
Macedonia
Papua New Guinea
Canada
Sri Lanka
Uzbekistan
Finland
Portugal
Colombia
Albania
Saudi Arabia
Ukraine
Argentina
-- More --
## Identify Customer names with small letter 'y' as last Letter in First_name column
reispartech=# SELECT * from mailing_list where first_name like '%y'; # (LIKE is case-sensitive: doesn't match names ending in a capital 'Y')
id | first_name | location | email | company | created_at
-----+------------+---------------+----------------------------------+---------------+------------
14 | Camey | Kenya | <EMAIL> | Linkbuzz | 2019-01-31
34 | Bendicty | United States | | | 2018-12-19
48 | Tabby | Russia | <EMAIL> | Edgewire | 2020-12-21
51 | Bogey | Greece | <EMAIL> | Topicware | 2019-09-15
60 | Alley | <NAME> | | Katz | 2021-02-02
66 | Audrey | <NAME> | <EMAIL> | Rooxo | 2019-12-19
67 | Lindsay | Russia | <EMAIL> | Rhybox | 2021-03-04
73 | Geoffry | Honduras | <EMAIL> | Katz | 2019-08-11
77 | Gradey | Russia | <EMAIL> | Livefish | 2020-07-16
85 | Early | Indonesia | | Wikizz | 2020-04-12
100 | Cathy | United States | <EMAIL> | Zoonder | 2019-04-19
108 | Lyndy | China | <EMAIL> | Minyx | 2019-02-24
130 | Tanny | Brazil | | Aimbo | 2020-04-29
135 | Ivy | Russia | <EMAIL> | Thoughtstorm | 2019-11-22
165 | Rhody | France | <EMAIL> | Skippad | 2020-08-21
171 | Kippy | Colombia | <EMAIL> | Dynabox | 2018-08-28
177 | Gabby | Japan | <EMAIL> | Wikibox | 2018-07-29
178 | Audy | South Africa | <EMAIL> | | 2018-12-04
193 | Cally | Zambia | <EMAIL> | | 2018-05-27
196 | Kayley | France | <EMAIL> | Brightbean | 2019-10-19
205 | Ginny | Germany | <EMAIL> | | 2019-04-20
214 | Ky | Bhutan | <EMAIL> | Jaloo | 2020-07-21
227 | Cordy | Moldova | <EMAIL> | Gabvine | 2019-05-27
234 | Amity | Russia | | Demivee | 2020-06-18
236 | Sly | Reunion | <EMAIL> | Gabtype | 2020-01-21
248 | Howey | China | <EMAIL> | Flashset | 2019-06-02
## Identify customer names where exactly three (3) characters precede the lowercase letter 'y' in the first_name column
reispartech=# SELECT * from mailing_list where first_name like '___y';
id | first_name | location | email | company | created_at
-----+------------+--------------+----------------------------+-----------+------------
178 | Audy | South Africa | <EMAIL> | | 2018-12-04
364 | Judy | Afghanistan | <EMAIL> | Reallinks | 2021-02-14
424 | Addy | Venezuela | <EMAIL> | | 2019-08-21
425 | Ikey | Japan | <EMAIL> | Gigabox | 2019-05-11
(4 rows)
## Identify customer names where the lowercase letter 'y' appears anywhere in the first_name column (LIKE is case-sensitive; use ILIKE to ignore case)
reispartech=# SELECT * from mailing_list where first_name like '%y%'; (any where the small letter y appears in the customer name)
id | first_name | location | email | company | created_at
-----+------------+---------------------+------------------------------------+---------------+------------
14 | Camey | Kenya | <EMAIL> | Linkbuzz | 2019-01-31
30 | Raymund | Mauritius | <EMAIL> | Thoughtworks | 2020-04-30
34 | Bendicty | United States | | | 2018-12-19
36 | Lloyd | Czech Republic | | | 2018-06-19
46 | Lyell | Indonesia | <EMAIL> | Edgetag | 2018-08-12
48 | Tabby | Russia | <EMAIL> | Edgewire | 2020-12-21
51 | Bogey | Greece | <EMAIL> | Topicware | 2019-09-15
60 | Alley | <NAME> | | Katz | 2021-02-02
66 | Audrey | <NAME> | <EMAIL> | Rooxo | 2019-12-19
67 | Lindsay | Russia | <EMAIL> | Rhybox | 2021-03-04
73 | Geoffry | Honduras | <EMAIL> | Katz | 2019-08-11
77 | Gradey | Russia | <EMAIL> | Livefish | 2020-07-16
79 | Royall | Japan | <EMAIL> | Quatz | 2020-08-14
85 | Early | Indonesia | | Wikizz | 2020-04-12
100 | Cathy | United States | <EMAIL> | Zoonder | 2019-04-19
105 | Lynelle | Poland | <EMAIL> | Wordpedia | 2020-04-22
108 | Lyndy | China | <EMAIL> | Minyx | 2019-02-24
130 | Tanny | Brazil | | Aimbo | 2020-04-29
131 | Royall | Sweden | <EMAIL> | Latz | 2019-07-10
135 | Ivy | Russia | <EMAIL> | Thoughtstorm | 2019-11-22
139 | Elvyn | Bulgaria | <EMAIL> | Skyvu | 2019-03-24
153 | Sayre | France | <EMAIL> | Fiveclub | 2019-01-18
165 | Rhody | France | <EMAIL> | Skippad | 2020-08-21
171 | Kippy | Colombia | <EMAIL> | Dynabox | 2018-08-28
177 | Gabby | Japan | <EMAIL> | Wikibox | 2018-07-29
178 | Audy | South Africa | <EMAIL> | | 2018-12-04
-- More --
# Ignore case sensitivity for the letter y using ILIKE
reispartech=# SELECT * from mailing_list where first_name ilike '%y';
id | first_name | location | email | company | created_at
-----+------------+---------------+----------------------------------+---------------+------------
14 | Camey | Kenya | <EMAIL> | Linkbuzz | 2019-01-31
34 | Bendicty | United States | | | 2018-12-19
48 | Tabby | Russia | <EMAIL> | Edgewire | 2020-12-21
51 | Bogey | Greece | <EMAIL> | Topicware | 2019-09-15
60 | Alley | <NAME> | | Katz | 2021-02-02
66 | Audrey | <NAME> | <EMAIL> | Rooxo | 2019-12-19
67 | Lindsay | Russia | <EMAIL> | Rhybox | 2021-03-04
73 | Geoffry | Honduras | <EMAIL> | Katz | 2019-08-11
77 | Gradey | Russia | <EMAIL> | Livefish | 2020-07-16
85 | Early | Indonesia | | Wikizz | 2020-04-12
100 | Cathy | United States | <EMAIL> | Zoonder | 2019-04-19
108 | Lyndy | China | <EMAIL> | Minyx | 2019-02-24
130 | Tanny | Brazil | | Aimbo | 2020-04-29
135 | Ivy | Russia | <EMAIL> | Thoughtstorm | 2019-11-22
165 | Rhody | France | <EMAIL> | Skippad | 2020-08-21
171 | Kippy | Colombia | <EMAIL> | Dynabox | 2018-08-28
177 | Gabby | Japan | <EMAIL> | Wikibox | 2018-07-29
178 | Audy | South Africa | <EMAIL> | | 2018-12-04
193 | Cally | Zambia | <EMAIL> | | 2018-05-27
196 | Kayley | France | <EMAIL> | Brightbean | 2019-10-19
205 | Ginny | Germany | <EMAIL> | | 2019-04-20
214 | Ky | Bhutan | <EMAIL> | Jaloo | 2020-07-21
227 | Cordy | Moldova | <EMAIL> | Gabvine | 2019-05-27
234 | Amity | Russia | | Demivee | 2020-06-18
236 | Sly | Reunion | <EMAIL> | Gabtype | 2020-01-21
248 | Howey | China | <EMAIL> | Flashset | 2019-06-02
253 | Chrissy | Iran | <EMAIL> | Voomm | 2020-02-14
267 | Audrey | Bolivia | | Brainverse | 2018-05-23
278 | Sonny | Yemen | <EMAIL> | | 2020-10-21
279 | Torey | China | <EMAIL> | Nlounge | 2020-06-28
reispartech=# SELECT * from mailing_list where location ilike 'c___'; (3 underscores: 4-character locations starting with c or C)
id | first_name | location | email | company | created_at
-----+------------+----------+---------------------+---------+------------
245 | Meghann | Cuba | | | 2021-01-02
397 | Parsifal | Cuba | <EMAIL> | Demizz | 2020-11-22
#divide rows into groups in the mailing_list TABLE -using Group By Clause
reispartech=# SELECT location from mailing_list group by location;
location
------------------------
Burkina Faso
Bangladesh
Indonesia
Italy
Venezuela
Uruguay
Czech Republic
Sweden
United Kingdom
Uganda
Jordan
Dominican Republic
Cambodia
Germany
Macedonia
Papua New Guinea
Canada
Sri Lanka
Uzbekistan
Finland
Portugal
Colombia
Albania
Saudi Arabia
Ukraine
Argentina
Cuba
Latvia
North Korea
Azerbaijan
-- More --
reispartech=# SELECT first_name, location from mailing_list group by first_name, location;
first_name | location
--------------+------------------------
Astrid | Madagascar
Dania | Indonesia
Ladonna | Philippines
Tabina | France
Marlon | Indonesia
Amity | Russia
Kathlin | Russia
Lockwood | Norway
Audrey | Bolivia
Winona | Indonesia
Dru | Russia
Brendan | Venezuela
Camey | Kenya
Danette | Nigeria
Bartholemy | Cambodia
Nestor | Philippines
Brinna | Portugal
Denys | Slovenia
Humfried | Indonesia
Chandra | Russia
Wilhelm | Japan
Ranique | China
Haleigh | Palestinian Territory
Sayre | France
Zelig | China
Anatole | Poland
Adelaide | China
Duane | China
Boone | Indonesia
Pablo | Russia
reispartech=# SELECT distinct(location) from mailing_list group by location;
location
------------------------
Burkina Faso
Indonesia
Bangladesh
Italy
Venezuela
Uruguay
Czech Republic
Sweden
United Kingdom
Uganda
Jordan
Dominican Republic
Germany
Cambodia
Macedonia
Papua New Guinea
Sri Lanka
Canada
Uzbekistan
Portugal
Finland
Colombia
Albania
Ukraine
Saudi Arabia
Argentina
Cuba
Latvia
North Korea
Slovenia
Azerbaijan
Greece
Egypt
Afghanistan
-- More --
#Using the aggregate function COUNT, count the number of mailing-list rows per location
reispartech=# SELECT location, count(*) from mailing_list group by location;
location | count
------------------------+-------
Burkina Faso | 1
Bangladesh | 1
Indonesia | 50
Italy | 1
Venezuela | 4
Uruguay | 1
Czech Republic | 6
Sweden | 20
United Kingdom | 1
Uganda | 2
Jordan | 1
Dominican Republic | 3
Cambodia | 1
Germany | 1
Macedonia | 2
Papua New Guinea | 1
Canada | 8
Sri Lanka | 4
Uzbekistan | 1
Finland | 2
Portugal | 11
Colombia | 10
Albania | 7
Saudi Arabia | 1
Ukraine | 4
Argentina | 3
Cuba | 2
Latvia | 2
North Korea | 1
Azerbaijan | 1
-- More --
#Identify Locations that appear above four (4) times
# HAVING - used after GROUP BY and before ORDER BY
reispartech=# SELECT location, count(first_name) from mailing_list
reispartech-# group by location
reispartech-# having count(*) > 4
reispartech-# order by location;
location | count
----------------+-------
Albania | 7
Brazil | 13
Canada | 8
China | 84
Colombia | 10
Czech Republic | 6
France | 15
Greece | 9
Indonesia | 50
Japan | 10
Mexico | 6
Peru | 7
Philippines | 25
Poland | 15
Portugal | 11
Russia | 38
Slovenia | 7
Sweden | 20
Thailand | 7
United States | 14
Vietnam | 6
(21 rows)
#Identify countries with Letter y in it
reispartech=# SELECT location from mailing_list group by location having location like '%y';
location
-----------------------
Palestinian Territory
Italy
Germany
Uruguay
Norway
(5 rows)
#In the products1 SQL Table, what is the least manufacturing cost?
reispartech=# SELECT MIN(manufacturing_cost) from products1;
min
--------
108.71
(1 row)
#In the products1 SQL Table, what is the highest manufacturing cost?
reispartech=# SELECT MAX(manufacturing_cost) from products1;
max
--------
999.12
(1 row)
reispartech=# SELECT product, MAX(manufacturing_cost) from products1 group by product;
product | max
------------------------------------+--------
Cup - 3.5oz, Foam | 384.47
Pails With Lids | 957.43
Knife Plastic - White | 305.38
Chicken - Soup Base | 236.16
Potatoes - Fingerling 4 Oz | 585.45
Wine - Touraine Azay - Le - Rideau | 521.15
Island Oasis - Cappucino Mix | 548.95
Vinegar - Raspberry | 970.28
Potatoes - Mini Red | 834.36
Cilantro / Coriander - Fresh | 614.64
Lamb - Leg, Diced | 297.88
Pork - Smoked Back Bacon | 151.29
Cookies Almond Hazelnut | 986.93
Oil - Peanut | 946.51
Tea - Herbal Orange Spice | 682.43
Beef Tenderloin Aaa | 875.89
Pasta - Rotini, Colour, Dry | 455.69
Veal - Inside | 634.96
Nut - Almond, Blanched, Whole | 634.55
Yoplait - Strawbrasp Peac | 588.61
Sauce - Oyster | 529.73
Bread - Kimel Stick Poly | 588.82
Nantucket Apple Juice | 916.16
Cheese - Fontina | 912.22
Tomatoes | 926.60
Cheese - Mozzarella, Shredded | 870.96
Garbage Bag - Clear | 990.83
Ham - Cooked Bayonne Tinned | 284.96
Plaintain | 182.26
Salmon - Fillets | 555.02
#Identify products having a manufacturing cost greater than 200
reispartech=# SELECT product, MAX(manufacturing_cost) from products1 group by product, manufacturing_cost having manufacturing_cost>200;
product | max
------------------------------------+--------
Seabream Whole Farmed | 623.37
Energy Drink Red Bull | 705.94
Beef - Tongue, Cooked | 797.14
Milk - Condensed | 266.83
Godiva White Chocolate | 946.23
Wine - Rubyport | 415.51
Sage Ground Wiberg | 733.09
Brownies - Two Bite, Chocolate | 583.00
Milk - 2% | 657.64
Tuna - Salad Premix | 200.71
Rabbit - Saddles | 449.76
Puree - Kiwi | 868.44
Pasta - Elbows, Macaroni, Dry | 346.59
Wine - Chianti Classico Riserva | 902.17
Pail - 4l White, With Handle | 531.52
Rum - Cream, Amarula | 922.64
Milk - Chocolate 250 Ml | 251.14
Onions Granulated | 468.02
Wine - Bar<NAME> | 658.67
Bok Choy - Baby | 744.17
Ice Cream - Turtles Stick Bar | 446.37
Rosemary - Primerba, Paste | 457.09
Bay Leaf Ground | 496.42
Bok Choy - Baby | 726.23
Pepper - Roasted Red | 492.17
Pork - Bacon, Sliced | 568.60
Sugar - Brown, Individual | 594.08
Onions - Pearl | 454.27
Bread - Bistro White | 669.83
Propel Sport Drink | 709.14
-- More --
#Give a 10% discount on your manufacturing prices
#Let's slash manufacturing_cost price by 10%
reispartech=# \d products1; #looking again at the table columns
Table "public.products1"
Column | Type | Collation | Nullable | Default
--------------------+-----------------------+-----------+----------+---------------------------------------
id | bigint | | not null | nextval('products1_id_seq'::regclass)
product | character varying(50) | | not null |
manufacturing_cost | numeric(6,2) | | |
created_at | date | | |
Indexes:
"products1_pkey" PRIMARY KEY, btree (id)
reispartech=# SELECT * from products1; #exploring the product and their initial cost
id | product | manufacturing_cost | created_at
-----+------------------------------------+--------------------+------------
1 | Milk - Condensed | 266.83 | 2019-12-03
2 | Pasta - Shells, Medium, Dry | 841.98 | 2018-09-16
3 | Chips Potato Swt Chilli Sour | 930.60 | 2020-07-15
4 | Bread - Bistro Sour | 697.19 | 2018-05-17
5 | Wine - Valpolicella Masi | 981.62 | 2020-12-11
6 | Beans - Turtle, Black, Dry | 526.63 | 2019-01-10
7 | Sage Ground Wiberg | 733.09 | 2019-09-16
8 | Pepper - Roasted Red | 492.17 | 2021-01-22
9 | Bread Crumbs - Japanese Style | 149.15 | 2018-12-28
10 | Tortillas - Flour, 10 | 180.17 | 2019-07-23
11 | Wine - Rubyport | 415.51 | 2018-10-21
12 | Capon - Breast, Double, Wing On | 578.45 | 2019-02-20
13 | Otomegusa <NAME> | 594.92 | 2020-05-05
14 | Crab Brie In Phyllo | 232.80 | 2020-05-19
15 | Skirt - 24 Foot | 712.05 | 2018-12-22
16 | Wine - Champagne Brut Veuve | 214.41 | 2021-01-09
17 | Muffin Mix - Chocolate Chip | 619.62 | 2019-12-25
18 | Wine - Ruffino Chianti Classico | 387.55 | 2019-06-02
19 | Bagelers | 993.76 | 2018-06-19
20 | Chicken - Whole | 520.40 | 2020-03-29
21 | Scallops - U - 10 | 943.74 | 2019-10-30
22 | Wine - Baron De Rothschild | 658.67 | 2018-08-21
23 | Cake - Cake Sheet Macaroon | 770.26 | 2019-12-13
24 | Red Currants | 907.84 | 2021-05-07
25 | Egg - Salad Premix | 276.76 | 2019-12-30
26 | Beans - Soya Bean | 804.32 | 2020-05-12
27 | Wine - Cotes Du Rhone | 438.43 | 2019-02-22
28 | Foie Gras | 126.70 | 2018-07-01
29 | Cheese - Swiss Sliced | 925.18 | 2018-08-13
30 | Wine - Pinot Noir Pond Haddock | 820.50 | 2018-12-10
-- More --
#Compute the 10% discount amount for each manufacturing cost
reispartech=# SELECT id, product, manufacturing_cost, manufacturing_cost*.10 FROM products1;
id | product | manufacturing_cost | ?column?
-----+------------------------------------+--------------------+----------
1 | Milk - Condensed | 266.83 | 26.6830
2 | Pasta - Shells, Medium, Dry | 841.98 | 84.1980
3 | Chips Potato Swt Chilli Sour | 930.60 | 93.0600
4 | Bread - Bistro Sour | 697.19 | 69.7190
5 | Wine - Valpolicella Masi | 981.62 | 98.1620
6 | Beans - Turtle, Black, Dry | 526.63 | 52.6630
7 | Sage Ground Wiberg | 733.09 | 73.3090
8 | Pepper - Roasted Red | 492.17 | 49.2170
9 | Bread Crumbs - Japanese Style | 149.15 | 14.9150
10 | Tortillas - Flour, 10 | 180.17 | 18.0170
11 | Wine - Rubyport | 415.51 | 41.5510
12 | Capon - Breast, Double, Wing On | 578.45 | 57.8450
13 | Otomegusa Dashi Konbu | 594.92 | 59.4920
14 | Crab Brie In Phyllo | 232.80 | 23.2800
15 | Skirt - 24 Foot | 712.05 | 71.2050
16 | Wine - Champagne Brut Veuve | 214.41 | 21.4410
17 | Muffin Mix - Chocolate Chip | 619.62 | 61.9620
18 | Wine - Ruffino Chianti Classico | 387.55 | 38.7550
19 | Bagelers | 993.76 | 99.3760
20 | Chicken - Whole | 520.40 | 52.0400
21 | Scallops - U - 10 | 943.74 | 94.3740
22 | Wine - Baron De Rothschild | 658.67 | 65.8670
23 | Cake - Cake Sheet Macaroon | 770.26 | 77.0260
24 | Red Currants | 907.84 | 90.7840
25 | Egg - Salad Premix | 276.76 | 27.6760
26 | Beans - Soya Bean | 804.32 | 80.4320
27 | Wine - Cotes Du Rhone | 438.43 | 43.8430
28 | Foie Gras | 126.70 | 12.6700
29 | Cheese - Swiss Sliced | 925.18 | 92.5180
30 | Wine - Pinot Noir Pond Haddock | 820.50 | 82.0500
-- More --
#I'm going to use an Alias here (to set a header name for the new column and new price after discount)
reispartech=# SELECT id, product, manufacturing_cost, manufacturing_cost*.10 AS discount, (manufacturing_cost -
reispartech(# manufacturing_cost*.10) AS New_price from products1;
id | product | manufacturing_cost | discount | new_price
-----+------------------------------------+--------------------+----------+-----------
1 | Milk - Condensed | 266.83 | 26.6830 | 240.1470
2 | Pasta - Shells, Medium, Dry | 841.98 | 84.1980 | 757.7820
3 | Chips Potato Swt Chilli Sour | 930.60 | 93.0600 | 837.5400
4 | Bread - Bistro Sour | 697.19 | 69.7190 | 627.4710
5 | Wine - Valpolicella Masi | 981.62 | 98.1620 | 883.4580
6 | Beans - Turtle, Black, Dry | 526.63 | 52.6630 | 473.9670
7 | Sage Ground Wiberg | 733.09 | 73.3090 | 659.7810
8 | Pepper - Roasted Red | 492.17 | 49.2170 | 442.9530
9 | Bread Crumbs - Japanese Style | 149.15 | 14.9150 | 134.2350
10 | Tortillas - Flour, 10 | 180.17 | 18.0170 | 162.1530
11 | Wine - Rubyport | 415.51 | 41.5510 | 373.9590
12 | Capon - Breast, Double, Wing On | 578.45 | 57.8450 | 520.6050
13 | <NAME> | 594.92 | 59.4920 | 535.4280
14 | Crab Brie In Phyllo | 232.80 | 23.2800 | 209.5200
15 | Skirt - 24 Foot | 712.05 | 71.2050 | 640.8450
16 | Wine - Champagne Brut Veuve | 214.41 | 21.4410 | 192.9690
17 | Muffin Mix - Chocolate Chip | 619.62 | 61.9620 | 557.6580
18 | Wine - Ruffino Chianti Classico | 387.55 | 38.7550 | 348.7950
19 | Bagelers | 993.76 | 99.3760 | 894.3840
20 | Chicken - Whole | 520.40 | 52.0400 | 468.3600
21 | Scallops - U - 10 | 943.74 | 94.3740 | 849.3660
22 | Wine - Baron De Rothschild | 658.67 | 65.8670 | 592.8030
23 | Cake - Cake Sheet Macaroon | 770.26 | 77.0260 | 693.2340
24 | Red Currants | 907.84 | 90.7840 | 817.0560
25 | Egg - Salad Premix | 276.76 | 27.6760 | 249.0840
26 | Beans - Soya Bean | 804.32 | 80.4320 | 723.8880
27 | Wine - Cotes Du Rhone | 438.43 | 43.8430 | 394.5870
-- More --
#Save this into a temporary DB
#let's push this to a temp table
reispartech=# SELECT id, product, manufacturing_cost, manufacturing_cost*.10 AS discount, (manufacturing_cost -
reispartech(# manufacturing_cost*.10) AS New_price INTO temp_price from products1;
SELECT 500
reispartech=# \d
List of relations
Schema | Name | Type | Owner
--------+---------------------+----------+----------
public | temp_price | table | postgres
(9 rows)
reispartech=# drop table temp_price;
DROP TABLE
#Show the table without the old manufacturing cost, keep the new discounted price, and save it into a temporary table
#let's remove the initial manufacturing_cost
reispartech=# SELECT id, product, manufacturing_cost*.10 AS discount, (manufacturing_cost -
reispartech(# manufacturing_cost*.10) AS New_price INTO temp_price from products1;
SELECT 500
reispartech=# select * from temp_price; #show new table
id | product | discount | new_price
-----+------------------------------------+----------+-----------
1 | Milk - Condensed | 26.6830 | 240.1470
2 | Pasta - Shells, Medium, Dry | 84.1980 | 757.7820
3 | Chips Potato Swt Chilli Sour | 93.0600 | 837.5400
4 | Bread - Bistro Sour | 69.7190 | 627.4710
5 | Wine - Valpolicella Masi | 98.1620 | 883.4580
6 | Beans - Turtle, Black, Dry | 52.6630 | 473.9670
7 | Sage Ground Wiberg | 73.3090 | 659.7810
8 | Pepper - Roasted Red | 49.2170 | 442.9530
9 | Bread Crumbs - Japanese Style | 14.9150 | 134.2350
10 | Tortillas - Flour, 10 | 18.0170 | 162.1530
11 | Wine - Rubyport | 41.5510 | 373.9590
12 | Capon - Breast, Double, Wing On | 57.8450 | 520.6050
13 | Otomegusa Dashi Konbu | 59.4920 | 535.4280
14 | Crab Brie In Phyllo | 23.2800 | 209.5200
15 | Skirt - 24 Foot | 71.2050 | 640.8450
16 | Wine - Champagne Brut Veuve | 21.4410 | 192.9690
17 | Muffin Mix - Chocolate Chip | 61.9620 | 557.6580
18 | Wine - Ruffino Chianti Classico | 38.7550 | 348.7950
19 | Bagelers | 99.3760 | 894.3840
20 | Chicken - Whole | 52.0400 | 468.3600
21 | Scallops - U - 10 | 94.3740 | 849.3660
22 | Wine - Baron De Rothschild | 65.8670 | 592.8030
23 | Cake - Cake Sheet Macaroon | 77.0260 | 693.2340
24 | Red Currants | 90.7840 | 817.0560
25 | Egg - Salad Premix | 27.6760 | 249.0840
26 | Beans - Soya Bean | 80.4320 | 723.8880
27 | Wine - Cotes Du Rhone | 43.8430 | 394.5870
-- More --
# + id="K5HB2lUo4sGy"
| PosgreSQL_Products_mailingList.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6aqlk87D8Gwx" colab_type="text"
# ## 1. Outliers percentile
# + id="y7z-HyVc8Hot" colab_type="code" colab={}
import pandas as pd
# + id="lKK1WDlu8KTP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="41f19735-7001-4aa0-a4b2-35992afc719f"
# Load a heights dataset and flag/remove outliers using percentile cutoffs.
df = pd.read_csv("https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/1_outliers/heights.csv")
df.head()
# + [markdown] id="xE1AeFwr8ZLQ" colab_type="text"
# ### Detect outliers using percentile
# + id="-v3jqw5Y8Uo_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e94cc24a-b363-4bb7-de6f-0d28576fec14"
# NOTE(review): "thresold" is a typo for "threshold"; the name is kept
# unchanged because later cells in this notebook reuse it.
# Upper cutoff: the 95th percentile of height.
max_thresold = df['height'].quantile(0.95)
max_thresold
# + id="kErgSzMW8bgU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 78} outputId="b5c52093-d861-47b3-e424-1eabafb98b55"
# Rows above the upper cutoff are treated as outliers.
df[df['height']>max_thresold]
# + id="nSzMYnAX8cu0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="d0ea4229-4cba-41f0-d4b1-5a9510c5f8ce"
# Lower cutoff: the 5th percentile of height.
min_thresold = df['height'].quantile(0.05)
min_thresold
# + id="PVwWkGm08f7f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 78} outputId="012f32ab-3abb-4263-a66b-295a7d10b071"
# Rows below the lower cutoff are treated as outliers.
df[df['height']<min_thresold]
# + [markdown] id="y0R7XQLO8jiV" colab_type="text"
# #### Remove outliers
# + id="6Ogr2kyA8hti" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="ca8339e1-bbf4-4b8f-f738-26a1971e4d6b"
# Keep only rows strictly between the two percentile cutoffs.
df[(df['height']<max_thresold) & (df['height']>min_thresold)]
# + [markdown] id="IgxkwRHc8oac" colab_type="text"
# #### Now lets explore banglore property prices dataset
# + id="0C3RcAlV8md_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="9b7a0180-f91d-4737-bc01-a1a9c8be80e1"
# Bangalore house-price data; df is rebound, the heights frame is discarded.
df = pd.read_csv("https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/1_outliers/bhp.csv")
df.head()
# + id="l2BFgaUv8ui_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="fd14e08f-8e2e-40bd-c467-dea1cc0fadc2"
df.shape
# + id="KEftx3K28wkk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 288} outputId="80584a8c-7453-4378-ee9d-bd2a1d40df97"
df.describe()
# + [markdown] id="_MVqaAMV80wV" colab_type="text"
# **Explore samples that are above 99.90% percentile and below 1% percentile rank**
# + id="5Q4ksVHW8x8w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="63782e60-4c4c-4283-873d-3f066f48d925"
# quantile() with a list returns both cutoffs in one call.
min_thresold, max_thresold = df.price_per_sqft.quantile([0.001, 0.999])
min_thresold, max_thresold
# + id="5irmux7j83wj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 468} outputId="db8f5f5d-760f-4239-f81b-4a966ed38997"
df[df.price_per_sqft < min_thresold]
# + id="Mh_CoTo285E6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 468} outputId="8bca50ab-1b96-490c-cc9a-2302ee40e54a"
df[df.price_per_sqft > max_thresold]
# + [markdown] id="df4rvd8B89Bi" colab_type="text"
# #### Remove outliers
# + id="LlKY9Amy87Gm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0f143548-d498-475b-929f-5451301f11d8"
# df2 keeps only the rows inside the 0.1%..99.9% band.
df2 = df[(df.price_per_sqft<max_thresold) & (df.price_per_sqft>min_thresold)]
df2.shape
# + id="ofN8IkqO8_va" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 288} outputId="786b5e9a-0f16-499c-bde4-ebfebfaaa536"
df2.describe()
# + [markdown] id="GHPAnUMt9KsP" colab_type="text"
# ## 2. Outlier detection and removal using z-score and standard deviation in python pandas
# + id="36NbhRe89BNl" colab_type="code" colab={}
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
# %matplotlib inline
# Default figure size for every plot in this section.
matplotlib.rcParams['figure.figsize'] = (10,6)
# + [markdown] id="HGeScTIV9YCZ" colab_type="text"
# **We are going to use heights dataset from kaggle.com. Dataset has heights and weights both but I have removed weights to make it simple**
#
# https://www.kaggle.com/mustafaali96/weight-height
# + id="lhrjjiNi9VvY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="33351c8a-9c9c-4127-b042-05c60e009f45"
# Reload the heights dataset for the z-score section.
df = pd.read_csv("https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/2_outliers_z_score/heights.csv")
df.sample(5)
# + id="CC0tXE5w9ekx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 394} outputId="147451d4-786a-4621-a6db-353cb3870897"
import seaborn as sns
sns.set()
# Raw count histogram of heights.
plt.hist(df.height, bins=20, rwidth=0.8)
plt.xlabel('Height (inches)')
plt.ylabel('Count')
plt.show()
# + [markdown] id="dimPwMTb9uyU" colab_type="text"
# Read this awesome article to get your fundamentals clear on normal distribution, bell curve and standard deviation.
# https://www.mathsisfun.com/data/standard-normal-distribution.html
#
# **Plot bell curve along with histogram for our dataset**
# + id="0MIMGya09iWq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 394} outputId="60f8601a-2cfb-4668-d937-c2d4d768e5a1"
from scipy.stats import norm
import numpy as np
# density=True normalises the histogram so the fitted normal pdf overlays it.
plt.hist(df.height, bins=20, rwidth=0.8, density=True)
plt.xlabel('Height (inches)')
plt.ylabel('Count')
# Evaluate the normal pdf (sample mean/std) over the observed height range.
rng = np.arange(df.height.min(), df.height.max(), 0.1)
plt.plot(rng, norm.pdf(rng,df.height.mean(),df.height.std()));
# + id="MVGH4SEw95UD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="50ff1fd8-a6f9-41fe-b0d5-505e68aca902"
# Sample mean of the heights.
df.height.mean()
# + id="IMbvXHb-99oz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="d7bd4aab-c95e-4ffb-a218-e6df9cb6f482"
# Sample standard deviation. (The original cell repeated df.height.mean();
# the markdown below quotes "mean is 66.37 and standard deviation is 3.84",
# so this cell was clearly meant to display the std.)
df.height.std()
# + [markdown] id="y4_z7cM8-Al9" colab_type="text"
#
# Here the mean is 66.37 and standard deviation is 3.84.
# + [markdown] id="EGwyLuii-DCF" colab_type="text"
# **(1) Outlier detection and removal using 3 standard deviation**
#
# One of the ways we can remove outliers is remove any data points that are beyond 3 standard deviation from mean. Which means we can come up with following upper and lower bounds
# + id="DQa8RYIM9-7k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8aa84dee-7b95-48e4-c4cb-78079d49c400"
# Upper bound: mean + 3 standard deviations.
upper_limit = df.height.mean() + 3*df.height.std()
upper_limit
# + id="b0SQj6Y7-Jqe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="331c9d96-f773-4395-e51f-95868179deda"
# Lower bound: mean - 3 standard deviations.
lower_limit = df.height.mean() -3*df.height.std()
lower_limit
# + [markdown] id="fdoY8up8-MqS" colab_type="text"
#
# Here are the outliers that are beyond 3 std dev from mean
# + id="Nji-oD-e-LIt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="350c2e81-4949-4742-8ade-fd3a155a007c"
# Rows outside [lower_limit, upper_limit] in either direction.
df[(df.height>upper_limit) | (df.height<lower_limit)]
# + [markdown] id="9z4cyY_u-Qx8" colab_type="text"
# Above the heights on higher end is 78 inch which is around 6 ft 6 inch. Now that is quite unusual height. There are people who have this height but it is very uncommon and it is ok if you remove those data points. Similarly on lower end it is 54 inch which is around 4 ft 6 inch. While this is also a legitimate height you don't find many people having this height so it is safe to consider both of these cases as outliers
#
# **Now remove these outliers and generate new dataframe**
# + id="arFSFULS-Om3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="76e0c269-f4ed-4c19-dfc3-9515a53b0a10"
# Keep only rows strictly inside the 3-sigma band.
df_no_outlier_std_dev = df[(df.height<upper_limit) & (df.height>lower_limit)]
df_no_outlier_std_dev.head()
# + id="Fn7aN-tC-T_h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ca998b76-4382-4861-9b7a-e3505f47c6e3"
df_no_outlier_std_dev.shape
# + id="UFS061Am-VOl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="493e93a1-686e-45d5-d33c-87701aa288f8"
df.shape
# + [markdown] id="0lFpMiZL-asr" colab_type="text"
# Above shows original dataframe data 10000 data points. Out of that we removed 7 outliers (i.e. 10000-9993)
# + [markdown] id="Hj_IMgTj-fS0" colab_type="text"
# ### (2) Outlier detection and removal using Z Score
# **Z score is a way to achieve same thing that we did above in part (1)**
#
# **Z score indicates how many standard deviation away a data point is.**
#
# For example in our case mean is 66.37 and standard deviation is 3.84.
#
# If a value of a data point is 77.91 then Z score for that is 3 because it is 3 standard deviation away (77.91 = 66.37 + 3 * 3.84)
#
# **Calculate the Z Score**
#
# $Z=\frac{x-\mu}{\sigma}$
#
# $\mu - mean$\
# $\sigma - standard deviation$
# + id="XiEr_oBX-WSt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="84e99ffa-241d-43a9-9ac7-0a4e4bc1d4db"
# z-score: how many sample standard deviations each height is from the mean.
df['zscore'] = ( df.height - df.height.mean() ) / df.height.std()
df.head(5)
# + [markdown] id="2GjfyEyp_9Gi" colab_type="text"
# Above for first record with height 73.84, z score is 1.94. This means 73.84 is 1.94 standard deviation away from mean
# + id="SVd_78TW-YNU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="900fc79c-b180-4ae0-f105-1145a28d6489"
# Sanity check of the first record's z-score by hand.
(73.84-66.37)/3.84
# + [markdown] id="3K7PEEjvAAj_" colab_type="text"
# **Get data points that has z score higher than 3 or lower than -3. Another way of saying same thing is get data points that are more than 3 standard deviation away**
# + id="Qqplul4x_-wX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="f1170f2c-801f-4eb1-d751-df353601d072"
df[df['zscore']>3]
# + id="oOMKSQndAE4E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="ea3d4208-5e80-42bc-9af1-f605038b0348"
df[df['zscore']<-3]
# + [markdown] id="YlF9X2b0AIb2" colab_type="text"
# Here is the list of all outliers
#
#
# + id="QBP7iFb0AGvF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="d3a7d083-f86a-4d7a-cc9f-1207ba49fe28"
# Both tails at once: |zscore| > 3.
df[(df.zscore<-3) | (df.zscore>3)]
# + [markdown] id="XyeJFbz1AL4V" colab_type="text"
# **Remove the outliers and produce new dataframe**
# + id="oWfSUB8RAKNM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="785fa7eb-c205-4650-f389-783e88ea5fbb"
# Keep rows with |zscore| < 3 (same result as the 3-sigma method above).
df_no_outliers = df[(df.zscore>-3) & (df.zscore<3)]
df_no_outliers.head()
# + id="qmMJpbYhAQXD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="90e591a6-7ec2-4534-df38-8c7dbdf800ac"
df_no_outliers.shape
# + id="yqQo_gDAAR0t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5e1ccdcf-e027-4f4f-b20c-d65e52eb4764"
df.shape
# + [markdown] id="omQizuY2AU4Q" colab_type="text"
#
# Above shows original dataframe data 10000 data points. Out of that we removed 7 outliers (i.e. 10000-9993)
# + [markdown] id="F6nUso_HAV_w" colab_type="text"
# ## 3. Outlier Detection and Removal Using IQR
# + id="bnQuftJ3AWXb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 648} outputId="96a855c1-e449-412f-b769-bcbc6ac19756"
import pandas as pd
# Heights dataset for the IQR (interquartile range) method.
df = pd.read_csv("https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/3_outlier_IQR/heights.csv")
df
# + id="S4_2PwGAATXJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 288} outputId="2c153765-1091-4806-97d9-4f8ae9334aec"
df.describe()
# + [markdown] id="n3wHu8PlAybZ" colab_type="text"
# **Detect outliers using IQR**
# + id="Zczy2YbpAter" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a42e4780-f78d-49eb-de2e-4df6e83b51a3"
# First and third quartiles.
Q1 = df.height.quantile(0.25)
Q3 = df.height.quantile(0.75)
Q1, Q3
# + id="9ce_VodaA07g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="84215ce2-b989-4787-c14b-3d21cefc8da4"
IQR = Q3 - Q1
IQR
# + id="hUNtU79aA2OP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e73d6166-51fc-4cec-b8aa-c00dcbd5ff0f"
# Tukey's fences: 1.5 * IQR beyond each quartile.
lower_limit = Q1 - 1.5*IQR
upper_limit = Q3 + 1.5*IQR
lower_limit, upper_limit
# + [markdown] id="89sP_0FqA4-O" colab_type="text"
# **Here are the outliers**
# + id="3DljKShOA3e9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="587f821e-0027-4949-d5ab-e9547526f0a4"
df[(df.height<lower_limit)|(df.height>upper_limit)]
# + [markdown] id="Dafk-AK9A9oq" colab_type="text"
# **Remove outliers**
# + id="jjLv71D1A8EL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 498} outputId="cb1b80f7-84c3-4157-a08f-2c9e1a40d415"
# Keep only rows strictly inside the fences.
df_no_outlier = df[(df.height>lower_limit)&(df.height<upper_limit)]
df_no_outlier
# + [markdown] id="LMFCq7sXBC2a" colab_type="text"
# https://medium.com/mytake/why-1-5-in-iqr-method-of-outlier-detection-5d07fdc82097
# + id="UN2c7ByyBACg" colab_type="code" colab={}
pass
| Notebook/notebooks/FeatureEngineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering
# ## Gaussian Mixture Models
#
# ### https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import seaborn as sns; sns.set()
import numpy as np
# `sklearn.datasets.samples_generator` was a private shim removed in
# scikit-learn 0.24; `make_blobs` is importable from `sklearn.datasets`.
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
# Generalizing E-M: Gaussian Mixture Models
from sklearn.mixture import GaussianMixture as GMM
# -
# ### Creating Toy data
# +
# Generate some data: 400 points in 4 well-separated Gaussian blobs.
X, y_true = make_blobs(n_samples=400, centers=4,
                       cluster_std=0.60, random_state=0)
X[:10]
# -
X = X[:, ::-1] # flip axes for better plotting
X[:10]
# ### KMeans
# +
# Plot the data with K Means Labels
kmeans = KMeans(4, random_state=0)
labels = kmeans.fit(X).predict(X)
# -
labels
# ### Plot data
# Scatter the points coloured by their KMeans cluster assignment.
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis');
"""
"""
def plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):
    """Scatter-plot the KMeans cluster assignments of X and overlay one
    grey circle per cluster, with radius equal to the distance from the
    centroid to its farthest member point."""
    labels = kmeans.fit_predict(X)

    # plot the input data, coloured by assigned cluster
    axes = ax if ax is not None else plt.gca()
    axes.axis('equal')
    axes.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)

    # one bounding circle per centroid: radius = max member distance
    centers = kmeans.cluster_centers_
    radii = []
    for idx, center in enumerate(centers):
        radii.append(cdist(X[labels == idx], [center]).max())
    for center, radius in zip(centers, radii):
        axes.add_patch(plt.Circle(center, radius, fc='#CCCCCC', lw=3,
                                  alpha=0.5, zorder=1))
kmeans = KMeans(n_clusters=4, random_state=0)
plot_kmeans(kmeans, X)
"""
np.random.RandomState
"""
# +
# Stretch the blobs with a random linear map; KMeans' circular clusters fit poorly.
rng = np.random.RandomState(13)
X_stretched = np.dot(X, rng.randn(2, 2))
kmeans = KMeans(n_clusters=4, random_state=0)
plot_kmeans(kmeans, X_stretched)
# -
# ## Generalizing E-M: Gaussian Mixture Models
# +
# GMM: fit a 4-component mixture and hard-assign each point.
gmm = GMM(n_components=4).fit(X)
labels = gmm.predict(X)
# +
# plot GMM
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis');
# +
# probabilistic cluster assignments: one row per point, one column per component
probs = gmm.predict_proba(X)
print(probs[:5].round(3))
# +
# visualize this uncertainty by, for example, making the size of each point proportional to the certainty of its prediction
size = 50 * probs.max(1) ** 2  # square emphasizes differences
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=size);
# -
from matplotlib.patches import Ellipse
# +
# visualize the locations and shapes of the GMM clusters by drawing ellipses based on the GMM output
def draw_ellipse(position, covariance, ax=None, **kwargs):
    """Draw an ellipse with a given position and covariance.

    Draws the 1-, 2- and 3-sigma ellipses of a 2-D Gaussian onto `ax`
    (or the current axes). `covariance` may be a full (2, 2) matrix or a
    diagonal/spherical representation.
    """
    ax = ax or plt.gca()

    # Convert covariance to principal axes
    if covariance.shape == (2, 2):
        U, s, Vt = np.linalg.svd(covariance)
        angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
        width, height = 2 * np.sqrt(s)
    else:
        # non-full covariance: axis-aligned ellipse
        angle = 0
        width, height = 2 * np.sqrt(covariance)

    # Draw the Ellipse. `angle` is passed by keyword: positional `angle`
    # was deprecated and then made keyword-only in recent Matplotlib.
    for nsig in range(1, 4):
        ax.add_patch(Ellipse(position, nsig * width, nsig * height,
                             angle=angle, **kwargs))
def plot_gmm(gmm, X, label=True, ax=None):
    """Fit `gmm` to X, scatter the points (optionally coloured by the
    predicted component) and overlay one ellipse per Gaussian component,
    with opacity proportional to the component weight."""
    axes = ax or plt.gca()
    labels = gmm.fit(X).predict(X)

    if label:
        axes.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis',
                     zorder=2)
    else:
        axes.scatter(X[:, 0], X[:, 1], s=40, zorder=2)
    axes.axis('equal')

    # Scale alpha so the heaviest component is drawn at opacity 0.2.
    w_factor = 0.2 / gmm.weights_.max()
    for mean, covariance, weight in zip(gmm.means_, gmm.covariances_,
                                        gmm.weights_):
        draw_ellipse(mean, covariance, alpha=weight * w_factor)
# +
# Previous function does not run, needs to be updated by:
# https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_covariances.html
# Fit and draw the 4-component GMM on the round blobs.
gmm = GMM(n_components=4, random_state=42)
plot_gmm(gmm, X)
# +
# Previous function does not run, needs to be updated by:
# https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_covariances.html
# Full covariance lets each component tilt/stretch to fit the stretched data.
gmm = GMM(n_components=4, covariance_type='full', random_state=42)
plot_gmm(gmm, X_stretched)
# -
# ### Attributes
# +
# probs = gmm.predict_proba(X)
print(probs.shape)
probs.round(3)
# +
# Covariances
gmm.covariances_
# +
# precisions_ (inverse covariance matrices)
gmm.precisions_
# -
# Precisions:
# The precision matrices for each component in the mixture. A precision
# matrix is the inverse of a covariance matrix. A covariance matrix is
# symmetric positive definite so the mixture of Gaussian can be
# equivalently parameterized by the precision matrices. Storing the
# precision matrices instead of the covariance matrices makes it more
# efficient to compute the log-likelihood of new samples at test time.
# The shape depends on `covariance_type`::
# +
# precisions_cholesky_
gmm.precisions_cholesky_
# +
# Weights (mixing proportions; sum to 1)
gmm.weights_
# +
# Means (component centres)
gmm.means_
# -
# # GMM as Density Estimation
from sklearn.datasets import make_moons
# Two interleaved half-moons: a shape no single Gaussian can capture.
Xmoon, ymoon = make_moons(200, noise=.05, random_state=11)
plt.scatter(Xmoon[:, 0], Xmoon[:, 1]);
# +
gmm2 = GMM(n_components=2, covariance_type='full', random_state=0)
plot_gmm(gmm2, Xmoon)
# +
# n_components from 2 to 16
gmm22 = GMM(n_components=16, covariance_type='full', random_state=0)
# Plot the 16-component model. (The original cell plotted `gmm2` — the
# 2-component model from the previous cell — leaving `gmm22` unused,
# which contradicted the cell's own comment.)
plot_gmm(gmm22, Xmoon)
# +
# label True to label False
# NOTE(review): despite the comment, this cell rebuilds a 2-component model;
# only the next cell actually raises n_components to 16.
gmm16 = GMM(n_components=2, covariance_type='full', random_state=0)
plot_gmm(gmm16, Xmoon, label=False)
# +
# more components and ignore the cluster labels
gmm16 = GMM(n_components=16, covariance_type='full', random_state=0)
plot_gmm(gmm16, Xmoon, label=False)
# -
# NOTE(review): this scores the blob data `X`, not `Xmoon` that gmm16 was
# fitted on — presumably intentional only as a shape check; verify.
gmm16.predict_proba(X).shape
# Draw 400 new samples from the fitted density; sample() returns (X, y).
Xnew = gmm16.sample(400)
plt.scatter(Xnew[0][:, 0], Xnew[0][:, 1]);
Xnew[0][:,0]
# ## How many components
#
# https://en.wikipedia.org/wiki/Akaike_information_criterion
#
# https://en.wikipedia.org/wiki/Bayesian_information_criterion
#
#
# +
# Fit models with 1..20 components and compare information criteria.
n_components = np.arange(1, 21)
models = [GMM(n, covariance_type='full', random_state=0).fit(Xmoon)
          for n in n_components]
plt.plot(n_components, [m.bic(Xmoon) for m in models], label='BIC')
plt.plot(n_components, [m.aic(Xmoon) for m in models], label='AIC')
plt.legend(loc='best')
plt.xlabel('n_components');
# +
# using 10 components based on elbow
gmm17 = GMM(n_components=10, covariance_type='full', random_state=0)
plot_gmm(gmm17, Xmoon, label=False)
# +
# when generating from previous model we see an issue
Xnew2 = gmm17.sample(400)
# Plot the new sample. (The original line mixed two different samples —
# x from Xnew2 but y from Xnew — so the scatter was meaningless.)
plt.scatter(Xnew2[0][:, 0], Xnew2[0][:, 1]);
# -
| clustering/.ipynb_checkpoints/GMM-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# import sklearn
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import graphviz
# as_frame=True returns the data/target as pandas objects inside a Bunch.
iris = load_iris(as_frame = True)
iris
iris_df = iris["data"]
iris_df
target = iris["target"]
target
# Three classes: 0=setosa, 1=versicolor, 2=virginica (see iris.target_names).
target.unique()
iris_df.info()
# Fit a decision tree with default hyperparameters on the full dataset.
estimator = DecisionTreeClassifier()
estimator.fit(iris_df, target)
# One unseen sample: [sepal length, sepal width, petal length, petal width].
# NOTE(review): a plain list has no feature names, so recent scikit-learn
# emits a "fitted with feature names" warning here — harmless but noisy.
new_data = [[5.8,3.0,5.1,1.9]]
estimator.predict(new_data)
iris.feature_names
iris.target_names
# Export the fitted tree as Graphviz DOT source and render it to a file.
data = tree.export_graphviz(estimator,
                            out_file = None,
                            feature_names = iris.feature_names,
                            class_names = iris.target_names,
                            filled = True,
                            rounded = True,
                            special_characters = True)
graph = graphviz.Source(data)
graph.render("iris_graph")
| notebooks/code/Bonus - Decission Trees - Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !python mainpro_CK+.py --model=Resnet18
# +
# Verify the trained model's correctness on a single test image.
"""
visualize results for test image
"""
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
# Crop size used during training; defined here but not applied in this cell.
cut_size = 44
# Only converts the image to a tensor; no resizing/normalisation here.
transform_test = transforms.Compose([
    transforms.ToTensor()
])
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale using ITU-R BT.601 luma
    weights (0.299 R + 0.587 G + 0.114 B); any alpha channel is ignored."""
    weights = np.array([0.299, 0.587, 0.114])
    return rgb[..., :3] @ weights
# Load one RGB test image and run it through the network.
img = io.imread('images/anger_rgb.png')
# img = raw_img[:, :, np.newaxis]
# img = np.concatenate((img, img, img), axis=2)
# img = Image.fromarray(img)
inputs = transform_test(img)
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Restore the trained ResNet18 checkpoint and switch to evaluation mode.
net = ResNet18()
checkpoint = torch.load(os.path.join('CK+_Resnet18/1/', 'Test_model.t7'))
net.load_state_dict(checkpoint['net'])
net.cuda()
net.eval()
# Add a batch dimension and move the tensor to the GPU.
c, h, w = np.shape(inputs)
inputs = inputs.view(-1, c, h, w)
inputs = inputs.cuda()
# NOTE(review): Variable(..., volatile=True) is a pre-0.4 PyTorch idiom;
# modern code would wrap the forward pass in `with torch.no_grad():`.
inputs = Variable(inputs, volatile=True)
outputs = net(inputs)
# Raw (pre-softmax) logits, one per expression class.
for i in range(7):
    print('origin %10.3f' % outputs[0][i])
# Softmax over the 7 classes, then a manual argmax.
score = F.softmax(outputs,1)
# NOTE(review): `max` shadows the Python builtin of the same name here.
max = score[0][0]
maxindex = 0
for i in range(7):
    print('%10.3f' % score[0][i])
    if(score[0][i] > max):
        max = score[0][i]
        maxindex = i
print("The Expression is %s" %str(class_names[maxindex]))
# +
# Convert the trained PyTorch model to ONNX format.
# !pip install onnx
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
transform_test = transforms.Compose([
    transforms.ToTensor()
])
img = io.imread('images/anger_rgb.png')
# img = img[:, :, np.newaxis]
# img = np.concatenate((img, img, img), axis=2)
# img = Image.fromarray(img)
inputs = transform_test(img)
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Load the checkpoint and put the network into inference mode (train(False)).
net = ResNet18()
checkpoint = torch.load(os.path.join('CK+_Resnet18/1/', 'Test_model.t7'))
net.load_state_dict(checkpoint['net'])
net.cuda()
net.train(False)
# Build a sample input for tracing: add a batch dim and move to GPU.
c, h, w = np.shape(inputs)
inputs = inputs.view(-1,c, h, w)
inputs = inputs.cuda()
inputs = Variable(inputs, volatile=True)
# Export the model.
# NOTE(review): torch.onnx._export is a private API and `training=False`
# is not a supported flag in current PyTorch — modern code would call
# torch.onnx.export(...) directly; verify against the installed version.
torch_out = torch.onnx._export(net,  # model being run
                               inputs,  # model input (or a tuple for multiple inputs)
                               "CK+_ResNet18_privateTest.onnx",  # where to save the model
                               verbose=True,
                               input_names=['data'],
                               output_names=['outTensor'],
                               export_params=True,
                               training=False)
# +
# Validate the ONNX model
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import onnx
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
transform_test = transforms.Compose([
transforms.ToTensor()
])
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Load the ONNX model
model = onnx.load("CK+_ResNet18_privateTest.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
onnx.helper.printable_graph(model.graph)
import caffe2.python.onnx.backend as backend
import numpy as np
rep = backend.prepare(model) # or "CPU"
# For the Caffe2 backend:
# rep.predict_net is the Caffe2 protobuf for the network
# rep.workspace is the Caffe2 workspace for the network
# (see the class caffe2.python.onnx.backend.Workspace)
#input
img = io.imread('images/anger_rgb.png')
# img = img[:, :, np.newaxis]
# img = np.concatenate((img, img, img), axis=2)
# img = Image.fromarray(img)
inputs = transform_test(img)
c, h, w = np.shape(inputs)
inputs = inputs.view(-1,c, h, w)
outputs = rep.run(inputs.numpy().astype(np.float32))
# To run networks with more than one input, pass a tuple
# rather than a single numpy ndarray.
print(outputs[0])
torch_data = torch.from_numpy(outputs[0])
score = F.softmax(torch_data,1)
max = score[0][0]
maxindex = 0
for i in range(7):
print('%10.3f' % score[0][i])
if(score[0][i] > max):
max = score[0][i]
maxindex = i
print("The Expression is %s" %str(class_names[maxindex]))
# +
# Simplify the ONNX model
# !pip install onnx-simplifier
# !python -m onnxsim "CK+_ResNet18_privateTest.onnx" "CK+_ResNet18_privateTest_sim.onnx"
#验证模型
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import onnx
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
transform_test = transforms.Compose([
transforms.ToTensor()
])
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Load the ONNX model
model = onnx.load("CK+_ResNet18_privateTest_sim.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
onnx.helper.printable_graph(model.graph)
import caffe2.python.onnx.backend as backend
import numpy as np
rep = backend.prepare(model) # or "CPU"
# For the Caffe2 backend:
# rep.predict_net is the Caffe2 protobuf for the network
# rep.workspace is the Caffe2 workspace for the network
# (see the class caffe2.python.onnx.backend.Workspace)
#input
img = io.imread('images/anger.png')
img = img[:, :, np.newaxis]
img = np.concatenate((img, img, img), axis=2)
img = Image.fromarray(img)
inputs = transform_test(img)
c, h, w = np.shape(inputs)
inputs = inputs.view(-1,c, h, w)
outputs = rep.run(inputs.numpy().astype(np.float32))
# To run networks with more than one input, pass a tuple
# rather than a single numpy ndarray.
print(outputs[0])
torch_data = torch.from_numpy(outputs[0])
score = F.softmax(torch_data,1)
max = score[0][0]
maxindex = 0
for i in range(7):
print('%10.3f' % score[0][i])
if(score[0][i] > max):
max = score[0][i]
maxindex = i
print("The Expression is %s" %str(class_names[maxindex]))
# +
# !pip install -U onnx-coreml
# Convert the ONNX model to a Core ML model
import onnx;
from onnx_coreml import convert
onnx_model = onnx.load("CK+_ResNet18_privateTest.onnx")
cml_model= convert(onnx_model,image_input_names='data',target_ios='13')
cml_model.save("CK+_ResNet18_privateTest_sim.mlmodel")
# -
| ck+_resnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: feml
# language: python
# name: python3
# ---
# ## Predicting Survival on the Titanic
#
# ### History
# Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.
#
# ### Assignment:
#
# Build a Machine Learning Pipeline, to engineer the features in the data set and predict who is more likely to Survive the catastrophe.
#
# Follow the Jupyter notebook below, and complete the missing bits of code, to achieve each one of the pipeline steps.
# +
import re
# to handle datasets
import pandas as pd
import numpy as np
# for visualization
import matplotlib.pyplot as plt
# to divide train and test set
from sklearn.model_selection import train_test_split
# feature scaling
from sklearn.preprocessing import StandardScaler
# to build the models
from sklearn.linear_model import LogisticRegression
# to evaluate the models
from sklearn.metrics import accuracy_score, roc_auc_score
# to persist the model and the scaler
import joblib
# ========== NEW IMPORTS ========
# Respect to notebook 02-Predicting-Survival-Titanic-Solution
# pipeline
from sklearn.pipeline import Pipeline
# for the preprocessors
from sklearn.base import BaseEstimator, TransformerMixin
# for imputation
from feature_engine.imputation import AddMissingIndicator, MeanMedianImputer, CategoricalImputer
# for encoding categorical variables
from feature_engine.encoding import RareLabelEncoder, OneHotEncoder
# -
# ## Prepare the data set
# +
# load the data - it is available open source and online
data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl', na_values='?')
# display data
data.head()
# +
# retain only the first cabin if more than
# 1 are available per passenger
def get_first_cabin(row):
    """Return the first cabin token of a passenger's cabin string.

    Some passengers have several cabins recorded in one whitespace-separated
    string (e.g. "C23 C25 C27"); only the first one is retained.

    Returns np.nan when the value is missing (NaN/None has no .split) or
    the string contains no tokens.
    """
    try:
        return row.split()[0]
    except (AttributeError, IndexError):
        # AttributeError: value is NaN/None; IndexError: empty/whitespace
        # string. Anything else (a genuine bug) should propagate, so the
        # original bare `except:` is narrowed to these two cases.
        return np.nan
data['cabin'] = data['cabin'].apply(get_first_cabin)
# +
# extracts the title (Mr, Ms, etc) from the name variable
def get_title(passenger):
    """Extract the honorific title (Mrs, Mr, Miss, Master) from a name.

    'Mrs' is tested before 'Mr' so the substring 'Mr' inside 'Mrs' cannot
    cause a misclassification. Names without a known title map to 'Other'.
    """
    for title in ('Mrs', 'Mr', 'Miss', 'Master'):
        if re.search(title, passenger):
            return title
    return 'Other'
data['title'] = data['name'].apply(get_title)
# +
# cast numerical variables as floats
data['fare'] = data['fare'].astype('float')
data['age'] = data['age'].astype('float')
# +
# drop unnecessary variables
data.drop(labels=['name','ticket', 'boat', 'body','home.dest'], axis=1, inplace=True)
# display data
data.head()
# +
# # save the data set
# data.to_csv('titanic.csv', index=False)
data
# -
# # Begin Assignment
#
# ## Configuration
# +
# list of variables to be used in the pipeline's transformers
NUMERICAL_VARIABLES = ["age", "fare"]
CATEGORICAL_VARIABLES = ["sex", "cabin", "embarked", "title"]
CABIN = ["cabin"]
# -
# ## Separate data into train and test
# +
X_train, X_test, y_train, y_test = train_test_split(
data.drop('survived', axis=1), # predictors
data['survived'], # target
test_size=0.2, # percentage of obs in test set
random_state=0) # seed to ensure reproducibility
X_train.shape, X_test.shape
# -
# ## Preprocessors
#
# ### Class to extract the letter from the variable Cabin
class ExtractLetterTransformer(BaseEstimator, TransformerMixin):
    """Replace each value of the given variables with its first letter.

    Used to reduce a cabin code (e.g. 'C23') to its deck letter ('C').
    Assumes the variables have already been imputed upstream and contain
    non-empty strings.

    Parameters
    ----------
    variables : list of str
        Names of the columns to transform.
    """

    def __init__(self, *, variables):
        self.variables = variables

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn. `y` now defaults to
        # None (sklearn convention), so the transformer also works outside
        # a supervised Pipeline; passing y explicitly still works.
        return self

    def transform(self, X):
        # Work on a copy so the caller's DataFrame is not mutated.
        X = X.copy()
        for var in self.variables:
            # Vectorized first-character extraction; unlike the previous
            # `apply(lambda s: s[0])` this propagates NaN instead of raising.
            X[var] = X[var].str[0]
        return X
# ## Pipeline
#
# - Impute categorical variables with string missing
# - Add a binary missing indicator to numerical variables with missing data
# - Fill NA in original numerical variable with the median
# - Extract first letter from cabin
# - Group rare Categories
# - Perform One hot encoding
# - Scale features with standard scaler
# - Fit a Logistic regression
# set up the pipeline
titanic_pipe = Pipeline([
# ===== IMPUTATION =====
# impute categorical variables with string 'missing'
('categorical_imputation', CategoricalImputer(variables=CATEGORICAL_VARIABLES)),
# add missing indicator to numerical variables
('missing_indicator', AddMissingIndicator(variables=NUMERICAL_VARIABLES)),
# impute numerical variables with the median
('median_imputation', MeanMedianImputer(variables=NUMERICAL_VARIABLES)),
# Extract first letter from cabin
('extract_letter', ExtractLetterTransformer(variables=CABIN)),
# == CATEGORICAL ENCODING ======
# remove categories present in less than 5% of the observations (0.05)
# group them in one category called 'Rare'
('rare_label_encoder', RareLabelEncoder(n_categories=1, variables=CATEGORICAL_VARIABLES)),
# encode categorical variables using one hot encoding into k-1 variables
('categorical_encoder', OneHotEncoder(drop_last=True, variables=CATEGORICAL_VARIABLES)),
# scale using standardization
('scaler', StandardScaler()),
# logistic regression (use C=0.0005 and random_state=0)
('Logit', LogisticRegression(C=0.0005, random_state=0)),
])
# +
# train the pipeline
titanic_pipe.fit(X_train, y_train)
# -
# ## Make predictions and evaluate model performance
#
# Determine:
# - roc-auc
# - accuracy
#
# **Important, remember that to determine the accuracy, you need the outcome 0, 1, referring to survived or not. But to determine the roc-auc you need the probability of survival.**
# +
# make predictions for train set
class_ = titanic_pipe.predict(X_train)
pred = titanic_pipe.predict_proba(X_train)[:, 1]
# determine mse and rmse
print('train roc-auc: {}'.format(roc_auc_score(y_train, pred)))
print('train accuracy: {}'.format(accuracy_score(y_train, class_)))
print()
# make predictions for test set
class_ = titanic_pipe.predict(X_test)
pred = titanic_pipe.predict_proba(X_test)[:, 1]
# determine mse and rmse
print('test roc-auc: {}'.format(roc_auc_score(y_test, pred)))
print('test accuracy: {}'.format(accuracy_score(y_test, class_)))
print()
# -
# That's it! Well done
#
# **Keep this code safe, as we will use this notebook later on, to build production code, in our next assignement!!**
| section-04-research-and-development/titanic-assignment/03-titanic-survival-pipeline-assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
from datetime import datetime, timedelta
import pathlib
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
linkSequentialData = 'data/sequential_data'
operations = ['network_create_delete', 'boot_delete', 'image_create_delete']
def loadJson(link):
    """Read the JSON file at `link` and return the parsed object."""
    with open(link) as handle:
        return json.load(handle)
traces = {}
for operation in operations:
path = pathlib.Path().absolute().parent / linkSequentialData / 'traces' / operation
pathesTraces = list(path.glob('*.json'))
traces[operation] = {}
traces[operation]['link'] = pathesTraces
traces[operation]['name'] = list(map(lambda x: x.name[:x.name.find('.json')], traces[operation]['link']))
traces[operation]['data'] = list(map(lambda x: loadJson(x), traces[operation]['link']))
# +
#print(json.dumps(traces[operations[0]]['data'][805], indent=2))
# -
# the class for trace data
class Trace(object):
    """One span of an osprofiler trace.

    Timestamps are parsed from ISO-format strings. A span that never
    emitted a '-stop' event carries the literal string 'Null' for both
    endTimestamp and duration, so consumers must check for that sentinel
    before doing timestamp arithmetic.
    """

    def __init__(self, operation, host, name, service, project, startTimestamp, endTimestamp, traceID, parentID, baseID):
        self.operation = operation
        self.host = host
        self.name = name
        self.service = service
        self.project = project
        self.startTimestamp = datetime.strptime(startTimestamp, '%Y-%m-%dT%H:%M:%S.%f')
        if endTimestamp != 'Null':
            self.endTimestamp = datetime.strptime(endTimestamp, '%Y-%m-%dT%H:%M:%S.%f')
            self.duration = self.endTimestamp - self.startTimestamp
        else:
            # 'Null' sentinel: no stop event was recorded for this span.
            self.endTimestamp = 'Null'
            self.duration = 'Null'
        self.traceID = traceID
        self.parentID = parentID
        self.baseID = baseID

    def __repr__(self):
        # Single source of truth for the textual form (previously the same
        # literal was duplicated verbatim in __str__).
        return 'operation: ' + self.operation + '\nhost: ' + self.host + '\nname: ' + self.name + '\nservice: ' + self.service + '\nproject: ' + self.project + '\nstartTimestamp: ' + str(self.startTimestamp) + '\nendTimestamp: ' + str(self.endTimestamp) + '\nduration: ' + str(self.duration) + '\ntraceID: ' + self.traceID + '\nparentID: ' + self.parentID + '\nbaseID: ' + self.baseID + '\n'

    # __str__ delegated to __repr__ to remove the duplicated format string.
    __str__ = __repr__
# +
# parse of json data to trace object
def parseTrace(operation, df):
    """Recursively flatten a raw osprofiler JSON tree into Trace objects.

    `df` is a dict with a 'children' list; each child keeps its metadata
    under 'info', including the raw '-start'/'-stop' notification payloads
    keyed as 'meta.raw_payload.<name>-start' / '-stop'. A missing '-stop'
    payload is substituted by {'timestamp': 'Null'} so the Trace
    constructor receives its 'Null' sentinel for unterminated spans.
    """
    traces = []
    for item in df['children']:
        traces.append(Trace(operation,
                            item.get('info').get('host'),
                            item.get('info').get('name'),
                            item.get('info').get('service'),
                            item.get('info').get('project'),
                            item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('timestamp'),
                            item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', {'timestamp': 'Null'}).get('timestamp'),
                            item.get('trace_id'),
                            item.get('parent_id'),
                            item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id')))
        # Depth-first recursion: append this child's own subtree.
        if len(item['children']) != 0:
            traces = traces + parseTrace(operation, item)
    return traces
for operation in operations:
traces[operation]['parsedData'] = list(map(lambda x: parseTrace(operation, x), traces[operation]['data']))
# +
# check repetitions in trace's structure
def checkRepetitionsInTrace(df):
    """Sanity-check that fields duplicated across a span's metadata agree.

    For every child span, service/project/host/parent_id/trace_id/base_id
    appear both in the span's 'info' dict and inside the raw '-start' and
    '-stop' notification payloads; any mismatch is printed. `mockField`
    stands in for a missing '-stop' payload and mirrors the expected values
    so absent stop events never report a false mismatch.
    Recurses depth-first through 'children'.

    NOTE(review): the host comparison reads `...-start').get('info').get('host')`,
    i.e. it assumes the -start payload always nests host under 'info';
    if a payload lacks that key this raises AttributeError — confirm
    against the trace schema.
    """
    for item in df['children']:
        # Fallback values used when the '-stop' payload is absent; chosen
        # to equal the span's own fields so the checks below pass trivially.
        mockField = {'info': {'host': item.get('info').get('host')},
                     'service': item.get('info').get('service'),
                     'project': item.get('info').get('project'),
                     'parent_id': item.get('parent_id'),
                     'base_id': item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('base_id'),
                     'trace_id': item.get('trace_id')
                    }
        if (item.get('info').get('service') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('service')) or (item.get('info').get('service') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('service')):
            print('service in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if (item.get('info').get('project') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('project')) or (item.get('info').get('project') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('project')):
            print('project in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if (item.get('info').get('host') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('info').get('host')) or (item.get('info').get('host') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('info').get('host')):
            print('host in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if (item.get('parent_id') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('parent_id')) or (item.get('parent_id') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('parent_id')):
            print('parent_id in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if (item.get('trace_id') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('trace_id')) or (item.get('trace_id') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('trace_id')):
            print('trace_id in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if (item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', mockField).get('base_id') != item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('base_id')):
            print('base_id in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if len(item['children']) != 0:
            checkRepetitionsInTrace(item)
for operation in operations:
for x in traces[operation]['data']:
checkRepetitionsInTrace(x)
# +
# collect all values of node's attributes
hosts = []
names = []
services = []
projects = []
for operation in operations:
for arr in traces[operation]['parsedData']:
for x in arr:
if x.host not in hosts:
hosts.append(x.host)
if x.name not in names:
names.append(x.name)
if x.service not in services:
services.append(x.service)
if x.project not in projects:
projects.append(x.project)
print('hosts =', hosts)
print('names =', names)
print('services =', services)
print('projects =', projects)
# +
# collect all values of json's attribute name
infoNames = []
for operation in operations:
for item in traces[operation]['data']:
if item.get('info').get('name') not in infoNames:
infoNames.append(item.get('info').get('name'))
print('info names =', infoNames)
# +
# remove microseconds of timestamp
def removeMicroseconds(x):
    """Parse an ISO timestamp string and truncate it to whole seconds."""
    parsed = datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%f')
    return parsed.replace(microsecond=0)
timeStart = datetime.strptime('2019-11-19 17:38:39', '%Y-%m-%d %H:%M:%S')
timeEnd = datetime.strptime('2019-11-20 01:30:00', '%Y-%m-%d %H:%M:%S')
# check all timestamps for range of the experiment time
def checkTimestamps(df):
    """Print every span whose start/stop timestamp lies outside the
    experiment window defined by the module-level `timeStart`/`timeEnd`.

    The '-stop' payload may be missing, in which case the 'Null' sentinel
    is returned and the end-timestamp check is skipped. Recurses
    depth-first through 'children'.
    """
    for item in df['children']:
        # Start timestamp is always present in the '-start' payload.
        ts = removeMicroseconds(item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-start').get('timestamp'))
        if not((ts >= timeStart) and (ts <= timeEnd)):
            print('time out the range in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        # End timestamp is only checked when a '-stop' payload exists.
        te = item.get('info').get('meta.raw_payload.' + item.get('info').get('name') + '-stop', {'timestamp': 'Null'}).get('timestamp')
        if te != 'Null':
            te = removeMicroseconds(te)
            if not((te >= timeStart) and (te <= timeEnd)):
                print('time out the range in', item.get('info').get('meta.raw_payload.' + item['info']['name'] + '-start').get('base_id'), 'trace_id =', item.get('trace_id'))
        if len(item['children']) != 0:
            checkTimestamps(item)
for operation in operations:
for x in traces[operation]['data']:
checkTimestamps(x)
# +
# find the max trace duration
m = timedelta(0)
for operation in operations:
for arr in traces[operation]['parsedData']:
for x in arr:
if x.duration != 'Null':
m = max(m, x.duration)
print('Max duration of trace is', m)
# -
# count the total number of parsed trace spans across all operations
sum(len(arr) for i in range(len(operations)) for arr in traces[operations[i]]['parsedData'])
# +
# calculate number of traces per second
allTimestamps = pd.date_range(timeStart, timeEnd, freq = 'S')
allTimestamps.freq = None
allTimestamps = list(allTimestamps)
countTimestampPerSecond = {timestamp : {'number' : 0,
'hosts' : []} for timestamp in allTimestamps}
def countTraces(arr, item):
    """Record span `item` as active for every second-timestamp in `arr`.

    Increments the per-second counter and registers the span's host in the
    module-level `countTimestampPerSecond` dict (mutated in place).
    """
    for x in arr:
        countTimestampPerSecond[x]['number'] += 1
        if item.host not in countTimestampPerSecond[x]['hosts']:
            countTimestampPerSecond[x]['hosts'].append(item.host)
for operation in operations:
for arr in traces[operation]['parsedData']:
for x in arr:
ts = x.startTimestamp.replace(microsecond = 0)
if x.duration != 'Null':
te = x.endTimestamp.replace(microsecond = 0)
if ts != te:
at = pd.date_range(ts, te, freq = 'S')
at.freq = None
at = list(at)
countTraces(at, x)
else:
countTraces([ts], x)
else:
countTraces([ts], x)
print('The number of traces is', sum(value['number'] for key, value in countTimestampPerSecond.items()))
print('The max number of active traces per second is', max(value['number'] for key, value in countTimestampPerSecond.items()))
print('The average number of active traces per second is', int(sum(value['number'] for key, value in countTimestampPerSecond.items())/len(list(key for key, value in countTimestampPerSecond.items()))))
print('The number of seconds without traces is', sum(value['number'] == 0 for key, value in countTimestampPerSecond.items()))
# +
# calculate list of free seconds for each node
hostNames = ['wally113', 'wally122', 'wally117', 'wally124', 'wally123']
gaps = {host: [] for host in hostNames}
for key, value in countTimestampPerSecond.items():
for host in list(set(hostNames) - set(value['hosts'])):
gaps[host].append(key)
for key, value in gaps.items():
print('The node ', key, ' has ', len(value), ' (',round(len(value)/len(allTimestamps) * 100, 2),'%) ', 'seconds without traces', sep = '')
# +
# get the start timestamp of the parent trace
def getTimeOfParent(parentID, arr):
    """Return the startTimestamp of the trace in `arr` whose traceID equals
    `parentID`, or the string 'Null' when no such trace exists."""
    matches = (entry.startTimestamp for entry in arr if entry.traceID == parentID)
    return next(matches, 'Null')
# create the graph from list of trace objects
def createGraph(arr):
trace = nx.DiGraph()
for item in arr:
trace.add_node(item.traceID,
host = item.host,
name = item.name,
project = item.project,
startTimestamp = item.startTimestamp,
endTimestamp = item.endTimestamp,
duration = item.duration,
traceID = item.traceID,
parentID = item.parentID,
baseID = item.baseID)
if item.baseID != item.parentID:
trace.add_edge(item.parentID, item.traceID,
delay = item.startTimestamp - getTimeOfParent(item.parentID, arr))
else:
trace.add_edge(item.parentID, item.traceID)
return trace
for operation in operations:
traces[operation]['graph'] = list(map(lambda x: createGraph(x), traces[operation]['parsedData']))
# +
# example of the smallest trace json file
g = traces[operations[0]]['graph'][805]
plt.figure(figsize=(10, 10))
pos = nx.spring_layout(g)
nx.draw_networkx_labels(g, pos, nx.get_node_attributes(g, 'name'))
nx.draw_networkx_edge_labels(g, pos, nx.get_edge_attributes(g, 'delay'))
nx.draw(g, pos = pos, node_color = 'pink', node_size = 1200, font_size = 14, width = 3)
plt.show()
# +
from networkx.drawing.nx_agraph import graphviz_layout, to_agraph
import pygraphviz as pgv
from IPython.display import Image
g.graph['graph'] = {'rankdir':'TD'}
g.graph['node'] = {'shape':'circle'}
g.graph['edges'] = {'arrowsize':'1.0'}
A = to_agraph(g)
A.layout('dot')
A.draw('a.png')
Image(filename='a.png')
# -
| notebooks/.ipynb_checkpoints/Analysis of the traces-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
AoI_csv_dir = 'input/aoi_ids.csv'
label_csv_dir = 'input/labels.csv'
features_csv_dir = 'processing/features_table.csv'
BAG_Panden = '/home/data/citycentre/BAG_Panden.shp'
# # Dependencies
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
#from PIL import Image
import pandas as pd
from shutil import copyfile
import matplotlib.image as mpimg
import numpy
import geopandas as gpd
import fiona
import rasterio
import rasterio.mask
# pandas.tools was removed in pandas 0.20; scatter_matrix lives in pandas.plotting.
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from time import time
from scipy.stats import randint
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.mixture import GaussianMixture
from sklearn.svm import LinearSVC
from sklearn.cluster import MeanShift
from sklearn.manifold import LocallyLinearEmbedding
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from shapely.geometry import shape
from shapely.geometry import Polygon
import shapefile
import shutil
import os
import glob
# # Functions
def ids_to_shp_visualization(ids_list, directory, name, shape_file=BAG_Panden):
    """Export the features of `shape_file` whose 'Identifica' is in
    `ids_list` to a single shapefile `<directory>/<name>.shp`.

    Writes one temporary shapefile per matching feature under
    `<directory>/temporary/`, then merges them and removes the scratch
    directory. NOTE(review): this re-scans the whole source shapefile once
    per ID (O(len(ids_list) * features)); building an ID->feature index
    first would be far faster — confirm before changing.
    """
    i=0
    if not os.path.exists(directory+"/temporary"):
        os.makedirs(directory+"/temporary")
    for identifica in ids_list:
        for feat in fiona.open(shape_file, "r"):
            if identifica==feat['properties']['Identifica']:
                try:
                    # NOTE(review): feat2 and area_per_roof are assigned but
                    # never used; presumably left over from debugging.
                    feat2=feat['properties']['Identifica']
                    feat1=[feat['geometry']]
                    area_per_roof = feat['properties']['SHAPE_Area']
                    # Copy the source schema/CRS so the per-feature files
                    # match the original layer definition.
                    with fiona.open(shape_file, "r") as shapef:
                        meta=shapef.meta
                    with fiona.open(directory+'/temporary/'+str(i)+'.shp', 'w', **meta) as sink:
                        sink.write(feat)
                    i=i+1
                    #if i==1:
                    #    break
                except ValueError:
                    continue
    # Merge all per-feature shapefiles into one writer.
    # NOTE(review): shapefile.Writer() with no target and Writer.save() are
    # pyshp 1.x APIs; pyshp 2.x requires Writer(target) and removed save().
    files = glob.glob(directory+"/temporary/*.shp")
    w = shapefile.Writer()
    for f in files:
        r = shapefile.Reader(f)
        w._shapes.extend(r.shapes())
        w.records.extend(r.records())
        w.fields = list(r.fields)
    w.save(directory+"/"+name+".shp")
    shutil.rmtree(directory+"/temporary/")
# # Model
aoi_list = []
[aoi_list.append(ID) for ID in pd.read_csv(AoI_csv_dir, dtype=str).ID]
print('The IDs have been added.')
label_df = pd.read_csv(label_csv_dir, dtype={'ID':str}).set_index('ID')
label_df.label = label_df.label.replace(3,2)
label_df.shape
features_df = pd.read_csv(features_csv_dir, dtype={'ID':str}).set_index('ID')
# .loc is an indexer and must be used with square brackets; calling it with
# parentheses (`features_df.loc([...])`) returns the indexer object instead
# of performing the lookup.
features_df.loc['0599100010050372']
features_with_label = pd.concat([features_df, label_df], axis=1)
# +
# Split-out validation dataset
X_train = features_with_label.loc[label_df.index].drop('label', 1).dropna()
y_train = features_with_label.loc[label_df.index].dropna().label
X_test = features_with_label.loc[aoi_list].drop('label', 1).dropna()
#print(X_train.shape)
#print(y_train.shape)
#print(X_test.shape)
# +
#y_train.head()
# -
seed = 0
scoring = 'accuracy'
models = []
models.append(('GBC', GradientBoostingClassifier()))
models.append(('LR', LogisticRegression()))
models.append(('NB', GaussianNB()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('SVM', SVC()))
models.append(('SGD', SGDClassifier()))
models.append(('LSVM', LinearSVC()))
# +
# Cross-validate each candidate model and collect per-model accuracy stats.
cv_results_mean = []
cv_results_std = []
results = []
names = []
for name, model in models:
    # shuffle=True is required for random_state to have any effect; recent
    # scikit-learn versions raise a ValueError when random_state is passed
    # to an unshuffled KFold.
    kfold = KFold(n_splits=5, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    cv_results_mean.append(cv_results.mean())
    cv_results_std.append(cv_results.std())
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
fig = plt.figure(figsize=(16, 8))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.ylim([0,1])
plt.boxplot(results)
ax.set_xticklabels(names)
plt.xlabel('Model', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.show()
# +
clf = LogisticRegression()
model_train = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_pred_proba = clf.predict_proba(X_test).tolist()
#print(accuracy_score(y_test, y_pred))
#print(confusion_matrix(y_test, y_pred))
#print(classification_report(y_test, y_pred))
pred_proba = clf.predict_proba(X_test)
proba = clf.fit(X_train, y_train).predict_proba(X_test)
# -
model_aoi_ids = pd.DataFrame(y_pred_proba, index=X_test.index,\
columns=['nonveg_pred_proba', \
'green_pred_proba', 'tree_pred_proba'])
# # AoI IDs Table
# +
model_aoi_ids['probability'] = model_aoi_ids[['nonveg_pred_proba','green_pred_proba',\
'tree_pred_proba']].max(axis=1)
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.nonveg_pred_proba, 'classification')] = '1'
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.green_pred_proba, 'classification')] = '2'
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.tree_pred_proba, 'classification')] = '4'
model_aoi_ids.loc[(model_aoi_ids.classification == '1', 'category')] = 'Non-Vegetation'
model_aoi_ids.loc[(model_aoi_ids.classification == '2', 'category')] = 'Vegetation'
model_aoi_ids.loc[(model_aoi_ids.classification == '4', 'category')] = 'Trees'
# -
output_model_aoi_ids = pd.concat([model_aoi_ids, features_with_label.loc[X_test.index]], axis=1, join='inner')
output_model_aoi_ids.loc[(output_model_aoi_ids['category'] == 'Non-Vegetation', 'area_interest')] = output_model_aoi_ids['total_area']
output_model_aoi_ids.loc[(output_model_aoi_ids['category'] == 'Vegetation', 'area_interest')] = output_model_aoi_ids['area_2_02']
output_model_aoi_ids.loc[(output_model_aoi_ids['category'] == 'Trees', 'area_interest')] = output_model_aoi_ids['total_area']
# # AoI Summary Table
# Non-vegetated area (area_2_01) of roofs classified as vegetation; added to
# the Non-Vegetation total below. The classification column holds the STRING
# codes '1'/'2'/'4' (assigned above), so the comparison must use '2' — the
# previous integer comparison (== 2) matched nothing and always summed to 0.
sum_value = output_model_aoi_ids.loc[output_model_aoi_ids['classification'] == '2'].area_2_01.sum()
model_aoi_summary = output_model_aoi_ids.groupby(['category'])[['category']].count()
model_aoi_summary = model_aoi_summary.rename(columns={'category':'building_count'})
model_aoi_summary['building_pc'] = model_aoi_summary.building_count/np.sum(model_aoi_summary.building_count)
model_aoi_summary['area_sum'] = output_model_aoi_ids.groupby(['category'])[['area_interest']].sum()
# DataFrame.set_value was removed in pandas 1.0; .at is the supported scalar
# setter for a single label-addressed cell.
model_aoi_summary.at['Non-Vegetation', 'area_sum'] = model_aoi_summary.iloc[0].area_sum + sum_value
model_aoi_summary['area_pc'] = model_aoi_summary.area_sum/np.sum(model_aoi_summary.area_sum)
output_model_aoi_summary = model_aoi_summary
# ## Visualization
# +
nonveg_id = []
veg_id = []
tree_id = []
[nonveg_id.append(i) for i in model_aoi_ids.loc[(model_aoi_ids.classification == '1', 'category')].index]
[veg_id.append(i) for i in model_aoi_ids.loc[(model_aoi_ids.classification == '2', 'category')].index]
[tree_id.append(i) for i in model_aoi_ids.loc[(model_aoi_ids.classification == '4', 'category')].index]
print("We now have a list of the IDS for each shapefile.")
# -
# # Output
ids_to_shp_visualization(nonveg_id, 'output', 'nonvegetation')
ids_to_shp_visualization(veg_id, 'output', 'vegetation')
ids_to_shp_visualization(tree_id, 'output', 'trees')
output_model_aoi_ids.to_csv('output/output_model_aoi_ids.csv')
model_aoi_summary.to_csv('output/output_model_aoi_summary.csv')
| classify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pet Adoption Speed - Classification
# +
# import the libraries
# %matplotlib inline
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
# sklearn :: utils
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
# sklearn :: models
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
# sklearn :: evaluation metrics
from sklearn.metrics import cohen_kappa_score
# convert scientific notation to decimals
pd.set_option('display.float_format', lambda x: '%.2f' % x)
sns.set_style('whitegrid')
# -
# ________________________
# # Load Data
# Training and test splits of the pet adoption data.
pets_df = pd.read_csv('data/train.csv')
pets_test_df = pd.read_csv('data/test.csv')
pets_df.columns
pets_df.head()
# Lookup tables mapping the integer codes used in the pet data to readable labels.
state_df = pd.read_csv('data/state_labels.csv')
breed_df = pd.read_csv('data/breed_labels.csv')
color_df = pd.read_csv('data/color_labels.csv')
state_df.head()
breed_df.head()
color_df.head()
# ____________________________
# # Data Cleaning
# +
# Replace int numbers with meaningful strings.
def change_values(df):
    """Replace the integer category codes with readable string labels, in place."""
    # These three columns share the same Yes/No/Not-sure coding.
    for col in ('Vaccinated', 'Dewormed', 'Sterilized'):
        df[col] = df[col].replace({1: 'Yes', 2: 'No', 3: 'Not_Sure'})
    df['Gender'] = df['Gender'].replace({1: 'Male', 2: 'Female', 3: 'Mixed'})
    df['Type'] = df['Type'].replace({1: 'Dog', 2: 'Cat'})
    df['FurLength'] = df['FurLength'].replace(
        {1: 'Short', 2: 'Medium', 3: 'Long', 0: 'Not_Specified'})
    df['MaturitySize'] = df['MaturitySize'].replace(
        {1: 'Small', 2: 'Medium', 3: 'Large', 4: 'Extra_Large', 0: 'Not_Specified'})
    df['Health'] = df['Health'].replace(
        {1: 'Healthy', 2: 'Minor_Injury', 3: 'Serious_Injury', 0: 'Not_Specified'})
# -
# Apply the code->label conversion to both splits and spot-check the result.
change_values(pets_df)
pets_df[['Vaccinated', 'Dewormed', 'Sterilized', 'Type', 'Gender', 'Health', 'MaturitySize', 'FurLength']].head(10)
change_values(pets_test_df)
pets_test_df[['Vaccinated', 'Dewormed', 'Sterilized', 'Type', 'Gender','Health', 'MaturitySize', 'FurLength']].head(10)
# _________________________
# # Merging
def merge_colors(pets, colors):
    """Join the color lookup table onto each of the three color-code columns.

    For every ColorN column the matching ColorID/ColorName pair is left-merged
    in and renamed to ColorN_ID/ColorN_Name; the original code columns are then
    dropped. Returns a new DataFrame; the inputs are not modified.
    (The original's `pets.copy()` was dead code -- the first merge already
    produces a new frame -- and the three identical merge/rename pairs are
    collapsed into one loop.)
    """
    df_merge = pets
    for n in (1, 2, 3):
        df_merge = pd.merge(df_merge, colors, left_on=f'Color{n}', right_on='ColorID', how='left')
        df_merge = df_merge.rename(columns={'ColorID': f'Color{n}_ID', 'ColorName': f'Color{n}_Name'})
    return df_merge.drop(columns=['Color1', 'Color2', 'Color3'])
# Attach readable color names to both splits.
df = merge_colors(pets_df,color_df)
df_t = merge_colors(pets_test_df, color_df)
def merge_breed(pets, breeds):
    """Join the breed lookup table onto the main and secondary breed columns.

    The breed table's Type column is dropped first so it does not collide with
    the pet Type column during the merges. Returns a new DataFrame; the inputs
    are not modified. (The original's `pets.copy()` was dead code -- the first
    merge already produces a new frame.)
    """
    breeds = breeds.drop(columns='Type')
    df_merge = pd.merge(pets, breeds, left_on='Breed1', right_on='BreedID', how='left')
    df_merge = df_merge.rename(columns={'BreedID': 'Main_Breed_ID', 'BreedName': 'Main_Breed_Name'})
    df_merge = pd.merge(df_merge, breeds, left_on='Breed2', right_on='BreedID', how='left')
    df_merge = df_merge.rename(columns={'BreedID': 'Second_Breed_ID', 'BreedName': 'Second_Breed_Name'})
    return df_merge.drop(columns=['Breed1', 'Breed2'])
# Attach readable breed names to both splits.
df = merge_breed(df, breed_df)
df_t = merge_breed(df_t, breed_df)
def merge_state(pets, states):
    """Join the state lookup table onto the State code column and drop the code.

    Returns a new DataFrame; the input is not modified. (The original's
    `pets.copy()` was dead code -- the merge already produces a new frame.)
    """
    df_merge = pd.merge(pets, states, left_on='State', right_on='StateID', how='left')
    return df_merge.drop(columns=['State'])
# Attach state names to both splits.
merged_df = merge_state(df, state_df)
merged_df_test = merge_state(df_t, state_df)
# _______________
# # Missing Values
# Count nulls per column; the left joins above introduce NaNs for unmatched codes.
merged_df.isnull().sum()
merged_df_test.isnull().sum()
# +
# Fill missing values in colors:
def colors_fill_mv(df):
    """Fill missing secondary/tertiary colors in place and return the frame.

    ID -1 and name 'No_Color' both mean "the pet has no such color".
    """
    id_cols = ['Color2_ID', 'Color3_ID']
    name_cols = ['Color2_Name', 'Color3_Name']
    df[id_cols] = df[id_cols].fillna(-1)
    df[name_cols] = df[name_cols].fillna('No_Color')
    return df
# -
# Fill color gaps in both splits (the helper mutates and returns its input).
merged_df = colors_fill_mv(merged_df)
merged_df_test = colors_fill_mv(merged_df_test)
# +
# Fill missing values in breeds:
def breeds_fill_mv(df):
    """Fill missing breed info in place and return the frame.

    ID -1 and name 'No_Breed' both mark the absence of a breed.
    """
    for id_col in ('Main_Breed_ID', 'Second_Breed_ID'):
        df[id_col] = df[id_col].fillna(-1)
    for name_col in ('Main_Breed_Name', 'Second_Breed_Name'):
        df[name_col] = df[name_col].fillna('No_Breed')
    return df
# -
# Fill breed gaps in both splits, then verify no nulls remain in the train split.
merged_df = breeds_fill_mv(merged_df)
merged_df_test = breeds_fill_mv(merged_df_test)
merged_df.isnull().sum()
# __________________
# # Feature Engineering
def name_columns(df):
    """Add a boolean has_name feature: True when the pet's Name is present.

    Mutates `df` and returns it.
    """
    # Vectorized notnull() replaces the original row-by-row loop; the result
    # is identical (True for non-null names, False for missing ones).
    df['has_name'] = df['Name'].notnull()
    return df
# Add the has_name flag to both splits.
newdf = name_columns(merged_df)
newdf_test = name_columns(merged_df_test)
def description_columns(df):
    """Add a boolean has_description feature: True when Description is present.

    Mutates `df` and returns it.
    """
    # Vectorized notnull() replaces the original row-by-row loop; the result
    # is identical (True for non-null descriptions, False for missing ones).
    df['has_description'] = df['Description'].notnull()
    return df
# Add the has_description flag to both splits.
newdf = description_columns(newdf)
newdf_test = description_columns(newdf_test)
def name_letters(df):
    """Add letters_morethan2: False only for named pets whose name is <= 2 chars.

    Relies on the has_name column added by name_columns(). Unnamed pets keep
    the default True, matching the original row loop. Mutates `df` and
    returns it.
    """
    # Vectorized form of the original loop: NaN names yield False in the
    # length comparison, so only rows with has_name True can become False.
    short_named = df['has_name'] & (df['Name'].str.len() <= 2)
    df['letters_morethan2'] = ~short_named
    return df
# Add the short-name flag to both splits and inspect the short-named pets.
newdf = name_letters(newdf)
newdf_test = name_letters(newdf_test)
newdf[newdf['letters_morethan2'] == False].head()
# +
# get_dummies
def get_dum(df):
    """One-hot encode the categorical columns of `df`.

    Returns a tuple: (df with the dummy columns prepended, dummies-only frame).
    """
    categorical = ['Gender', 'Sterilized', 'Vaccinated', 'Type', 'Dewormed',
                   'FurLength', 'MaturitySize', 'Health', 'Color1_Name',
                   'Color2_Name', 'Color3_Name', 'Main_Breed_Name',
                   'Second_Breed_Name', 'StateName']
    dummies = pd.get_dummies(df[categorical])
    combined = pd.concat([dummies, df], axis=1)
    return combined, dummies
# -
# One-hot encode both splits; keep the dummy-only frames for column bookkeeping.
newdf_dum, dummies = get_dum(newdf)
dummies.columns
newdf_test_dum, test_dummies = get_dum(newdf_test)
test_dummies.columns
# _____________
# # Train Models
# +
# Select the dummy columns shared by the train and test frames -- breeds/colors
# present in only one split would otherwise produce mismatched feature sets.
# Index.intersection() replaces the deprecated `&` set operation on Index
# objects (its meaning changed to an elementwise logical-and in newer pandas).
unique_dum = list(dummies.columns.intersection(test_dummies.columns))
X_columns = ['Age', 'Fee', 'Quantity', 'PhotoAmt','has_name','has_description','letters_morethan2'] + unique_dum
#X_columns = feature_imp[0].values
y_column = ['AdoptionSpeed']
# -
len(X_columns)
# +
# split the data using sklearn
df_train = newdf_dum.copy()
threshold = 0.8  # 80/20 train/validation split
X = df_train[X_columns]
y = df_train[y_column]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1.0-threshold, shuffle=True, random_state =5)
print('X_train', X_train.shape)
print('y_train', y_train.shape)
print('X_test', X_test.shape)
print('y_test', y_test.shape)
# +
# train a KNN Classifier
knn_model = KNeighborsClassifier()
knn_model.fit(X_train, y_train.values.ravel())  # ravel: sklearn expects 1-D targets
knn_pred = knn_model.predict(X_test)
# +
# Gaussian Naive Bayes baseline
gnb_model = GaussianNB()
gnb_model.fit(X_train, y_train.values.ravel())
gnb_pred = gnb_model.predict(X_test)
# +
# Random forest; the positional 150 is n_estimators
rf_model = RandomForestClassifier(150)
rf_model.fit(X_train, y_train.values.ravel())
rf_pred = rf_model.predict(X_test)
# +
# Gradient boosting with 150 boosting stages
gb_model = GradientBoostingClassifier(n_estimators=150)
gb_model.fit(X_train, y_train.values.ravel())
gb_pred = gb_model.predict(X_test)
# -
# _____________
# # Model Evaluation
# Score each model with quadratic-weighted Cohen's kappa and show its
# confusion matrix on the held-out split.
knn_kappa = cohen_kappa_score(y_test, knn_pred, weights ='quadratic')
print('kappa', round(knn_kappa, 4))
print(confusion_matrix(y_test, knn_pred))
gnb_kappa = cohen_kappa_score(y_test, gnb_pred, weights ='quadratic')
print('kappa', round(gnb_kappa, 4))
print(confusion_matrix(y_test, gnb_pred))
rf_kappa = cohen_kappa_score(y_test, rf_pred, weights ='quadratic')
print('kappa', round(rf_kappa, 4))
print(confusion_matrix(y_test, rf_pred))
gb_kappa = cohen_kappa_score(y_test, gb_pred, weights ='quadratic')
print('kappa', round(gb_kappa, 4))
print(confusion_matrix(y_test, gb_pred))
# +
# Cross Validation
def cv(model, k=7):
    """K-fold cross-validate `model` on the module-level X and y.

    Prints the quadratic-weighted Cohen's kappa per fold plus its mean and
    standard deviation.

    Parameters
    ----------
    model : estimator with fit/predict
        The classifier to evaluate.
    k : int, default 7
        Number of folds (parameterized; was hard-coded to 7 inside the body).
    """
    results = []
    kf = KFold(n_splits=k)
    for train_index, test_index in kf.split(X):
        # NOTE: these locals intentionally shadow the module-level split above;
        # they are fold-specific slices of the full X/y.
        X_train, X_test = X.values[train_index], X.values[test_index]
        y_train, y_test = y.values[train_index], y.values[test_index]
        model.fit(X_train, y_train.ravel())
        y_pred = model.predict(X_test)
        kappa = cohen_kappa_score(y_test, y_pred, weights ='quadratic')
        results.append(round(kappa, 4))
    print('Kappa for each fold:', results)
    print('AVG(kappa)', round(np.mean(results), 4))
    print('STD(kappa)', round(np.std(results), 4))
# -
# Cross-validate each of the four candidate models.
cv(knn_model)
cv(rf_model)
cv(gnb_model)
cv(gb_model)
# +
# Rank features by the gradient-boosting model's feature_importances_.
model = gb_model
fi = []
for i, col in enumerate(X_test.columns):
    fi.append([col, model.feature_importances_[i]])
feature_imp = pd.DataFrame(fi).sort_values(1, ascending=False)
feature_imp
# -
# Keep only features above a small importance floor.
feature_imp = feature_imp[feature_imp[1] >= 0.002]
feature_imp
# __________________
# # Submission
# +
# Refit on the full training data before predicting the competition test set.
X_train = df_train[X_columns]
y_train = df_train[y_column]
df_prediction = newdf_test_dum[X_columns]
# +
rf2_model = RandomForestClassifier(150)
rf2_model.fit(X_train, y_train.values.ravel())
newdf_test['AdoptionSpeed'] = rf2_model.predict(df_prediction)
# +
# NOTE(review): this cell overwrites the random-forest predictions above, so
# only the gradient-boosting predictions end up in the submission file.
gb2_model = GradientBoostingClassifier(n_estimators=200)
gb2_model.fit(X_train, y_train.values.ravel())
newdf_test['AdoptionSpeed'] = gb2_model.predict(df_prediction)
# -
newdf_test[['PetID', 'AdoptionSpeed']].to_csv('submission_v8.csv', index=False)
| Pet Adoption.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2djxYuulyWv3" colab_type="text"
# <NAME>, <NAME>, <NAME>
# + [markdown] id="90SgMrMmyWv-" colab_type="text"
# ## Framing the Problem
#
# We are asked to determine the natural log of the sum of all transactions per user. Presumably, we will be provided a data set with a number of features and we will use those features to predict how much money a user will spend on the site. Then G Stores will use the analysis to develop a marketing strategy and optimize its sales.
#
# ### Problem Details
#
# Moreover, this is a supervised learning problem as we will be analyzing labeled data. Specifically, this is a regression problem since we are asked to calculate a value from a set of labeled data. There does not seem to be a need to analyze a continuous flow of data. The data will come in a fixed-size file, and therefore this should be a batch regression task.
#
# ### Error Metric
#
# <h3><center>$ RMSE = \sqrt{\frac{1}{m}\sum_{i=1}^{m}\left[h(x^{(i)})-y^{(i)}\right]^2} $</center></h3>
#
# ***
#
# + [markdown] id="pj8oluSUyWwC" colab_type="text"
# ## The Data
#
# First we will import some libraries in anticipation of our requirements.
# + id="rPpKsOq7yWwG" colab_type="code" colab={}
import pandas as pd # Pandas Library for Data Analysis
import numpy as np # Numerical Python library for Matrix Manipulation
# + id="S0c3SzxLyWwM" colab_type="code" colab={}
# Code to read csv file into Colaboratory:
# !pip install -U -q PyDrive #Uses 'pip installer' to install PyDrive, a package for working with Google Drive.
# The next four lines import several commands from pydrive; this allows us to log into and read files from Google Drive into the program.
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download the training CSV by its Drive file id (the part after '=' in the link).
# NOTE(review): `id` shadows the builtin of the same name within this cell.
link = 'https://drive.google.com/open?id=1DwWjoqzMr61s3DaoiCB2cDm1iBrs5s8z' #Shareable link (Externally Provided)
fluff, id = link.split('=')
# print (id) # Debugging line
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('Filename.csv')
train_set = pd.read_csv('Filename.csv') #loads data into a pandas DataFrame
# Same procedure for the test CSV (overwrites the same local file name).
link = 'https://drive.google.com/open?id=1MtQGaO0otJ5ykJg64KPLFaOOu3aUURHd' #Shareable link(Externally Provided)
fluff, id = link.split('=')
# print (id)
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('Filename.csv')
test_set = pd.read_csv('Filename.csv') #loads data into a pandas DataFrame
#test_set.info()
#test_set.info()
# + [markdown] id="uOe4ro1RyWwU" colab_type="text"
# ## What does the data look like?
#
# Pandas provides several methods to visualize the data, in this section we will use the info(), head(), describe() and plot() methods to get an understanding of how the data is distributed, what data types are included and what values are missing if any.
#
# + [markdown] id="IX571gk4RQ0i" colab_type="text"
# ### head()
# We use the .head() method to visualize the top 5 rows of the data frame along all the columns.
# + id="iGDHhNnVyWwW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="da6a4d1f-6fca-483f-f066-9ff856229ff2"
train_set.head()
# + [markdown] id="V4sh6teKQBxw" colab_type="text"
# It is clear that a lot of columns are missing information that allegedly was included in a different version of the data set. The following code will remove those columns.
# + id="IulMbNAEyWwa" colab_type="code" colab={}
# This data is very noisy and needs to be cleaned up by removing a number of columns whose data is not included in the demo dataset.
list_na = ['device.browserSize', 'device.browserVersion', 'device.flashVersion', 'device.language', 'device.mobileDeviceBranding', 'device.mobileDeviceInfo',
'device.mobileDeviceMarketingName', 'device.mobileDeviceModel', 'device.mobileInputSelector', 'device.operatingSystemVersion', 'device.screenColors',
'device.screenResolution', 'geoNetwork.cityId', 'geoNetwork.latitude', 'geoNetwork.longitude', 'geoNetwork.metro', 'geoNetwork.networkLocation', 'geoNetwork.region',
'trafficSource.adwordsClickInfo.criteriaParameters','geoNetwork.networkDomain', 'device.operatingSystem',] #creates a list of column names to be removed (.drop())
train_set = train_set.drop(list_na, axis = 1) # Drops empty columns from train_set
test_set = test_set.drop(list_na, axis = 1) # Drops empty columns from test_set
# + [markdown] id="h9qneSZaQ5ug" colab_type="text"
# ### info()
# We use the info() method to get a quick description of the data,
# for example the total number of roads and each attribute's type
# and number of non-null values.
# + id="KbjRVwzQRpJX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 739} outputId="523fddb2-438b-4577-d56c-81defba24463"
train_set.info()
# + [markdown] id="3_sZzfnIR7jQ" colab_type="text"
# ### fillna()
# We need to replace null values from the transaction totals columns with zero (0). We choose zero (0) over other values because imputing the median, for instance, would introduce bias into the algorithm. In addition, notice that there are far more null than non-null data points in the transaction totals (roughly 20,000 null to 639 non-null). If we chose to only use those points for which there are transaction totals, we would have very little data to make the program useful. For this project we will assume that the data points with no transaction totals are customers who browsed the online catalog but did not make a purchase. This hypothesis is supported by the totals.transactions column.
# We will use the fillna() method to replace NaN values with zeros.
# + id="-OvtOMB3QkyN" colab_type="code" colab={}
# Treat missing revenue/transaction entries as "no purchase" (zero) in both splits.
train_set['totals.transactionRevenue'].fillna(0, inplace=True)
train_set['totals.totalTransactionRevenue'].fillna(0, inplace=True)
train_set['totals.transactions'].fillna(0, inplace=True)
test_set['totals.totalTransactionRevenue'].fillna(0, inplace=True)
test_set['totals.transactionRevenue'].fillna(0, inplace=True)
test_set['totals.transactions'].fillna(0, inplace=True)
# + [markdown] id="_X5H_WsExARu" colab_type="text"
# ### describe()
#
# The describe method provides a basic statistical summary. It calculate basic descriptive statistics such as mean, variance, min-max and quartile summaries.
# + id="V0eZoXg3yWwd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="0ac0cacb-c856-4494-903d-408570067a8f"
train_set.describe()
# + [markdown] id="GaCEZwYWxjWQ" colab_type="text"
# ### Preliminary Data Cleaning
#
# Columns totals.bounces, totals.newVisits, and totals.visits do not provide any meaningful information, and column trafficSource.adwordsClickInfo.page is difficult to interpret. We will remove these columns entirely, as we are unsure what the underlying principles are or how to interpret them.
# + id="WopKEC0fujFQ" colab_type="code" colab={}
# Columns judged uninformative or uninterpretable; removed from both splits.
list_drop = ['totals.bounces', 'totals.newVisits', 'totals.visits', 'trafficSource.adwordsClickInfo.page','trafficSource.adwordsClickInfo.adNetworkType',
'trafficSource.adwordsClickInfo.gclId', 'trafficSource.adContent', 'trafficSource.adwordsClickInfo.slot', 'trafficSource.keyword', 'trafficSource.referralPath']
train_set = train_set.drop(list_drop, axis=1)
test_set = test_set.drop(list_drop, axis=1)
# + [markdown] id="r2S68DNPUCm9" colab_type="text"
# # Visualizing The Data
#
# ### Scatter Plots
#
# Let's Review what the data looks like using the matplot Python library.
# + id="mhUXAA4uyWwh" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
# + colab_type="code" id="RujNiMHoyLe8" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="8af9d198-9892-46b3-9eca-dd0426ac7608"
train_set.head() # What does the data look like after removing some columns and replacing Nan values.
# + [markdown] id="ssc-UpMQVTrW" colab_type="text"
# Let's do some exploratory data analysis with totals.hits vs totals.transactionRevenue
# + id="98pdmsxdUbE8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="98982420-1e96-44bb-f78c-b30d83a8ab62"
# Overlay train (magenta) and test (green) on one axes via ax=ax1.
ax1 = train_set.plot(kind='scatter', x='totals.hits',y='totals.transactionRevenue', legend=True, color='magenta', alpha=0.75)
ax2 = test_set.plot(kind='scatter', x='totals.hits',y='totals.transactionRevenue', legend=True, color='green', alpha=0.75, ax=ax1)
# + id="oDSh9WOLWMy5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 452} outputId="3147e252-fac0-4511-b634-0bb60438a5de"
train_set.corr() # Tentative Correlation matrix
# + [markdown] id="hp6Iep6hXOcj" colab_type="text"
# Although relationships aren't too strong, some candidates arise such as page views and hits as it is to be expected.
# + id="lxRETyV6WdWn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 790} outputId="8bef31ad-4b40-4fb2-f0f1-acdc9e02d963"
from pandas.plotting import scatter_matrix #imports a scatter_matrix tool to visualize relationships across multiple features.
attributes = ['totals.hits', 'totals.pageviews', 'totals.transactionRevenue', 'totals.totalTransactionRevenue'] # Selects attributes to be plotted on scatter_diagram
scatter_matrix(train_set[attributes], figsize=(12, 8)) # Plots a scatter diagram selecting the attribute list from the train_set DataFrame.
# + [markdown] id="0lcXVZE7zXI8" colab_type="text"
# ### Visualizing Candidates
# From the scatter_matrix, total pageviews came out as a good candidate to predict total revenue. Let's take a closer look using the DataFrame.plot() feature.
# + id="kxgE1QyUYGUw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="be418b7d-bd81-4ceb-d91e-a3501e802529"
#train_set.plot(kind='scatter', x='totals.pageviews', y='totals.totalTransactionRevenue', alpha=0.75)
# Overlay train (purple) and test (cyan) pageviews-vs-revenue on one axes.
ax3 = train_set.plot(kind='scatter', x='totals.pageviews',y='totals.totalTransactionRevenue', legend=True, color='purple', alpha=0.75)
ax4 = test_set.plot(kind='scatter', x='totals.pageviews',y='totals.totalTransactionRevenue', legend=True, color='cyan', alpha=0.75, ax=ax3)
# + [markdown] id="wybfK1Fwa1G4" colab_type="text"
# # What about attribute combinations?
#
# It is possible that combinations of multiple attributes is a better predictor than single attributes. For example, hits per visit may be more insightful than just number of hits or number of visits.
# + id="Lhm3-c5GbYep" colab_type="code" colab={}
# The following four lines of code perform an element wise division of columns total.hits and visitNumber, and totals.hits and totals.pageviews on both the train_set and the test_Set
train_set['hits_per_visit'] = train_set['totals.hits']/train_set['visitNumber']
train_set['hits_per_views'] = train_set['totals.hits']/train_set['totals.pageviews']
test_set['hits_per_visit'] = test_set['totals.hits']/test_set['visitNumber']
test_set['hits_per_views'] = test_set['totals.hits']/test_set['totals.pageviews']
# + id="xbC_v9iwWJLI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="97aaee6a-8c10-4b7b-f4e7-b16e4fe8f18b"
corr_matrix = train_set.corr() # Crates a matrix showing the correlation amongst all variables in the DataFrame
corr_matrix['totals.transactionRevenue'].sort_values(ascending=True) # Sorts the columns in ascending order of Correlation
# + [markdown] id="h8mdF9Ig6ncp" colab_type="text"
# ### total.pageviews
#
# From the table above, appears to be a promising attribute to predict total revenue. Let's use the plot() function to visualize the relationship.
# + id="63ZCeaGeWfFo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 307} outputId="3c75d329-0959-4389-b004-ed3ce2566b4a"
train_set.plot(kind='scatter', x='totals.pageviews', y='totals.totalTransactionRevenue',
alpha=0.75) #Scatter plot
# + [markdown] id="PJCc64Xq7gKS" colab_type="text"
# Finally, let us use the describe() method one last time to ensure we've removed Nan values and Unecessary Columns.
# + id="fXxO8AmLXw9m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="c0760b65-2fc4-4929-d3dc-c7cef18902f9"
test_set.describe()
# + [markdown] id="wJUS-wd30ROH" colab_type="text"
# # Preparing the data for Machine Learning Algorithms
# + [markdown] id="spT9uEX48tgW" colab_type="text"
# Since this is a supervised learning problem, we will create copies of our data sets. In one copy, we will remove the feature 'labels' from the data set and in the other one, we will simply copy all those labels.
# + id="OuA-0MYH0P3k" colab_type="code" colab={}
# Separate features from the target label.
# NOTE(review): `google` (features without the target) is never used again --
# the regression below fits on `train_set`, which still contains
# 'totals.transactionRevenue', i.e. target leakage. Confirm intent.
google = train_set.drop('totals.transactionRevenue', axis=1) # Drops labels from training set and saves the new DataFrame as google
google_labels = train_set['totals.transactionRevenue'].copy() # Creates a DataFrame containing the feature labels as google_labels
# + [markdown] id="9OC5TYVFImcU" colab_type="text"
# # Handling Qualitative Data
#
# As discussed previously, this is a regression problem and so it takes numerical data as input, and outputs a number as well. However, from our previous use of the info() method, we know that multiple of these attributes are qualitative and so we will use Sci-kit learn tools as well as self defined tools to convert theses labels into numbers, specifically, label encoders.
#
# ### Qualitative Data Tooling Prep
# + id="9oGsFiDepaC4" colab_type="code" colab={}
from sklearn.impute import SimpleImputer
# This Scikit-Learn tool is useful for replacing values in a set. For example,
# we can use SimpleImputer to calculate the median of set of numbers and use it
# to replace Nan values in that same set of numbers.
# NOTE(review): `imputer` is created but never fit or applied (the fit lines
# below are commented out); numeric NaNs are filled manually in a later cell.
imputer = SimpleImputer(strategy='median')
#imputer.fit(train_set)
#imputer.statistics_
# + id="7CFgPPhhIufd" colab_type="code" colab={}
from sklearn.preprocessing import OneHotEncoder # This SKLEARN tool converts qualitative text data into numerical values.
# it assigns a unique numerical value per each unique text value in a set of objects. Specifically, OneHotEncoder, assigns 'One' to a specific text value and 'zero' to
# any OTHER value. It is essentially a binary classifier.
# NOTE(review): this instance is created and immediately discarded -- only the
# LabelEncoder below is actually used.
OneHotEncoder() # Initializes the Encoder
from sklearn.preprocessing import LabelEncoder # This SKLEARN tool converts qualitative text data into numerical values.
# it assigns a unique numerical value per each unique text value in a set of objects. It is a non binary classifier.
encode = LabelEncoder() # Initializes the Encoder as 'encode'
# + id="ctO2egCyKDua" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="70697a5d-d600-44e0-d176-0b8534df2b44"
def cat_encoder(df,cat_string):
    """Label-encode column `cat_string` of `df` in place and return the codes.

    Uses the module-level LabelEncoder `encode`, refitting it on each call.
    NOTE(review): because the encoder is refit separately for train_set and
    test_set below, the same text value can receive DIFFERENT integer codes
    in the two splits; fitting once on the combined values would make the
    encodings consistent. Confirm before relying on cross-split comparisons.
    """
    # Select the column to encode.
    cat = df[cat_string]
    # Fit the shared LabelEncoder on this column and transform it to integers.
    Encoded = encode.fit_transform(cat)
    # Print the key of all text values that were replaced.
    print(encode.classes_)
    # Replace the original column with its encoded version.
    df[cat_string] = Encoded
    return Encoded
# The next commands iterate over each column name in the data set.
# For each 'object' (text) column, the encoder function above replaces text
# values with numerical codes -- in both train_set and test_set, which are
# assumed to have the same structure.
# For non-object columns, the TRAIN median is computed and used to fill NaNs
# in both splits (using the train median for the test split avoids fitting
# on test data).
for value in train_set.columns:
    #print(train_set[value].dtype) # Column type # Debugging
    #print(value) # Debugging
    if train_set[value].dtype == 'object': # is it object?
        cat_encoder(train_set, value) # Encode train_set
        cat_encoder(test_set, value) # Encode test_set
    else:
        median = train_set[value].median() # Calculates median of each column
        train_set[value].fillna(median, inplace=True) # replaces Nan with median.
        test_set[value].fillna(median, inplace=True) # replaces Nan with median.
# + [markdown] id="w35ZJyBMKBek" colab_type="text"
# ### Method for handling qualitative data.
#
# We had three options for dealing with the qualitative and missing data in the data set, we could remove it, replace it with zeros or replace it with the medians.
#
# We considered that factors such as device, browser, location, and social engagement types could have a significant effect on the purchases made by visitors. For example, it can be expected that people visiting from NYC will have a higher purchasing power that visitors from third world or less developed countries. For that reason, we decided to convert all qualitative data of this kind to numerical identifiers so they could be inputed into a supervised learning algorithm, specifically regression.
#
# Let's review the data one last time now that we have performed siginificant data preparation steps.
# + id="LtDGGr98KO1H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="5de2bc74-1698-4a16-812e-5037bdc84634"
train_set.describe()
# + [markdown] id="xrWSp8DnLxJ5" colab_type="text"
# ### Data Scaling
#
# Mathematical Regression usually does NOT perform well when the data is on very different scales. For example, trying to predict billions of dollars from two variables, one in millions of dollars and the other one in cents will cause significant prediciton error in the millions of dollars.
#
# We will use the built-in StandardScaler from SKLEARN to perform some basic data scaling
# + id="LuJpsXskvtyQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="0f63b01c-505b-4853-af3a-e42bc4ab1757"
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# NOTE(review): fit_transform returns a NEW array which is discarded here, so
# neither DataFrame is actually scaled. To take effect the result must be
# assigned back (and the scaler should be fit on train only, then used to
# transform test). Confirm whether scaling was intended to apply downstream.
scaler.fit_transform(test_set)
scaler.fit_transform(train_set)
# + [markdown] id="cLrqSj5rMWUw" colab_type="text"
# ## Final Review
#
# Now that the data is cleaned and prepared, let us take one last look to make sure we have a consistent number of attributes and data tuples.
# + id="Zufq-JhJpb40" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 605} outputId="cdfaa793-eee5-49b3-934b-e2b4cde54a75"
train_set.info()
# + id="5DPTlGg8Cp_2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 605} outputId="ae373d9e-d32f-406b-b483-d4c73394fee0"
test_set.info()
# + [markdown] id="rcIQ3D_LhPYv" colab_type="text"
# # Regression
#
# + id="9x-UJhTNhRU4" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
# NOTE(review): this fits on train_set, which still CONTAINS the target column
# 'totals.transactionRevenue' (the leakage-free frame `google` was built
# earlier but never used). This likely explains the near-zero error reported
# in the Discussion section below.
lin_reg.fit(train_set, google_labels)
some_data = test_set.iloc[:5]
# NOTE(review): some_labels are the first 5 TRAINING labels, while some_data
# are the first 5 TEST rows -- the printed comparison pairs mismatched rows.
some_labels = google_labels.iloc[:5]
some_data_prepared = some_data
# + id="y3CzUMpw001P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a757abad-d56d-4b9a-da4c-1bec9c083ac0"
print('Predictions:', lin_reg.predict(some_data_prepared))
# + id="vO5psl-uDGMq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="015f90cb-562a-419d-863f-4527227dd110"
print('Labels:', list(some_labels))
# + id="KCqzcYW_Dbtg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="864647e4-c1fb-42d4-a49f-35be9f3bc271"
from sklearn.metrics import mean_squared_error
# NOTE(review): google_test (target dropped) is built but predictions are made
# on the full test_set, which still includes the target column.
google_test = test_set.drop('totals.transactionRevenue', axis=1) #Drops labels from training set
google_test_labels = test_set['totals.transactionRevenue'].copy()
google_predictions = (lin_reg.predict(test_set))
lin_mse = mean_squared_error(google_test_labels, google_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# + [markdown] id="ZzKHjHuSM1k8" colab_type="text"
# # Discussion
#
# We obtained an error of 0.08%. One possible reason is that our labels for the training set were heavily biased. Recall that the totals.transactionRevenue column was disproportionately biased toward NaN values: there were about 20,000 NaN values to 193 non-null values. We replaced these values with zero, so it makes sense that our model is heavily biased towards zero, and since most of the test_set is similarly distributed, we have developed an algorithm very accurate at predicting zero revenue from a mostly-zero database. There are steps that could be taken to rebalance the data and make the model more realistic. However, based on this specific data set, no model developed from it can be realistic, given the nature of the original data.
# Another option could have been to eliminate NaN values from the transaction column, but that would leave us with 193 data points, which isn't useful in a Big-Data application.
#
# The recommendation is that we use a distributed computing platform such as Hadoop or Spark to perform the analysis on the entire original data set.
# + id="eGMaiBJvM5QX" colab_type="code" colab={}
| Google_Analytics_Customer_Revenue_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PCA & Clustering
#
# The goal of this code is to input activity time-series data from a neural recording and cluster the cells/rois (samples) based on the neural activity (features). Clustering is performed on trial-averaged event-related responses; data from different trial conditions are concatenated and fed into dimensionality reduction (PCA) and finally into multiple clustering algorithms. The optimal hyperparameters for PCA and clustering methods are automatically determined based on the best silhouette score.
#
# 1) PCA to reduce dimensionality of trial-averaged event-related responses (rois x time) with respect to the time dimension. Intuitive concept: PCA is performed on the time dimension (each time point is treated as a feature/variable), so the resulting principal components (PCs) are linear combinations of the original time points. Each ROI's datapoint resides in an n-dimensional space, where n is the number of samples in the event-related window. PCA finds a new set of (orthogonal) axes that maximizes the variance in the activity; these new axes are linear combinations of the original axes.
#
#
# 2) Clustering: The roi data are now characterized by a reduced set of optimized axes describing time. We now cluster using either kMeans clustering or spectral clustering.
#
# 1. KMeans clustering: Assumes data clouds are roughly gaussian/spherical. The three main steps of kMeans clustering are **A)** initialize K cluster centroids, **B)** assign each data point to its nearest centroid, **C)** recompute each centroid as the mean of its assigned points; steps B and C repeat until the assignments converge
#
# 2. Spectral clustering: Not assuming any particular shape of the cluster data points. The three main steps of spectral clustering are **A)** create graph theory similarity matrix for each ROI based on how close other ROIs are in the PCA space, **B)** perform eigendecomposition of the similarity matrix, **C)** Use kmeans clustering on the transformed data.
#
# Prerequisites
# ------------------------------------
#
# All data should reside in a parent folder. This folder's name should be the name of the session and ideally be the same as the base name of the recording file.
#
# Data need to be run through the NAPECA event_rel_analysis code in order to generate the event_data_dict.pkl file, which contains event-related activity across different behavioral conditions for all neurons/ROIs.
#
#
# How to run this code
# ------------------------------------
#
# In this jupyter notebook, just run all cells in order (shift + enter).
#
# __You can indicate specific files and parameters to include in the second cell__
#
# Required Packages
# -----------------
# Python 3.7, seaborn, matplotlib, pandas, scikit-learn, statsmodels, numpy, h5py
#
# Custom code requirements: utils
#
# Parameters
# ----------
#
# fname_signal : string
#
# Name of file that contains roi activity traces. Must include full file name with extension. Accepted file types: .npy, .csv. IMPORTANT: data dimensions should be rois (y) by samples/time (x)
#
# fname_events : string
#
# Name of file that contains event occurrences. Must include full file name with extension. Accepted file types: .pkl, .csv. Pickle (pkl) files need to contain a dictionary where keys are the condition names and the values are lists containing samples/frames for each corresponding event. Csv's should have two columns (event condition, sample). The first row are the column names. Subsequent rows contain each trial's event condition and sample in tidy format. See example in sample_data folder for formatting, or this link: https://github.com/zhounapeuw/NAPE_imaging_postprocess/raw/main/docs/_images/napeca_post_event_csv_format.png
#
# fdir : string
#
# Root file directory containing the raw tif, tiff, h5 files. Note: leave off the last backslash. For example: ../napeca_post/sample_data if clone the repo directly
#
# trial_start_end : list of two entries
#
# Entries can be ints or floats. The first entry is the time in seconds relative to the event/ttl onset for the start of the event analysis window (negative if before the event/ttl onset. The second entry is the time in seconds for the end of the event analysis window. For example if the desired analysis window is 5.5 seconds before event onset and 8 seconds after, `trial_start_end` would be [-5.5, 8].
#
# baseline_end : int/float
#
# Time in seconds for the end of the baseline epoch. By default, the baseline epoch start time will be the first entry of `trial_start_end`. This baseline epoch is used for calculating baseline normalization metrics.
#
# event_sort_analysis_win : list with two float entries
#
# Time window [a, b] in seconds during which some visualization calculations will apply to. For example, if the user sets flag_sort_rois to be True, ROIs in heatmaps will be sorted based on the mean activity in the time window between a and b.
#
# pca_num_pc_method : 0 or 1 (int)
#
# Method for calculating number of principal components to retain from PCA preprocessing. 0 for bend in scree plot, 1 for num PCs that account for 90% variance.
# User should try either method and observe which result fits the experiment. Sometimes may not impact the results.
#
# flag_save_figs : boolean
#
# Set as True to save figures as JPG and vectorized formats.
#
# selected_conditions : list of strings
#
# Specific conditions that the user wants to analyze; needs to be exactly the name of conditions in the events CSV or pickle file
#
# flag_plot_reward_line: boolean
#
# If set to True, plot a vertical line for secondary event. Time of vertical line is dictated by the variable second_event_seconds
#
# second_event_seconds: int/float
#
# Time in seconds (relative to primary event onset) for plotting a vertical dotted line indicating an optional second event occurrence
#
# max_n_clusters : integer
#
# Maximum number of clusters expected for clustering models. As general rule, select this number based on maximum expected number of clusters in the data + ~5. Keep in mind that larger values will increase processing time
#
# possible_n_nearest_neighbors : array of integers
#
# In spectral clustering, set n_neighbors to n from the range of possible_n_nearest_neighbors for each data point and create connectivity graph (affinity matrix).
# +
import pickle
import math
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR, LinearSVC
from sklearn.metrics import accuracy_score, silhouette_score, adjusted_rand_score, silhouette_samples
from sklearn.cluster import AgglomerativeClustering, SpectralClustering, KMeans
from sklearn.model_selection import KFold, LeaveOneOut, train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
from sklearn import linear_model
from sklearn.manifold import TSNE
import scipy.stats as stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
from patsy import (ModelDesc, EvalEnvironment, Term, EvalFactor, LookupFactor, dmatrices, INTERCEPT)
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
import sys
import os
import re
import glob
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import json
import utils
# +
"""
USER-DEFINED VARIABLES
"""
fname_signal = 'VJ_OFCVTA_7_260_D6_neuropil_corrected_signals_15_50_beta_0.8.npy' # name of your npy or csv file that contains activity signals
fname_events = 'event_times_VJ_OFCVTA_7_260_D6_trained.csv' # name of your pickle or csv file that contains behavioral event times (in seconds)

# fdir signifies to the root path of the data. Currently, the abspath phrase points to sample data from the repo.
# To specify a path that is on your local computer, use this string format: r'your_root_path', where you should copy/paste
# your path between the single quotes (important to keep the r to render as a complete raw string). See example below:
# r'C:\Users\stuberadmin\Documents\GitHub\NAPE_imaging_postprocess\napeca_post\sample_data'
fdir = os.path.abspath("../napeca_post/sample_data/VJ_OFCVTA_7_260_D6") # for an explicit path, eg. r'C:\2pData\Vijay data\VJ_OFCVTA_7_D8_trained'
fs = 5 # sampling rate of activity data, in Hz (samples per second)

# trial extraction info
trial_start_end = [-2, 8] # trial [start, end] times (in seconds); centered on event onset
baseline_end = -0.2 # baseline epoch end time (in seconds) for performing baseline normalization; I set this to -0.2 to be safe I'm not grabbing a sample that includes the event
event_sort_analysis_win = [0, 5] # time window (in seconds) used when sorting ROIs/clusters by mean response
pca_num_pc_method = 0 # 0 for bend in scree plot, 1 for num PCs that account for 90% variance

# variables for clustering
max_n_clusters = 10 # from Vijay: Maximum number of clusters expected. This should be based on the number of functional neuron groups you expect + ~3. In your data,
# might be worth increasing this, but it will take more time to run.
'''In spectral clustering: get n nearest neighbors for each data point and create connectivity graph (affinity matrix)'''
possible_n_nearest_neighbors = np.arange(1, 10) #np.array([3,5,10]) # This should be selected for each dataset
# appropriately. When 4813 neurons are present, the above number of nearest neighbors ([30,40,30,50,60]) provides a good sweep of the
# parameter space. But it will need to be changed for other data.

# optional arguments
selected_conditions = None # set to a list of strings if you want to filter specific conditions to analyze
flag_plot_reward_line = False # if there's a second event that happens after the main event, it can be indicated if set to True; timing is dictated by the next variables below
second_event_seconds = 1 # time in seconds
flag_save_figs = False # set to true if you want to save plots

# set to True if the data you are loading in already has data from different conditions concatenated together
group_data = False
group_data_conditions = ['cs_plus', 'cs_minus'] # condition names expected in pre-grouped data (only used when group_data is True)
# +
# Declare paths/names for the session and load the activity signals.
fname = os.path.split(fdir)[1]
signals_fpath = os.path.join(fdir, fname_signal)
save_dir = os.path.join(fdir, 'event_rel_analysis')
signals = utils.load_signals(signals_fpath)

trial_start_end_sec = np.array(trial_start_end)  # trial window relative to event onset, in seconds
baseline_start_end_sec = np.array([trial_start_end_sec[0], baseline_end])
baseline_begEnd_samp = baseline_start_end_sec*fs  # baseline epoch converted to samples
# Sample indices (relative to trial start) that span the baseline epoch.
baseline_svec = (np.arange(baseline_begEnd_samp[0], baseline_begEnd_samp[1] + 1, 1) -
                 baseline_begEnd_samp[0]).astype('int')

if group_data:
    # Data already contain trial-averaged responses concatenated across conditions.
    conditions = group_data_conditions
    if selected_conditions:
        conditions = selected_conditions
    num_conditions = len(conditions)
    # Z-score each ROI's trace against its baseline epoch.
    populationdata = np.squeeze(np.apply_along_axis(utils.zscore_, -1, signals, baseline_svec))
    num_samples_trial = int(populationdata.shape[-1]/len(group_data_conditions))
    tvec = np.round(np.linspace(trial_start_end_sec[0], trial_start_end_sec[1], num_samples_trial), 2)
else:
    events_file_path = os.path.join(fdir, fname_events)
    glob_event_files = glob.glob(events_file_path)  # look for a file in specified directory
    if not glob_event_files:
        # Fail fast with a clear message. The original only printed a warning
        # here and then crashed on the next line with an opaque IndexError.
        raise FileNotFoundError(f'{events_file_path} not detected. Please check if path is correct.')
    if 'csv' in glob_event_files[0]:
        event_times = utils.df_to_dict(glob_event_files[0])
    elif 'pkl' in glob_event_files[0]:
        # latin1 b/c original pickle made in python 2
        event_times = pickle.load( open( glob_event_files[0], "rb" ), fix_imports=True, encoding='latin1' )
    event_frames = utils.dict_samples_to_time(event_times, fs)

    # identify conditions to analyze: keep only conditions that have events
    all_conditions = event_frames.keys()
    conditions = [ condition for condition in all_conditions if len(event_frames[condition]) > 0 ]
    conditions.sort()
    if selected_conditions:
        conditions = selected_conditions
    num_conditions = len(conditions)

    ### define trial timing
    # convert times to samples and get sample vector for the trial
    trial_begEnd_samp = trial_start_end_sec*fs  # turn trial start/end times to samples
    trial_svec = np.arange(trial_begEnd_samp[0], trial_begEnd_samp[1])
    # calculate time vector for plot x axes
    num_samples_trial = len( trial_svec )
    # NOTE(review): this branch builds tvec with num_samples_trial+1 points,
    # whereas the group_data branch above uses num_samples_trial -- confirm
    # which length matches the extracted trial data.
    tvec = np.round(np.linspace(trial_start_end_sec[0], trial_start_end_sec[1], num_samples_trial+1), 2)

    """
    MAIN data processing function to extract event-centered data

    extract and save trial data,
    saved data are in the event_rel_analysis subfolder, a pickle file that contains the extracted trial data
    """
    data_dict = utils.extract_trial_data(signals, tvec, trial_begEnd_samp, event_frames,
                                         conditions, baseline_start_end_samp = baseline_begEnd_samp, save_dir=None)

    #### concatenate data across trial conditions
    # concatenates data across trials in the time axis; populationdata dimensions are ROI by time (trials are appended)
    populationdata = np.concatenate([data_dict[condition]['ztrial_avg_data'] for condition in conditions], axis=1)

# remove rows (ROIs) whose time-series contain nan values
nan_rows = np.unique(np.where(np.isnan(populationdata))[0])
if nan_rows.size != 0:
    populationdata = np.delete(populationdata, obj=nan_rows, axis=0)
    print('Some ROIs contain nan in tseries!')

cmax = np.nanmax(np.abs([np.nanmin(populationdata), np.nanmax(populationdata)]))  # Maximum colormap value.
# +
def standardize_plot_graphics(ax):
    """
    Standardize plot appearance: thin all spines and hide the top/right ones.

    Parameters
    ----------
    ax : matplotlib Axes

    Returns
    -------
    ax : the same Axes, modified in place
    """
    # The original used ax.spines.itervalues(), which is Python-2-only and
    # raises AttributeError under Python 3; .values() is the equivalent.
    for spine in ax.spines.values():
        spine.set_linewidth(0.5)  # change the width of spines for both axes
    ax.spines['right'].set_visible(False)  # remove the right axis line
    ax.spines['top'].set_visible(False)    # remove the top axis line
    return ax
def fit_regression(x, y):
    """
    Fit an ordinary-least-squares regression of y on x (with intercept).

    Returns
    -------
    (slope p-value, slope, x endpoints, predicted y at endpoints, R-squared)
    """
    design = sm.add_constant(x)  # prepend a column of 1s for the intercept
    model = sm.OLS(y, design).fit()
    # Evaluate the fitted line at the extremes of x for plotting.
    endpoints = sm.add_constant(np.array([x.min(), x.max()]))
    fitted_line = model.predict(endpoints)
    return model.pvalues[1], model.params[1], endpoints[:, 1], fitted_line, model.rsquared
def CDFplot(x, ax, **kwargs):
    """
    Plot the empirical cumulative distribution function (CDF) of x on ax.

    Extra keyword arguments are forwarded to ax.plot. Returns the axes.
    """
    values = np.array(x)
    order = np.argsort(values)
    ecdf_at_values = ECDF(values)(values)
    ax.plot(values[order], ecdf_at_values[order], **kwargs)
    return ax
def fit_regression_and_plot(x, y, ax, plot_label='', color='k', linecolor='r', markersize=3,
                            show_pval=True):
    """
    Fit an OLS regression of y on x, scatter the data, and overlay the fit line.

    The legend label reports the signed correlation r (sign of the slope times
    sqrt(R^2)) and, when show_pval is True, the slope p-value as well.

    Returns
    -------
    (ax, slope, pvalue, R2)
    """
    pvalue, slope, line_x, line_y, R2 = fit_regression(x, y)
    signed_r = np.sign(slope) * np.sqrt(R2)
    if show_pval:
        plot_label = '%s p=%.2e\nr=%.3f' % (plot_label, pvalue, signed_r)
    else:
        plot_label = '%s r=%.3f' % (plot_label, signed_r)
    ax.scatter(x, y, color=color, label=plot_label, s=markersize)
    ax.plot(line_x, line_y, color=linecolor)
    return ax, slope, pvalue, R2
def make_silhouette_plot(X, cluster_labels):
    """
    Create a silhouette plot (cosine metric) for the given clustering.

    Parameters
    ----------
    X : array, samples x features -- the data that was clustered
    cluster_labels : array of int cluster assignments, one per sample

    Draws one horizontal band per cluster showing its sorted per-sample
    silhouette values, plus a dashed red line at the average score.
    NOTE(review): relies on the module-level `colors_for_cluster` list
    (declared later in this file) and assumes labels run 0..n_clusters-1.
    """
    n_clusters = len(set(cluster_labels))
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(4, 4)
    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax.set_xlim([-0.4, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax.set_ylim([0, len(X) + (n_clusters + 1) * 10])
    silhouette_avg = silhouette_score(X, cluster_labels, metric='cosine')
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels, metric='cosine')
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        color = colors_for_cluster[i]
        ax.fill_betweenx(np.arange(y_lower, y_upper),
                         0, ith_cluster_silhouette_values,
                         facecolor=color, edgecolor=color, alpha=0.9)
        # Label the silhouette plots with their cluster numbers at the middle
        ax.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i+1))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax.set_title("The silhouette plot for the various clusters.")
    ax.set_xlabel("The silhouette coefficient values")
    ax.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax.set_yticks([])  # Clear the yaxis labels / ticks
    ax.set_xticks([-0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
# +
# variables for plotting
# calculated variables
window_size = int(populationdata.shape[1]/num_conditions) # Total number of frames in a trial window; needed to split processed concatenated data
sortwindow_frames = [int(np.round(time*fs)) for time in event_sort_analysis_win] # convert sort-window times (s) to frame indices
sortresponse = np.argsort(np.mean(populationdata[:,sortwindow_frames[0]:sortwindow_frames[1]], axis=1))[::-1]
# sortresponse corresponds to an ordering of the neurons based on their average response in the sortwindow
# +
fig, axs = plt.subplots(2,num_conditions,figsize=(3*2,3*2), sharex='all', sharey='row')

# Loop through conditions; row 0 holds per-ROI heatmaps of trial-averaged
# activity, row 1 holds the ROI-averaged time-series with SEM shading.
for t in range(num_conditions):
    if num_conditions == 1:
        ax = axs[0]
    else:
        ax = axs[0,t]
    plot_extent = [tvec[0], tvec[-1], populationdata.shape[0], 0 ] # set plot limits as [time_start, time_end, num_rois, 0]
    im = utils.subplot_heatmap(ax, ' ', populationdata[sortresponse, t*window_size: (t+1)*window_size],
                               clims = [-cmax, cmax], extent_=plot_extent)
    ax.set_title(conditions[t])
    ax.axvline(0, linestyle='--', color='k', linewidth=0.5)  # event onset
    if flag_plot_reward_line:
        ax.axvline(second_event_seconds, linestyle='--', color='k', linewidth=0.5)

    ### roi-avg tseries
    if num_conditions == 1:
        ax = axs[1]
    else:
        ax = axs[1,t]
    mean_ts = np.mean(populationdata[sortresponse, t*window_size:(t+1)*window_size], axis=0)
    stderr_ts = np.std(populationdata[sortresponse, t*window_size:(t+1)*window_size], axis=0)/np.sqrt(populationdata.shape[0])
    ax.plot(tvec, mean_ts)
    shade = ax.fill_between(tvec, mean_ts - stderr_ts, mean_ts + stderr_ts, alpha=0.2) # this plots the shaded error bar
    ax.axvline(0, linestyle='--', color='k', linewidth=0.5)
    if flag_plot_reward_line:
        ax.axvline(second_event_seconds, linestyle='--', color='k', linewidth=0.5)
    ax.set_xlabel('Time from event (s)')
    if t==0:
        # NOTE(review): the first set_ylabel is immediately overwritten by the
        # second; 'Neurons' was presumably meant for the heatmap row -- confirm.
        ax.set_ylabel('Neurons')
        ax.set_ylabel('Mean norm. fluor.')

plt.tight_layout(rect=[0, 0.03, 1, 0.95])
cbar = fig.colorbar(im, ax = axs, shrink = 0.7)
cbar.ax.set_ylabel('Heatmap Z-Score Activity', fontsize=13);

if flag_save_figs:
    # NOTE(review): tempstr, dt_string and clusterkey are not defined anywhere
    # above -- these save calls would raise NameError if flag_save_figs is True.
    fig.savefig(os.path.join(save_dir, 'results', tempstr+'.pdf'), format='pdf')
    fig.savefig(os.path.join(save_dir, dt_string+'_'+clusterkey + '+' + conditions[0] + '_'+conditions[1]+'.png'), format='png', dpi=300)
# -
# ## Do PCA to reduce dimensionality in the time-domain
#
# PCA: A linear algebra-based method to optimize how a set of variables can explain the variability of a dataset. Optimizing: meaning finding a new set of axes (ie. variables) that are linear combinations of the original axes where each new axis attempts to capture the most amount of variability in the data as possible while remaining linearly independent from the other new axes.
#
# In this case, we are finding a new linearly independent parameter space that maximizes the explained variance into the top new axes
def num_pc_explained_var(explained_var, explained_var_thresh=90):
    """
    Return the number of leading PCs whose cumulative explained variance
    exceeds the threshold.

    Parameters
    ----------
    explained_var : sequence of per-PC explained-variance values (percent)
    explained_var_thresh : threshold on the cumulative sum (default 90)

    Returns
    -------
    int : count of PCs needed to exceed the threshold; if the total never
    exceeds it, the total number of components (the original implicitly
    returned None in that case, which breaks downstream arithmetic).
    """
    cum_sum = 0
    for idx, pc_var in enumerate(explained_var):
        cum_sum += pc_var
        if cum_sum > explained_var_thresh:
            return idx + 1
    # Threshold never crossed: keep every component.
    return len(explained_var)
# +
load_savedpca_or_dopca = 'dopca'
# Select 'dopca' for doing PCA on the data. Select 'savedpca' for loading my previous results

# perform PCA across time
if load_savedpca_or_dopca == 'dopca':
    # keep as many components as possible (bounded by min(n_rois, n_timepoints))
    pca = PCA(n_components=min(populationdata.shape[0],populationdata.shape[1]), whiten=True)
    pca.fit(populationdata)
    with open(os.path.join(fdir, 'pcaresults.pickle'), 'wb') as f:
        pickle.dump(pca, f)
elif load_savedpca_or_dopca == 'savedpca':
    with open(os.path.join(fdir, 'OFCCaMKII_pcaresults.pickle'), 'rb') as f:
        pca = pickle.load(f)

# pca across time
transformed_data = pca.transform(populationdata)
# transformed data: each ROI is now a linear combination of the original time-series axes
# np.save(os.path.join(save_dir, dt_string+'_'+clusterkey+'_' + "transformed_data.npy"),transformed_data)

# grab eigenvectors (pca.components_); linear combination of original axes
pca_vectors = pca.components_
print(f'Number of PCs = {pca_vectors.shape[0]}')

# Number of PCs to be kept is defined as the number at which the
# scree plot bends. This is done by simply bending the scree plot
# around the line joining (1, variance explained by first PC) and
# (num of PCs, variance explained by the last PC) and finding the
# number of components just below the minimum of this rotated plot
x = 100*pca.explained_variance_ratio_ # eigenvalue ratios (percent variance per PC)
xprime = x - (x[0] + (x[-1]-x[0])/(x.size-1)*np.arange(x.size))  # subtract the straight line between first and last point

# define number of PCs
num_retained_pcs_scree = np.argmin(xprime)
num_retained_pcs_var = num_pc_explained_var(x, 90)
if pca_num_pc_method == 0:
    num_retained_pcs = num_retained_pcs_scree
elif pca_num_pc_method == 1:
    num_retained_pcs = num_retained_pcs_var
# +
print(f'Number of PCs to keep = {num_retained_pcs}')

# plot scree plot with a dashed line at the retained-PC cutoff
fig, ax = plt.subplots(figsize=(2,2))
ax.plot(np.arange(pca.explained_variance_ratio_.shape[0]).astype(int)+1, x, 'k')
ax.set_ylabel('Percentage of\nvariance explained')
ax.set_xlabel('PC number')
ax.axvline(num_retained_pcs, linestyle='--', color='k', linewidth=0.5)
ax.set_title('Scree plot')
[i.set_linewidth(0.5) for i in ax.spines.values()]
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.subplots_adjust(left=0.3)
fig.subplots_adjust(right=0.98)
fig.subplots_adjust(bottom=0.25)
fig.subplots_adjust(top=0.9)
if flag_save_figs:
    # NOTE(review): dt_string and clusterkey are not defined in this notebook;
    # this would raise NameError if flag_save_figs is True.
    fig.savefig(os.path.join(save_dir, dt_string+'_'+clusterkey+'_' + conditions[0] + '_'+conditions[1]+'_scree_plot.png'), format='png', dpi=300)

# one plot color per condition for overlaying PC weights
# NOTE(review): assumes at least two conditions; conditions[1] raises
# IndexError when only one condition is present -- confirm.
colors_for_key = {}
colors_for_key[conditions[0]] = (0,0.5,1)
colors_for_key[conditions[1]] = (1,0.5,0)

### plot retained principal components
numcols = 2.0
fig, axs = plt.subplots(int(np.ceil(num_retained_pcs/numcols)), int(numcols), sharey='all',
                        figsize=(2.2*numcols, 2.2*int(np.ceil(num_retained_pcs/numcols))))
for pc in range(num_retained_pcs):
    ax = axs.flat[pc]
    for k, tempkey in enumerate(conditions):
        ax.plot(tvec, pca_vectors[pc, k*window_size:(k+1)*window_size], color=colors_for_key[tempkey],
                label='PC %d: %s'%(pc+1, tempkey))
    ax.axvline(0, linestyle='--', color='k', linewidth=1)  # event onset
    ax.set_title(f'PC {pc+1}')
    # labels
    if pc == 0:
        ax.set_xlabel('Time from cue (s)')
        ax.set_ylabel( 'PCA weights')
fig.tight_layout()
for ax in axs.flat[num_retained_pcs:]:
    ax.set_visible(False)  # hide unused subplot slots
plt.tight_layout()
if flag_save_figs:
    fig.savefig(os.path.join(save_dir, dt_string+'_'+clusterkey+'_' + conditions[0] + '_'+conditions[1]+'_PCA.png'), format='png', dpi=300)
# -
# ## Clustering
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# +
# calculate optimal number of clusters and nearest neighbors using silhouette scores
# NOTE(review): min_clusters is computed but never used below -- confirm whether
# possible_n_clusters should be capped by the number of ROIs.
min_clusters = np.min([max_n_clusters+1, int(populationdata.shape[0])])
possible_n_clusters = np.arange(2, max_n_clusters+1) #This requires a minimum of 2 clusters.
# When the data contain no clusters at all, it will be quite visible when inspecting the two obtained clusters,
# as the responses of the clusters will be quite similar. This will also be visible when plotting the data in
# the reduced dimensionality PC space (done below).
possible_clustering_models = np.array(["Spectral", "Kmeans"])

# scores indexed as [n_clusters, n_nearest_neighbors, model]
silhouette_scores = np.nan*np.ones((possible_n_clusters.size,
                                    possible_n_nearest_neighbors.size,
                                    possible_clustering_models.size))

# loop through iterations of clustering params
for n_clustersidx, n_clusters in enumerate(possible_n_clusters):
    kmeans = KMeans(n_clusters=n_clusters, random_state=0) #tol=toler_options
    for nnidx, nn in enumerate(possible_n_nearest_neighbors):
        spectral = SpectralClustering(n_clusters=n_clusters, affinity='nearest_neighbors', n_neighbors=nn, random_state=0)
        models = [spectral, kmeans]  # order matches possible_clustering_models
        for modelidx, model in enumerate(models):
            model.fit(transformed_data[:, :num_retained_pcs])
            silhouette_scores[n_clustersidx, nnidx, modelidx] = silhouette_score(transformed_data[:, :num_retained_pcs],
                                                                                model.labels_,
                                                                                metric='cosine')
            # Bug fix: the original wrote '{score}.3f' -- the format spec was
            # outside the braces, so a literal '.3f' was printed after the
            # full-precision value. The spec belongs inside the f-string.
            score = silhouette_scores[n_clustersidx, nnidx, modelidx]
            if modelidx == 0:
                print(f'Done with numclusters = {n_clusters}, num nearest neighbors = {nn}: score = {score:.3f}')
            else:
                print(f'Done with numclusters = {n_clusters}, score = {score:.3f}')
print(silhouette_scores.shape)
print('Done with model fitting')

# bundle sweep results for the model-selection cell below
silhouette_dict = {}
silhouette_dict['possible_clustering_models'] = possible_clustering_models
silhouette_dict['num_retained_pcs'] = num_retained_pcs
silhouette_dict['possible_n_clusters'] = possible_n_clusters
silhouette_dict['possible_n_nearest_neighbors'] = possible_n_nearest_neighbors
silhouette_dict['silhouette_scores'] = silhouette_scores
silhouette_dict['shape'] = 'cluster_nn'

#with open(os.path.join(save_dir,dt_string+'_'+ clusterkey+'_' + 'silhouette_scores.pickle'), 'wb') as f:
#    pickle.dump(temp, f)
# -
# ## Recluster with optimal params
# +
# Identify optimal parameters from the above parameter space (argmax of score).
temp = np.where(silhouette_dict['silhouette_scores']==np.nanmax(silhouette_dict['silhouette_scores']))
n_clusters = silhouette_dict['possible_n_clusters'][temp[0][0]]
n_nearest_neighbors = silhouette_dict['possible_n_nearest_neighbors'][temp[1][0]]
num_retained_pcs = silhouette_dict['num_retained_pcs']
method = silhouette_dict['possible_clustering_models'][temp[2][0]]
print(n_clusters, n_nearest_neighbors, num_retained_pcs, method)

# Redo clustering with these optimal parameters
model = None
if method == 'Spectral':
    model = SpectralClustering(n_clusters=n_clusters,
                               affinity='nearest_neighbors',
                               n_neighbors=n_nearest_neighbors,
                               random_state=0)
else:
    model = KMeans(n_clusters=n_clusters, random_state=0)
# model = AgglomerativeClustering(n_clusters=9,
#                                 affinity='l1',
#                                 linkage='average')
model.fit(transformed_data[:,:num_retained_pcs])
temp = silhouette_score(transformed_data[:,:num_retained_pcs], model.labels_, metric='cosine')
# Bug fix: the original printed '{temp}.3f' (format spec outside the braces,
# so '.3f' appeared literally); the spec belongs inside the f-string.
print(f'Number of clusters = {len(set(model.labels_))}, average silhouette = {temp:.3f}')

# Save this optimal clustering model.
# with open(os.path.join(save_dir, 'clusteringmodel.pickle'), 'wb') as f:
#     pickle.dump(model, f)
# +
# Since the clustering labels are arbitrary, I rename the clusters so that the first cluster will have the most
# positive response and the last cluster will have the most negative response.
def reorder_clusters(data, sort_win_frames, rawlabels):
    """Relabel clusters so cluster 0 has the most positive mean response.

    Parameters
    ----------
    data : 2-D array; rows are ROIs, columns are time frames.
    sort_win_frames : (start, stop) frame indices of the ranking window.
    rawlabels : 1-D array of integer cluster labels, one per row of ``data``.

    Returns
    -------
    1-D int array, same length as ``rawlabels``: label 0 marks the cluster
    with the most positive mean response inside the window, the last label
    the most negative.
    """
    # Sorting makes the mapping deterministic: the original list(set(...))
    # order was arbitrary, yet np.digitize required sorted bin edges and the
    # rank lookup assumed labels were exactly 0..k-1. An explicit
    # label -> rank dict works for any integer labels.
    uniquelabels = sorted(set(rawlabels))
    responses = np.array([np.mean(data[rawlabels == label,
                                       sort_win_frames[0]:sort_win_frames[1]])
                          for label in uniquelabels])
    # Indices into uniquelabels, ranked from most positive to most negative.
    order = np.argsort(responses)[::-1]
    rank_of_label = {uniquelabels[idx]: rank for rank, idx in enumerate(order)}
    return np.array([rank_of_label[label] for label in rawlabels])
# Apply the reordering so cluster indices are ranked by response polarity.
newlabels = reorder_clusters(populationdata, sortwindow_frames, model.labels_)
# Create a new variable containing all unique cluster labels
uniquelabels = list(set(newlabels))
# np.save(os.path.join(summarydictdir, dt_string+'_'+ clusterkey+'_' + 'spectral_clusterlabels.npy'), newlabels)
# Fixed RGB palette (reds -> oranges -> greens -> blues), indexed by cluster.
colors_for_cluster = [[0.933, 0.250, 0.211],
                      [0.941, 0.352, 0.156],
                      [0.964, 0.572, 0.117],
                      [0.980, 0.686, 0.250],
                      [0.545, 0.772, 0.247],
                      [0.215, 0.701, 0.290],
                      [0, 0.576, 0.270],
                      [0, 0.650, 0.611],
                      [0.145, 0.662, 0.878]]
# +
# Plot z-score activity for each cluster over time
# One heatmap per (condition, cluster) pair, ROI rows sorted by mean response.
sortwindow = [15, 100]  # frame window used to rank ROIs within each panel
fig, axs = plt.subplots(len(conditions),len(uniquelabels),
                        figsize=(2*len(uniquelabels),2*len(conditions)))
if len(axs.shape) == 1:
    # Keep axs 2-D even when there is a single condition row.
    axs = np.expand_dims(axs, axis=0)
numroisincluster = np.nan*np.ones((len(uniquelabels),))
for c, cluster in enumerate(uniquelabels):
    for k, tempkey in enumerate(conditions):
        # ROIs of this cluster, restricted to condition k's time window.
        temp = populationdata[np.where(newlabels==cluster)[0], k*window_size:(k+1)*window_size]
        numroisincluster[c] = temp.shape[0]
        ax=axs[k, cluster]
        # Sort ROIs by mean activity inside the sort window (descending).
        sortresponse = np.argsort(np.mean(temp[:,sortwindow[0]:sortwindow[1]], axis=1))[::-1]
        plot_extent = [tvec[0], tvec[-1], len(sortresponse), 0 ]
        im = utils.subplot_heatmap(ax, ' ', temp[sortresponse],
                                   clims = [-cmax, cmax], extent_=plot_extent)
        axs[k, cluster].grid(False)
        if k!=len(conditions)-1:
            axs[k, cluster].set_xticks([])
        axs[k, cluster].set_yticks([])
        axs[k, cluster].axvline(0, linestyle='--', color='k', linewidth=0.5)  # cue onset
        if flag_plot_reward_line:
            axs[k, cluster].axvline(second_event_seconds, linestyle='--', color='k', linewidth=0.5)
        if cluster==0:
            axs[k, 0].set_ylabel('%s'%(tempkey))
    axs[0, cluster].set_title('Cluster %d\n(n=%d)'%(cluster+1, numroisincluster[c]))
fig.text(0.5, 0.05, 'Time from cue (s)', fontsize=12,
         horizontalalignment='center', verticalalignment='center', rotation='horizontal')
fig.tight_layout()
fig.subplots_adjust(wspace=0.1, hspace=0.1)
fig.subplots_adjust(left=0.03)
fig.subplots_adjust(right=0.93)
fig.subplots_adjust(bottom=0.2)
fig.subplots_adjust(top=0.83)
cbar = fig.colorbar(im, ax = axs, shrink = 0.7)
cbar.ax.set_ylabel('Z-Score Activity', fontsize=13);
if flag_save_figs:
    plt.savefig(os.path.join(save_dir, 'cluster_heatmap.png'))
    plt.savefig(os.path.join(save_dir, 'cluster_heatmap.pdf'))
# -
# Map dataframe column index -> time (s) so melted columns carry real times.
tvec_convert_dict = {idx: t for idx, t in enumerate(tvec)}
# +
# Plot amount of fluorescence normalized for each cluster by conditions over time
fig, axs = plt.subplots(1,len(uniquelabels),
                        figsize=(3*len(uniquelabels),1.5*len(conditions)))
for c, cluster in enumerate(uniquelabels):
    for k, tempkey in enumerate(conditions):
        # ROIs of this cluster within condition k's time window.
        temp = populationdata[np.where(newlabels==cluster)[0], k*window_size:(k+1)*window_size]
        numroisincluster[c] = temp.shape[0]
        sortresponse = np.argsort(np.mean(temp[:,sortwindow[0]:sortwindow[1]], axis=1))[::-1]
        # Melt to long form so seaborn plots the across-ROI mean trace.
        sns.lineplot(x="variable", y="value",data = pd.DataFrame(temp[sortresponse]).rename(columns=tvec_convert_dict).melt(),
                     ax = axs[cluster],
                     palette=plt.get_cmap('coolwarm'),label = tempkey,legend = False)
    axs[cluster].grid(False)
    axs[cluster].axvline(0, linestyle='--', color='k', linewidth=0.5)  # cue onset
    axs[cluster].spines['right'].set_visible(False)
    axs[cluster].spines['top'].set_visible(False)
    if cluster==0:
        axs[cluster].set_ylabel('Normalized fluorescence')
    else:
        axs[cluster].set_ylabel('')
    axs[cluster].set_xlabel('')
    axs[cluster].set_title('Cluster %d\n(n=%d)'%(cluster+1, numroisincluster[c]))
axs[0].legend()
fig.text(0.5, 0.05, 'Time from cue (s)', fontsize=12,
         horizontalalignment='center', verticalalignment='center', rotation='horizontal')
fig.tight_layout()
fig.subplots_adjust(wspace=0.1, hspace=0.1)
fig.subplots_adjust(left=0.03)
fig.subplots_adjust(right=0.93)
fig.subplots_adjust(bottom=0.2)
fig.subplots_adjust(top=0.83)
if flag_save_figs:
    plt.savefig(os.path.join(save_dir, 'cluster_roiAvg_traces.png'))
    plt.savefig(os.path.join(save_dir, 'cluster_roiAvg_traces.pdf'))
# +
# Perform TSNE on newly defined clusters
# For every unordered pair of clusters, embed their retained-PC points with
# t-SNE and scatter the two groups to visually check cluster separability.
num_clusterpairs = len(uniquelabels)*(len(uniquelabels)-1)/2
# Arrange the pairwise panels in a roughly square grid.
numrows = int(np.ceil(num_clusterpairs**0.5))
numcols = int(np.ceil(num_clusterpairs/np.ceil(num_clusterpairs**0.5)))
fig, axs = plt.subplots(numrows, numcols, figsize=(3*numrows, 3*numcols))
tempsum = 0  # running index of the current subplot panel
for c1, cluster1 in enumerate(uniquelabels):
    for c2, cluster2 in enumerate(uniquelabels):
        if cluster1>=cluster2:
            continue  # visit each unordered pair exactly once
        temp1 = transformed_data[np.where(newlabels==cluster1)[0], :num_retained_pcs]
        temp2 = transformed_data[np.where(newlabels==cluster2)[0], :num_retained_pcs]
        X = np.concatenate((temp1, temp2), axis=0)
        # Perplexity scaled with sample size -- heuristic choice.
        tsne = TSNE(n_components=2, init='random',
                    random_state=0, perplexity=np.sqrt(X.shape[0]))
        Y = tsne.fit_transform(X)
        if numrows*numcols==1:
            ax = axs
        else:
            ax = axs[int(tempsum/numcols),
                     abs(tempsum - int(tempsum/numcols)*numcols)]
        # The first rows of Y belong to cluster1, the remainder to cluster2.
        ax.scatter(Y[:np.sum(newlabels==cluster1),0],
                   Y[:np.sum(newlabels==cluster1),1],
                   color=colors_for_cluster[cluster1], label='Cluster %d'%(cluster1+1), alpha=1)
        # NOTE(review): the +3 offset into the palette looks intentional for
        # contrast between the two groups -- confirm.
        ax.scatter(Y[np.sum(newlabels==cluster1):,0],
                   Y[np.sum(newlabels==cluster1):,1],
                   color=colors_for_cluster[cluster2+3], label='Cluster %d'%(cluster2+1), alpha=1)
        ax.set_xlabel('tsne dimension 1')
        ax.set_ylabel('tsne dimension 2')
        ax.legend()
        tempsum += 1
fig.tight_layout()
# -
| napeca_post/event_related_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv
# language: python
# name: venv
# ---
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import topnet
import imageio
#Data can be accessed at http://brainiac2.mit.edu/isbi_challenge/home
if __name__ == '__main__':
    # Grab and normalize blobs image.
    f = np.asarray(imageio.imread('isbi12/images/train-volume01.jpg')[:600, :600], dtype=np.float32)
    # Rescale intensities to [0, 255].
    f -= f.min()
    f /= f.max()
    f *= 255
    plt.figure()
    plt.title('Original Image')
    plt.imshow(f)
    plt.show()
    # Parameters
    hom_dim = 1        # homology dimension passed to the cubical persistence
    card = 10000       # max number of persistence pairs kept
    # Birth/death bounds defining the "bad" persistence region --
    # TODO confirm exact semantics against the topnet documentation.
    bad_pers = [float('-inf'), float('inf'),70,float('inf')]
    lr = 5e-2          # learning rate for the topological backprop
    a = (1 - 1 / np.prod(f.shape))
    steps = 5000
    kernel_size = 4
    eps = 20           # amplitude of the uniform noise perturbation
    pool_mode = 'simplex'
    def update_func(grad_dgm, dgm, cof, x):
        # Map the diagram gradient back onto the image; only the birth
        # component (bsm) is used here -- presumably by design, verify.
        bsm, dsm = topnet.compute_dgm_grad(grad_dgm, cof, x)
        return bsm
    def SpawnTopLoss(x):
        # Noise -> downsample -> cubical persistence; the sign flip makes the
        # optimizer *reduce* persistence inside the bad region.
        x_noisy = topnet.UniformNoise(x, eps)
        x_down = topnet.Spool(x_noisy, kernel_size, pool_mode)[0]
        dgm = topnet.Cubical(x_down, card, hom_dim, update_func)[0]
        bad_top = -1*topnet.AbsPersInRegion(dgm, bad_pers)
        return bad_top
    spawn_optima = topnet.TopBackprop(f, SpawnTopLoss, a, lr, steps)
    plt.figure()
    plt.title('STUMP Optima')
    plt.imshow(spawn_optima)
    plt.show()
    plt.imshow(f-spawn_optima)
    def VanillaTopLoss(x):
        # Same loss without noise/pooling, using squared persistence instead.
        dgm = topnet.Cubical(x, card, hom_dim, update_func)[0]
        bad_top = topnet.SqPersInRegion(dgm, bad_pers)
        return bad_top
| CellSegmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: '(''learning'': conda)'
# language: python
# name: python37764bitlearningcondadead39b8b5cb441aa05edba06d4d913f
# ---
# # Customer Churn Analysis
#
# ---
#
# Customer churn or customer attrition can broadly be classified as voluntary and involuntary churn. Voluntary churn is when the customer decides to cancel the service. In contrast, involuntary churn is when the customer is impacted by some external factor(s) and then stops using the service.
#
# Differentiating the customers who churn and the customers who don't vary from domain to domain. For instance, not using the service during the last 30 days or not depositing money in an account during the previous three months can be considered churned customers. The reasons for churning also vary, and some common ones can be better prices, poor service, etc.
# **The best way to avoid customer churns is to know your customer. The best way to know your customer is to analyze historical data of customer behavior.**
# In this notebook, I will use a commercial bank's customer data to analyze customer behavior and model churning probability. Predicting the churning probability can be used as an early warning system to indicate customer's propensity to churn.
# We can use two approaches to analyze customer data and model churn:
#
# * **Classification**
#
# * **Survival Analysis**
#
#
# In ```Classification``` we use supervised machine learning algorithms to predict the probability of churn.
#
# In ```Survival Analysis```, we estimate **average time-to-churn** among all customers. To do that, we use the **Kaplan-Meier** survival function and **Cox's Proportional Hazard** model.
# ## Features description
#
# ---
#
# * **CustomerId** - Unique identifier of the customer
#
#
# * **Surname** - The surname of the customer
#
#
# * **CreditScore** - Credit bureau score. Estimate for customer solvency.
#
#
# * **Geography** - Customer location or origin.
#
#
# * **Gender** - Customer gender.
#
#
# * **Age** - Age of the customer.
#
#
# * **Tenure** - The number of years the customer has been a client of the bank.
#
#
# * **Balance** - Residues on the customer accounts.
#
#
# * **NumOfProducts** - The number of products the customer purchased in the bank
#
#
# * **HasCrCard** - Whether or not the customer has a credit card. (0=No, 1=Yes)
#
#
# * **IsActiveMember** - Whether or not the customer is active. (0=No, 1=Yes)
#
#
# * **EstimatedSalary** - Estimated annual salary of the customer.
#
#
# * **Exited** - Target variable. Indicates whether or not the customer left the bank. (0=No, 1=Yes)
# # Exploratory Data Analysis
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# -
plt.style.use('seaborn')
# ### Read the data
data = pd.read_csv("data/churn_modeling.csv")
data.head()
# We have 10,000 observations and 14 features. Among the 14 features, one is the target, and the first three seem redundant, so we have to remove them.
data.shape
# ### Drop redundant columns
#
# ---
#
# The columns ```RowNumber```, ```CustomerId```, and ```Surname``` do not carry any information useful for data analysis and churn prediction. Hence, we can drop them.
data = data.drop(columns=["RowNumber", "CustomerId", "Surname"])
data.head()
# ### Simple Descriptive Statistics
data.describe().round(2)
# ### Distribution of the target variable
#
# ---
#
# Below we see that we have 20.37% churned customers and 79.63% retained customers. This can be an indicator of imbalanced data or skewness. This may lead to more false negatives. We will handle this issue in the modeling part.
pd.DataFrame(data["Exited"].value_counts(normalize=True) * 100)
# +
plt.figure(figsize=(10,8))
data['Exited'].value_counts().plot.pie(explode=[0, 0.1], autopct="%1.2f%%");
# -
# ### Checking missing values, abnormalities, and outliers
# We have no missing values
pd.DataFrame(data.isna().sum())
# Checking some abnormal observations in features. To do so, I will use the ```value_counts()``` method to see how many unique values we have and if there are observations that should not be there. Alongside that, I will also check the summary statistics of each feature and plot some graphs.
# The histogram of the credit bureau score follows a normal distribution, with some extreme values chopped off at the right tail.
pd.DataFrame(data['CreditScore'].describe().round(2))
# +
plt.figure(figsize=(10,8))
plt.hist(data["CreditScore"], bins=15)
plt.title("Histogram of Credit Score", fontsize=20);
# -
# Most of the customers are from France. The number of customers originating from Germany and Spain are almost equal.
# +
plt.figure(figsize=(10,8))
data["Geography"].value_counts(dropna=False).plot.bar()
plt.title("Geography", fontsize=20);
# -
# The gender is distributed almost equally across the observations and does not contain any abnormal observation.
# +
plt.figure(figsize=(10,8))
data["Gender"].value_counts(dropna=False).plot.bar()
plt.title("Gender", fontsize=20);
# +
plt.figure(figsize=(10,8))
sns.countplot(data=data, x="Geography", hue="Gender")
plt.title("Gender distribution across countries");
# -
# The distribution of the age variable looks normal. However, we see some extreme values at the right tail, which may turn out to be outliers.
pd.DataFrame(data['Age'].describe().round(2)).iloc[1:]
# +
plt.figure(figsize=(10,8))
plt.hist(data["Age"], bins=25)
plt.title("Histogram of Age", fontsize=20);
# -
# The tenure variable histogram shows that most customers are the bank clients for at least one year and at most nine years.
# +
plt.figure(figsize=(10,8))
plt.hist(data["Tenure"], bins=11)
plt.title("Histogram of Tenure", fontsize=20);
# -
# There are few people whose balance is more than 200K and many customers whose balance is between zero and 40K.
# +
plt.figure(figsize=(10,8))
plt.hist(data["Balance"], bins=15)
plt.title("Histogram of Balance", fontsize=20);
# -
# The count plot of the number of products variable shows that most customers own either one or two products.
# +
plt.figure(figsize=(10,8))
data["NumOfProducts"].value_counts().plot.bar()
plt.title("Number of products own by customer", fontsize=20);
# -
# ### Outlier analysis
# The boxplots show some possible outliers in ```Age``` and ```NumOfProducts``` variables. Let check if they are outside of the interquartile range to make sure we have real outliers.
# +
fig, axarr = plt.subplots(3, 2, figsize=(20, 18))
sns.boxplot(y='CreditScore', x='Exited', hue='Exited', data=data, ax=axarr[0][0])
sns.boxplot(y='Age', x='Exited', hue='Exited', data=data, ax=axarr[0][1])
sns.boxplot(y='Tenure',x='Exited', hue='Exited', data=data, ax=axarr[1][0])
sns.boxplot(y='Balance',x='Exited', hue='Exited', data=data, ax=axarr[1][1])
sns.boxplot(y='NumOfProducts', x='Exited', hue='Exited', data=data, ax=axarr[2][0])
sns.boxplot(y='EstimatedSalary',x='Exited', hue='Exited', data=data, ax=axarr[2][1]);
# -
# Checking each observation of ```Age``` and ```NumOfProducts``` against being outside of iterquartile range shows that we have some outliers.
def _has_iqr_outliers(series):
    """True if any value lies outside Tukey's fences (Q1-1.5*IQR, Q3+1.5*IQR)."""
    q1 = series.quantile(0.25)
    q3 = series.quantile(0.75)
    iqr = q3 - q1
    lower = q1 - (1.5 * iqr)
    upper = q3 + (1.5 * iqr)
    # Bug fix: the original computed `lower` but only tested the upper fence;
    # values below the lower fence are outliers too.
    return bool(((series < lower) | (series > upper)).any())

for feature in data[['Age', "NumOfProducts"]]:
    if _has_iqr_outliers(data[feature]):
        print(f"{feature} has outliers")
    else:
        print(f"{feature} does not have outliers")
# **Some notable points based on box plots**:
#
# ---
# * There is no significant difference in the credit score distribution between retained and churned customers.
#
#
# * The older customers tend to churn more than the younger ones alluding to a difference in service preference in the age categories. The bank may need to review their target market or review the strategy for retention between the different age groups.
#
#
# * With regard to the tenure, the clients on either extreme end (spent little time with the bank or a lot of time with the bank) are more likely to churn compared to those that are of average tenure.
#
#
# * The bank is losing customers with significant bank balances which is likely to hit their available capital for lending.
#
#
# * Neither the number of products nor the salary has a significant effect on the likelihood to churn.
# ## Bivariate Analysis
# Let's check if already-churned customers have some balance left on their accounts. It turned out that 1,537 churned customers still have a non-zero balance. On average, they left $120,746. A pretty high amount to lose. This suggests that there may be some errors in the data.
data[(data["Exited"] == 1) & (data["Balance"] != 0.0)].shape[0]
data[(data["Exited"] == 1) & (data["Balance"] != 0.0)]["Balance"].mean().round(2)
# ### Let's see each variable's influence on the target variable
data.head()
# +
bins = [0, 500, 600, 700, 800]
labels = ["low", "medium", "medium_high", "extreme_high"]
data["binned_score"] = pd.cut(data['CreditScore'], bins=bins, labels=labels)
# -
# The cross tabulation below shows that the customers who has medium and medium high credit score are more likely to churn.
pd.crosstab(index=data["binned_score"],
columns=data["Exited"], normalize=True).round(3) * 100
# We see that 11.39% of female customers churn compared to 8.98% male customers.
pd.crosstab(index=data["Gender"],
columns=data["Exited"],
normalize=True) * 100
# In the univariate analysis we saw that most of the customers originated from France. However, the churn rates in France and Germany are very close. This may indicate that the bank has some problems related to service or product in Germany.
pd.crosstab(index=data["Geography"],
columns=data["Exited"], normalize=True) * 100
# From the cross tabulation table we see that the bank loses customers in the 30-60 age range. This suggests that the bank may not have targeted the proper market by age, or that the bank does not have an age-tailored product for that age group.
# +
bins = [0, 30, 45, 60, 92]
labels = ["young", "middle", "old", "elder"]
data["binned_age"] = pd.cut(data['Age'], bins=bins, labels=labels)
# -
pd.crosstab(index=data["binned_age"],
columns=data["Exited"], normalize=True) * 100
# Having a credit card somewhat reduces the churn rate. Of the customers who churned, only 613 do not have credit cards.
pd.crosstab(index=data["HasCrCard"],
columns=data["Exited"], normalize=True) * 100
# There is some obfuscation around the ```IsActiveMember``` variable. However, the relationship seems natural. While the customer is inactive, there is high tendency to churn that customer.
pd.crosstab(index=data["IsActiveMember"],
columns=data["Exited"])
# The effect of tenure on the churn rate is not so obvious. Here, we see the churn rate by gender and average tenure. On average, tenure does not influence whether the customer leaves or not. We have the same situation for the ```Balance``` and ```EstimatedSalary``` variables.
data.groupby(["Gender", "Exited"]).agg({"Tenure" : "mean"}).round(2)
data.groupby(["Gender", "Exited"]).agg({"Balance" : "mean"}).round(2)
data.groupby(["Gender", "Exited"]).agg({"EstimatedSalary" : "mean"}).round(2)
# #### Correlation Matrix
numerical_features = ["CreditScore", "Age", "Balance", "EstimatedSalary"]
# +
plt.figure(figsize=(10,8))
sns.heatmap(data[numerical_features].corr(), annot=True, fmt=".3f", cmap="magma" )
plt.title("Correlation Matrix", fontsize=20);
# -
# # Feature Engineering
# Let remove helper columns created for bivariate analysis.
data.head()
data = data.drop(columns=["binned_score", "binned_age"])
data.head()
# ### Derive some features which will help to predict customer churned or not.
# First, the ratio between `Balance` and `EstimatedSalary` will give us the customer's balance as a share of annual salary. This can be considered as a proxy of customer propensity to save. If a customer holds a lot, then the churn for them has to be low.
data["Balance_over_Salary"] = data["Balance"] / data["EstimatedSalary"]
# Considering the fact that `Tenure` is a function of `Age`, I introduce two new features. The first is tenure divided by age, and the second is age minus tenure.
# +
data["Tenure_over_Age"] = data["Tenure"] / data["Age"]
data["Age_Tenure"] = data["Age"] - data["Tenure"]
# -
# To account for credit behavior given age, I derive a new feature: `CreditScore` divided by `Age`.
data["CreditScore_over_Age"] = data["CreditScore"] / data["Age"]
data.head()
# ### Correlation Matrix after adding derived features
numerical_features = ["CreditScore", "Age", "Tenure", "Balance", "EstimatedSalary",
"Balance_over_Salary", "Tenure_over_Age", "Age_Tenure", "CreditScore_over_Age"]
# +
plt.figure(figsize=(15,10))
sns.heatmap(data[numerical_features].corr(), annot=True, fmt=".3f", cmap="magma" )
plt.title("Correlation Matrix", fontsize=20);
# -
# ## Export Data
#
# ---
#
# Let export this processed data for modeling
data.to_csv("data/data.csv", index=False)
| Lecture 11/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import pandas as pd
import os
import time
from tqdm import tqdm
from rdkit import Chem
from rdkit import DataStructs
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
from IPython.display import clear_output
# -
df_trn = pd.read_csv('./data/train.csv')
with open('./data/train.csv', 'r') as csv_file:
data = csv_file.read()
PATH = './data/train/'
# +
all_captions = []
all_img_name_vector = []
for line in data.split('\n')[1:-1]:
image_id, smiles = line.split(',')
caption = '<' + smiles + '>'
full_image_path = PATH + image_id
all_img_name_vector.append(full_image_path)
all_captions.append(caption)
train_captions, img_name_vector = shuffle(all_captions, all_img_name_vector, random_state=42)
# num_examples = 908765 # 학습에 사용할 데이터 수, Baseline에서는 제공된 데이터 모두 사용하였습니다.
num_examples = 1000 # 학습에 사용할 데이터 수, Baseline에서는 제공된 데이터 모두 사용하였습니다.
train_captions = train_captions[:num_examples]
img_name_vector = img_name_vector[:num_examples]
# -
def load_image(image_path):
    """Read a JPEG from disk and prepare it for InceptionV3.

    Returns the preprocessed 299x299x3 image tensor together with the path.
    """
    raw = tf.io.read_file(image_path)
    decoded = tf.image.decode_jpeg(raw, channels=3)
    resized = tf.image.resize(decoded, (299, 299))
    return tf.keras.applications.inception_v3.preprocess_input(resized), image_path
# +
image_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
# +
encode_train = sorted(set(img_name_vector))
image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
image_dataset = image_dataset.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(16)
# -
image_dataset
# +
# for i, (img, path) in enumerate(image_dataset):
# print(i)
# batch_features = image_features_extract_model(img)
# batch_features = tf.reshape(batch_features, (batch_features.shape[0], -1, batch_features.shape[3]))
# for bf, p in zip(batch_features, path):
# path_of_feature = p.numpy().decode("utf-8")
# np.save(path_of_feature, bf.numpy())
# -
# +
def calc_max_length(tensor):
    """Return the length of the longest element in *tensor*."""
    return max(map(len, tensor))
max_length = calc_max_length(train_captions)
# -
tokenizer = tf.keras.preprocessing.text.Tokenizer(lower=False, char_level=True)
tokenizer.fit_on_texts(train_captions)
top_k = len(tokenizer.word_index)
train_seqs = tokenizer.texts_to_sequences(train_captions)
cap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')
img_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector, cap_vector, test_size=0.02, random_state=42)
len(img_name_train), len(cap_train), len(img_name_val), len(cap_val)
BATCH_SIZE = 512
BUFFER_SIZE = 1000
embedding_dim = 512
units = 1024
vocab_size = top_k + 1
num_steps = len(img_name_train) // BATCH_SIZE
features_shape = 2048
attention_features_shape = 64
def map_func(img_name, cap):
    """Load the cached .npy feature tensor for *img_name*; pass the caption through."""
    features = np.load(img_name.decode('utf-8') + '.npy')
    return features, cap
dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
dataset = dataset.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
dataset_val = tf.data.Dataset.from_tensor_slices((img_name_val, cap_val))
dataset_val = dataset_val.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset_val = dataset_val.batch(BATCH_SIZE)
dataset_val = dataset_val.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau-style) attention over spatial image features."""

    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        # Attribute names kept as-is: checkpoints restore weights by name.
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, features, hidden):
        # Broadcast the decoder state across all feature locations.
        query = tf.expand_dims(hidden, 1)
        alignment = tf.nn.tanh(self.W1(features) + self.W2(query))
        # Softmax over the spatial axis yields one weight per location.
        weights = tf.nn.softmax(self.V(alignment), axis=1)
        context = tf.reduce_sum(weights * features, axis=1)
        return context, weights
class CNN_Encoder(tf.keras.Model):
    """Projects pre-extracted image features into the embedding space."""

    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        # Linear projection followed by ReLU.
        return tf.nn.relu(self.fc(x))
class RNN_Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention; emits one token step per call."""
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units
        # Embedding for the previously generated token.
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)  # projects to vocab logits
        self.attention = BahdanauAttention(self.units)
    def call(self, x, features, hidden):
        # Attend over the image features using the previous hidden state.
        context_vector, attention_weights = self.attention(features, hidden)
        x = self.embedding(x)
        # Concatenate the attention context with the token embedding.
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        output, state = self.gru(x)
        x = self.fc1(output)
        # Flatten the time dimension before the final projection.
        x = tf.reshape(x, (-1, x.shape[2]))
        x = self.fc2(x)
        return x, state, attention_weights
    def reset_state(self, batch_size):
        # Zero-initialized hidden state for the start of a sequence.
        return tf.zeros((batch_size, self.units))
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
# +
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def loss_function(real, pred):
    """Sparse-categorical cross-entropy with padding positions zeroed out.

    The mean is taken over all positions (masked entries contribute 0).
    """
    per_token = loss_object(real, pred)
    # Padding token is index 0; mask it out of the loss.
    pad_mask = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)), dtype=per_token.dtype)
    return tf.reduce_mean(per_token * pad_mask)
# -
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=25)
start_epoch = 0
if ckpt_manager.latest_checkpoint:
start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
ckpt.restore(ckpt_manager.latest_checkpoint)
loss_plot = []
@tf.function
def train_step(img_tensor, target, validation=False):
    """One teacher-forced training step.

    Returns (summed per-step loss, loss normalized by sequence length).
    NOTE(review): the `validation` flag is currently unused.
    """
    loss = 0
    hidden = decoder.reset_state(batch_size=target.shape[0])
    # Every sequence starts with the '<' start-of-SMILES token.
    dec_input = tf.expand_dims([tokenizer.word_index['<']] * target.shape[0], 1)
    with tf.GradientTape() as tape:
        features = encoder(img_tensor)
        for i in range(1, target.shape[1]):
            predictions, hidden, _ = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # Teacher forcing: feed the ground-truth token, not the prediction.
            dec_input = tf.expand_dims(target[:, i], 1)
    trainable_variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    total_loss = (loss / int(target.shape[1]))
    return loss, total_loss
dataset
# +
EPOCHS = 25
for epoch in range(start_epoch, EPOCHS):
    start = time.time()
    total_loss = 0
    for (batch, (img_tensor, target)) in enumerate(dataset):
        batch_loss, t_loss = train_step(img_tensor, target)
        total_loss += t_loss
        # Log progress every 100 batches.
        if batch % 100 == 0:
            print ('Epoch {} Batch {} Loss {:.4f}'.format(
                epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
    # Track the per-epoch average loss and checkpoint after every epoch.
    loss_plot.append(total_loss / (batch+1))
    ckpt_manager.save()
    print ('Epoch {} Loss {:.6f}'.format(epoch + 1,
                                         total_loss/(batch+1)))
    print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# -
plt.plot(loss_plot, label='loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.legend()
plt.show()
# +
# Greedy decoding: at each step emit the single most probable token.
def predict(img_tensor):
    hidden = decoder.reset_state(batch_size=img_tensor.shape[0])
    # Start every sequence with the '<' start token.
    dec_input = tf.expand_dims([tokenizer.word_index['<']] * img_tensor.shape[0], 1)
    features = encoder(img_tensor)
    result = []
    for i in range(max_length):
        predictions, hidden, _ = decoder(dec_input, features, hidden)
        predictions = np.argmax(predictions, axis=1)
        result.append(predictions)
        # Feed the predicted token back in at the next step.
        dec_input = tf.expand_dims(predictions, 1)
    return np.array(result)
# Stochastic decoding: sample tokens from the predicted distribution
# (used below to retry sequences whose greedy decode gave invalid SMILES).
def predict_(img_tensor):
    hidden = decoder.reset_state(batch_size=img_tensor.shape[0])
    dec_input = tf.expand_dims([tokenizer.word_index['<']] * img_tensor.shape[0], 1)
    features = encoder(img_tensor)
    result = []
    for i in range(max_length):
        predictions, hidden, _ = decoder(dec_input, features, hidden)
        # Sample one token per sequence from the logits.
        predictions = tf.random.categorical(predictions, 1)[:, 0].numpy()
        result.append(predictions)
        dec_input = tf.expand_dims(predictions, 1)
    return np.array(result)
def map_func_pred(img_name):
    # Load the cached image-feature tensor saved next to the image file.
    img_tensor = np.load(img_name.decode('utf-8')+'.npy')
    return img_tensor
# -
# Greedy-decode the whole validation set.
val_result = []
for batch in tqdm(dataset_val):
    val_result.extend(predict(batch[0]).T)
val_result = np.array(val_result)
# Convert token ids back into SMILES strings, truncating at the '>' end token.
preds = []
for rid in range(cap_val.shape[0]):
    pred = ''.join([tokenizer.index_word[i] for i in val_result[rid]])
    pred = pred.split('>')[0]
    preds.append(pred)
# Collect indices whose decoded string is not parseable SMILES.
error_idx = []
for i, pred in enumerate(preds):
    m = Chem.MolFromSmiles(pred)
    if m == None:
        error_idx.append(i)
error_idx = np.array(error_idx)
error_idx_ = error_idx.copy()
drop_error = []
# Repeatedly re-decode the still-invalid sequences with stochastic sampling
# until fewer than 10 invalid predictions remain.
while True:
    # Map position in the retry batch -> original validation index.
    error_idx_dict = {}
    for i, e in enumerate(error_idx_):
        error_idx_dict[i] = e
    img_name_val_, cap_val_ = np.array(img_name_val)[error_idx_], np.array(cap_val)[error_idx_]
    dataset_val_ = tf.data.Dataset.from_tensor_slices((img_name_val_, cap_val_))
    dataset_val_ = dataset_val_.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset_val_ = dataset_val_.batch(BATCH_SIZE)
    dataset_val_ = dataset_val_.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    val_result_ = []
    for batch in dataset_val_:
        val_result_.extend(predict_(batch[0]).T)
    val_result_ = np.array(val_result_)
    preds_ = []
    for rid in range(val_result_.shape[0]):
        pred = ''.join([tokenizer.index_word[i] for i in val_result_[rid] if i not in [0]])
        pred = pred.split('>')[0]
        preds_.append(pred)
    # Keep any resampled prediction that now parses as valid SMILES.
    for i, pred in enumerate(preds_):
        m = Chem.MolFromSmiles(pred)
        if m != None:
            preds[error_idx_dict[i]] = pred
            drop_idx = np.where(error_idx==error_idx_dict[i])[0]
            drop_error.append(drop_idx[0])
    error_idx_ = np.delete(error_idx, drop_error)
    clear_output(wait=True)
    print(len(list(drop_error)), '/', error_idx.shape[0])
    if error_idx.shape[0]-len(list(drop_error)) < 10 :
        break
# Exact-match accuracy against the ground-truth captions (strip '<' and '>').
count = 0
answer = []
for rid, pred in enumerate(preds):
    true = ''.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])[1:-1]
    answer.append(true)
    if true == pred:
        count+=1
print('val_accuracy : ', count/cap_val.shape[0])
# +
# Fingerprint similarity between each predicted SMILES and its ground truth.
score = []
for i, pred in enumerate(preds):
    mol_true = Chem.MolFromSmiles(answer[i])
    mol_pred = Chem.MolFromSmiles(pred)
    # `is not None` instead of `!= None`; also guard mol_true -- the original
    # crashed inside RDKFingerprint if a ground-truth SMILES failed to parse.
    if mol_true is not None and mol_pred is not None:
        fp_true = Chem.RDKFingerprint(mol_true)
        fp_pred = Chem.RDKFingerprint(mol_pred)
        similarity = DataStructs.FingerprintSimilarity(fp_true, fp_pred)
    else:
        # An unparseable SMILES on either side scores zero.
        similarity = 0
    score.append(similarity)
print('val_similarity :', np.mean(score))
# -
| [Dacon] 2009-smiles/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: ipykernel_py2
# ---
# ## Monte Carlo - Forecasting Stock Prices - Part I
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# Download the data for Microsoft (‘MSFT’) from Morningstar for the period ‘2002-1-1’ until today.
# Use the .pct_change() method to obtain the log returns of Microsoft for the designated period.
# Assign the mean value of the log returns to a variable, called “U”, and their variance to a variable, called “var”.
# Calculate the drift, using the following formula:
#
# $$
# drift = u - \frac{1}{2} \cdot var
# $$
# Store the standard deviation of the log returns in a variable, called “stdev”.
# ******
# Repeat this exercise for any stock of interest to you. :)
| Python for Finance - Code Files/103 Monte Carlo - Predicting Stock Prices - Part I/Online Financial Data (APIs)/Python 2 APIs/MC Predicting Stock Prices - Part I - Exercise_Morn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/uguryildiz/olasilik/blob/master/permutasyon.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="rrOhh9RnAFwb" colab_type="text"
# # Tanım
#
# Bir kümenin tüm elemanlarının bir sıraya göre düzenlenmesi veya küme zaten sıralanmışsa, küme elemanlarının yeniden sıralanması işlemi matematikte permütasyon olarak adlandırılır. Permütasyon aşağıdaki formül ile hesaplanır.
#
# * P ( n , r ) = n ! / ( n - r ) !
#
# n : kümenin toplam eleman sayısı,
#
# r : sıralanacak eleman sayısı
#
# Aşağıda üç elemanlı (A,B,C) bir kümenin üçlü permütasyonu görülmektedir.
#
# * ABC
# * ACB
# * BAC
# * BCA
# * CAB
# * CBA
#
# Permütasyon hesabında pratik hesaplama yöntemleri bulunmaktadır. Örneğin beş elemanlı bir kümenin üçlü permütasyon hesabı aşağıda görülmektedir.
#
# * P(5,3) = 5.4.3 = 60
#
# Burada toplam eleman sayısından başlayarak sıralanacak eleman sayısı kadar sayı birer azaltılarak birbirleriyle çarpılır. Diğer pratik hesaplama yöntemleri ise aşağıda görülmektedir.
#
# * P(n,n) = n!
# * P(n,1) = n
# * P(n,0) = 1
#
# + [markdown] id="o7L337WtUK6W" colab_type="text"
# # Örnek Problem
# Bir kitaplıktaki 3 fen bilgisi, 4 matematik, 5 türkçe kitabı matematik kitapları en başta ve aynı derse ait kitaplar yan yana olmak şartıyla kaç farklı şekilde sıralanırlar?
# + [markdown] id="8WZUvzeYUNyj" colab_type="text"
# # Çözüm
#
# Bütün matematik kitapları en başta olacaktır, matematik kitaplarının 4 lü permütasyonu P(4,4) = 4.3.2.1 ile hesaplanır. Bunula beraber türkçe ardından fen bilgisi kitapları yada fen bilgisi ardından türkçe kitapları sıralanmalıdır. Türkçe ve fen bilgisi yer değişiminden 2 grubun 2'li permütasyonu P(2,2) = 2.1 ile hesaplanır. Türkçe kitaplarının 5'li permütasyonu P(5,5) = 5.4.3.2.1 ile hesaplanır. Fen bilgisi kitaplarının 3'lü permütasyonu P(3,3) = 3.2.1 ile hesaplanır.
# Toplam permütasyon Pt = P(4,4) x P(2,2) x P(5,5) x P(3,3) olarak hesaplanır.
# + [markdown] id="6Um7Zl7SUP5Z" colab_type="text"
# # Python Kodu
# Permütasyon hesaplayan fonksiyon ve problemin cevapı aşağıda görülmektedir.
# + id="FL0i3luTEIZN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a1cf3ce5-4c3b-40d8-b6ce-35b05f26d489"
def P(n, r):
    """Return the number of r-permutations of an n-element set, n!/(n-r)!.

    Computed as the product n * (n-1) * ... * (n-r+1); P(n, 0) == 1.
    """
    total = 1
    for factor in range(n - r + 1, n + 1):
        total *= factor
    return total
print(P(4, 4) * P(2, 2) * P(5, 5) * P(3, 3))
| permutasyon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # CSV Database Shortener
# By <NAME> @ Coma Science Group, GIGA Research, University of Liege
# Creation date: 2017-04-05
# License: MIT
# v1.2.0
# 2017-2019
#
# ## INSTALL NOTE:
# You need to pip install pandas before launching this script.
# Tested on Python 2.7.15
#
# ## DESCRIPTION:
# Extracts a subset of rows from a csv database based on a list of names provided in a second csv file. In other words, we keep from the reference database only the records that have an id that can be found in the filter database.
# You have two csv files: one being the full database full of demographics infos, the other one being the list of patients names for your study.
# If you want to filter the full database to extract only the patients in your smaller list, then use this notebook.
#
# ## USAGE:
# Any two csv files can be used for the shortening, you just need to have a "name" field in both. The first csv will be used as the reference, and its rows will be extracted if same names are found in the second database.
#
# ## TODO:
# * Nothing here!
# Forcefully autoreload all python modules
# %load_ext autoreload
# %autoreload 2
# +
# AUX FUNCTIONS
import os, sys
cur_path = os.path.realpath('.')
sys.path.append(os.path.join(cur_path, 'csg_fileutil_libs')) # for unidecode and cleanup_name, because it does not support relative paths (yet?)
import re
from csg_fileutil_libs.aux_funcs import compute_names_distance_matrix, cleanup_name, cleanup_name_df, cleanup_name_customregex_df, replace_buggy_accents, save_df_as_csv, _tqdm, df_to_unicode, df_to_unicode_fast
# +
# PARAMETERS
# Reference database, from which records will be extracted (need to include a "name" column with all the patients names)
ref_db = r'databases_output\fmp_db_subjects_aggregated.csv_etiosedatfixed_dicomsdatediag_dicompathsedat.csv_acute.csv'
ref_db_idcol = 'Name' # id column name for the reference database
# Filter database, the one used to filter the reference database's records by matching names (need to include a "name" column with all the patients names)
filt_db = r'databases_original\CSG_demographics_QC_2_final 36 subjects_FOR_Stephen (ENCRYPTED).csv'
filt_db_idcol = 'name' # id column name for the filter database (both databases will be joined on this key and then filtered)
# Output where to store the CSV files
output_dir = r'databases_output'
# How to filter names in the filter database (remove useless terms) - can use regex
# Mapping of regex pattern -> replacement string, applied to each name before matching.
filter_name = {'_': ' ',
               'repos': '',
               'ecg': '',
               '[0-9]+': '',
              }
# ----------------------------------------
# # Loading databases
# +
import pandas as pd
# Load reference database (semicolon-separated CSV)
cref = pd.read_csv(ref_db, sep=';')
cref.dropna(axis=0, subset=[ref_db_idcol], inplace=True) # drop lines where the name is empty, important to avoid errors
# Clean up names in full/reference database (to more easily compare)
cref[ref_db_idcol+'_orig'] = cref[ref_db_idcol] # make a backup first, so original spelling can be restored later
cref = cleanup_name_df(cref, col=ref_db_idcol)
# Show
cref
# -
# Load filter database; drop fully-empty rows and rows without an id/name
cfilt = pd.read_csv(filt_db, sep=';').dropna(how='all').dropna(subset=[filt_db_idcol], how='all')
# Reorder by name
cfilt.sort_values(filt_db_idcol, inplace=True)
# Removing useless terms from the patient name (regex map defined in the parameters cell)
if filter_name:
    cfilt = cleanup_name_customregex_df(cfilt, filter_name)
# Cleanup name in filtering db
cfilt = cleanup_name_df(cfilt, col=filt_db_idcol)
# Print db
print("Filter database contains %i rows." % len(cfilt))
cfilt
# Sanity check: number of subjects in the filter database with missing id/name (they will be dropped, we need an id to filter!)
missing_id = cfilt[filt_db_idcol].isnull() | (cfilt[filt_db_idcol] == '')
print('Filter database contains %i rows with a missing id/name, they will be dropped.' % sum(missing_id))
cfilt[missing_id]
# ------------------------
# ## Comparison of the two csv databases
# +
# Merging parameters - EDIT ME - do not hesitate to try different parameters until the matching seems good to you
dist_threshold_letters = 0.2 # percentage of letters matching
dist_threshold_words = 0.4 # percentage of words matching
dist_threshold_words_norm = True # normalize words jaccard distance? Can be True, False or None
dist_minlength = 4 # minimum length of words to compare distance jaccard words
# Merge the two databases names: fuzzy name matching between filter and reference ids.
# dmat maps each filter-db name -> matched reference-db name(s), or None when no match.
dmat = compute_names_distance_matrix(cfilt[filt_db_idcol], cref[ref_db_idcol], dist_threshold_letters, dist_threshold_words, dist_threshold_words_norm, dist_minlength)
print('Reference & Filter databases were merged successfully!')
print('List of matchs (please check if this looks fine!):')
dmat
# -
# Save the list of names found in the filter database but missing in the reference database
missing_list = [key for key, val in dmat.items() if val is None]
cmissing = pd.DataFrame(missing_list, columns=[ref_db_idcol])
#cmissing.to_csv(os.path.join(output_dir, 'shorten_missing.csv'), index=False, sep=';')
save_df_as_csv(df_to_unicode_fast(cmissing), os.path.join(output_dir, 'shorten_missing.csv'), fields_order=False, keep_index=False)
print('Saved list of missing subjects in shorten_missing.csv')
print('Missing subjects (no demographics found in the reference database): %i' % len(missing_list))
cmissing
# +
# Shorten (filter) reference demographics database
# In other words, we keep from the reference database only the records that have an id that can be found in the filter database
found_list = [item[0] for item in filter(None, dmat.values())]
cfound = cref[cref[ref_db_idcol].isin(found_list)]
# Add a column to show what was the filtering name
# Invert dmat: one row per (matched reference name, filter name) pair.
dmat_inv = {ref_db_idcol: [], (ref_db_idcol+'_filter'): []}
for key, vals in dmat.items():
    if vals is None:
        continue
    for v in vals:
        dmat_inv[ref_db_idcol].append(v)
        dmat_inv[ref_db_idcol+'_filter'].append(key)
# create a dataframe
df_dmat_inv = pd.DataFrame(dmat_inv)
df_dmat_inv[ref_db_idcol] = df_dmat_inv[ref_db_idcol].apply(str)
# merge on name column
cfound2 = pd.merge(cfound, df_dmat_inv, how='outer', on=ref_db_idcol)
# reorder columns to place name_filter just after name
# NOTE: list + range concatenation is Python 2 only (matches this notebook's
# Python 2 kernel); under Python 3 this would need list(range(...)).
cfound2 = cfound2[cfound2.columns[[0, -1] + range(1,len(cfound2.columns)-1)]]
# Restore original name (without cleanup)
cfound2[ref_db_idcol+'_clean'] = cfound2[ref_db_idcol] # make a backup of the cleaned up name first, so that we can easily compare and understand how the filtering worked
cfound2[ref_db_idcol] = cfound2[ref_db_idcol+'_orig'] # restore the original names
# reorder columns to place name_orig just after name
cfound2 = cfound2[cfound2.columns[[0, -1] + range(1,len(cfound2.columns)-1)]]
# Save into a csv file
#cfound2.to_csv(os.path.join(output_dir, 'shorten_found.csv'), index=False, sep=';')
save_df_as_csv(df_to_unicode_fast(cfound2), os.path.join(output_dir, 'shorten_found.csv'), fields_order=False, keep_index=False, blankna=True)
print('Saved list of found subjects in shorten_found.csv')
print('Found subjects: %i' % len(found_list))
cfound2
# -
# ----------------------------------------------------
# ## Test
# +
# Ad-hoc sanity check of the two distance measures used by the matcher,
# on a hand-picked pair of similar names.
from csg_fileutil_libs.distance import distance
from csg_fileutil_libs.aux_funcs import distance_jaccard_words_split
subj = 'de caliafiera'
c = 'de caliafiera teng'
print(distance.nlevenshtein(subj, c, method=1))
print(distance_jaccard_words_split(subj, c, partial=True, norm=None, dist=dist_threshold_letters, minlength=3))
| csg_datafusion_shortendb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import sys
from functools import partial
sys.path.append('./adwin')
from mdl.smdl import SMDL
from mdl.model import Norm1D
from cf.mycf import ChangeFinder
from bocpd.mybocpd import BOCD, StudentT, constant_hazard
from adwin2 import ADWIN2
from scaw.SCAW2 import MDLCPStat_adwin2, lnml_gaussian
from generate_data import generate_multiple_jumping_variance
from evaluation import calc_auc_average, calc_falarms_benefit, InvRunLen
# +
###
# Experiment-wide constants for the synthetic jumping-variance benchmark.
N = 10        # passed to generate_multiple_jumping_variance as its first arg
N_trial = 10  # number of random seeds / repetitions per hyper-parameter setting
MU = 0.0      # mean of the generated series
COEF = 0.1    # variance scale coefficient of the generated series
# true change points
cps_true = np.array([1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]) - 1
###
# -
# # ChangeFinder
# +
# Grid search over ChangeFinder's forgetting rate (r_cf) and AR order,
# averaging AUC over N_trial random datasets.
smooth1 = 5
smooth2 = 5
order = 3  # NOTE: dead assignment — immediately overwritten by the loop below
for r_cf in [0.003, 0.005, 0.01, 0.03, 0.1]:
    for order in [2, 3, 4, 5]:
        scores_cf = []
        for seed in range(N_trial):
            X = generate_multiple_jumping_variance(N, mu=MU, coef=COEF, seed=seed)
            # ChangeFinder: online scoring, one score per sample
            cf = ChangeFinder(r=r_cf, order1=order, order2=order, smooth1=smooth1, smooth2=smooth2)
            scores = []
            for x in X:
                score, _ = cf.update(x)
                scores.append(score)
            scores = np.array(scores)
            scores_cf.append(scores)
        scores_cf = np.array(scores_cf)
        auc_list = calc_auc_average(scores_cf)
        print('r_cf =', r_cf, 'order =', order, ':', np.mean(auc_list), '+/-', np.std(auc_list))
# -
# # BOCPD
# +
# Bayesian Online Change-Point Detection grid search over the hazard-rate
# LAMBDA and the growth-probability THRESHOLD.
ALPHA = 0.1
BETA = 1.0
KAPPA = 1.0
MU = 0.0
DELAY = 15  # scoring starts after this warm-up; first DELAY scores are NaN
mu = 0.0    # NOTE(review): appears unused below — confirm before removing
for LAMBDA in [100, 600]:
    for THRESHOLD in [0.1, 0.3]:
        scores_bocpd = []
        for seed in range(N_trial):
            X = generate_multiple_jumping_variance(N, mu=MU, coef=COEF, seed=seed)
            # BOCPD with a Student-t predictive and constant hazard
            bocd = BOCD(partial(constant_hazard, LAMBDA),
                        StudentT(ALPHA, BETA, KAPPA, MU), X)
            change_points = []
            scores = [np.nan] * DELAY
            for x in X[:DELAY]:
                bocd.update(x)
            for x in X[DELAY:]:
                bocd.update(x)
                if bocd.growth_probs[DELAY] >= THRESHOLD:
                    change_points.append(bocd.t - DELAY + 1)
                # score = expected inverse run length, weighted by growth probabilities
                score = np.sum(bocd.growth_probs[:bocd.t - DELAY] * 1.0 / (1.0 + np.arange(1, bocd.t - DELAY + 1)))
                scores.append(score)
            scores_bocpd.append(scores)
        scores_bocpd = np.array(scores_bocpd)
        auc_list = calc_auc_average(scores_bocpd)
        print('LAMBDA =', LAMBDA, 'THRESHOLD =', THRESHOLD, ':', np.mean(auc_list), '+/-', np.std(auc_list))
# -
# # Adwin2
# +
# ADWIN2 sweep over the confidence parameter delta; scores are converted to
# change scores via InvRunLen before AUC evaluation.
M = 5
for delta in [0.1, 0.3, 0.5, 0.7, 0.9]:
    scores_ad = []
    for seed in range(N_trial):
        X = generate_multiple_jumping_variance(N, mu=MU, coef=COEF, seed=seed)
        ad = ADWIN2()
        scores = ad.transform(X, delta=delta, M=M)
        scores_ad.append(InvRunLen(scores))
    scores_ad = np.array(scores_ad)
    auc_list = calc_auc_average(scores_ad)
    print('delta =', delta, ':', np.mean(auc_list), '+/-', np.std(auc_list))
# -
# # D-MDL
# +
# D-MDL: 0th/1st/2nd-order MDL change scores over a sliding window of
# half-width h; the first/last h positions cannot be scored and stay NaN.
h = 100
mu_max = 5.0
sigma_min = 0.005
scores_list_0th = []
scores_list_1st = []
scores_list_2nd = []
for i in range(N_trial):
    X = generate_multiple_jumping_variance(N, mu=MU, coef=COEF, seed=i)
    len_X = len(X)
    norm1d = Norm1D()
    smdl = SMDL(norm1d)
    scores_0th = np.array([np.nan]*h + [ smdl.calc_change_score(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
                                         for t in range(h, len_X-h)] + [np.nan]*h)
    scores_list_0th.append(scores_0th)
    norm1d = Norm1D()
    smdl = SMDL(norm1d)
    scores_1st = np.array([np.nan]*h + [ smdl.calc_change_score_1st(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
                                         for t in range(h, len_X-h)] + [np.nan]*h)
    scores_list_1st.append(scores_1st)
    norm1d = Norm1D()
    smdl = SMDL(norm1d)
    scores_2nd = np.array([np.nan]*h + [ smdl.calc_change_score_2nd(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
                                         for t in range(h, len_X-h)] + [np.nan]*h)
    scores_list_2nd.append(scores_2nd)
scores_list_0th = np.array(scores_list_0th)
scores_list_1st = np.array(scores_list_1st)
scores_list_2nd = np.array(scores_list_2nd)
auc_list_0th = calc_auc_average(scores_list_0th)
auc_list_1st = calc_auc_average(scores_list_1st)
auc_list_2nd = calc_auc_average(scores_list_2nd)
# -
print(np.mean(auc_list_0th), '+/-', np.std(auc_list_0th))
print(np.mean(auc_list_1st), '+/-', np.std(auc_list_1st))
print(np.mean(auc_list_2nd), '+/-', np.std(auc_list_2nd))
| jupyter/synthetic/synthetic_abrupt_variance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data Description
# As mentioned, we collected Wikipedia articles corresponding to the five major social science disciplines: Economics, Political Science, Anthropology, Sociology and Psychology. To understand how we did this one needs to know the structure of how information is stored into [categories](https://en.wikipedia.org/wiki/Wikipedia:Contents/Categories) on Wikipedia (very exciting!). Each category consists of several subcategories with even more sub-subcategories and on to infinity. Besides the subcategories each level of the depthness also contains the corresponding pages belonging to that level's subcategories. This is visualised in the image below.
# + tags=["hide-input"]
# Create illustration of wikipedia structure: a tree from the "Social sciences"
# root category down through each discipline's subcategory levels, plus a
# parallel column of "Depth n" labels. Rendered with graphviz 'dot' layout.
import pygraphviz as pgv
G=pgv.AGraph(directed=True)
G.add_node("ROOT", label="Category: Social sciences", fontsize=20)
G.add_node("ROOT_i", label="Depth 0", shape = "plaintext", fontsize=20)
disciplines = ["Anthropology",
               "Economics",
               "Sociology",
               "Political Science",
               "Psychology"]
for i,k in enumerate(disciplines):
    G.add_node("Child_%i" % i, label=f"Subcategory: {k}")
    G.add_edge("ROOT", "Child_%i" % i)
    G.add_node("Grandchild_%i" % i, label = f"List of {k} sub-subcategories")
    G.add_edge("Child_%i" % i, "Grandchild_%i" % i)
    G.add_node("Greatgrandchild_%i" % i, label = f"... n list of {k} sub-subcategories")
    G.add_edge("Grandchild_%i" % i, "Greatgrandchild_%i" % i)
    G.add_node("Child_%ix" % i, label="Depth 1", shape = "plaintext", fontsize=20)
    G.add_node("Grandchild_%ix" % i, label="Depth 2", shape = "plaintext", fontsize=20)
    G.add_node("Greatgrandchild_%ix" % i, label="Depth n", shape = "plaintext", fontsize=20)
    G.add_edge("ROOT_i", "Child_%ix" % i)
    G.add_edge("Child_%ix" % i, "Grandchild_%ix" % i)
    G.add_edge("Grandchild_%ix" % i, "Greatgrandchild_%ix" % i)
G.layout(prog='dot')
# NOTE(review): filename is misspelled ('struture') — confirm it matches the
# image reference in the markdown cell below before renaming.
G.draw('wikipedia_struture.png')
# 
# Perhaps a specific depth level could be the key to an representative sampling strategy, we thought and so we used the tool [PetScan](https://petscan.wmflabs.org/) to access the articles at a predefined depth. This enabled us to find all the pages for each discipline depending on the depth of query which in our case were set to 2. A nice bonus to this approach is that Petscan can be accessed programmatically through Python and thus provide us with a relevant list of pages to collect from Wikipedia. A sort of structured way to sample from an otherwise chaotic encyclopedia. But now that we have the data, let's get serious.
# 
# The table below shows the five first observations of our data-set, which includes the following variables:
#
# * `name`: The name of the Wikipedia article.
# * `parent`: The discipline to which the article belongs.
# * `edges`: Contains all links to another Wikipedia page.
# * `text`: The raw text of the article.
# * `cleaned_text`: Punctuation removed, lower-cased.
# * `lemmatized`: The cleaned text in lemmatized form, stop words removed.
# * `tokens`: The lemmatized tokenized into a list of words.
# * `gcc`: Dummy for if the article is part of the giant component in the network.
#
# Once again, the data can be downloaded from the following (if you really want to see for yourself) [link](https://drive.google.com/file/d/1ChfKERqmc41asg1dbKisEl3jMdNwxFPP/view?usp=sharing).
# + tags=["hide-input"]
import pandas as pd
import numpy as np
from ast import literal_eval  # NOTE(review): imported but not used in this cell
from collections import defaultdict
# Load the prepared article dataset and keep only the documented columns.
df = pd.read_pickle('Final_df.pickle')
df = df[['title', 'parent', 'edges', 'text', 'cleaned_text', "lemmatized", "tokens", "gcc"]]
# Replace underscores with spaces for display (e.g. "Political_Science" -> "Political Science").
df["title"] = df["title"].apply(lambda x: " ".join(x.split("_")))
df["parent"] = df["parent"].apply(lambda x: " ".join(x.split("_")))
display(df)
# -
# In the table below we display summary statistics including the average number of articles for each discipline, number of edges and word count. As expected, the distribution is rather skewed with Political Science for example having more than double the amount of articles compared to sociology for instance. Remember this detail - Political Science is dominating our dataset and therefore most likely our analysis...
# + tags=["hide-input"]
#Create descriptives table: per discipline, the article count and the average
#number of outgoing links (edges) and tokens (word count).
tab = defaultdict(list)
for discipline in df['parent'].unique():
    # Compute the discipline subset once instead of re-evaluating the boolean
    # mask three times and iterating row by row (iterrows is O(n) Python loops).
    sub = df.loc[df['parent'] == discipline]
    tab['Discipline'].append(discipline)
    tab['Number of articles'].append(sub.shape[0])
    tab['Avg. edges'].append(sub['edges'].apply(len).mean())
    tab['Avg. word count'].append(sub['tokens'].apply(len).mean())
tab = pd.DataFrame(tab)
tab.set_index('Discipline').round(2)
| main_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''ProgramData'': virtualenv)'
# language: python
# name: python37464bitprogramdatavirtualenv2203a48eb30e4608bccee8d0c91a3fd7
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction import text
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
#modules for n-gram model
#warnings not suppressed
import tensorflow as tf
from tensorflow import keras
from keras import models
from keras.layers import Dense
from keras.layers import Dropout
from keras.optimizers import Adam
from keras import backend as K #backend to use outside metrics on n-gram model
# -
# Load the training tweets and report basic class statistics.
train = pd.read_csv('Train.csv')
#train['message'] = train['message'].apply(lambda x: ' '.join(x.lower() for x in x.split()))
#Number of samples
sample_tot = len(train.index.values)
print(sample_tot)
#Number of classes
sent_val = set(train['sentiment'].values)
sent_count = len(sent_val)  # reuse the set built above instead of rebuilding it
print(sent_val)
print(sent_count)
#number of samples per class
# Count each sentiment once with value_counts instead of rescanning the whole
# column for every class (original was O(classes * n)).
class_counts = train['sentiment'].value_counts()
for i in set(train['sentiment'].values):
    print("Sentiment "+ str(i)+' '+ "observations :"+ str(class_counts[i]))
#median words per tweet
def word_count(text):
    """Return the median number of whitespace-separated words per sample."""
    lengths = [len(sample.split()) for sample in text]
    return np.median(lengths)
# Median word count across all tweets (used below for the sample/word ratio).
words = word_count(train['message'])
print(words)
# +
#distr. tweet lengths
def sample_plotter(text2):
    """Plot a 50-bin histogram of per-sample character lengths."""
    plt.hist([len(s) for s in text2], 50)
    plt.xlabel('tweet length')
    plt.ylabel('number of tweets')  # fixed label typo ('nuber')
    plt.title('Sample length distribution')
    plt.show()
sample_plotter(train['message'])
# -
train.head()
#word sample ratios: total samples divided by median words per sample
word_sample_ratio = sample_tot/words
print(round(word_sample_ratio, 0))
#sentiment prep
def adder(text):
    """Convert *text* to int and shift it up by one."""
    return int(text) + 1
# Shift labels up by one, then merge class 3 into class 2.
train['sentiment'] = train['sentiment'].apply(adder)
train['sentiment'] = train['sentiment'].replace(3, 2)
train['sentiment']
#stop words (two variants: sklearn's frozen set and NLTK's English list)
stop2 = text.ENGLISH_STOP_WORDS
stop = set(stopwords.words('english'))
# Features are the message column (index 1), targets the sentiment column (index 0).
X = train.iloc[:, 1].values
y = train.iloc[:, 0].values
#test and train
from sklearn.model_selection import train_test_split
train_text, test_text, train_val, test_val = train_test_split(X, y, test_size=0.20, shuffle = True, random_state=32)
# +
#tokenizing into uni+bi-grams and vectorizing
from sklearn.feature_extraction.text import TfidfTransformer  # NOTE(review): imported but unused here
t_vector = TfidfVectorizer(ngram_range=(1, 2),
                           strip_accents = 'unicode',
                           decode_error = 'replace',
                           analyzer = 'word',
                           min_df = .1,
                           max_df = .50,
                           stop_words = stop)
x_train = t_vector.fit_transform(train_text)
x_val = t_vector.transform(test_text)   # transform only: fit on train split alone
# +
#Selecting top 20 000 Features for n-gram model (ANOVA F-test ranking)
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
b_vect = SelectKBest(f_classif, k = min(20000, x_train.shape[1]))
b_vect.fit(x_train, train_val)
x_train = b_vect.transform(x_train).astype('float32')
x_val = b_vect.transform(x_val).astype('float32')
# -
x_train
x_val
# +
#building multilayer perceptron
#not optimized
drop_rate = 0.2
layers = 2
clasif = models.Sequential()
clasif.add(Dropout(rate = drop_rate, input_shape = x_train.shape[1:]))
for lvl in range(layers - 1):
    # NOTE(review): hidden layer has only 3 units — likely copied from the
    # output layer; confirm this is intended for a 20k-feature input.
    clasif.add(Dense(units = 3, activation = 'relu'))
    clasif.add(Dropout(rate = 0.1))
# Output layer: 3 sentiment classes, softmax probabilities.
clasif.add(Dense(units = 3,activation = 'softmax'))
# +
#N-gram model training and validation. Haven't used balance library
#Metrics calc for metrics not available in Keras. Funcs from Stackoverflow
def recall_m(y_true, y_pred):
    """Batch-wise recall: TP / (possible positives + epsilon)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def precision_m(y_true, y_pred):
    """Batch-wise precision: TP / (predicted positives + epsilon)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def f1_m(y_true, y_pred):
    """Batch-wise F1 from the precision/recall helpers above."""
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
optimizer = Adam(lr = 1e-3)
clasif.compile(optimizer, loss = 'sparse_categorical_crossentropy', metrics = ['acc', f1_m, precision_m, recall_m])
# Stop training once validation loss fails to improve for 2 epochs.
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)]
history = clasif.fit(
    x_train,
    train_val,
    epochs=1000,
    batch_size = 128,
    callbacks=callbacks,
    validation_data=(x_val, test_val),
    verbose=2, # Logs once per epoch.
)
history = history.history
# Fix: report the final-epoch F1 ([-1]) instead of printing the whole
# per-epoch list, consistent with the accuracy and loss values.
print('Validation accuracy: {acc}, loss: {loss}, f1_score: {f1}'.format(acc=history['val_acc'][-1], loss=history['val_loss'][-1], f1 = history['val_f1_m'][-1]))
train.head()
# +
'''
Lowercaase, remove punct and numbers
'''
import string
def clean_text(text):
    """Normalise a tweet: strip digits, drop punctuation, lower-case,
    and collapse runs of whitespace (also trims the ends)."""
    without_digits = re.sub(r'\d+', '', text)
    lowered = "".join(ch.lower() for ch in without_digits
                      if ch not in string.punctuation)
    return re.sub('\s+', ' ', lowered).strip()
# -
# Overwrite the raw messages with their cleaned form.
train['message'] = train['message'].apply(clean_text)
def counter(text):
    """Return the length (character/element count) of *text*."""
    return len(text)
# Character length of each cleaned message; drop the id column (not a feature).
train['count'] = train['message'].apply(counter)
train.pop('tweetid')
train.head()
from nltk import word_tokenize
def tokenizer(text3):
    """Split *text3* into word tokens via NLTK's word_tokenize."""
    return word_tokenize(text3)
# Tokenize every cleaned message into a list of words.
train['tokenized'] = train["message"].apply(tokenizer)
train.head()
# +
# NOTE(review): this cell rebinds x_train (previously the selected n-gram
# features) to a new TF-IDF matrix over the full corpus — confirm the
# earlier split is no longer needed at this point in the notebook.
texts = train['message']
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 2),
                                   min_df = 2,
                                   max_df = .95,
                                   stop_words = stop2)
x_train = tfidf_vectorizer.fit_transform(texts) #features
# -
x_train.shape
# +
# Reduce the sparse TF-IDF matrix to 100 latent components (LSA).
from sklearn.decomposition import TruncatedSVD
lsa = TruncatedSVD(n_components=100,
                   n_iter=10,
                   random_state=3)
x_train = lsa.fit_transform(x_train)
x_train.shape
# -
# Dead code kept as a reference snippet (never executed; evaluated as a bare string).
'''
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df = 1, ngram_range = (1,1),
            stop_words = stop2)
tfidf = vectorizer.fit_transform(text4)
return tfidf
'''
def cleaner_text(text2):
    """Remove sklearn English stop words from a list of tokens."""
    # Build the lookup set once; the original rebuilt list(stop2) for every
    # token, making the comprehension O(tokens * stopwords).
    stopset = set(stop2)
    return [w for w in text2 if w not in stopset]
train['tokey'] = train['tokenized'].apply(cleaner_text)
train.head()
# NOTE(review): `vect_funct` is not defined anywhere in this notebook — this
# line raises NameError as written; presumably it was meant to be the
# commented-out TF-IDF helper above. Confirm before running top-to-bottom.
train['tokey2'] = train['tokenized'].apply(vect_funct)
train.head()
y_train = train['sentiment'].values
from sklearn.model_selection import train_test_split
X_trainer, X_tester, y_trainer, y_tester = train_test_split(x_train, y_train, test_size=0.20, shuffle = True, random_state=32)
# +
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import label_binarize
'''Classifiers'''
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.multiclass import OneVsRestClassifier
from catboost import CatBoostClassifier
'''Metrics/Evaluation'''
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, confusion_matrix
from scipy import interp
from itertools import cycle
# +
#Preliminary model evaluation using default parameters
#Creating a dict of the models
# NOTE(review): the CatBoost entry uses tuned hyper-parameters, not defaults,
# contrary to the comment above — confirm which comparison is intended.
model_dict = {"CatBoost" : CatBoostClassifier(depth=9,
                                              bootstrap_type= 'Bayesian',
                                              loss_function = 'MultiClass',
                                              iterations=80, learning_rate=0.4,
                                              l2_leaf_reg=26),
              "SVC": SVC(max_iter = 100, kernel = 'linear' ),
              'Random Forest': RandomForestClassifier(random_state=32),
              'K Nearest Neighbor': KNeighborsClassifier()}
def model_score_df(model_dict):
model_name, ac_score_list, p_score_list, r_score_list, f1_score_list = [], [], [], [], []
for k,v in model_dict.items():
model_name.append(k)
v.fit(X_trainer, y_trainer)
y_pred = v.predict(X_tester)
ac_score_list.append(accuracy_score(y_tester, y_pred))
p_score_list.append(precision_score(y_tester, y_pred, average='macro'))
r_score_list.append(recall_score(y_tester, y_pred, average='macro'))
f1_score_list.append(f1_score(y_tester, y_pred, average='macro'))
model_comparison_df = pd.DataFrame([model_name, ac_score_list, p_score_list, r_score_list, f1_score_list]).T
model_comparison_df.columns = ['model_name', 'accuracy_score', 'precision_score', 'recall_score', 'f1_score']
model_comparison_df = model_comparison_df.sort_values(by='f1_score', ascending=False)
return model_comparison_df
model_score_df(model_dict)
# -
pip install -U imbalanced-learn
from imblearn.over_sampling import KMeansSMOTE, ADASYN, SMOTE, BorderlineSMOTE, SVMSMOTE, SMOTENC, RandomOverSampler
from imblearn.pipeline import make_pipeline
# Oversample the minority classes (RandomOverSampler) before fitting CatBoost.
pipeline = make_pipeline(RandomOverSampler(random_state=32), CatBoostClassifier(depth=9,
                                                                               bootstrap_type= 'Bayesian',
                                                                               loss_function = 'MultiClass',
                                                                               iterations=80, learning_rate=0.4, l2_leaf_reg=26))
pipeline.fit(X_trainer, y_trainer)
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
# NOTE(review): each of the cells below is labelled with a different sampler
# (RandomOverSampler, SVMSMOTE, ...) but none of them rebuilds `pipeline` —
# they all re-score the same fitted pipeline from the cell above, so the
# printed metrics are identical. The sampler presumably had to be swapped in
# manually before each run; confirm intent.
#RandomOverSampler
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
#SVMSMOTE
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
#BorderlineSMOTE
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
#KMeansSMOTE
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
#SMOTE
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
#ADSYN
y_pred2 = pipeline.predict(X_tester)
print(accuracy_score(y_tester, y_pred2))
print(precision_score(y_tester, y_pred2, average = 'macro'))
print(recall_score(y_tester, y_pred2, average='macro'))
print(f1_score(y_tester, y_pred2, average='macro'))
# +
print("\n The best estimator across ALL searched params:\n", randm.best_estimator_)
print("\n The best score across ALL searched params:\n", randm.best_score_)
print("\n The best parameters across ALL searched params:\n", randm.best_params_)
# +
model = CatBoostClassifier()
parameters = {'depth': list(range(4, 10, 1)),
'iterations': list(range(10, 100, 10))}
randm = GridSearchCV(estimator=model, param_grid = parameters, cv = 2, n_jobs=-1)
randm.fit(X_trainer, y_trainer)
# -
from catboost import CatBoostClassifier
from sklearn.model_selection import GridSearchCV
model = CatBoostClassifier(depth=9, iterations=80, learning_rate=0.4, l2_leaf_reg=26)
parameters = {'bootstrap_type': ['Bayesian', 'Bernoulli', 'MVS', 'Poisson'],
'loss_function': ['Logloss', 'CrossEntropy', 'MultiClassOneVsAll', 'MAPE', 'MultiClass']}
randm = GridSearchCV(estimator=model, param_grid = parameters, cv = 2, n_jobs=-1)
randm.fit(X_trainer, y_trainer)
# Results from Random Search
# +
print("\n The best estimator across ALL searched params:\n", randm.best_estimator_)
print("\n The best score across ALL searched params:\n", randm.best_score_)
print("\n The best parameters across ALL searched params:\n", randm.best_params_)
# -
# Metrics for the current predictions. The original cell had three missing
# closing parentheses (SyntaxError), rebound the `f1_score` function name,
# and printed ac_score/p_score/r_score which are only defined in a later cell.
print(accuracy_score(y_tester, y_pred))
print(precision_score(y_tester, y_pred, average='macro'))
print(recall_score(y_tester, y_pred, average='macro'))
f1_macro = f1_score(y_tester, y_pred, average='macro')
print(f1_macro)
# Fit the tuned CatBoost model and report test-set metrics.
model = CatBoostClassifier(depth=9, bootstrap_type= 'Bayesian', loss_function = 'MultiClass', iterations=80, learning_rate=0.4, l2_leaf_reg=26)
model.fit(X_trainer, y_trainer)
y_pred = model.predict(X_tester)
ac_score = (accuracy_score(y_tester, y_pred))
p_score = (precision_score(y_tester, y_pred, average='macro'))
r_score = (recall_score(y_tester, y_pred, average='macro'))
# Renamed from `f1_score`: the original assignment shadowed sklearn's
# f1_score function, so re-running the cell raised TypeError.
f1_macro = (f1_score(y_tester, y_pred, average='macro'))
print(ac_score)
print(p_score)
print(r_score)
print(f1_macro)
# +
# Grid search over learning rate and L2 leaf regularisation.
from sklearn.model_selection import GridSearchCV
model = CatBoostClassifier(depth=9, iterations=80)
parameters = {'learning_rate': [0.6, 0.1, 0.4, 0.8],
              'l2_leaf_reg': list(range(2, 30, 2))}
randm = GridSearchCV(estimator=model, param_grid = parameters, cv = 2, n_jobs=-1)
randm.fit(X_trainer, y_trainer)
# +
print("\n The best estimator across ALL searched params:\n", randm.best_estimator_)
print("\n The best score across ALL searched params:\n", randm.best_score_)
print("\n The best parameters across ALL searched params:\n", randm.best_params_)
# +
# NOTE(review): duplicate of the results cell above.
print("\n The best estimator across ALL searched params:\n", randm.best_estimator_)
print("\n The best score across ALL searched params:\n", randm.best_score_)
print("\n The best parameters across ALL searched params:\n", randm.best_params_)
| Classification/Quick Analysis/Quick Catboost Model for Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Preface
#
#
# We need some particular versions of the following modules;
#
#
# !pip install -r requirements.txt
# Add a string for your username here:
#
#
# Username key used to select the matching API key from `apikey` below.
user = "ligon"
# ## USDA Nutrition DataBase
#
#
# The USDA maintains a database of nutritional information, where
# different kinds of food are identified by an NDB number. They do
# not provide any data on prices.
#
# To look up nutritional information, use api provided by the USDA at
# [https://ndb.nal.usda.gov/ndb/doc/index](https://ndb.nal.usda.gov/ndb/doc/index). You’ll need to sign up for a
# free api key (see directions on page), then add that key here:
#
#
# USDA NDB API keys, keyed by username (register for a free key; see link above).
apikey = {'ligon':"<KEY>"}
# ## Data on Prices
#
#
# Here’s an effort to describe some different kinds of food, along with
# data on food prices. This is all just based on my last trip to the
# grocery store, except that I’ve used the USDA database to look up NDB
# numbers. Note that one may need to protect leading zeros with “” for
# the NDB numbers.
#
# Also note that some kinds of foods need to have unit weights (in
# grams) supplied under “Units”; e.g., extra large eggs are taken to
# each weigh 56g. These conversions can also often be found on the USDA
# NDB website. Other times not—I still need to weigh a crumpet.
#
# Food is purchased in particular units (gallons, pounds, grams). And
# in some cases the natural units are things like donuts or eggs, in
# which case we may need to define our own units (see the example of
# “xl\_egg” below).
#
# | Food|Quantity|Units|Price|Date|Location|NDB|
# |---|---|---|---|---|---|---|
# | Milk, 2% fat|1|gallon|4.99|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45226447|
# | Eggs, extra large|12|xl\_egg|3.59|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45208918|
# | Crumpets|6|crumpet|3.19|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45324369|
# | Bananas|1|pound|3.15|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|“09040”|
# | Carrots, Organic|2|pound|2.29|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11124|
# | Cauliflower|2.51|pound|4.24|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11135|
# | Endive, Red|1.26|pound|6.27|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11213|
# | Figs, black mission|1|pound|4.98|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45170327|
# | Leeks, Organic|1|pound|1.29|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11246|
# | Lettuce, Little Gem|1|pound|5.98|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45276886|
# | Mushrooms, King Oyster|1|pound|12|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45218868|
# | Onion, yellow|1|pound|0.39|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45339306|
# | Orange juice|0.5|gallon|8.98|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45213207|
# | Parsnip|1|pound|1.98|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11298|
# | Potato, marble mix|1|pound|2.59|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45169597|
# | Rhubarb|1|pound|1.84|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|“09307”|
# | Potato, russet|10|pound|2.98|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|45364251|
# | Squash, Zucchini|1|pound|1.49|<span class="timestamp-wrapper"><span class="timestamp">[2019-09-14 Sat]</span></span>|Monterey Market, Berkeley|11477|
#
# And here’s a little code to help look up NDB codes for foods of
# different descriptions.
#
#
# +
import ndb
# Example lookup: search the USDA NDB for a food matching this description.
ndb.ndb_search(apikey[user],"Great Value Buttermilk Pancakes")
# -
# ## Price information
#
#
# The code below allows us to collect data on different kinds of food
# with their prices from google spreadsheets.
#
#
# ### Using data from Google Sheets
#
#
# Alternatively, rather than creating & uploading `csv` files we can
# also supply code to find files over the internet. Here we supply code
# which allows one to input data on prices in a format similar to that
# above (i.e., a spreadsheet with seven columns, labeled Food, Quantity,
# Units, Price, Date, Location, and NDB).
#
# Using google sheets in this way requires you establish some
# credentials. Follow Step 1 in [Turn on the API](https://developers.google.com/sheets/api/quickstart/python#step_1_turn_on_the_api_name).
#
#
# +
# ID of sheet and name of worksheet for google sheets.
# These must be public (until we work out google oauth)
# Each entry is a (spreadsheet URL or bare key, worksheet name) pair.
SHEETs = [# Stigler's foods, modern prices
          ("https://docs.google.com/spreadsheet/ccc?key=<KEY>","Table 2"),
         ]
# -
# ### Compile data on food prices
#
#
# +
import ndb
import pandas as pd
import warnings

# Collect one DataFrame per input source, then concatenate at the end.
DFs = []

#for csv in CSVs: # Uncomment to use a list of csv files as inputs
#    DFs.append(pd.read_csv(csv,dtype=str))

try:
    if len(SHEETs):
        for ID, RANGE_NAME in SHEETs:
            try:
                if "docs.google.com" in ID:
                    # Full URL given; just ask for csv output.
                    sheet = "%s&output=csv" % ID
                else:
                    # Bare key given; build the full csv-export URL.
                    sheet = "https://docs.google.com/spreadsheet/ccc?key=%s&output=csv" % ID
                DFs.append(pd.read_csv(sheet))
            # BUG FIX: `ParserError` was referenced unqualified but never
            # imported, so a malformed sheet raised NameError instead of
            # the intended warning.  Use the pandas-qualified name.
            except pd.errors.ParserError:
                warnings.warn("Can't read sheet at https://docs.google.com/spreadsheets/d/%s.\nCheck Sharing settings, so that anyone with link can view?" % ID)
except NameError: # SHEETs not defined?
    pass

df = pd.concat(DFs,ignore_index=True,sort=False)
# Some columns which ought to be numeric are actually str; convert them
df['Price'] = df['Price'].astype(float)
df['Quantity'] = df['Quantity'].astype(float)
df
# -
# ### Look up nutritional information for foods
#
#
# Now we have a list of foods with prices. Do lookups on USDA database
# to get nutritional information.
#
#
# +
# Look up the USDA nutrient report for each food by its NDB code;
# D maps food name -> Series of nutrient quantities.
D = {}
for food in df.Food.tolist():
    try:
        NDB = df.loc[df.Food==food,:].NDB
        D[food] = ndb.ndb_report(apikey[user],NDB).Quantity
    except AttributeError:
        # ndb_report returned something without a .Quantity attribute --
        # presumably a failed lookup; warn and keep going.
        warnings.warn("Couldn't find NDB Code %s for food %s." % (food,NDB))

D = pd.DataFrame(D,dtype=float)
D
# -
# ## Units & Prices
#
#
# Now, the prices we observe can be for lots of different quantities and
# units. The NDB database basically wants everything in either hundreds
# of grams (hectograms) or hundreds of milliliters (deciliters).
#
# Sometimes this conversion is simple; if the price we observe is for
# something that weighs two kilograms, that’s just 20 hectograms.
# Different systems of weights and volumes are also easy; a five pound
# bag of flour is approximately 22.68 hectograms.
#
# Other times, things are more complicated. If you observe the price of a
# dozen donuts, that needs to be converted to hectograms, for example.
#
# A function `ndb_units` in the [ndb](ndb.py) module accomplishes this conversion
# for many different units, using the `python` [pint module](https://pint.readthedocs.io/en/latest/). A file
# [./Data/food\_units.txt](Data/food_units.txt) can be edited to deal with odd cases such as
# donuts, using a format described in the `pint` [documentation](https://pint.readthedocs.io/en/latest/defining.html).
#
# Here’s an example of the usage of `ndb.ndb_units`:
#
#
# +
import ndb

# Try your own quantities and units.
# If units are missing try adding to ./Data/food_units.txt
# Demonstrate the unit conversion on a few (quantity, unit) examples.
for quantity, unit in [(5, 'lbs'), (1, 'gallon'), (2, 'tea_bag'), (12, 'donut')]:
    print(ndb.ndb_units(quantity, unit))
# -
# Now, use the `ndb_units` function to convert all foods to either
# deciliters or hectograms, to match NDB database:
#
#
# +
# Convert food quantities to NDB units
df['NDB Quantity'] = df[['Quantity','Units']].T.apply(lambda x : ndb.ndb_units(x['Quantity'],x['Units']))

# Now may want to filter df by time or place--need to get a unique set of food names.
df['NDB Price'] = df['Price']/df['NDB Quantity']

# BUG FIX: dropna returns a new frame; the original discarded the result,
# so rows with missing data were silently kept.  Assign it back.
df = df.dropna(how='any') # Drop food with any missing data

# To use minimum price observed
Prices = df.groupby('Food')['NDB Price'].min()

Prices.head()
# -
# ## Dietary Requirements
#
#
# We’ve figured out some foods we can buy, the nutritional content of
# those foods, and the price of the foods. Now we need to say
# something about nutritional requirements. Our data for this is based
# on US government recommendations available at
# [https://health.gov/dietaryguidelines/2015/guidelines/appendix-7/](https://health.gov/dietaryguidelines/2015/guidelines/appendix-7/).
# Note that we’ve tweaked the nutrient labels to match those in the NDB
# data.
#
# We’ve broken down the requirements into three different tables. The
# first is *minimum* quantities that we need to satisfy. For example,
# this table tells us that a 20 year-old female needs at least 46 grams
# of protein per day.
#
# | Nutrition|Source|C 1-3|F 4-8|M 4-8|F 9-13|M 9-13|F 14-18|M 14-18|F 19-30|M 19-30|F 31-50|M 31-50|F 51+|M 51+|
# |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
# | Energy|---|1000|1200|1400|1600|1800|1800|2200|2000|2400|1800|2200|1600|2000|
# | Protein|RDA|13|19|19|34|34|46|52|46|56|46|56|46|56|
# | Fiber, total dietary|---|14|16.8|19.6|22.4|25.2|25.2|30.8|28|33.6|25.2|30.8|22.4|28|
# | Folate, DFE|RDA|150|200|200|300|300|400|400|400|400|400|400|400|400|
# | Calcium, Ca|RDA|700|1000|1000|1300|1300|1300|1300|1000|1000|1000|1000|1200|1000|
# | Carbohydrate, by difference|RDA|130|130|130|130|130|130|130|130|130|130|130|130|130|
# | Iron, Fe|RDA|7|10|10|8|8|15|11|18|8|18|8|8|8|
# | Magnesium, Mg|RDA|80|130|130|240|240|360|410|310|400|320|420|320|420|
# | Niacin|RDA|6|8|8|12|12|14|16|14|16|14|16|14|16|
# | Phosphorus, P|RDA|460|500|500|1250|1250|1250|1250|700|700|700|700|700|700|
# | Potassium, K|AI|3000|3800|3800|4500|4500|4700|4700|4700|4700|4700|4700|4700|4700|
# | Riboflavin|RDA|0.5|0.6|0.6|0.9|0.9|1|1.3|1.1|1.3|1.1|1.3|1.1|1.3|
# | Thiamin|RDA|0.5|0.6|0.6|0.9|0.9|1|1.2|1.1|1.2|1.1|1.2|1.1|1.2|
# | Vitamin A, RAE|RDA|300|400|400|600|600|700|900|700|900|700|900|700|900|
# | Vitamin B-12|RDA|0.9|1.2|1.2|1.8|1.8|2.4|2.4|2.4|2.4|2.4|2.4|2.4|2.4|
# | Vitamin B-6|RDA|0.5|0.6|0.6|1|1|1.2|1.3|1.3|1.3|1.3|1.3|1.5|1.7|
# | Vitamin C, total ascorbic acid|RDA|15|25|25|45|45|65|75|75|90|75|90|75|90|
# | Vitamin E (alpha-tocopherol)|RDA|6|7|7|11|11|15|15|15|15|15|15|15|15|
# | Vitamin K (phylloquinone)|AI|30|55|55|60|60|75|75|90|120|90|120|90|120|
# | Zinc, Zn|RDA|3|5|5|8|8|9|11|8|11|8|11|8|11|
# | Vitamin D|RDA|600|600|600|600|600|600|600|600|600|600|600|600|600|
#
# This next table specifies *maximum* quantities. Our 20 year-old
# female shouldn’t have more than 2300 milligrams of sodium per day.
# Note that we can also add constraints here on nutrients that also
# appear above. For example, here we’ve added upper limits on Energy,
# as we might do if we were trying to lose weight.
#
# | Nutrition|Source|C 1-3|F 4-8|M 4-8|F 9-13|M 9-13|F 14-18|M 14-18|F 19-30|M 19-30|F 31-50|M 31-50|F 51+|M 51+|
# |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
# | Sodium, Na|UL|1500|1900|1900|2200|2200|2300|2300|2300|2300|2300|2300|2300|2300|
# | Energy|---|1500|1600|1800|2000|2200|2200|2500|2400|2600|2200|2400|1800|2400|
#
# Finally, we have some odd constraints given in this final table.
# Mostly the items given don’t correspond to items in the NDB data
# (e.g., copper), but in some cases it may be possible to match things
# up. We can’t use these without some additional work.
#
# | Nutrition|Source|C 1-3|F 4-8|M 4-8|F 9-13|M 9-13|F 14-18|M 14-18|F 19-30|M 19-30|F 31-50|M 31-50|F 51+|M 51+|
# |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
# | Carbohydrate, % kcal|AMDR|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|45-65|
# | Added sugars, % kcal|DGA|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|
# | Total fat, % kcal|AMDR|30-40|25-35|25-35|25-35|25-35|25-35|25-35|20-35|20-35|20-35|20-35|20-35|20-35|
# | Saturated fat, % kcal|DGA|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|<10%|
# | Linoleic acid, g|AI|7|10|10|10|12|11|16|12|17|12|17|11|14|
# | Linolenic acid, g|AI|0.7|0.9|0.9|1|1.2|1.1|1.6|1.1|1.6|1.1|1.6|1.1|1.6|
# | Copper, mcg|RDA|340|440|440|700|700|890|890|900|900|900|900|900|900|
# | Manganese, mg|AI|1.2|1.5|1.5|1.6|1.9|1.6|2.2|1.8|2.3|1.8|2.3|1.8|2.3|
# | Selenium, mcg|RDA|20|30|30|40|40|55|55|55|55|55|55|55|55|
# | Choline, mg|AI|200|250|250|375|375|400|550|425|550|425|550|425|550|
#
# - **Notes on Source:** In each of these tables, RDA = Recommended
# Dietary Allowance, AI = Adequate Intake, UL = Tolerable Upper
# Intake Level, AMDR = Acceptable Macronutrient Distribution
# Range, DGA = 2015-2020 Dietary Guidelines recommended limit; 14
# g fiber per 1,000 kcal = basis for AI for fiber.
#
#
# +
# Choose sex/age group:
group = "F 19-30"

def _requirements(path):
    """Read a constraint table and return the column for the chosen group."""
    return pd.read_csv(path).set_index('Nutrition')[group]

# Define *minimums*
bmin = _requirements('./diet_minimums.csv')

# Define *maximums*
bmax = _requirements('./diet_maximums.csv')
# -
# ## Putting it together
#
#
# Here we take the different pieces of the puzzle we’ve developed and
# put them together in the form of a linear program we can solve.
#
#
# +
from scipy.optimize import linprog as lp
import numpy as np

tol = 1e-6 # Numbers in solution smaller than this (in absolute value) treated as zeros

# Objective vector: cost per NDB unit of each food.  Prices holds pint
# quantities, so keep only the numeric magnitude and drop missing entries.
c = Prices.apply(lambda x:x.magnitude).dropna()

# Compile list that we have both prices and nutritional info for; drop if either missing
use = list(set(c.index.tolist()).intersection(D.columns.tolist()))
c = c[use]

# Drop nutritional information for foods we don't know the price of,
# and replace missing nutrients with zeros.
Aall = D[c.index].fillna(0)

# Drop rows of A that we don't have constraints for.
Amin = Aall.loc[bmin.index]
Amax = Aall.loc[bmax.index]

# Minimum requirements involve multiplying constraint by -1 to make <=.
A = pd.concat([-Amin,Amax])

b = pd.concat([-bmin,bmax]) # Note sign change for min constraints

# Now solve problem!  (linprog minimizes c @ x s.t. A @ x <= b, x >= 0.)
result = lp(c, A, b, method='interior-point')

# Put back into nice series
diet = pd.Series(result.x,index=c.index)

print("Cost of diet for %s is $%4.2f per day." % (group,result.fun))
print("\nYou'll be eating (in 100s of grams or milliliters):")
print(diet[diet >= tol])  # Drop items with quantities less than precision of calculation.

# Achieved nutrition vs. recommendation; abs() undoes the sign flip
# applied above to the minimum constraints.
tab = pd.DataFrame({"Outcome":np.abs(A).dot(diet),"Recommendation":np.abs(b)})
print("\nWith the following nutritional outcomes of interest:")
print(tab)

print("\nConstraining nutrients are:")
# Binding constraints are those where outcome ~ recommendation (diff < tol).
excess = tab.diff(axis=1).iloc[:,1]
print(excess.loc[np.abs(excess) < tol].index.tolist())
| diet_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Custom Tutorial 3: Using your model
#
# In this tutorial, we demonstrate how to fit your custom model to data. The process is nearly identical to that of the default sub-module, so we will principally focus on what's different between the default and custom versions here. Refer to Default Tutorial 3 for use details.
#
# The specific model we will use for this example is the urgency gating model with time-dependent drift discussed in PyBEAM's publication. In this model, the drift rate flips from positive, to negative, then back to positive. The model files for this example are on PyBEAM's github under the folder ugm_drift_flipping.
#
# As before, import PyBEAM's custom sub-module.
#
# +
# import PyBEAM's custom module
import pybeam.custom as pbc
# also import pyplot to modify figure axes
import matplotlib.pyplot as plt
# -
# Next, we specify the directory where the model is located.
#
# +
# the directory containing your model goes here
model_dir = ''

# windows computers
# (raw string so backslashes in the path are not treated as escapes)
# model_dir = r''
# -
# As we did in Custom Tutorial 2, we run a quick check using functions_test to make sure our functions are doing what we expect them to.
#
# +
# Model parameters, in the positional order the custom model expects:
# t_nd (non-decision time), w (relative start point), mu (drift rate),
# l (leakage rate), k (urgency ratio), sigma (model scale),
# a (threshold location), t0/t1 (times of the two drift-rate flips).
_values = [0.25, 0.5, 1.0, 3.0, 2.0, 1.0, 1.0, 0.33, 0.66]
phi = {'phi[%d]' % i: v for i, v in enumerate(_values)}

# Sanity-check the model functions at one (x, t) point.
pbc.functions_test(model_dir = model_dir, # string containing directory name where your model files are
                   phi = phi,             # dictionary of model parameters
                   x = 0.0,               # accumulator state
                   t = 1.0)               # time
# -
# We next simulate two data sets, corresponding to two model conditions. Specifically, we model a speed/accuracy trade-off by simulating data sets with differing threshold locations. The first has a = 1.0, while the second has a = 1.5.
#
# +
# First condition: threshold a = 1.0 (same ordering as above:
# t_nd, w, mu, l, k, sigma, a, t0, t1).
_values0 = [0.25, 0.5, 1.0, 3.0, 2.0, 1.0, 1.0, 0.33, 0.66]
phi0 = {'phi[%d]' % i: v for i, v in enumerate(_values0)}

# Simulate 500 trials, then plot the resulting rt distribution.
rt0 = pbc.simulate_model(model_dir = model_dir,
                         N_sims = 500,
                         phi = phi0)

pbc.plot_rt(model_dir = model_dir,
            phi = phi0,
            rt = rt0,
            bins = 50);

plt.xlim(-2.5, 2.5)
# +
# Second condition: identical parameters except a wider threshold, a = 1.5.
_values1 = [0.25, 0.5, 1.0, 3.0, 2.0, 1.0, 1.5, 0.33, 0.66]
phi1 = {'phi[%d]' % i: v for i, v in enumerate(_values1)}

# Simulate 500 trials, then plot the resulting rt distribution.
rt1 = pbc.simulate_model(model_dir = model_dir,
                         N_sims = 500,
                         phi = phi1)

pbc.plot_rt(model_dir = model_dir,
            phi = phi1,
            rt = rt1,
            bins = 50);

plt.xlim(-2.5, 2.5)
# -
# Now that we have data, we can infer what parameters best describe the data set. This follows the same process as the default sub-module, discussed in Default Tutorial 3. We first define a bank of priors in dictionary p. Then, we define our condition dictionaries. Note that, instead of parameter names for keys, we write the array location 'phi[0]', 'phi[1]', ... , 'phi[N_phi-1]'. This is as we did for the phi dictionary inputs to the simulate and plot model functions. These sub-dictionaries are placed into another condition dictionary in the same way as for the default model.
#
# Now that we have done this, we can call the inference function. This is identical to the call used in the default tutorial other than the model input. As with the other custom functions, we define the model directory instead of a model dictionary.
#
# The analysis tools plot_trace and summary are the same as for the default module.
#
# +
# Bank of priors: strings are prior declarations evaluated by pybeam,
# plain floats are held fixed.
p = {'pphi[0]' : 'Uniform("t_nd", lower = 0.0, upper = 0.75)', # non-decision time prior
     'pphi[1]' : 'Uniform("w", lower = 0.3, upper = 0.7)', # relative start point prior
     'pphi[2]' : 'Uniform("mu", lower = -5.0, upper = 5.0)', # drift rate prior
     'pphi[3]' : 'Uniform("l", lower = 0.0, upper = 10.0)', # leakage rate prior
     'pphi[4]' : 'Uniform("k", lower = 0.0, upper = 10.0)', # urgency rate prior
     'pphi[5]' : 1.0, # scaling parameter
     'pphi[6]0' : 'Uniform("a0", lower = 0.25, upper = 3.0)', # decision threshold prior 0
     'pphi[6]1' : 'Uniform("a1", lower = 0.25, upper = 3.0)', # decision threshold prior 1
     'pphi[7]' : 0.33, # first drift flip time
     'pphi[8]' : 0.66} # second drift flip time

def _make_condition(rt_data, threshold_prior):
    """Build a condition dict; only the threshold prior differs across conditions."""
    cond = {'rt': rt_data}
    cond.update({'phi[%d]' % i: 'pphi[%d]' % i for i in range(9)})
    cond['phi[6]'] = threshold_prior
    return cond

c0 = _make_condition(rt0, 'pphi[6]0')
c1 = _make_condition(rt1, 'pphi[6]1')

cond = {0 : c0, 1 : c1}

trace = pbc.inference(model_dir = model_dir, # specify model directory
                      priors = p, # dictionary of priors
                      conditions = cond, # conditions dictionary
                      samples = 50000, # MCMC samples
                      chains = 3, # MCMC chains
                      cores = 3, # CPU cores to run MCMC chains on
                      file_name = 'custom') # output file name
# -
# -
# trace plots of the posterior samples (first 25000 draws discarded as burn-in)
pbc.plot_trace(file_name = 'custom', burnin = 25000);

# summary of posteriors
pbc.summary(file_name = 'custom', burnin = 25000)
| custom_tutorials/Custom_Tutorial3_parameter_inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import pickle
# ## Loading manually labelled data
bin_data = pd.read_csv('./KMTrainingSet/binary/bin_dataset_simulink.csv')

from sklearn.utils import shuffle

# shuffle rows so the head()/split subsets taken below are not order-dependent
bin_data = shuffle(bin_data).reset_index(drop=True)
# +
# number of feature columns, excluding the trailing 'label' column
col_len = bin_data.shape[1] - 1

# The MatLab export carries no header row, so synthesize names:
# A1..An for the first half of the signals, V1..Vn for the second half.
half = col_len // 2
cols = ['A%d' % (i + 1) for i in range(half)] + ['V%d' % (i + 1) for i in range(half)]

# assigning new column names to the dataframe
bin_data.columns = cols + ['label']
# -
# ## Plotting the data
# +
fig, ax = plt.subplots(6,figsize=(15,15))

data = bin_data
# split samples into normal (label 0) and faulty (any other label)
normal = data[data.label == 0]
abnormal = data[data.label != 0]

# one subplot per signal column, normal and faulty traces overlaid
for i in range(col_len):
    ax[i].plot(normal[cols[i]],"-")
    ax[i].plot(abnormal[cols[i]],"-")
    ax[i].set_title(cols[i])
fig.tight_layout()
# -
# ## Creating SVM (support vector machine)
from sklearn import svm
# ## binary
# drop rows with missing values before training
bin_data.dropna(inplace = True)

# +
# creating training set ignoring labels
bin_train_data = bin_data[cols]
bin_labels = bin_data['label']

# converting it to list of list from DataFrame
#train_data = train_data.values.tolist()
# -

# keep only 1% of the rows so SVM training stays fast
dataset_percentage = 1
dataset_percentage = int((dataset_percentage*len(bin_train_data))/100)
bin_train_data = bin_train_data.head(dataset_percentage)
bin_labels = bin_labels[:dataset_percentage]
print(bin_train_data.shape,len(bin_labels))

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(bin_train_data, bin_labels, test_size=0.20)

# configuring and fitting the model
clf = svm.SVC(decision_function_shape='ovo')
clf.fit(X_train, y_train)

# accuracy on the held-out 20%
clf.score(X_test, y_test)

# Persist the trained model.  BUG FIX: the original passed a bare
# open(...) to pickle.dump and never closed it; use a context manager.
filename = 'binary_svm.pkl'
with open(filename, 'wb') as model_file:
    pickle.dump(clf, model_file)
# ## multi-class
dataset = pd.read_csv('./KMTrainingSet/multi/mul_dataset_simulink.csv')
dataset.dropna(inplace = True)

# changing column names
dataset.columns = cols + ['label']

dataset.head()

mul_train_data = dataset[cols]
mul_labels = dataset['label']

# keep only 1% of the rows so SVM training stays fast
dataset_percentage = 1
dataset_percentage = int((dataset_percentage*len(dataset))/100)
mul_train_data = mul_train_data.head(dataset_percentage)
mul_labels = mul_labels[:dataset_percentage]
print(mul_train_data.shape,len(mul_labels))

X_train, X_test, y_train, y_test = train_test_split(mul_train_data, mul_labels, test_size=0.20)

# one-vs-rest multi-class SVM
mul_clf = svm.SVC(decision_function_shape='ovr')
mul_clf.fit(X_train, y_train )

mul_clf.score(X_test, y_test)

# Persist the trained model.  BUG FIX: the original passed a bare
# open(...) to pickle.dump and never closed it; use a context manager.
filename = 'multi_svm.sav'
with open(filename, 'wb') as model_file:
    pickle.dump(mul_clf, model_file)
# reading the new dataset from csv
# new dataset with changed simulation length and occurrence time
new_data = pd.read_csv('./TrainingSet/1AB.csv')
new_data.columns = cols + ['label']
# keep only the feature columns for prediction
new_data = new_data[cols]
testing = new_data.values.tolist()
# ### Binary classification
# predict with the binary SVM trained above
bin_gend_labels = clf.predict(testing)

new_data.values

from keras.models import load_model

#new_data = pd.read_csv('./TrainingSet/10ABCG.csv')
# reshape to (samples, 1, 6) -- presumably the Keras model's expected input shape; TODO confirm
testing = new_data.values.reshape(-1,1,6).tolist()

NN_binary_model = load_model('binary_clf.pkl')

# NOTE(review): this overwrites the SVM predictions above with the
# neural-network predictions -- confirm that is intended.
bin_gend_labels = NN_binary_model.predict(testing)

import numpy as np
gen_pred = np.argmax(bin_gend_labels,axis=1)

new_data['label'] = gen_pred
# +
total_labels = ['NML', 'AB', 'AC', 'BC', 'ABC', 'AG', 'BG', 'ABG', 'CG', 'ACG', 'BCG', 'ABCG']
df = new_data

# unique predicted label indices, in first-seen order
x = list(gen_pred)
unq_labels = sorted(set(x), key=x.index)
# BUG FIX: `bin_list` is never defined anywhere in this notebook and raised
# NameError; the label-name list defined above is `total_labels`.
unq_labels = [total_labels[each] for each in unq_labels]
print(unq_labels)

#df['label'] = df['label'].apply(unq_labels.index)
# -
# NOTE(review): `test_sample` is not defined anywhere in this notebook, so
# this line raises NameError -- it looks like a leftover; confirm and remove.
test_sample.columns = cols + ['label']
# +
gend_labels = gen_pred
df = new_data

# Histogram of predicted fault labels, excluding 'NML' at index 0.
# BUG FIX: `bin_list` is never defined and raised NameError; the label-name
# list used elsewhere in this notebook is `total_labels`.
plt.hist([total_labels[x] for x in gend_labels if x!=0])

fig, ax = plt.subplots(6,figsize=(15,15))
# one subplot per signal column; the most frequent fault gets '+' markers
for j in range(col_len):
    legend_list = []
    for i in range(len(unq_labels)):
        extract = df[df.label==total_labels.index(unq_labels[i])][cols[j]]
        #print(len(extract))
        # NOTE(review): `score` is only assigned in a later cell, so this
        # cell relies on out-of-order execution -- confirm intended order.
        if unq_labels[i]==score[0][0]:
            temp = ax[j].scatter(extract.index,extract,marker='+',s=40)
        else:
            temp = ax[j].scatter(extract.index,extract,marker='.',s=10)
        legend_list.append(temp)
    ax[j].legend(legend_list,unq_labels,scatterpoints=3,ncol=1,fontsize=15)
fig.tight_layout()
plt.show()
# -
# keep only the samples the binary classifier flagged as faulty
new_data = new_data[new_data.label != 0]

# +
testing = new_data[cols]

# predicting the labels for new dataset
mul_gend_labels = mul_clf.predict(testing)

# adding predicted labels to new dataframe
# +1 presumably shifts the multi-class output past 'NML' at index 0 -- TODO confirm
new_data['label'] = mul_gend_labels +1
# +
# unique predicted labels, in first-seen order
x = list(new_data['label'])
sorted(set(x), key=x.index)
# +
gend_labels = new_data['label']
total_labels = ['NML', 'AB', 'AC', 'BC', 'ABC', 'AG', 'BG', 'ABG', 'CG', 'ACG', 'BCG', 'ABCG']
df = new_data

# map unique predicted label indices (first-seen order) to their names
x = list(gend_labels)
unq_labels = sorted(set(x), key=x.index)
unq_labels = [total_labels[each] for each in unq_labels]
print(unq_labels)
# +
from collections import Counter

# (label name, count) pairs sorted ascending by count
matrics = sorted(zip([total_labels[each] for each in Counter(gend_labels).keys()],Counter(gend_labels).values() ), key=lambda x: x[1])
#print(Counter(gend_labels).values(),[total_labels[each] for each in Counter(gend_labels).keys()])

# reverse so the most frequent fault type comes first
score = [list(j) for j in matrics][::-1]
# +
# convert each raw count to a percentage string in place
total = sum([i[1] for i in score])
c=0
for i in score:
    score[c][1] = str(round(i[1]*100/total,2))+"%"
    #print("Fault type:", i[-1], "Percentage: {:.2f}%".format(i[1]*100/total))
    c+=1
# -
score
# ## Plotting predicted data
# +
# tabulate fault-type percentages computed above
print(pd.DataFrame.from_records(score,columns=['Fault type','Percentage']))

# histogram of predicted fault types, excluding 'NML' at index 0
plt.hist([total_labels[x] for x in gend_labels if x!=0])

fig, ax = plt.subplots(6,figsize=(15,20))
df = new_data
# one subplot per signal column; the most frequent fault gets '+' markers
for j in range(col_len):
    legend_list = []
    for i in range(len(unq_labels)):
        extract = df[df.label==total_labels.index(unq_labels[i])][cols[j]]
        #print(len(extract))
        if unq_labels[i]==score[0][0]:
            temp = ax[j].scatter(extract.index,extract,marker='+',s=40)
        else:
            temp = ax[j].scatter(extract.index,extract,marker='.',s=10)
        legend_list.append(temp)
    ax[j].legend(legend_list,unq_labels,scatterpoints=3,ncol=1,fontsize=15)
fig.tight_layout()
plt.show()
# -
| .ipynb_checkpoints/SVM_classifier-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WxiNIYTnzCdw" colab_type="text"
# # [The Impact of Smartphones on the Camera Industry](https://www.reddit.com/r/dataisbeautiful/comments/d4mh5k/the_impact_of_smartphones_on_the_camera_industry/)
#
# In this notebook I set out to recreate the following graphic from /r/dataisbeautiful subreddit:
#
# 
# + [markdown] id="jQAQHLza0F0e" colab_type="text"
# ## Get the data
#
# First, I import some common data science Python libraries:
# + id="-3efoTuJc2fx" colab_type="code" colab={}
import numpy as np # numpy for arrays and matrices
import pandas as pd # pandas for data manipulation
import matplotlib.pyplot as plt # matplotlib for plotting
# + [markdown] id="QPeC54aN0ZU4" colab_type="text"
# For camera data, the author's citation used the following source from [CIPA](http://www.cipa.jp/stats/documents/common/cr200.pdf). I used the same source and converted the PDF to a table using Adobe Acrobat's export to Excel feature and saved the relevant columns to a csv.
#
# For smartphone sales, the author used Statista, which is a paid service. Since I didn't have access, I used the following source from [Wikipedia](https://en.wikipedia.org/wiki/List_of_best-selling_mobile_phones#Annual_sales_by_manufacturer). I copied the table into Excel and saved the file as a csv.
# + id="g4BVjC2PdHPr" colab_type="code" colab={}
# read csv files into pandas DataFrames
# (camera shipments derived from the CIPA report; phone sales scraped from Wikipedia)
df_cameras = pd.read_csv('https://raw.githubusercontent.com/lejimmy/data-is-beautiful/master/smartphones-impact/cr200.csv')
df_phones = pd.read_csv('https://raw.githubusercontent.com/lejimmy/data-is-beautiful/master/smartphones-impact/phone-sales.csv')
# + id="68LiMO3D14Ny" colab_type="code" outputId="49b3cb0f-1e66-4709-89ef-e1450090ee2c" executionInfo={"status": "ok", "timestamp": 1576690289007, "user_tz": 360, "elapsed": 871, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 359}
# preview camera table
df_cameras.head(10)
# + id="yt7xMKEa-Rfa" colab_type="code" outputId="93a3b92c-d5a5-4ade-d3e6-97a7b0bbcaf2" executionInfo={"status": "ok", "timestamp": 1576690289008, "user_tz": 360, "elapsed": 852, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 153}
# summary of camera table
df_cameras.info()
# + id="eNpP4bZz17jS" colab_type="code" outputId="78f6e3b8-481f-474b-b9bb-53ba2a2df4ce" executionInfo={"status": "ok", "timestamp": 1576690289152, "user_tz": 360, "elapsed": 984, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 224}
# preview phone table
df_phones.tail()
# + id="fbBxb3v5-cUG" colab_type="code" outputId="b96a424b-9a82-472b-e1d7-4c53a19fa474" executionInfo={"status": "ok", "timestamp": 1576690289153, "user_tz": 360, "elapsed": 966, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 578}
# summary of phone table
df_phones.info()
# + [markdown] id="JzANm1KV9YwJ" colab_type="text"
# From the data sources, I know that for the camera sales, the number of `shipments` is in thousands of units.
#
# For the total camera phone sales, the `Total` is for millions of units sold across all manufacturers.
# + [markdown] id="6oMSE7xN9yiv" colab_type="text"
# ## Clean the data
#
# Our goal is to have a final dataframe with only 3 columns: year, camera sales, and phone sales.
#
# ### Camera sales
#
# For camera sales, we'll simply drop the value of camera sales and rename the `shipments` column to `camera_sales`. We may revisit this at a future date if we are curious about how the value of shipments changed.
# + id="_xEJqLTL-6Gw" colab_type="code" colab={}
# drop the value column (shipment value is not needed for this chart)
df_cameras.drop(columns = 'value', inplace = True)

# + id="M28m5n77ELVu" colab_type="code" colab={}
# rename shipments column
df_cameras.rename(columns = {'shipments': 'camera_sales'}, inplace = True)
# + id="QOVucuEOD3d5" colab_type="code" outputId="650a8f7e-264e-4424-87c5-ffab8d160822" executionInfo={"status": "ok", "timestamp": 1576690289155, "user_tz": 360, "elapsed": 937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 419}
# view cleaned camera table
df_cameras
# + id="sCXxXUZ-DzJb" colab_type="code" outputId="176aeb86-d593-4332-a144-922ab4b0fe6e" executionInfo={"status": "ok", "timestamp": 1576690289156, "user_tz": 360, "elapsed": 928, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
# summary of cleaned cameras table
df_cameras.info()
# + [markdown] id="GoUkHXjH_DZm" colab_type="text"
# ### Phone sales
#
# For phone sales, there is a lot of extra information we don't need. For this section, I previewed the table after each cleaning step to make sure I was getting closer to the desired result.
#
# We will complete the following cleaning steps:
# - Transpose and extract the row for `Total` phone sales
# - Drop the `Manufacturer` row
# - Rename years 2015-2018 to remove the `*` suffixes.
# - Reset the index
# - Rename the columns
# - Convert data types
# + id="9X5txv4Eoath" colab_type="code" colab={}
# rotate the table, and extract only row 27.
# NOTE(review): 27 is the hard-coded position of the 'Total' row in the
# scraped Wikipedia table -- it breaks if the table layout changes.
df_phones = pd.DataFrame(df_phones.T[27])

# + id="kVMF95IHxzrc" colab_type="code" colab={}
# drop manufacturer row
df_phones.drop('Manufacturer', axis = 0, inplace = True)

# + id="PMZvrSVfyAGt" colab_type="code" colab={}
# use lambda function to extract only the first 4 numbers in each year
# (this strips the '*' footnote suffixes on the 2015-2018 labels)
df_phones.rename(lambda x:x[:4], inplace = True)

# + id="Xa7jFqI2ydlQ" colab_type="code" colab={}
# reset indices
df_phones.reset_index(inplace = True)

# + id="zQEZZ_9pyHs7" colab_type="code" colab={}
# rename columns: row label 27 -> phone_sales, old index -> year
df_phones.rename(columns = {27: 'phone_sales', 'index': 'year'}, inplace = True)

# + id="JKk3tDWXy4Ox" colab_type="code" colab={}
# change column data types
df_phones['year'] = df_phones['year'].astype(int)
df_phones['phone_sales'] = df_phones['phone_sales'].astype(float)
# + id="bphQnktGyheL" colab_type="code" outputId="2bc0d8cf-458d-4935-d1f0-d1179d92c011" executionInfo={"status": "ok", "timestamp": 1576690289251, "user_tz": 360, "elapsed": 985, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 886}
# view cleaned phone table
df_phones
# + id="GaJAtgdyDnF5" colab_type="code" outputId="d603b274-4296-40dd-a154-46c3c433bfd4" executionInfo={"status": "ok", "timestamp": 1576690289252, "user_tz": 360, "elapsed": 975, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
# summary of cleaned phone table
df_phones.info()
# + [markdown] id="ywoNpOecEZDF" colab_type="text"
# Now that both of our camera and phone shipment tables are cleaned, we will join them into one table with `year` as the matching column.
# + id="YpUnyf2Fu0-l" colab_type="code" colab={}
# left join: keep every camera-sales year, matching phone sales where available
df_combined = df_cameras.merge(df_phones, how = 'left', on = 'year')
# + [markdown] id="UPqqfqETFKw-" colab_type="text"
# Since our phone sales data begins in `1997`, we'll replace missing values with 0
# + id="-g92pa7kE8Q2" colab_type="code" colab={}
# Fill in missing values (years before phone data begins) with 0.
# BUG FIX: calling fillna(inplace=True) on a selected column is chained
# assignment -- it warns on modern pandas and is a no-op under
# copy-on-write; assign the result back instead.
df_combined['phone_sales'] = df_combined['phone_sales'].fillna(0)
# + id="KG5yXk5gz-Sj" colab_type="code" outputId="cea589cb-f13a-4926-b6ec-71b70a8fb760" executionInfo={"status": "ok", "timestamp": 1576690289351, "user_tz": 360, "elapsed": 1052, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 419}
# preview combined table
df_combined
# + id="osHJweYf0ajE" colab_type="code" outputId="c0a7e85b-fcef-407d-ffea-f136b2df62f2" executionInfo={"status": "ok", "timestamp": 1576690289354, "user_tz": 360, "elapsed": 1043, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 153}
# summary of combined table
df_combined.info()
# + [markdown] id="1nUcVMU5G_Cp" colab_type="text"
# As mentioned previously, the number of camera sales are in the 1000s and the number of phone sales is in the millions. We'll adjust the numbers so we can graph them on the same scale here:
# + id="cdi3ww2K0I6y" colab_type="code" colab={}
# divide camera sales by 1000 to share the same units
df_combined['camera_sales'] = df_combined['camera_sales'] / 1000
# + id="aYkoMHCcwOEI" colab_type="code" outputId="932488bd-4730-45a0-c2a0-0593658adb1d" executionInfo={"status": "ok", "timestamp": 1576690289356, "user_tz": 360, "elapsed": 1026, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 419}
# preview combined table with final scale
df_combined
# + [markdown] id="eBJoyVBWHPmi" colab_type="text"
# ## Plot the results
# + id="oq2pDIlOwV5F" colab_type="code" outputId="de722287-77ae-4d12-cb25-8e8ca52f63e7" executionInfo={"status": "ok", "timestamp": 1576690290024, "user_tz": 360, "elapsed": 1682, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 559}
# use matplotlib library to plot two lines
# dual-axis plot: camera sales (blue, left axis) vs phone sales (green, right axis)
fig, ax1 = plt.subplots(figsize=(10,8))
ax1.plot('year', 'camera_sales', data = df_combined, color = 'blue')
ax1.set_xlim(1990,2020)
ax1.set_xlabel('Year', fontsize = 15)
ax1.set_ylabel('Millions of Units', fontsize = 15)
ax1.tick_params(axis = 'y', colors = 'b')
# second y-axis sharing the same x-axis, scaled independently for phone sales
ax2 = ax1.twinx()
ax2.plot('year', 'phone_sales', data = df_combined, color = 'green')
ax2.tick_params(axis = 'y', colors = 'g')
fig.suptitle('The Impact of Smartphone Sales on the Camera Industry', fontsize = 18)
# leave room below the suptitle
fig.subplots_adjust(top=.92)
fig.legend(loc = 'upper left', bbox_to_anchor = (0.08, 0.86))
fig.savefig('graph1.png');
# + [markdown] id="fTADd5MvvHWV" colab_type="text"
# # Conclusion:
#
# As camera phones have become ubiquitous in our society, it is reasonable to assume that digital camera sales have plummeted.
#
# Certainly the graphic makes the case that there is a correlation between the rise of the smartphone and the decline of the digital camera.
#
# However, because of the different y-axis scales, this effect is dramatized. If I replot the graph with the same shared y-axis:
# + id="yvT2kLsR1yS9" colab_type="code" outputId="2dca43e2-ebb2-46d0-e434-32463a2b44d8" executionInfo={"status": "ok", "timestamp": 1576690290394, "user_tz": 360, "elapsed": 2038, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCpHw0Rnn7ubfDaY4ddKYjRaH2P5-OnP0SBVU7515I=s64", "userId": "09945765524555120155"}} colab={"base_uri": "https://localhost:8080/", "height": 513}
# replot on the same shared y axis
# a single shared axis makes the true relative magnitudes of the two series visible
plt.figure(figsize=(10,8))
plt.plot('year', 'phone_sales', data = df_combined, color = 'g')
plt.plot('year', 'camera_sales', data = df_combined, color = 'b')
plt.title('The Impact of Smartphone Sales on the Camera Industry')
plt.xlim(1990, 2020)
plt.xlabel('Year')
plt.ylabel('Millions of Units Sold')
plt.legend()
plt.savefig('graph2.png');
# + [markdown] id="vlNWT7CUxIT6" colab_type="text"
# From this chart, it appears that camera sales were not even a competing product. Maybe the original chart received over 50k upvotes because it reinforced a popular assumption that "you don't need a digital camera anymore since every smartphone has one", but from my recreation, I'm not convinced this is the case.
| smartphones-impact/Smartphones_Impact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Oxford-iiit-pet (tensorflow)
# > !pip install git+https://github.com/phylsix/dOD.git
# +
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from dOD.tf_model.model import UNet
from dOD.tf_model.datasets import oxford_iiit
from dOD.tf_model.trainer import Trainer
H, W = oxford_iiit.IMAGE_SIZE
# -
train_ds, test_ds = oxford_iiit.load_data()
modelMaker = UNet(input_shape=(H, W, oxford_iiit.CHANNELS),
kernel_shape=(3, 3), nlayer=2,
root_feature=64, depth=5, padding='same', activation='relu', norm_type='instancenorm',
drop_rate=0, num_classes=oxford_iiit.CLASSES)
modelMaker.build_net()
modelMaker.compile(learning_rate=1e-3,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['sparse_categorical_accuracy'])
modelMaker.describle()
# + tags=[]
T = Trainer(logbase=None)
history = T.fit(modelMaker.net, train_ds, validation_dataset=test_ds, epochs=25, batch_size=16)
# -
def show_history(history, EPOCHS=25):
    """Plot training/validation loss (left panel) and accuracy (right panel) over epochs."""
    epochs = range(EPOCHS)
    fig, axes = plt.subplots(1, 2, figsize=(14, 5))
    # (history key for training curve, history key for validation curve,
    #  word used in labels/title, y-axis label)
    panels = [
        ('loss', 'val_loss', 'loss', 'Loss'),
        ('sparse_categorical_accuracy', 'val_sparse_categorical_accuracy',
         'accuracy', 'Accuracy'),
    ]
    for ax, (train_key, val_key, word, ylabel) in zip(axes, panels):
        ax.plot(epochs, history.history[train_key], 'r--', label='training ' + word)
        ax.plot(epochs, history.history[val_key], 'bo', label='validation ' + word)
        ax.set_title('Training and validation ' + word)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.legend()
show_history(history, 25)
def show_result(train_ds, model, N=5):
    """Show N random samples in a 3xN grid: input image, ground-truth mask, model prediction.

    train_ds: tf.data.Dataset yielding (image, label) pairs — assumed unbatched
              since we batch(1) here; TODO confirm against the data pipeline
    model:    trained segmentation model exposing .predict
    N:        number of sample columns to display
    """
    fig, axes = plt.subplots(3, N, sharex=True, sharey=True,
                             figsize=(N * 3, 9), gridspec_kw={'wspace': 0.01, 'hspace': 0.01})
    for i, (img, label) in enumerate(train_ds.shuffle(buffer_size=1000).take(N).batch(1)):
        # predict on a batch of one; [0] drops the batch dimension
        prediction = model.predict(img)[0]
        axes[0][i].matshow(img[0])
        # label's last axis appears to be a single-channel class map ([..., 0])
        axes[1][i].matshow(label[0, ..., 0], )
        # collapse per-class scores into a class-index map over the channel axis
        axes[2][i].matshow(prediction.argmax(axis=-1), )
    for ax in axes:
        for x in ax:
            # hide tick labels so the panels tile seamlessly
            x.set_xticklabels([])
            x.set_yticklabels([])
    # row titles on the leftmost column
    for i, x in enumerate(['image', 'label', 'prediction']):
        axes[i][0].set_ylabel(x)
show_result(train_ds, modelMaker.net)
| notebooks/tf_oxford-iiit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import safenet
safenet.setup_logger(file_level=safenet.log_util.WARNING)
myApp = safenet.App()
myAuth_,addData=safenet.safe_utils.AuthReq(myApp.ffi_app.NULL,0,0,id=b'crappy_chat_reloaded',scope=b'noScope'
,name=b'i_love_it',vendor=b'no_vendor',app_container=True,ffi=myApp.ffi_app)
grantedAuth='<KEY>'
myApp.setup_app(myAuth_,grantedAuth)
myMutable = myApp.mData()
mutableBytes = b'\x97Y\x85\x9b\xf9\x07U\xeb-\x8a$k|\x02*qR\x01\x16\xd5~\xfde\x9f\x1cd\x9f\xb8\xa5\x98\xf0^\t\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/\xe3G\x7f\x00\x00'
infoData= safenet.safe_utils.getffiMutable(mutableBytes,myMutable.ffi_app)
userName = input('choose a username: ')
if userName == '':
userName = 'anon'
def getNewEntries(lastState, newState):
    """Return (entries present in newState but missing from lastState, newState)."""
    added = {key: newState[key] for key in newState if key not in lastState}
    return added, newState
lastState={}
def printStuff(lastState):
    """Print every chat entry as 'key: value'; keys and values are bytes."""
    for entry_key, entry_value in lastState.items():
        print(f'{entry_key.decode()}: {entry_value.decode()}')
import datetime
# main chat loop: poll the SAFE mutable data for new messages, display the
# current state, then append the user's own message — until they type 'exit'
newMessage=None
while newMessage != 'exit':
    # fetch the full chat state and diff it against what we have already seen
    additionalEntries, lastState = getNewEntries(lastState,myMutable.getCurrentState(infoData))
    printStuff(lastState)
    newMessage = input('[your message (enter exit to leave)]: ')
    # NOTE(review): the while-condition is case-sensitive — typing 'EXIT' skips
    # the send below but does NOT leave the loop; confirm this is intended
    if newMessage and newMessage.lower() != 'exit':
        # entry key is 'timestamp username', value is the message text
        now = datetime.datetime.utcnow().strftime('%Y-%m-%d - %H:%M:%S')
        myName = userName
        text = newMessage
        timeUser = f'{now} {myName}'
        additionalEntries={timeUser:text}
        myMutable.insertEntries(infoData,additionalEntries)
| crappyChat_reloaded_commandLine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''tfo'': conda)'
# name: python388jvsc74a57bd045f983f364f7a4cc7101e6d6987a2125bf0c2b5c5c9855ff35103689f542d13f
# ---
# Full configuration for training a streaming RNN-Transducer ASR model
# (TensorFlowASR-style nested config dict).
config = {
    # acoustic front-end: 80-bin log-mel spectrograms from 16 kHz audio,
    # 25 ms frames with a 10 ms stride
    "speech_config": {
        "sample_rate": 16000,
        "frame_ms": 25,
        "stride_ms": 10,
        "num_feature_bins": 80,
        "feature_type": "log_mel_spectrogram",
        "preemphasis": 0.97,
        "normalize_signal": True,
        "normalize_feature": True,
        "normalize_per_frame": False,
    },
    # text tokenization and beam-search decoding settings
    "decoder_config": {
        "vocabulary": None,
        "target_vocab_size": 1024,
        "max_subword_length": 4,
        "blank_at_zero": True,
        "beam_width": 5,
        "norm_score": True,
    },
    # encoder / prediction / joint network architecture
    "model_config": {
        "name": "streaming_transducer",
        "encoder_reductions": {0: 3, 1: 2},
        "encoder_dmodel": 320,
        "encoder_rnn_type": "lstm",
        "encoder_rnn_units": 1024,
        "encoder_nlayers": 8,
        "encoder_layer_norm": True,
        "prediction_embed_dim": 320,
        "prediction_embed_dropout": 0.0,
        "prediction_num_rnns": 2,
        "prediction_rnn_units": 1024,
        "prediction_rnn_type": "lstm",
        "prediction_projection_units": 320,
        "prediction_layer_norm": True,
        "joint_dim": 320,
        "joint_activation": "tanh",
    },
    "learning_config": {
        # training data with SpecAugment-style time/frequency masking
        "train_dataset_config": {
            "use_tf": True,
            "augmentation_config": {
                "feature_augment": {
                    "time_masking": {
                        "num_masks": 10,
                        "mask_factor": 100,
                        "p_upperbound": 0.05,
                    },
                    "freq_masking": {"num_masks": 1, "mask_factor": 27},
                }
            },
            "data_paths": [
                "/mnt/h/ML/Datasets/ASR/Raw/LibriSpeech/train-clean-100/transcripts.tsv"
            ],
            "tfrecords_dir": None,
            "shuffle": True,
            "cache": True,
            "buffer_size": 100,
            "drop_remainder": True,
            "stage": "train",
        },
        # evaluation data (no augmentation; paths left unset here)
        "eval_dataset_config": {
            "use_tf": True,
            "data_paths": None,
            "tfrecords_dir": None,
            "shuffle": False,
            "cache": True,
            "buffer_size": 100,
            "drop_remainder": True,
            "stage": "eval",
        },
        "test_dataset_config": {
            "use_tf": True,
            "data_paths": [
                "/mnt/h/ML/Datasets/ASR/Raw/LibriSpeech/test-clean/transcripts.tsv"
            ],
            "tfrecords_dir": None,
            "shuffle": False,
            "cache": True,
            "buffer_size": 100,
            "drop_remainder": True,
            "stage": "test",
        },
        "optimizer_config": {"class_name": "adam", "config": {"learning_rate": 0.0001}},
        # batching, checkpointing, state backup and TensorBoard logging
        "running_config": {
            "batch_size": 2,
            "num_epochs": 20,
            "checkpoint": {
                "filepath": "/mnt/e/Models/local/rnn_transducer/checkpoints/{epoch:02d}.h5",
                "save_best_only": True,
                "save_weights_only": True,
                "save_freq": "epoch",
            },
            "states_dir": "/mnt/e/Models/local/rnn_transducer/states",
            "tensorboard": {
                "log_dir": "/mnt/e/Models/local/rnn_transducer/tensorboard",
                "histogram_freq": 1,
                "write_graph": True,
                "write_images": True,
                "update_freq": "epoch",
                "profile_batch": 2,
            },
        },
    },
}
# dataset statistics precomputed offline so the pipeline can skip a metadata pass
metadata = {
    "train": {"max_input_length": 2974, "max_label_length": 194, "num_entries": 281241},
    "eval": {"max_input_length": 3516, "max_label_length": 186, "num_entries": 5567},
}
# +
import os
import math
import argparse
from tensorflow_asr.utils import env_util
# configure logging/devices before TensorFlow is otherwise touched
env_util.setup_environment()
import tensorflow as tf
tf.keras.backend.clear_session()
# enable automatic mixed precision in the graph optimizer
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
# distribution strategy pinned to GPU index 0
strategy = env_util.setup_strategy([0])
from tensorflow_asr.configs.config import Config
from tensorflow_asr.datasets import asr_dataset
from tensorflow_asr.featurizers import speech_featurizers, text_featurizers
from tensorflow_asr.models.transducer.rnn_transducer import RnnTransducer
from tensorflow_asr.optimizers.schedules import TransformerSchedule
# wrap the raw dict defined in the previous cell into a typed Config object
config = Config(config)
speech_featurizer = speech_featurizers.TFSpeechFeaturizer(config.speech_config)
text_featurizer = text_featurizers.CharFeaturizer(config.decoder_config)
# indefinite=True makes the dataset repeat so steps_per_epoch bounds each epoch
train_dataset = asr_dataset.ASRSliceDataset(
    speech_featurizer=speech_featurizer,
    text_featurizer=text_featurizer,
    **vars(config.learning_config.train_dataset_config),
    indefinite=True
)
eval_dataset = asr_dataset.ASRSliceDataset(
    speech_featurizer=speech_featurizer,
    text_featurizer=text_featurizer,
    **vars(config.learning_config.eval_dataset_config),
    indefinite=True
)
# load precomputed lengths/counts instead of scanning the data
train_dataset.load_metadata(metadata)
eval_dataset.load_metadata(metadata)
speech_featurizer.reset_length()
text_featurizer.reset_length()
# scale the per-replica batch size by the number of replicas in the strategy
global_batch_size = config.learning_config.running_config.batch_size
global_batch_size *= strategy.num_replicas_in_sync
train_data_loader = train_dataset.create(global_batch_size)
eval_data_loader = eval_dataset.create(global_batch_size)
with strategy.scope():
    # build model
    rnnt = RnnTransducer(**config.model_config, vocabulary_size=text_featurizer.num_classes)
    rnnt.make(speech_featurizer.shape)
    rnnt.summary(line_length=100)
    rnnt.compile(
        optimizer=config.learning_config.optimizer_config,
        experimental_steps_per_execution=10,
        global_batch_size=global_batch_size,
        blank=text_featurizer.blank
    )
# checkpointing, crash recovery, and TensorBoard logging
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(**config.learning_config.running_config.checkpoint),
    tf.keras.callbacks.experimental.BackupAndRestore(config.learning_config.running_config.states_dir),
    tf.keras.callbacks.TensorBoard(**config.learning_config.running_config.tensorboard)
]
rnnt.fit(
    train_data_loader,
    epochs=config.learning_config.running_config.num_epochs,
    validation_data=eval_data_loader,
    callbacks=callbacks,
    steps_per_epoch=train_dataset.total_steps,
    validation_steps=eval_dataset.total_steps
)
| notebooks/rnn_transducer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
path='splash'
tilenames=os.listdir(path)
tilenames
# delete every tile whose filename does not contain '_0'
for tile in tilenames:
    if '_0' not in tile:
        # os.path.join is portable; the original '\\' separator only works on Windows
        os.remove(os.path.join(path, tile))
a='ab_cdefg'
if '_c' not in a:
print('No')
else:
print('yes')
import os
path='tiles'
b='abc.txt'
path=f'{path}\\{b}'
os.remove(path)
print("\\")
path='tiles'
b='\\'
f'{path}{b}'
print( r'\n' )
| SplashFilter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Python for Bioinformatics
# -----------------------------
#
# 
#
# This Jupyter notebook is intended to be used alongside the book [Python for Bioinformatics](http://py3.us/)
#
#
# Chapter 6: Code Modularizing
# --------------------------------
len('Hello')
# **Listing 6.1:** netchargefn: Function to calculate the net charge of a protein
def protcharge(aa_seq):
    """Returns the net charge of a protein sequence"""
    # per-residue charge contributions; anything not listed contributes 0
    aa_charge = {'C': -.045, 'D': -.999, 'E': -.998, 'H': .091,
                 'K': 1, 'R': 1, 'Y': -.001}
    # -0.002 is the baseline charge of the (uncharged) termini
    return -0.002 + sum(aa_charge.get(aa, 0) for aa in aa_seq.upper())
protcharge('EEARGPLRGKGDQKSAVSQKPRSRGILH')
protcharge()
# **Listing 6.2:** netchargefn: Function that returns two values
def charge_and_prop(aa_seq):
    """ Returns the net charge of a protein sequence
        and proportion of charged amino acids
    """
    aa_charge = {'C': -.045, 'D': -.999, 'E': -.998, 'H': .091,
                 'K': 1, 'R': 1, 'Y': -.001}
    protseq = aa_seq.upper()
    # net charge: baseline plus per-residue contributions
    charge = -0.002 + sum(aa_charge.get(aa, 0) for aa in protseq)
    # percentage of residues carrying a charge
    charged_count = sum(1 for aa in protseq if aa in aa_charge)
    prop = 100. * charged_count / len(aa_seq)
    return (charge, prop)
charge_and_prop('EEARGPLRGKGDQKSAVSQKPRSRGILH')
charge_and_prop('EEARGPLRGKGDQKSAVSQKPRSRGILH')[1]
# **Listing 6.3:** convertlist.py: Converts a list into a text file
def save_list(input_list, file_name):
    """A list (input_list) is saved in a file (file_name), one item per line"""
    with open(file_name, 'w') as out_file:
        out_file.writelines('{0}\n'.format(entry) for entry in input_list)
    return None
# scoping demo 1: 'y' here is a local variable inside duplicate
def duplicate(x):
    y = 1
    print('y = {0}'.format(y))
    return(2*x)
duplicate(5)
# the local 'y' above did not leak out — this line raises NameError
y
# scoping demo 2: no local or global 'y' exists yet, so the call raises NameError
def duplicate(x):
    print('y = {0}'.format(y))
    return(2*x)
duplicate(5)
y = 3
# scoping demo 3: with a global 'y' defined, the function reads it (prints y = 3)
def duplicate(x):
    print('y = {0}'.format(y))
    return(2*x)
duplicate(5)
y = 3
# scoping demo 4: the local assignment 'y = 1' shadows the global (prints y = 1)
def duplicate(x):
    y = 1
    print('y = {0}'.format(y))
    return(2*x)
duplicate(5)
# scoping demo 5: 'global z' makes the assignment rebind the module-level z
def test(x):
    global z
    z = 10
    print('z = {0}'.format(z))
    return x*2
z = 1
test (4)
# z is now 10 — overwritten inside test()
z
# **Listing 6.4:** list2textdefault.py: Function with a default parameter
def save_list(input_list, file_name='temp.txt'):
    """A list (input_list) is saved in a file (file_name, default 'temp.txt')"""
    with open(file_name, 'w') as destination:
        destination.writelines('{0}\n'.format(element) for element in input_list)
    return None
save_list(['MS233','MS772','MS120','MS93','MS912'])
# **Listing 6.5:** getaverage.py: Function to calculate the average of values entered
# as parameters
def average(*numbers):
    """Return the arithmetic mean of the arguments, or None when none are given."""
    if not numbers:
        return None
    return sum(numbers) / len(numbers)
average(2,3,4,3,2)
average(2,3,4,3,2,1,8,10)
# **Listing 6.6:** list2text2.py: Converts a list into a text file, using print and *
def save_list(input_list, file_name='temp.txt'):
    """A list (input_list) is saved to a file (file_name), one item per line"""
    with open(file_name, 'w') as output_stream:
        # print joins the unpacked items with newlines and appends a trailing one
        print(*input_list, sep='\n', file=output_stream)
    return None
# **Listing 6.7:** list2text2.py: Function that accepts a variable number of arguments
def commandline(name, **parameters):
    """Build a command line: name followed by ' -key value' for each parameter."""
    options = ''.join(' -{0} {1}'.format(flag, value)
                      for flag, value in parameters.items())
    return name + options
commandline('formatdb', t='Caseins', i='indata.fas')
commandline('formatdb', t='Caseins', i='indata.fas', p='F')
# **Listing 6.8:** allprimes.py: Function that returns all prime numbers up to a given
# value
# +
def is_prime(n):
    """Returns True if n is prime, False if not"""
    if n < 2:
        # 0, 1 and negatives are not prime (the original reported 1 as prime)
        return False
    for candidate in range(2, n - 1):
        if n % candidate == 0:
            return False
    return True

def all_primes(n):
    """Return a list of every prime number below n"""
    primes = []
    for number in range(1, n):
        # bug fix: original called undefined 'isprime' and returned undefined 'p'
        if is_prime(number):
            primes.append(number)
    return primes
# -
# **Listing 6.9:** allprimesg.py: Generator that replaces putn() in code 6.8.
def g_all_primes(n):
    """Generator yielding every prime number below n."""
    yield from (number for number in range(1, n) if is_prime(number))
# Modules and Packages
# ----------
# utils.py file
def save_list(input_list, file_name='temp.txt'):
    """A list (input_list) is saved to a file (file_name)"""
    # one item per line; print supplies the newline separators and final newline
    with open(file_name, 'w') as sink:
        print(*input_list, sep='\n', file=sink)
    return None
# Since utils.py is not present in this shell, the following command will retrieve this file from GitHub and store it in the local shell so it is available for importing by Python
# !curl https://raw.githubusercontent.com/Serulab/Py4Bio/master/code/ch6/utils.py -o utils.py
import utils
utils.save_list([1,2,3])
# !cat temp.txt
| notebooks/Chapter 6 - Code Modularizing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. Dimensionality Reduction - PCA
# It can be very challenging to generate hypotheses regarding either single neurons or the population when looking at high-dimensional population activity. Dimensionality reduction techniques can help by giving a low-dimensional summary of the high-dimensional population activity, and thus provide an efficient way to explore and visualise the data.
#
# The goal of this exercise is to learn how to apply PCA to neural data and how to interpret the results.
# We will start by analyzing a relatively simple dataset.
#
# The dataset was collected by [Graf *et al*, 2011](http://www.nature.com/neuro/journal/v14/n2/full/nn.2733.html).
#
# Details about the dataset:
# - Neural activity recorded from 65 V1 neurons using multi-electrode arrays
# - The subject was an anesthetized monkey.
# - Stimuli were drifing sinusoidal gratings of 0 and 90 degrees, randomly interleaved.
# - Each stimulus lasted 2560ms. The first 1280ms consisted of a grating, the second 1280 consisted of a blank screen.
# - The dataset contains 100 stimulus repetitions.
# - The neural activity is quantified by counting the number of spikes into 40 ms time bins. Each stimulus therefore has 64 time bins (2560/40).
# - The dataset you will work with is a small subset of the original dataset.
#
#
# If there is time left, we will try our hand at the neuropixels dataset. This tutorial is inspired by exercises from Jonathan Pillow (see homework 1 of the course http://pillowlab.princeton.edu/teaching/statneuro2018/).
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy.io import loadmat
from sklearn.decomposition import PCA
from numpy.linalg import eig
# ### 3.1 Visualize the data
#
# The data consist of a (6400, 65) matrix of binned spike counts. Each column constains the spike counts of one neuron, each row contains the spike counts in one time bin.
#
# **a.**
# Plot the population response during the first
# 5 stimuli (first 320 rows of X). Tip: see `plt.imshow()` to visualise the population response. The responses should show clear stimulus-locking.
#
# +
data = loadmat('v1data_Graf2011.mat')
X = data['Msp']
print('Dimensions of X:',X.shape)
# Your code goes here:
# -
# **b.** Plot the responses of neurons 8 and 32 (columns 8 and 32) over the first 5 stimuli.
# Question: What is the main difference in the response properties of neuron 8 and 32?
# Answer: Their responses are anti-correlated.
# ### 3.2 Investigate the dimensionality of the data using PCA
#
# Recall that PCA finds an ordered set of activity patterns (principal components) that explain most variance in the data. Mathematically, the principal components are the eigenvectors of the covariance matrix $X^T X/(n-1)$. The variance that they capture is measured by the corresponding eigenvalue. In practice, we don't have to work with eigenvectors but we can use the class `sklearn.decomposition.PCA`. Use the function `fit` and variable `pca.explained_variance_ratio_` to answer the following question.
#
# **a.**
# Fit PCA to the spike count data. Next, visualize the dimensionality of the data by making two figures.
# The first figure should show the fraction of variance explained. The second figure should show the cumulative sum of the fraction of variance explained. Note that both the x-axis should read 'PCs' for both.
# +
from sklearn.decomposition import PCA
# create an PCA object.
# Giving it no input we won't reduce the data yet
pca = PCA(n_components=None)
# Your code goes here:
# -
# Question: How many components are needed to account for 50% of the variance in the data? And for 90%?
# Answer:
# **3.**
# Each principal component (PC) is a vector of length equal to the number of neurons. A PC can therefore be interpreted as an activity pattern, where the $i$th component of a PCs is the deviation of this neuron from its mean rate (PCA explains variance, so average deviation from the mean).
#
# Plot the first PC (The PCs are stored in the variable `pca.components_`). By definition, this is the single activity pattern that explains the most variance in the data.
# Question:
# What do you notice about the sign of its elements? What does this tell you about the dominant activity pattern?
# **4.** Plot the second PC. How do the values of neuron 8 and 32 (the neurons you previously looked at) compare?
# **5.** Use the function `pca.transform` to transform the data. The result is again a (6400, 65) matrix. The first column contains the projection of the neural activity onto the first PC. This vector of length 6400 is the similarity of the population activity to the first PC, over time. Next, make a scatter plot of the first PC agains the second PC.
# Question:
# Can you speculate on what is going on here?
# **6.**
# Plot the first 320 time bins of PC 1 and PC 2 over time to get a final answer of what the first PCs could represent.
| CNS/tutorial_questions/Code_Data/PCA_tutorial_questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://leetcode.com/problems/valid-parentheses/submissions/
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in s is closed by the matching bracket
        in the correct order (classic stack check); an empty string is valid.
        """
        # map each closer to the opener it must match
        pairs = {')': '(', '}': '{', ']': '['}
        stack = []
        for ch in s:
            if ch in '({[':
                stack.append(ch)
            elif ch in pairs:
                # a closer must match the most recent unmatched opener
                if not stack or stack.pop() != pairs[ch]:
                    return False
        # any leftover openers mean the string is unbalanced
        return not stack
| tanmay/leetcode/valid-parentheses.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Flexible RCMES Workflow
#
# * Typical Steps:
# * Load Datasets
# * Temporal Regrids
# * Spatial Regrids
# * Evaluate Metrics
# * Plot Results
#
# ### Make sure that OCW functions are available.
# +
# %matplotlib inline
from functions import loadDatasets, temporalRebins, commonLatLonGrid, spatialRegrids
from functions import computeMetrics, contourPlot, mymap
# def mymap(f, s): return map(f, s) # sequential single-core map function
# -
# ### Load Reference Dataset and one or more Model Datasets
# +
path1 = "/Users/bdwilson/Documents/code/RCMES/Workshop/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
variable1 = "tasmax"
path2 = "/Users/bdwilson/Documents/code/RCMES/Workshop/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
variable2 = "tasmax"
datasets = loadDatasets([(path1, variable1), (path2, variable2)], dir='./')
print datasets
# -
# ### Temporal Rebin
# +
import ocw.dataset_processor as dsp
from datetime import timedelta
timeRes=timedelta(days=365)
datasets = temporalRebins(datasets, timeRes)
# -
# ### Spatial Regrid
# +
latRes = 1.0 # degrees
lonRes = 1.0 # degrees
lats, lons = commonLatLonGrid(datasets, latRes, lonRes)
# Find common spatial bounds, return desired grid with specified resolution
datasets = spatialRegrids(datasets, lats, lons)
# -
# ### Compute Metric(s)
metricNames = ['Bias']
results = computeMetrics(datasets, metricNames, subregions=None)
bias = results[0][0]
# ### Plot Bias between two models as time-series of contour maps
# +
outputName = "wrf_bias_compared_to_knm"
config = {'gridshape': (4, 5),
'ptitle': 'TASMAX Bias of WRF Compared to KNMI (1989 - 2008)',
'subtitles': range(1989, 2009, 1)
}
plotFile = contourPlot(bias, lats, lons, outputName, **config)
# -
# ### Display Plot
# +
from IPython.display import Image, display
display(Image(plotFile))
# -
| ocw-parallel/notebooks/Flexible RCMES Workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Author : Sahil
#
# # Task 1 : Prediction using Supervised Machine Learning
#
# ## Under The Sparks Foundation
# ## Importing Required Libraries
#
# Pandas , Numpy for Data Manipulation
# Matplotlib , Seaborn for Data Visulation
# Sklearn for Data Modelling
#
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# ## Reading the Data
#
data= pd.read_csv("http://bit.ly/w-data")
print("Data is imported successfully")
data.head()
print(data.shape)
data.describe()
data.boxplot(['Hours'])
data.boxplot(['Scores'])
# ### Hence our data does not have any major outliers; most of it is clean
# # Visualization of Data
data.plot(x="Hours" , y="Scores" , style ='*')
plt.title(" Hours Vs Scores of Students")
plt.xlabel("Hours studied by Student")
plt.ylabel("Scores obtained")
plt.show()
sns.heatmap(data.corr(),annot=True)
# ### Hours and Scores are highly correlated, with a correlation coefficient close to 1
# # Preparing Data
x=data.drop("Scores", axis=1)
y=data['Scores']
# ### Scores are dependent variables where hours are independent
y
# # Linear Regression Training
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 1)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# bug fix: every later cell calls 'scores_reg', but this cell bound the model
# to 'data_reg', causing a NameError — use one consistent name
scores_reg = LinearRegression()
scores_reg.fit(x_train, y_train)
# ## Predicting Scores
y_pred = scores_reg.predict(x_test)
y_pred
print('Training Score : ', scores_reg.score(x_train, y_train))
print('Test Score : ', scores_reg.score(x_test, y_test))
output = pd.DataFrame({'Actual Score': y_test,'Predicted Score': y_pred })
print(output.head())
#
plt.scatter(x_train, y_train, color = 'Yellow')
plt.plot(x_train, scores_reg.predict(x_train), color = 'red')
plt.title('Hours vs Scores')
plt.xlabel('Hours')
plt.ylabel('Scores')
plt.show()
# # Evaluating the Model
print('Mean absolute error : ', metrics.mean_absolute_error(y_test, y_pred))
print('Root mean square error : ', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
metrics.r2_score(y_test,y_pred)
# Hence, we obtain a good R-squared value
study_hours = 9.25
score_pred = scores_reg.predict([[study_hours]])
print("Number of hours = {}".format(study_hours))
print("Predicted scores = {}".format(score_pred[0]))
| Task 1 Supervised Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 참고문헌 : Pytel 외 저, 이주성 외 역, 재료역학, 2판, 한티미디어, 2013.<br>Ref: Pytel, Kiusalaas, Sharma, Mechanics of Materials, 2nd Ed., Cengege Learning, 2013.
# `python` 기능을 확장해 주는 `module`을 불러 들임 (일부 기능만 사용될 수도 있음)
import numpy as np # 배열, 행렬 관련 기능
import numpy.linalg as na # 선형대수 (벡터, 행렬) 관련 기능
import matplotlib.pyplot as plt # 그래프 관련 기능
import scipy.integrate as si # 적분 관련 기능
import sympy as sy # 기호 연산 기능
import sympy.plotting as splot
import IPython.display as disp # 웹페이지 표시 기능
sy.init_printing() # 기호 연산 결과 표시 기능 준비
# ## 예제 07.008<br>ex07.008
# 부정정보: 중첩법<br>Statically Indeterminate Beam : Superposition
# p. 314
# ### 문제에서 주어진 변수<br>Given Parameters
# #### 보의 길이<br>Length of the beam
# +
L_AB_m = sy.symbols('L_AB_m', real=True, nonnegative=True)
s_d = {
L_AB_m: 10,
}
# -
# #### 하중<br>Load
# +
w0_N_m = sy.symbols('w0_N_m', real=True)
s_d.update(
{
w0_N_m: -1,
}
)
# -
# #### 재료와 단면 특성<br>Material & section properties
# +
E_Pa, I_m4 = sy.symbols('E_Pa, I_m4', positive=True)
s_d.update(
{
E_Pa: 200e9,
I_m4: 20e6 * (1e-3) ** 4,
}
)
# -
# #### 자유물체도<br>Free body diagram
x_m = sy.symbols('x_m', nonnegative=True)
x_A_m = 0
x_B_m = L_AB_m
# ### 아직 알지 못하는 반력<br>Reaction forces unknown yet
R_A_N, M_A_Nm, R_B_N = sy.symbols('R_A_N, M_A_Nm, R_B_N', real=True)
# ### 중첩법<br>Superposition
# #### 외팔보 전체에 가해지는 분포하중 $w_0$에 의한 끝점의 처짐<br>Deflection of a cantilever's end point by distributed load $w_0$ over the whole length
# p. 279 Table 6.2
# $$
# \delta_{Bw_0} = \frac{w_0L^4}{8EI}
# $$
delta_Bw0_m = (w0_N_m * L_AB_m ** 4 / (8 * E_Pa * I_m4))
delta_Bw0_m
# #### 외팔보 $L$ 지점에 가해지는 반력 $R$에 의한 끝점의 처짐<br>Deflection of a cantilever's end point by reaction force $R$ at $L$
# p. 279 Table 6.2
# $$
# \delta_{BR} = \frac{RL^3}{3EI}
# $$
delta_BR_m = (R_B_N * L_AB_m ** 3 / (3 * E_Pa * I_m4))
delta_BR_m
# #### 두 처짐을 중첩함<br>Superpose two $\delta$'s
super_eq = sy.Eq(delta_Bw0_m + delta_BR_m, 0)
super_eq
# #### $R$에 관하여 풂<br>Solve for $R$
R_B_N_sol = sy.solve(super_eq, R_B_N)[0]
R_B_N_sol
# ### 평형방정식<br>Equilibrium equation
# 수직방향<br>Vertical direction
# $$
# R_A + R_B + w_0 L = 0
# $$
fy_eq = sy.Eq(R_A_N + R_B_N + w0_N_m * L_AB_m)
fy_eq
fy_eq.subs(R_B_N, R_B_N_sol)
R_A_N_sol = sy.solve(fy_eq.subs(R_B_N, R_B_N_sol), R_A_N)[0]
R_A_N_sol
# $B$점 중심 회전방향<br>Rotation direction around $B$
# $$
# M_A - R_A \cdot L - P_N \cdot (L - a)=0
# $$
m_eq = sy.Eq(M_A_Nm - R_A_N * L_AB_m - sy.Rational(1, 2) * w0_N_m * L_AB_m ** 2)
m_eq
sy.simplify(m_eq.subs(R_A_N, R_A_N_sol))
M_A_Nm_sol = sy.solve(m_eq.subs(R_A_N, R_A_N_sol), M_A_Nm)[0]
sy.simplify(M_A_Nm_sol)
| utils/tests/sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../img/logo-bdc.png" align="right" width="64" />
#
# # <span style="color:#336699">SpatioTemporal Asset Catalog (STAC)</span>
# <hr style="border:2px solid #0077b9;">
# + [markdown] id="yZn7b78KPQDG"
# The [**S**patio**T**emporal **A**sset **C**atalog (STAC)](https://stacspec.org/) is a specification created through the collaboration of several organizations, intended to increase satellite image search interoperability.
#
# The diagram depicted in the picture contains the most important concepts behind the STAC data model:
# -
# <img src="../../img/stac/stac-model.png" width="480" />
# <br/>
# <b>Figura 1</b> - STAC model.
# + [markdown] id="01at9HEIPQDJ"
# The description of the concepts below are adapted from the [STAC Specification](https://github.com/radiantearth/stac-spec):
#
# - **Item**: a `STAC Item` is the atomic unit of metadata in STAC, providing links to the actual `assets` (including thumbnails) that they represent. It is a `GeoJSON Feature` with additional fields for things like time, links to related entities and mainly to the assets. According to the specification, this is the atomic unit that describes the data to be discovered in a `STAC Catalog` or `Collection`.
#
# - **Asset**: a `spatiotemporal asset` is any file that represents information about the earth captured in a certain space and time.
#
#
# - **Catalog**: provides a structure to link various `STAC Items` together or even to other `STAC Catalogs` or `Collections`.
#
#
# - **Collection:** is a specialization of the `Catalog` that allows additional information about a spatio-temporal collection of data.
# -
# # 1. STAC API
# <hr style="border:1px solid #0077b9;">
#
#
# The STAC API is the dynamic version of a STAC. It returns `STAC Catalog`, `Collection`, `Item`, `ItemCollection` objects through several *endpoints*.
#
#
# For running the examples in this Jupyter Notebook you will need to install the [STAC client for Python](https://github.com/brazil-data-cube/stac.py). To install it from the Brazil Data Cube's GitHub repository, you can use `pip` with the following command:
# + colab={"base_uri": "https://localhost:8080/", "height": 311} id="RhG89dwJPQDM" outputId="c50d7fb0-3ca0-4481-e80f-acfd4ad4017e"
# !pip install stac.py
# -
# Let's import the `stac` package, as follows:
# + id="Nbh1DWTaPQDY"
import stac
# -
# After that, you can check the installed version of stac package:
stac.__version__
# Then, create a `STAC` object called `service` attached to the service address, which will allow us to communicate with the given `STAC` service.
service = stac.STAC('https://brazildatacube.dpi.inpe.br/stac/', access_token='XXXXXXX')
# # 2. Listing the Available Data Products
# <hr style="border:1px solid #0077b9;">
# calling the `STAC` object retrieves the image collections and data cube collections available in the server.
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="3IKREEUpPQDg" outputId="9877684a-c575-47b8-d964-3e89c4f61944"
service
# -
# # 3. Retrieving the Metadata of a Data Product
# <hr style="border:1px solid #0077b9;">
# The `collection` method returns information about a given image or data cube collection identified by its name. In this example we are retrieving information about the datacube collection `CB4_64_16D_STK-1`:
# + id="dA0RhGdSPQDn"
collection = service.collection('CB4_64_16D_STK-1')
collection
# -
# # 4. Retrieving data
# <hr style="border:1px solid #0077b9;">
# The `get_items` method performs a query for items matching a given `bbox` and `datetime` filter:
# + colab={"base_uri": "https://localhost:8080/", "height": 614} id="6hCWnHXePQDt" outputId="b32aff37-22a3-43c6-8723-3f47c416cffb"
# Query up to 10 items intersecting the bounding box over one year of
# observations (Aug 2018 - Jul 2019).
items = collection.get_items(filter={'bbox':'-46.62597656250001,-13.19716452328198,-45.03570556640626,-12.297068292853805',
                                     'datetime':'2018-08-01/2019-07-31',
                                     'limit':10})
items
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="TyimexxXPQD1" outputId="2eb3dfee-bc47-4de8-a4a4-e3485cb37e18"
# Asset hrefs for the first returned item.  NOTE(review): the variable names
# assume BAND13/BAND14/BAND15 are the blue/green/red bands of this
# collection -- confirm against the collection's band metadata.
first_date_blue_url = items['features'][0]['assets']['BAND13']['href']
first_date_green_url = items['features'][0]['assets']['BAND14']['href']
first_date_red_url = items['features'][0]['assets']['BAND15']['href']
first_date_blue_url
# + id="YWzxK1JIPQD_"
import numpy
import rasterio
from matplotlib import pyplot as plt
from rasterio.windows import Window
# %matplotlib inline
# -
# Let's use `Window` from the `rasterio` package to read a subset of the original image, in this case from row 0 to row 3000 and column 0 to column 3000
# + id="SBW91SsdPQEF"
# Read only a 3000x3000-pixel window of band 1 from each asset so the
# transfer and memory footprint stay small.
with rasterio.open(first_date_red_url) as dataset:
    red = dataset.read(1, window=Window(0, 0, 3000, 3000)) # Window(col_off, row_off, width, height)
with rasterio.open(first_date_green_url) as dataset:
    green = dataset.read(1, window=Window(0, 0, 3000, 3000)) # Window(col_off, row_off, width, height)
with rasterio.open(first_date_blue_url) as dataset:
    blue = dataset.read(1, window=Window(0, 0, 3000, 3000)) # Window(col_off, row_off, width, height)
# -
# # 5. Visualizing the Images
# <hr style="border:1px solid #0077b9;">
# + colab={"base_uri": "https://localhost:8080/", "height": 249} id="XBOIh1ifPQEK" outputId="1fb666b0-c501-47f4-ebde-cb19032b8725"
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12, 4))
ax1.imshow(red, cmap='gray')
ax2.imshow(green, cmap='gray')
ax3.imshow(blue, cmap='gray')
fig.show()
# + id="IW8PNPNIPQEO"
def normalize(array):
    """Min-max normalize a numpy array into the range 0.0 - 1.0.

    A constant array (max == min) would otherwise divide by zero and
    produce NaN/inf pixels; return an all-zero array of the same shape
    in that case so the composite still renders.
    """
    array_min, array_max = array.min(), array.max()
    if array_max == array_min:
        return numpy.zeros_like(array, dtype=float)
    return (array - array_min) / (array_max - array_min)
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="JAOSp8VnPQEV" outputId="1c7fa67f-050e-43c0-f0c2-b4f11c0bbe95"
rgb = numpy.dstack((normalize(red), normalize(green), normalize(blue)))
plt.imshow(rgb)
# -
# # 6. Retrieving data using latlong
# <hr style="border:1px solid #0077b9;">
# A function to use latlong instead of the image row and column is defined next:
# + colab={"base_uri": "https://localhost:8080/", "height": 109} id="saQLQ7VTPQEb" outputId="091173ff-d87a-4918-d0a8-83a7d127b4fa"
# !pip install pyproj
# + id="q6WQd6AfPQEg"
from math import floor, ceil
from pyproj import Proj
def longlat2window(lon, lat, dataset):
    """
    Build a rasterio pixel Window covering a longitude/latitude bounding box.

    Args:
        lon (tuple): Tuple of min and max lon
        lat (tuple): Tuple of min and max lat
        dataset: Rasterio dataset
    Returns:
        rasterio.windows.Window
    """
    # Projection from lon/lat into the dataset's CRS coordinates.
    p = Proj(dataset.crs)
    t = dataset.transform
    # Project both corners of the bounding box into dataset (x, y) space.
    xmin, ymin = p(lon[0], lat[0])
    xmax, ymax = p(lon[1], lat[1])
    # The inverse of the affine transform maps (x, y) to fractional (col, row).
    col_min, row_min = ~t * (xmin, ymin)
    col_max, row_max = ~t * (xmax, ymax)
    # Row indices grow downward (north to south), so the maximum latitude
    # (ymax) yields the *smaller* row index: row_max is the window top and
    # row_min the window bottom.
    return Window.from_slices(rows=(floor(row_max), ceil(row_min)),
                              cols=(floor(col_min), ceil(col_max)))
# + colab={"base_uri": "https://localhost:8080/", "height": 249} id="M4vaxmO-PQEk" outputId="f3f7e348-5fa8-4ecd-8945-07728efa1dff"
# Area of interest in decimal degrees: west, north, east, south.
w = -45.90
n = -12.6
e = -45.40
s = -12.90
with rasterio.open(first_date_red_url) as dataset_red:
    # (w, e) is the longitude range and (s, n) the latitude range used to
    # compute the pixel window.
    red = dataset_red.read(1, window = longlat2window((w,e), (s,n), dataset_red))
plt.imshow(red, cmap='gray')
plt.show()
# -
# # 7. Visualizing Composed Images
# <hr style="border:1px solid #0077b9;">
# A color composition can also be made:
# + id="sc9sKLDdPQEq"
with rasterio.open(first_date_blue_url) as dataset_blue:
blue = dataset_blue.read(1, window = longlat2window((w,e), (s,n), dataset_blue))
with rasterio.open(first_date_green_url) as dataset_green:
green = dataset_green.read(1, window = longlat2window((w,e), (s,n), dataset_green))
# + colab={"base_uri": "https://localhost:8080/", "height": 267} id="rX6SWOqCPQEt" outputId="2a2457fd-f599-4a27-bd87-e7e2b5a9d317"
rgb = numpy.dstack((normalize(red), normalize(green), normalize(blue)))
plt.imshow(rgb)
# -
# # 8. References
# <hr style="border:1px solid #0077b9;">
# - [Spatio Temporal Asset Catalog Specification](https://stacspec.org/)
#
#
# - [Brazil Data Cube Python Client Library for STAC Service - GitHub Repository](https://github.com/brazil-data-cube/stac.py)
# # 9. See also the following Jupyter Notebooks
# <hr style="border:1px solid #0077b9;">
#
# * [NDVI calculation on images obtained through STAC](./stac-ndvi-calculation.ipynb)
#
#
# * [Thresholding images obtained through STAC](./stac-image-threshold.ipynb)
#
#
# * [Calculating Image Difference on images obtained through STAC](./stac-image-difference.ipynb)
| Python/stac/stac-introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# # Concept Drift - Deployer
# Deploy a streaming Concept Drift detector on a labeled stream.
#
# This function is the Deployment step for the Streaming Concept Drift Detector. It will initialize the selected drift detectors with the base_dataset's statistics and deploy the [concept_drift_streaming serverless Nuclio function](../concept_drift_streaming/concept_drift_streaming.ipynb) with them for streaming concept-drift detection on top of a labeled stream.
# ## Environment setup
import nuclio
from pprint import pprint
# + tags=[]
# %%nuclio cmd -c
python -m pip install scikit-multiflow==0.4.1
python -m pip install v3io_frames
python -m pip install nuclio-jupyter
# + tags=[]
# Define function spec
# %nuclio config spec.build.baseImage = "mlrun/ml-models"
# +
# nuclio: start-code
# +
import skmultiflow.drift_detection # We will grab our PH, DDM, EDDM algorithms from here
import numpy as np
import pandas as pd
import os
from cloudpickle import dumps, load, dump
from nuclio.triggers import V3IOStreamTrigger
from mlrun import DataItem, import_function, mlconf, MLClientCtx, mount_v3io
# For testing
import random
# -
def concept_drift_deployer(context: MLClientCtx, base_dataset:DataItem,
                           input_stream:str, output_stream:str, output_tsdb:str, tsdb_batch_size:int, callbacks:list,
                           models:list=['ddm', 'eddm', 'pagehinkley'], models_dest='models',
                           pagehinkley_threshold:float=10, ddm_warning_level:float=2, ddm_out_control_level:float=3,
                           label_col='label', prediction_col='prediction', hub_url:str=mlconf.hub_url, fn_tag:str='master'):
    """Deploy a streaming Concept Drift detector on a labeled stream

    This function is the Deployment step for the Streaming Concept Drift Detector.
    It will load the selected drift detectors and initialize them with the
    base_dataset's statistics. Then it will deploy the concept_drift_streaming
    function and pass the models to it for streaming concept-drift detection on top
    of a labeled stream.

    :param context:         MLRun context
    :param base_dataset:    Dataset containing label_col and prediction_col to initialize the detectors
    :param input_stream:    labeled stream to track.
                            Should contain label_col and prediction_col
    :param output_stream:   Output stream to push the detector's alerts
    :param output_tsdb:     Output TSDB table to allow analysis and display
    :param tsdb_batch_size: Batch size of alerts to buffer before pushing to the TSDB
    :param callbacks:       Additional rest endpoints to send the alert data to
    :param models:          List of the detectors to deploy
                            Defaults to ['ddm', 'eddm', 'pagehinkley'].
    :param models_dest:     Location for saving the detectors
                            Defaults to 'models' (in relation to artifact_path).
    :param pagehinkley_threshold:  Drift level threshold for PH detector Defaults to 10.
    :param ddm_warning_level:      Warning level alert for DDM detector Defaults to 2.
    :param ddm_out_control_level:  Drift level alert for DDM detector Defaults to 3.
    :param label_col:       Label column to be used on base_dataset and input_stream
                            Defaults to 'label'.
    :param prediction_col:  Prediction column to be used on base_dataset and input_stream
                            Defaults to 'prediction'.
    :param hub_url:         hub_url in case the default is not used, concept_drift_streaming will be loaded
                            by this url
                            Defaults to mlconf.hub_url.
    :param fn_tag:          hub tag to use
                            Defaults to 'master'
    """
    # Set the streaming function
    mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'
    mlconf.hub_url = hub_url
    fn = import_function(url='hub://concept_drift_streaming')

    # Load test dataset
    context.logger.info('Loading base dataset')
    base_df = base_dataset.as_df()
    # Per-sample error stream: 0 where the prediction matches the label,
    # 1 otherwise.  The detectors are warmed up on this stream.
    error_stream = np.where(base_df[prediction_col].values==base_df[label_col].values, 0, 1)

    # Create the selected detectors
    context.logger.info('Creating models')
    # A 'models' environment variable may override the `models` argument;
    # otherwise the argument is honored.  (Bug fix: the original code
    # discarded the argument and always deployed all three detectors.)
    env_models = os.getenv('models')
    if env_models:
        models = [model.strip() for model in env_models.split(',')]
    all_models = {'eddm': skmultiflow.drift_detection.EDDM(),
                  'pagehinkley': skmultiflow.drift_detection.PageHinkley(min_instances=len(error_stream),
                                                                         threshold=pagehinkley_threshold),
                  'ddm': skmultiflow.drift_detection.DDM(min_num_instances=len(error_stream),
                                                         warning_level=ddm_warning_level,
                                                         out_control_level=ddm_out_control_level)}
    models = {name: model for name, model in all_models.items() if name in models}

    # Initialize (warm up) the detectors with the base dataset's error stream
    context.logger.info('Streaming data to models')
    for i in range(len(error_stream)):
        for model_name, model in models.items():
            model.add_element(error_stream[i])

    # Save the warm models and point the streaming function at each pickle
    context.logger.info('Logging ready models')
    for name, model in models.items():
        data = dumps(model)
        model_file = f'{name}.pkl'
        context.log_model(f'{name}_concept_drift', body=data, labels={'framework': 'skmultiflow', 'workflow': 'concept-drift'},
                          model_file=model_file, model_dir=models_dest, tag='latest')
        fn.set_envs({f'{name}_model_path': os.path.join(context.artifact_path, models_dest, model_file)})

    # Deploy the streaming concept drift function with the warm models
    context.logger.info('Deploying Concept Drift Streaming function')
    fn.set_envs({'label_col': label_col,
                 'prediction_col': prediction_col,
                 'drift_stream': output_stream,
                 'tsdb_table': output_tsdb,
                 'pagehinkley_threshold': pagehinkley_threshold,
                 'ddm_warning_level': ddm_warning_level,
                 'ddm_out_control': ddm_out_control_level})
    fn.add_trigger('labeled_stream', V3IOStreamTrigger(url=input_stream, name='labeled_stream'))
    fn.apply(mount_v3io())
    fn.deploy(project=context.project)
# +
# nuclio: end-code
# -
# ## Local test
# A usecase based run example
from mlrun import run_local, NewTask
# +
container = 'bigdata'
base_table = os.path.join('/', container, 'network-operations')
stream_consumer_group = 'cd'
artifacts_path = os.path.join(os.getcwd(), 'artifacts')

# Built separately to avoid reusing single quotes inside the single-quoted
# f-string below, which is a SyntaxError on Python < 3.12.
inference_stream = os.path.join(base_table, 'inference_stream')

# Local test task: deploy the three detectors against the network-operations
# inference stream, writing alerts to a drift stream and a TSDB table.
task = NewTask(name='concept_drift_deployer',
               project='network-operations',
               handler=concept_drift_deployer,
               params={'models': ['ddm', 'eddm', 'pagehinkley'],
                       'label_col': 'is_error',
                       'prediction_col': 'yscore',
                       'output_tsdb': os.path.join(base_table, 'drift_tsdb'),
                       'input_stream': f'http://{os.environ["V3IO_API"]}{inference_stream}@{stream_consumer_group}',
                       'output_stream': os.path.join(base_table, 'drift_stream')},
               inputs={'base_dataset': 'store://network-operations/test_test_set_preds'},
               artifact_path=artifacts_path)
# -
run_local(task)
# ## Save function yaml
from os import path
from mlrun import run_local, NewTask, mlconf, import_function, mount_v3io, code_to_function
mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'
# +
# create job function object from notebook code
fn = code_to_function("concept_drift",
kind='job',
with_doc=True,
embed_code=True)
# add metadata (for templates and reuse)
fn.spec.default_handler = "concept_drift_deployer"
fn.spec.description = "Deploy a streaming Concept Drift detector on a labeled stream"
fn.metadata.categories = ["ml", "serve"]
fn.metadata.labels = {"author": "orz", "framework": "sklearn"}
fn.export("function.yaml")
# -
fn.apply(mount_v3io())
# ## Stream testing
fn.deploy()
fn.run(task)
| concept_drift/concept_drift.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep convolutional neural networks
#
# In the previous chapters, you got a sense
# for how to classify images with convolutional neural network (CNNs).
# Specifically, we implemented a CNN with two convolutional layers
# interleaved with pooling layers, a single fully-connected hidden layer,
# and a softmax output layer.
# That architecture loosely resembles a neural network affectionately named LeNet,
# in honor [<NAME>](http://yann.lecun.com/),
# an early pioneer of convolutional neural networks and the first
# to [reduced them to practice in 1989](http://www.mitpressjournals.org/doi/abs/10.1162/neco.1989.1.4.541)
# by training them with gradient descent (i.e. backpropagation).
# At the time, this was a fairly novel idea.
# A cadre of researchers interested in biologically-inspired learning models had taken to investigating artificial simulations of neurons as learning models.
# However, as remains true to this day, few researchers believed that real brains learn by gradient descent.
# The community of neural networks researchers had explored many other learning rules.
# LeCun demonstrated that CNNs trained by gradient descent,
# could get state-of-the-art results on the task of recognizing hand-written digits.
# These groundbreaking results put CNNs on the map.
#
# However, in the intervening years, neural networks were superseded by numerous other methods.
# Neural networks were considered slow to train,
# and there wasn't wide consensus on whether it was possible
# to train very deep neural networks from a random initialization of the weights.
# Moreover, training networks with many channels, layers,
# and parameters required excessive computation
# relative to the resources available decades ago.
# While it was possible to train a LeNet for MNIST digit classification and get good scores,
# neural networks fell out of favor on larger, real-world datasets.
#
# Instead researchers precomputed features based on a mixture of elbow grease,
# knowledge of optics, and black magic.
# A typical pattern was this:
# 1. Grab a cool dataset
# 2. Preprocess it with giant bag of predetermined feature functions
# 3. Dump the representations into a simple linear model to do the *machine learning part*.
#
# This was the state of affairs in computer vision up until 2012,
# just before deep learning began to change the world of applied machine learning.
# One of us (Zack) entered graduate school in 2013.
# A friend in graduate school summarized the state of affairs thus:
#
# If you spoke to machine learning researchers,
# they believed that machine learning was both important and beautiful.
# Elegant theories proved the properties of various classifiers.
# The field of machine learning was thriving, rigorous and eminently useful.
# However, if you spoke to a computer vision researcher, you'd hear a very different story.
# The dirty truth of image recognition, they'd tell you,
# is that the really important aspects of the ML for CV pipeline were data and features.
# A slightly cleaner dataset, or a slightly better hand-tuned feature mattered a lot to the final accuracy.
# However, the specific choice of classifier was little more than an afterthought.
# At the end of the day you could throw your features in a logistic regression model, a support vector machine, or any other classifier of choice, and they would all perform roughly the same.
#
#
#
#
#
# ## Learning the representations
#
# Another way to cast the state of affairs is that
# the most important part of the pipeline was the representation.
# And up until 2012, this part was done mechanically, based on some hard-fought intuition.
# In fact, engineering a new set of feature functions, improving results, and writing up the method was a prominent genre of paper.
#
# Another group of researchers had other plans. They believed that features themselves ought to be learned.
# Moreover they believed that to be reasonably complex, the features ought to be hierarchically composed.
# These researchers, including <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> believed that by jointly training many layers of a neural network, they might come to learn hierarchical representations of data.
# In the case of an image, the lowest layers might come to detect edges, colors, and textures.
#
# 
#
#
# Higher layers might build upon these representations to represent larger structures, like eyes, noses, blades of grass, and features.
# Yet higher layers might represent whole objects like people, airplanes, dogs, or frisbees.
# And ultimately, before the classification layer, the final hidden state might represent a compact representation of the image that summarized the contents in a space where data belonging to different categories would be linearly separable.
# ## Missing ingredient 1: data
#
# Despite the sustained interest of a committed group of researchers in learning deep representations of visual data, for a long time these ambitions were frustrated. The failures to make progress owed to a few factors. First, while this wasn't yet known, supervised deep models with many representation require large amounts of labeled training data in order to outperform classical approaches. However, given the limited storage capacity of computers and the comparatively tighter research budgets in the 1990s and prior, most research relied on tiny datasets. For example, many credible research papers relied on a small set of corpora hosted by UCI, many of which contained hundreds or a few thousand images.
#
# This changed in a big way when Fei-Fei Li presented the ImageNet database in 2009. The ImageNet dataset dwarfed all previous research datasets. It contained one million images: one thousand each from one thousand distinct classes.
#
# 
#
# This dataset pushed both computer vision and machine learning research into a new regime where the previous best methods would no longer dominate.
# ## Missing ingredient 2: hardware
#
# Deep Learning has a voracious need for computation. This is one of the main reasons why in the 90s and early 2000s algorithms based on convex optimization were the preferred way of solving problems. After all, convex algorithms have fast rates of convergence, global minima, and efficient algorithms can be found.
#
# The game changer was the availability of GPUs. They had long been tuned for graphics processing in computer games. In particular, they were optimized for high throughput 4x4 matrix-vector products, since these are needed for many computer graphics tasks. Fortunately, the math required for that is very similar to convolutional layers in deep networks. Furthermore, around that time, NVIDIA and ATI had begun optimizing GPUs for general compute operations, going as far as renaming them GPGPU (General Purpose GPUs).
#
# To provide some intuition, consider the cores of a modern microprocessor. Each of the cores is quite powerful, running at a high clock frequency, it has quite advanced and large caches (up to several MB of L3). Each core is very good at executing a very wide range of code, with branch predictors, a deep pipeline and lots of other things that make it great at executing regular programs. This apparent strength, however, is also its Achilles' heel: general purpose cores are very expensive to build. They require lots of chip area, a sophisticated support structure (memory interfaces, caching logic between cores, high speed interconnects, etc.), and they're comparatively bad at any single task. Modern laptops have up to 4 cores, and even high end servers rarely exceed 64 cores, simply because it is not cost effective.
#
# Compare that with GPUs. They consist of 100-1000 small processing elements (the details differ somewhat betwen NVIDIA, ATI, ARM and other chip vendors), often grouped into larger groups (NVIDIA calls them warps). While each core is relatively weak, running at sub-1GHz clock frequency, it is the total number of such cores that makes GPUs orders of magnitude faster than CPUs. For instance, NVIDIA's latest Volta generation offers up to 120 TFlops per chip for specialized instructions (and up to 24 TFlops for more general purpose ones), while floating point performance of CPUs has not exceeded 1 TFlop to date. The reason for why this is possible is actually quite simple: firstly, power consumption tends to grow *quadratically* with clock frequency. Hence, for the power budget of a CPU core that runs 4x faster (a typical number) you can use 16 GPU cores at 1/4 the speed, which yields 16 x 1/4 = 4x the performance. Furthermore GPU cores are much simpler (in fact, for a long time they weren't even *able* to execute general purpose code), which makes them more energy efficient. Lastly, many operations in deep learning require high memory bandwidth. Again, GPUs shine here with buses that are at least 10x as wide as many CPUs.
#
# Back to 2012. A major breakthrough came when <NAME> and <NAME>
# implemented a deep convolutional neural network that could run on GPU hardware. They realized that
# the computational bottlenecks in CNNs (convolutions and matrix multiplications) are all operations that could be parallelized in hardware. Using two NVIDIA GTX 580s with 3GB of memory (depicted below) they implemented fast convolutions. The code [cuda-convnet](https://code.google.com/archive/p/cuda-convnet/) was good enough that for several years it was the industry standard and powered the first couple years of the deep learning boom.
#
# 
#
# ## AlexNet
#
# In 2012, using their cuda-convnet implementation on an eight-layer CNN,
# Khrizhevsky, Sutskever and Hinton won the ImageNet challenge on image recognition by a wide margin.
# Their model, [introduced in this paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf), is *very* similar to the LeNet architecture from 1995.
#
# In the rest of the chapter we're going to implement a similar model to the one designed by them. Due to memory constraints on the GPU they did some wacky things to make the model fit. For example, they designed a dual-stream architecture in which half of the nodes live on each GPU. The two streams, and thus the two GPUs only communicate at certain layers. This limits the amount of overhead for keeping the two GPUs in sync with each other. Fortunately, distributed deep learning has advanced a long way in the last few years, so we won't be needing those features (except for very unusual architectures). In later sections, we'll go into greater depth on how you can speed up your networks by training on many GPUs (in AWS you can get up to 16 on a single machine with 12GB each), and how you can train on many machine simultaneously. As usual, we'll start by importing the same dependencies as in the past gluon tutorials:
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd
from mxnet import gluon
import numpy as np
mx.random.seed(1)
ctx = mx.gpu()
# ## Load up a dataset
#
# Now let's load up a dataset. This time we're going to use gluon's new `vision` package, and import the CIFAR dataset. Cifar is a much smaller color dataset, roughly the dimensions of ImageNet. It contains 50,000 training and 10,000 test images. The images belong in equal quantities to 10 categories. While this dataset is considerably smaller than the 1M image, 1k category, 256x256 ImageNet dataset, we'll use it here to demonstrate the model because we don't want to assume that you have a license for the ImageNet dataset or a machine that can store it comfortably. To give you some sense for the proportions of working with ImageNet data, we'll upsample the images to 224x224 (the size used in the original AlexNet).
def transformer(data, label):
    """Resize a CIFAR image to 224x224, reorder HWC -> CHW, cast to float32."""
    resized = mx.image.imresize(data, 224, 224)
    channels_first = mx.nd.transpose(resized, (2, 0, 1))
    return channels_first.astype(np.float32), label
# +
batch_size = 64

# CIFAR-10 train/test loaders; each image is upsampled to 224x224 by
# `transformer` to match AlexNet's expected input size.  `last_batch='discard'`
# drops the final partial batch so every batch has exactly `batch_size` items.
train_data = gluon.data.DataLoader(
    gluon.data.vision.CIFAR10('./data', train=True, transform=transformer),
    batch_size=batch_size, shuffle=True, last_batch='discard')
test_data = gluon.data.DataLoader(
    gluon.data.vision.CIFAR10('./data', train=False, transform=transformer),
    batch_size=batch_size, shuffle=False, last_batch='discard')
# -
# Sanity check: pull a single batch and inspect its shapes and dtype.
for d, l in train_data:
    break
print(d.shape, l.shape)
d.dtype
# ## The AlexNet architecture
#
# This model has some notable features.
# First, in contrast to the relatively tiny LeNet,
# AlexNet contains 8 layers of transformations,
# five convolutional layers followed by two fully connected hidden layers and an output layer.
#
# The convolutional kernels in the first convolutional layer are reasonably large at $11 \times 11$, in the second they are $5\times5$ and thereafter they are $3\times3$. Moreover, the first, second, and fifth convolutional layers are each followed by overlapping pooling operations with pool size $3\times3$ and stride ($2\times2$).
#
# Following the convolutional layers, the original AlexNet had fully-connected layers with 4096 nodes each. Using `gluon.nn.Sequential()`, we can define the entire AlexNet architecture in just 14 lines of code. Besides the specific architectural choices and the data preparation, we can recycle all of the code we'd used for LeNet verbatim.
#
# [**right now relying on a different data pipeline (the new gluon.vision). Sync this with the other chapter soon and commit to one data pipeline.**]
#
# [add dropout once we are 100% final on API]
# AlexNet as a plain sequential stack: five convolutional layers (three of
# them followed by overlapping 3x3 / stride-2 max pooling), then two
# 4096-unit dense layers and a 10-way output.
alex_net = gluon.nn.Sequential()
with alex_net.name_scope():
    #  First convolutional layer: large 11x11 kernels with stride 4 to
    #  aggressively downsample the 224x224 input.
    alex_net.add(gluon.nn.Conv2D(channels=96, kernel_size=11, strides=(4,4), activation='relu'))
    alex_net.add(gluon.nn.MaxPool2D(pool_size=3, strides=2))
    #  Second convolutional layer: 5x5 kernels.
    alex_net.add(gluon.nn.Conv2D(channels=192, kernel_size=5, activation='relu'))
    alex_net.add(gluon.nn.MaxPool2D(pool_size=3, strides=(2,2)))
    #  Third convolutional layer: 3x3 kernels from here on.
    alex_net.add(gluon.nn.Conv2D(channels=384, kernel_size=3, activation='relu'))
    #  Fourth convolutional layer
    alex_net.add(gluon.nn.Conv2D(channels=384, kernel_size=3, activation='relu'))
    #  Fifth convolutional layer
    alex_net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, activation='relu'))
    alex_net.add(gluon.nn.MaxPool2D(pool_size=3, strides=2))
    #  Flatten and apply fully connected layers
    alex_net.add(gluon.nn.Flatten())
    alex_net.add(gluon.nn.Dense(4096, activation="relu"))
    alex_net.add(gluon.nn.Dense(4096, activation="relu"))
    #  10-way output (one logit per CIFAR-10 class); softmax is applied by
    #  the loss function below, not by the network itself.
    alex_net.add(gluon.nn.Dense(10))
# ## Initialize parameters
alex_net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
# ## Optimizer
trainer = gluon.Trainer(alex_net.collect_params(), 'sgd', {'learning_rate': .001})
# ## Softmax cross-entropy loss
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# ## Evaluation loop
def evaluate_accuracy(data_iterator, net):
    """Run `net` over every batch in `data_iterator` and return the accuracy."""
    metric = mx.metric.Accuracy()
    for batch_data, batch_label in data_iterator:
        batch_data = batch_data.as_in_context(ctx)
        batch_label = batch_label.as_in_context(ctx)
        preds = nd.argmax(net(batch_data), axis=1)
        metric.update(preds=preds, labels=batch_label)
    return metric.get()[1]
# ## Training loop
# +
###########################
#  Only one epoch so tests can run quickly, increase this variable to actually run
###########################
epochs = 1
smoothing_constant = .01
for e in range(epochs):
    for i, (d, l) in enumerate(train_data):
        data = d.as_in_context(ctx)
        label = l.as_in_context(ctx)
        # Record the forward pass so autograd can compute gradients.
        with autograd.record():
            output = alex_net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # SGD update, scaled by the batch size.
        trainer.step(data.shape[0])

        ##########################
        #  Keep a moving average of the losses
        ##########################
        curr_loss = nd.mean(loss).asscalar()
        # Exponential moving average, seeded with the very first batch loss.
        moving_loss = (curr_loss if ((i == 0) and (e == 0))
                       else (1 - smoothing_constant) * moving_loss + (smoothing_constant) * curr_loss)

    # Per-epoch evaluation on both splits.
    test_accuracy = evaluate_accuracy(test_data, alex_net)
    train_accuracy = evaluate_accuracy(train_data, alex_net)
    print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" % (e, moving_loss, train_accuracy, test_accuracy))
# -
# ## Next
# [Very deep convolutional neural nets with repeating blocks](../chapter04_convolutional-neural-networks/very-deep-nets-vgg.ipynb)
# For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
| chapter04_convolutional-neural-networks/deep-cnns-alexnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Notebook to create results tables, regression of detection factors and subtests
# -
import pandas as pd
import numpy as np
import os
#import meld_classifier.old_hdf5_io as hio
import matplotlib.pyplot as plt
import meld_classifier.paths as paths
from statsmodels.stats.proportion import proportion_confint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from statsmodels.stats.multitest import multipletests
from meld_classifier.meld_cohort import MeldCohort, MeldSubject
# Input tables: subject demographics and the per-subject test predictions
# produced by the trained ensemble.
demographics_file = '/rds/project/kw350/rds-kw350-meld/meld_data/Data/demographics_qc_allgroups.csv'
folder = '/rds/project/kw350/rds-kw350-meld/experiments/co-ripa1/iteration_21-09-15/ensemble_21-09-15/fold_all/results'
res_file = os.path.join(folder,'test_results.csv')
test_df=pd.read_csv(res_file,index_col=False)
test_df.drop_duplicates(inplace=True,ignore_index=True)
test_df.groupby('group').mean()
# NOTE(review): `fp` is True when a subject has *zero* predicted clusters
# (n_clusters < 1), yet it is stored as 'any' -- this reads inverted.
# Confirm whether 'any' should mean "any cluster predicted" (n_clusters >= 1).
fp = test_df['n_clusters']<1
test_df['any']=fp
# Binarize `border`: treat more than 100 borderline vertices as a
# borderline detection.
test_df.border=test_df.border>100
#test_df.detected+=test_df.border
test_df=test_df.dropna()
test_df.detected=test_df.detected.astype(int)
#create results table
def results_row(df):
    """Build one formatted row of the results table.

    Expects `df` with a boolean `group` column (True = patient,
    False = control), numeric `detected` / `border` flags and an
    `n_clusters` count per subject.

    Returns a list of five strings:
    [sensitivity incl. borderline, sensitivity, patient median clusters (IQR),
     specificity, control median clusters (IQR)] -- the last two are 'NA'
    when the dataframe contains no controls.
    """
    # create results table
    # #%detected
    #mean = pd.DataFrame(df.groupby('group',as_index=False).mean())
    # Percentage of patients (rows where group is True) whose lesion was
    # detected, rounded to an integer percent.
    detected = np.round(np.mean(df.detected[df.group])*100).astype(int)
    # Same, but counting borderline detections (border flag) as hits.
    detected_plus = np.round(np.mean(df.detected[df.group]+df.border[df.group])*100).astype(int)
    #np.round(mean['detected'][mean['group']==True].values[0]*100).astype(int)
    # Median number of predicted clusters per subject, split by group.
    median = pd.DataFrame(df.groupby('group',as_index=False).median())
    pat_med = median['n_clusters'][median['group']==True].values[0]
    pat_iqr = [np.percentile(df[df['group']==True]['n_clusters'].values,25),
               np.percentile(df[df['group']==True]['n_clusters'].values,75)]
    try:
        # Control-group statistics; `.values[0]` raises IndexError when
        # there are no controls in `df`, which the except branch handles.
        cont_med = median['n_clusters'][median['group']==False].values[0]
        cont_iqr = [np.percentile(df[df['group']==False]['n_clusters'].values,25),
                    np.percentile(df[df['group']==False]['n_clusters'].values,75)]
        # Specificity: percentage of controls with zero predicted clusters.
        cont_spec = np.round(np.mean(df[df['group']==False]['n_clusters']==0)*100).astype(int)
        # NOTE(review): the numerators below sum `detected`/`border` over
        # *all* rows, not only patients -- confirm controls always have
        # detected == 0 and border == False so the counts are not inflated.
        row = [ f'{detected_plus}% ({df.detected.sum()+df.border.sum()}/{df.group.sum()})',
                f'{detected}% ({df.detected.sum()}/{df.group.sum()})',
                f'{pat_med} ({pat_iqr[0]}-{pat_iqr[1]})',
                f'{cont_spec}% ({(df[df["group"]==False]["n_clusters"]==0).sum()}/{(df["group"]==0).sum()})',
                f'{cont_med} ({cont_iqr[0]}-{cont_iqr[1]})']
    except IndexError:
        # No controls present: report patient columns only.
        row = [ f'{detected_plus}% ({df.detected.sum()+df.border.sum()}/{df.group.sum()})',
                f'{detected}% ({df.detected.sum()}/{df.group.sum()})',
                f'{pat_med} ({pat_iqr[0]}-{pat_iqr[1]})',
                'NA',
                'NA']
    return row
test_row = results_row(test_df)
# +
folder = '/rds/project/kw350/rds-kw350-meld/experiments/co-ripa1/iteration_21-09-17/ensemble_21-09-20/fold_all/results'
res_file = os.path.join(folder,'test_results.csv')
n = pd.read_csv(res_file,index_col=False)
n.drop_duplicates(inplace=True,ignore_index=True)
n.dropna(inplace=True)
fp = n['n_clusters']<1
n['any']=fp
n.border=n.border>100
#n.detected+=n.border
n=n.dropna()
test_df=test_df.append(n,ignore_index=True)
test_df['group']=test_df['group'].astype(bool)
full_row = results_row(test_df)
# +
folder = '/rds/project/kw350/rds-kw350-meld/experiments/co-ripa1/predict_NewSiteH27H28_21-09-20/fold_all/results'
res_file = os.path.join(folder,'test_results.csv')
n = pd.read_csv(res_file,index_col=False)
n.drop_duplicates(inplace=True,ignore_index=True)
n.dropna(inplace=True)
n.border=n.border>100
#n.detected+=n.border
n=n.dropna()
n['group'] = n['group'].astype(bool)
#test_df=test_df.append(n,ignore_index=True)
#test_df['group']=test_df['group'].astype(bool)
site1=np.zeros(len(n),dtype=bool)
for i,s in enumerate(n.ID):
if 'H27' in s:
site1[i] = 1
new_sites1 = results_row(n[site1])
new_sites2 = results_row(n[~site1])
# -
data = {'': ['Sensitivity+ (Percentage of patients detected)',
'Sensitivity (Percentage of patients detected)',
'Number of clusters in patients (Median (IQR))',
'Specificity (Percentage of controls with zero clusters',
'Number of clusters in controls (Median (IQR))',
],
"Test cohort": test_row,
"Full cohort":full_row,
"Independent site 1":new_sites1,
"Independent site 2":new_sites2,
}
df = pd.DataFrame(data)
df=df.set_index('')
df.transpose()
test_df.detected+=test_df.border
test_df.detected=test_df.detected.astype(int)
len(np.unique(n.ID)),len(n)
len(np.unique(test_df.ID)),len(test_df)
#df = pd.read_csv(res_file,index_col=False)
test_df.groupby('group').mean()
fp = test_df['n_clusters']==1
test_df['any']=fp
test_df.groupby('group').mean()
# df = test_df.append(train_df)
# df=df.reset_index()
df = test_df
#df.border = df.borded
df['detected'] = df['detected'].astype(float)
df[np.isnan(df['detected'])]
demographics = pd.read_csv(demographics_file,index_col=False)
qc_scores = pd.read_csv(os.path.join(paths.BASE_PATH,'OLD','analysis_outliers_qc_v3.csv'))
cohort = MeldCohort(hdf5_file_root='{site_code}_{group}_featurematrix_combat_6.hdf5', dataset=None)
# +
empty=[]
site=[]
scanner=[]
flair=[]
qc_score = []
for c in demographics.columns[1:]:
df[c]=0
df['qc_score'] = 0
for sub in df['ID']:
for c in demographics.columns[1:]:
df[c][df.ID==sub] = demographics[c][demographics.ID==sub].values
# empty.append(demographics[demographics.columns[1:]][demographics.ID==sub].values)
# site.append(sub.split('_')[1])
# scanner.append(sub.split('_')[2])
# subj = MeldSubject(sub, cohort=cohort)
# flair.append('FLAIR' in subj.get_feature_list()[-1])
df['qc_score'][df.ID==sub] = qc_scores['perc_outliers_rois'][qc_scores.ID==sub].values[0]
#qc_score.append(qc_scores['perc_outliers_rois'][qc_scores.ID==sub].values[0])
# empty=np.ndarray.squeeze(np.array(empty))
# -
n_site={}
for site in np.unique(df.Site):
n_site[site]={}
site_grouped=pd.DataFrame(df.groupby(['Site','Scanner']).count().ID[site].reset_index(name = "Group_Count"))
for scanner in site_grouped.Scanner:
n_site[site][scanner]=site_grouped.Group_Count[site_grouped.Scanner==scanner].values[0]
site_size = []
for sub in df['ID']:
site_size.append(n_site[df.Site[df.ID==sub].values[0]][df.Scanner[df.ID==sub].values[0]])
df['Site_scanner_size']=site_size
# +
histo ='FCD IIIB'
list_fcd=['FCD3', 'FCD 3', 'FCDIII', 'FCD III']
if any(key in histo for key in list_fcd):
print(histo)
# -
#df.border=df.border>100
#df.detected+=df.border
pat=df[df['group']=='patient'].copy()
#df = df[df['site']!='H7']
site_r=pat.groupby('Site').mean()
# +
# fig,axes=plt.subplots(2,2)
# axes[0,0].hist(site_r.detected)
# axes[0,0].set_xlim([0,1])
# axes[0,0].set_title('Baseline detected')
# axes[0,1].hist(site_r.border)
# axes[0,1].set_xlim([0,1])
# axes[0,1].set_title('Baseline borderzone')
# axes[1,0].scatter(site_r.border,site_r.detected,
# c=np.array(site_r.index=='H24').astype(int))
# axes[1,0].set_title(f'Detected vs borderzone,\nR = {np.round(np.corrcoef(site_r.border,site_r.detected)[0,1],2)}')
# axes[1,0].set_ylabel('Detected (sensitivity)')
# axes[1,0].set_xlabel('Borderzone')
# axes[1,1].hist(site_r.detected+site_r.border)
# axes[1,1].set_xlim([0,1])
# axes[1,1].set_title('Detected + border')
# plt.tight_layout()
# -
#filter to see what the issue with FLAIR & 3T was
# # df = df[df['flair']]
# # df=df[df['group']]
# # df=df[df['scanner']=='3T']
# # tmp=pd.DataFrame(df.groupby('site').mean()['detected'])
# # tmp['counts'] = df.groupby('site').count()['detected']
# print(np.mean(df['detected'][df['site']!='H24']),np.mean(df['detected']))
y = df['detected'][df['group']]
#site size
df
# +
feature_sets = [[
'Age at preoperative',
'Sex', 'Ever reported MRI negative',
'Hemisphere',#'Lobe',
'Scanner','FLAIR','Surgery',
'detected','FreeSurfer'],
[
'Histology','Seizure free','detected'
],
]
features_of_interest = feature_sets[0]
# -
#.columns
# +
n_perm=10000
import matplotlib
font = {'family' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
np.random.seed(42)
fig, ax = plt.subplots(1,2,figsize=(20,8))
for f, features_of_interest in enumerate(feature_sets):
x=df[features_of_interest][df['group']=='patient']
if f==0:
x=x.fillna(0)
x['Age at preoperative'] = (x['Age at preoperative']).astype(float)
x['Hemisphere']=x['Hemisphere']=='lh'
x['Scanner']=x['Scanner']=='15T'
x['FreeSurfer']=x['FreeSurfer']==6.
x=x.rename(columns={'Age at preoperative':'Age at scan',"Scanner":"MRI:1.5T",
"Sex":"Sex:Male","Hemisphere":"Hemisphere:left",
"FreeSurfer":"FreeSurfer:v6"})
x['1.5T*FLAIR'] = x['MRI:1.5T']*x['FLAIR']
x=x[['Age at scan', 'Sex:Male', 'Ever reported MRI negative',
'Hemisphere:left', 'MRI:1.5T', 'FLAIR','1.5T*FLAIR', 'Surgery', 'FreeSurfer:v6',
'detected']]
else:
x=x.dropna()
x['Seizure free'] = x['Seizure free'].astype(int)
y = x['detected'].astype(int)
x=x.drop(columns=['detected'])
X=pd.get_dummies(x)
if f==1:
for c in X.columns:
if 'Histology' in c:
new_c = ' '.join(c.split('_')[1:])
X=X.rename(columns={c:new_c})
clf = LogisticRegression(solver='liblinear')
clf.fit(X,y)
coefs = clf.coef_.ravel()
#permutation-based statistics
perm_coefs = np.zeros((n_perm,clf.coef_.shape[1]))
y_perm=np.array(y).copy()
for perm in np.arange(n_perm):
np.random.shuffle(y_perm)
clf.fit(X,y_perm)
perm_coefs[perm]=clf.coef_
pvals=(0.5-np.abs(0.5-np.mean(coefs>perm_coefs ,axis=0)))*2
pd.DataFrame([coefs,pvals], columns=X.columns)
sig,pcorr,_,_=multipletests(pvals,0.05,method='fdr_bh')
y_pos = np.arange(len(coefs))
#set colours
colors=[]
for i in y_pos:
if coefs[i]>0:
colors.append([1,0,0,0.4+0.6*sig[i]])
else:
colors.append([0,0,1,0.4+0.6*sig[i]])
ax[f].barh(y_pos, coefs,color=colors , align='center')
ax[f].set_yticks(y_pos)
ax[f].set_yticklabels(X.columns);
ax[f].invert_yaxis() # labels read top-to-bottom
ax[f].set_xlabel('Logistic coefficient\nLess likely detected -> more likely detected')
ax[f].set_title('');
ax[f].text(-0.1,1,['A','B'][f],transform = ax[f].transAxes,fontsize=30)
ax[f].set_yticklabels(['Seizure free', 'FCD I', 'FCD IIA', 'FCD IIB', 'FCD III']);
plt.tight_layout()
fig.savefig('logistic_regression.pdf')
# -
fig.savefig('Figure_logistic_regression.png')
# +
# import ptitprince as pt
# pat['log_a'] = np.log(pat['Lesion area'])
# plt.figure(figsize=(8,8))
# pt.RainCloud(y='log_a',x='Site',data=pat)
# #pat.groupby('Site').median()
# -
pat = df[df['group']=='patient']
disp_df=pd.DataFrame(100*pat.groupby('Scanner').mean()['detected']).round(1)
disp_df['count'] = pat.groupby('Scanner').count()['detected']
disp_df
# +
disp_df=pd.DataFrame(100*pat.groupby(['Scanner','FLAIR']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['Scanner','FLAIR']).count()['detected']
disp_df
# -
disp_df=pd.DataFrame(100*pat.groupby(['Seizure free']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['Seizure free']).count()['detected']
disp_df
det=pat[np.logical_and(pat['FLAIR'],np.logical_and(pat['Scanner']=='3T',np.logical_and(pat['Seizure free']==1,
np.logical_or(pat['Histology']=='FCD_2B',
pat['Histology']=='FCD_2A'))))]
det=pat[np.logical_and(pat['FLAIR'],np.logical_and(pat['Scanner']=='3T',np.logical_and(pat['Seizure free']==1,
pat['Histology']=='FCD_2B')))]
det=pat[np.logical_and(pat['Seizure free']==1,np.logical_and(pat['FLAIR'],
pat['Histology']=='FCD_2B'))]
np.mean(det.detected)
# +
pat['fcd_2_seizure_free'] = np.logical_and(pat['Seizure free']==1,
np.logical_or(pat['Histology']=='FCD_2B',
pat['Histology']=='FCD_2A'))
pat['fcd_2'] = np.logical_or(pat['Histology']=='FCD_2B',
pat['Histology']=='FCD_2A')
disp_df=pd.DataFrame(100*pat.groupby(['fcd_2_seizure_free']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['fcd_2_seizure_free']).count()['detected']
disp_df
# -
pat.Histology[pat.Histology.isna()]='None'
# +
disp_df=pd.DataFrame(100*pat.groupby(['Histology']).mean()['detected']).round(1)
disp_df['n patients'] = pat.groupby(['Histology']).count()['detected']
disp_df.rename(columns={'detected':'% Detected'})
# -
disp_df=pd.DataFrame(100*pat.groupby(['Ever reported MRI negative']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['Ever reported MRI negative']).count()['detected']
disp_df
pat['paediatric'] = pat['Age at preoperative']<18
disp_df=pd.DataFrame(100*pat.groupby(['paediatric']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['paediatric']).count()['detected']
disp_df
disp_df=pd.DataFrame(100*pat.groupby(['flair','scanner','Histology']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['flair','scanner','Histology']).count()['detected']
disp_df
# Seizure-freedom analysis: keep only patients with a recorded outcome.
sf_df = pat[~pat['Seizure free'].isna()]
sf_df['Seizure free']=sf_df['Seizure free'].astype(int)
# clip the number of predicted clusters to the 0-4 range
sf_df['n_clusters']=np.clip(sf_df['n_clusters'],0,4)
# flag whether the lesion lobe is temporal
sf_df['t_ext'] = sf_df['Lobe']=='temporal'
#sf_df = sf_df[['detected','Seizure free','n_clusters']]
# seizure-freedom rate and subject count for temporal vs non-temporal groups
disp_df=pd.DataFrame(sf_df.groupby(['t_ext']).mean()['Seizure free'])
disp_df['count'] = sf_df.groupby(['t_ext']).count()['Seizure free']
# NOTE(review): `st` (scipy.stats) is only imported in a later cell, so this
# line raises NameError if the notebook is run strictly top-to-bottom —
# confirm the intended execution order.
st.fisher_exact(np.array([disp_df['Seizure free'],disp_df['count']-disp_df['Seizure free']]))
disp_df=pd.DataFrame(sf_df.groupby(['t_ext']).mean()['Seizure free'])
disp_df
plt.hist(pat.n_clusters,15)
plt.xlabel('n clusters')
plt.ylabel('count')
#sf_df['n_clusters'] = sf_df['n_clusters']+sf_df['detected']
disp_df=pd.DataFrame(sf_df.groupby(['n_clusters']).mean()['Seizure free'])
disp_df['count'] = sf_df.groupby(['n_clusters']).count()['Seizure free']
disp_df['std']=sf_df.groupby(['n_clusters']).std()['Seizure free']
disp_df
# +
import scipy.stats as st
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
contingency_table = np.array([disp_df['Seizure free']*disp_df['count'],disp_df['count']-disp_df['Seizure free']*disp_df['count']])
#print(st.chisquare(contingency_table.T))
mc=MultiComparison(sf_df['Seizure free'],sf_df['n_clusters'])
result=mc.tukeyhsd()
print(result)
# +
e=[]
for k in np.arange(len(disp_df)):
l,u=proportion_confint(disp_df['Seizure free'][k]*disp_df['count'][k],disp_df['count'][k])
e.append((u-l)/2)
plt.errorbar(disp_df.index,disp_df['Seizure free'],yerr=e,fmt='o')
plt.xticks(np.arange(5));
# -
disp_df['Seizure free'][k]*disp_df['count'][k]
# +
disp_df=pd.DataFrame(100*pat.groupby(['site']).mean()['detected']).round(1)
disp_df['count'] = pat.groupby(['site']).count()['detected']
site=[]
for sub in train_df['ID']:
site.append(sub.split('_')[1])
train_df['site']=site
train_pat = train_df[train_df['group']==1]
trdisp_df=pd.DataFrame(100*train_pat.groupby(['site']).mean()['detected']).round(1)
trdisp_df['count'] = train_pat.groupby(['site']).count()['detected']
disp_df['detected_train']=trdisp_df['detected']
disp_df['count_train']=trdisp_df['count']
disp_df.sort_values('detected')
#plt.scatter(disp_df['detected'],disp_df['count'])
disp_df.sort_values('detected')
# -
test_df = pd.read_csv(os.path.join(folder,'test_results.csv'))
train_df = pd.read_csv(os.path.join(folder,'train_results.csv'))
combi = test_df.append(train_df)
combi_pat = combi[combi.group]
# +
cohort_sizes=[20,40,60,100,150,200,400]
res = np.zeros((len(cohort_sizes),1000))
n_subs= np.zeros((len(cohort_sizes),1000))
for k,n in enumerate(cohort_sizes):
for p in np.arange(1000):
subs=np.sort(np.random.choice(np.arange(len(combi_pat)),n))
bool_ = np.zeros(len(combi_pat)).astype(bool)
bool_[subs]=1
correct=np.random.random(n)<0.6
res[k,p] = np.mean(correct)
n_subs[k,p] = n
# +
# res=np.zeros(100000)
# n_subs=np.zeros(100000).astype(int)
# for k in np.arange(100000):
# n_subs[k] = np.random.choice(np.arange(390)+11)
# res[k] = np.mean(np.random.random(n_subs[k])<0.6)
# from pygam import LinearGAM
# gam = LinearGAM()
# gam = LinearGAM().fit(n_subs, res)
# plt.scatter(n_subs,res,alpha=0.01)
# xx = np.arange(400)+1
# plt.plot(xx,gam.confidence_intervals( X=xx, width=.95), c='r', ls='--')
# plt.ylim([0.4,0.8])
# -
import ptitprince as pt
import matplotlib.cm as cm
colors=cm.Set2(np.arange(len(cohort_sizes)))
my_pal={}
k=-1
for c in np.arange(401):
if c in cohort_sizes:
k+=1
my_pal[c]=colors[k]
else:
my_pal[c]=colors[k]
#matplotlib.rc('font', **font)
fig, ax = plt.subplots(figsize=(10,6))
ax.plot(cohort_sizes,100*np.percentile(res,5,axis=1),'r')
ax.plot(cohort_sizes,100*np.percentile(res,95,axis=1),'r')
df_sense=pd.DataFrame(np.array([100*res.ravel(),n_subs.ravel()]).T,
columns=['sensitivity','n_subs'])
pt.RainCloud(y='sensitivity',x='n_subs',data=df_sense,
ax=ax,order=np.arange(401),width_viol = 20, width_box = 10,
jitter=5,palette=my_pal)
ax.set_xlim([0,411])
ax.set_xticks(cohort_sizes);
ax.set_xticklabels(cohort_sizes,rotation=90,ha='center');
confidence_intervals=np.zeros((200,2))
confidence_intervals_90=np.zeros((200,2))
for n in np.arange(200):
confidence_intervals[n]=proportion_confint(0.6*(n+1),n+1)
confidence_intervals_90[n]=proportion_confint(0.6*(n+1),n+1,alpha=0.1)
# +
plt.plot(np.arange(200)+1,100*np.ones(200)*0.6,label='True sensitivity')
plt.plot(np.arange(200)+1,100*confidence_intervals,'r')
plt.plot(np.arange(200)+1,100*confidence_intervals_90,'g')
plt.plot([0],[0],'r',label='95% confidence interval')
plt.plot([0],[0],'g',label='90% confidence interval')
plt.xlabel('Number of subjects')
plt.ylabel('Sensitivity (%)')
plt.ylim([40,80])
plt.legend()
# -
| notebooks/read_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Cla0001/CPEN-21A-CPE-1-1/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GKtJFC_UjHtJ"
# ## Problem Statement 1
# + colab={"base_uri": "https://localhost:8080/"} id="444againjK6F" outputId="2260f452-0490-4fb1-f738-604279e08e2d"
name = "<NAME>"
print(name)
# + colab={"base_uri": "https://localhost:8080/"} id="_8P__wi8jfc7" outputId="44649e8c-c92f-4c55-a983-c85913f415bd"
SN = 202101695
print(SN)
# + colab={"base_uri": "https://localhost:8080/"} id="lXk2bPurlwYs" outputId="3fea7620-4e43-4417-d5c4-459255ab7974"
age = 20
print(age)
# + colab={"base_uri": "https://localhost:8080/"} id="W12QgA51l7br" outputId="7b324848-bef9-4683-84b5-500d7cbf37d2"
birthday = "October",27,2001
print(birthday)
# + colab={"base_uri": "https://localhost:8080/"} id="6kqYhOxnnkaG" outputId="554faaf9-8d97-4107-e6fd-f2ba0779d2ff"
brgy = "SanJuan I,"
mun = "Noveleta,"
prov = "Cavite"
print(brgy+mun+prov)
# + colab={"base_uri": "https://localhost:8080/"} id="RzClDJxUoefa" outputId="6939d9ca-a259-4c48-9861-abfc320247fc"
a = "Bachelor "
b = "of "
c = "Science "
d = "in "
e = "Computer "
f = "Engineering"
print(a+b+c+d+e+f)
# + colab={"base_uri": "https://localhost:8080/"} id="JDou4bvyp7gk" outputId="1ef936ac-8da0-452a-91be-860e9514a017"
GWA = 95
print(GWA)
# + [markdown] id="PaMcB8PtqMX7"
# ## Problem Statement 2
# + colab={"base_uri": "https://localhost:8080/"} id="qeI64rz9qPwR" outputId="a41d0878-4d38-4576-c232-7073ba801bc1"
# Problem Statement 2: evaluate boolean expressions with n = 4 and answ = "Y".
n=4
answ= "Y"
print((2<n) and (n<6))              # True and True  -> True
print((2<n) or (n==6))              # True or False  -> True
print(not(2<n) or (n==6))           # False or False -> False
print(not(n<6))                     # not True       -> False
print((answ=="Y") or (answ=="Y"))   # True or True   -> True
print((answ=="Y") and (answ=="Y"))  # True and True  -> True
print(not(answ=="Y"))               # not True       -> False
# + [markdown] id="EYH93bBmu1v4"
# ## Problem Statement 3
# + colab={"base_uri": "https://localhost:8080/"} id="W-U0yltFu6bX" outputId="eb1013d8-9df1-4145-a726-0be494e4b22e"
# Problem Statement 3: arithmetic with mixed signs — /, //, % and ** precedence.
x = 2
y =-3
w = 7
z =-10
print(x/y)              # true division
print(w/y/x)            # left-to-right: (7 / -3) / 2
print(z/y%x)            # / and % share precedence, left-to-right: (-10 / -3) % 2
print(x%-y*w)           # (2 % 3) * 7 — unary minus binds tighter than %
print(x%y)              # Python's % result takes the sign of the divisor
print(z%w-y/x*(5+5))    # (-10 % 7) - (-3 / 2) * 10
print((9)-x%(2+y))      # 9 - (2 % -1)
print(z//w)             # floor division rounds toward negative infinity
print((2+y)**(2))       # (-1) ** 2
print(w/x*(2))          # (7 / 2) * 2
| Midterm_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tC_ulOVlqCY4"
# Use of libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor, plot_tree
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split as split
from sklearn import preprocessing
import warnings
from sys import modules
warnings.filterwarnings('ignore')
# %matplotlib inline
# + [markdown] id="aVX_KEaeutqh"
# ##**Airline fare - EDA and Prediction**
#
# Our data set contains information about flights and details such as date, route and destination,
# And we want to predict the price of the flight.
# The data are on flights in India, during the year 2019.
#
# Given that flight prices today are unpredictable, we will try to check what factors affect the prices of airline tickets.
#
# And perform price prediction using Linear Regression, KNN and Decision Tree Regressor.
#
# The data set was taken from Kagel.
#
# + [markdown] id="iEueaN_8brbK"
# ####**Loading the Data and some info**
# + id="KTBmf79XszmN"
airline_data = pd.read_csv('Airline_fare.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 250} id="zf_v1fuqtAuQ" outputId="03a3dc6b-19b0-482f-9317-39d873f8760d"
airline_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="nuJX_ztGt_2a" outputId="ba6c547f-f566-4a63-9cb3-a17e786f6b41"
airline_data.info()
# + [markdown] id="kvZTH5hOTAB2"
# ####**Dealing with missing and duplicated values**
# + colab={"base_uri": "https://localhost:8080/"} id="Nkuu_Owu3Grn" outputId="ad65c037-e58f-4347-96e0-f2975920450d"
# checking for NaN values
airline_data.isnull().sum()
# + id="1Hv49BR93SNr"
# there is only one row with null we decide to drop it
airline_data[airline_data['Route'].isnull() == True] # only row index 9039 is null also for Total_Stops
airline_data.dropna(axis='rows', inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="0wi4C8SD6wQ8" outputId="510b4f45-a9fb-478d-8f94-185dfbad37ef"
# checking for duplicated rows
a = airline_data.duplicated().sum()
print(f'There are {a} duplicated rows, we decide to drop them from the data.')
airline_data.drop_duplicates(keep= 'first', inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="ozXQkEoHDs7M" outputId="a45cb905-21c8-4320-ba5c-02c5bd82fcc0"
airline_data.duplicated().sum()
# + [markdown] id="W6xBAm3fxznK"
# ####**EDA and Data Preprocessing**
# + [markdown] id="PJUP5GGiV2kO"
# Flights by airlines -
#
# We compare the total flights by airlines for this data set.
# after we saw that there is same airline company but with a different budget we decide to add another column with the airline budget.
# + colab={"base_uri": "https://localhost:8080/", "height": 410} id="cUnrvfX2yjZm" outputId="60aad1c0-6da9-4efb-fa94-2e8f662212a0"
plt.figure(figsize=(8,6))
sns.set(style="darkgrid")
airline_data['Airline'].value_counts().sort_values(ascending=True).plot(kind ='barh')
plt.title('Flights by airlines', fontsize=13)
plt.xlabel('Total flights')
plt.show()
# + id="SJ5yBTIrzcVx"
# splitting the departure time to hours and minutes
airline_data['dept_hour'] = pd.to_datetime(airline_data.Dep_Time).dt.hour
airline_data['dept_min'] = pd.to_datetime(airline_data.Dep_Time).dt.minute
# + id="dkPNL8O4aWMi"
# changing the departure time to a period of the day
airline_data['Dep_Time'] = pd.to_datetime(airline_data["Dep_Time"]).dt.hour
def changing_hour_dpet(row):
    """Map an hour of day (0-23) to a named period of the day.

    Every hour 0-23 is covered; any other value falls through and
    yields None, matching the original if/elif chain.
    """
    periods = (
        ('Early Morning', {4, 5, 6, 7}),
        ('Morning', {8, 9, 10, 11}),
        ('Noon', {12, 13, 14, 15}),
        ('Afternoon', {16, 17, 18}),
        ('Evening', {19, 20, 21, 22}),
        ('Night', {23, 0, 1, 2, 3}),
    )
    for label, hours in periods:
        if row in hours:
            return label
    return None
airline_data['Dep_Time'] = airline_data['Dep_Time'].apply(changing_hour_dpet)
# + [markdown] id="iFqPJb3IQKyv"
# Mean price by dep time -
#
# We decide to use Log Transform technique, to convert the distribution to a normal distribution.
#
# we took the log of the prices and use these values as the column price.
#
# We can see that the price is higher in the afternoon and morning.
# The lowest prices are at night; we also saw that there are fewer flights at night.
# + colab={"base_uri": "https://localhost:8080/", "height": 427} id="OXd0KYNIwf0C" outputId="8f9f2bb1-8b56-40ec-d28e-8164ab81ac17"
# Log transform: convert the price to log1p scale for normalisation/scaling
airline_data['log_price'] = np.log1p(airline_data.Price)
airline_data.drop('Price', axis = 'columns', inplace = True)
plt.figure(figsize=(8,6))
sns.set(style="darkgrid")
airline_data.groupby('Dep_Time')['log_price'].mean().plot()
plt.title('Mean price by dep time')
plt.show()
# save the figure
# NOTE(review): savefig is called after plt.show(), which in most backends
# closes the figure, so the saved PNG is likely blank — move before show().
plt.savefig('mean price by dept.png')
# + [markdown] id="zQ5ranI8dcny"
# Distribution Price
#
# The price of 50% from the flights in our data set is around 8200-9000 in Indian currency.
# + colab={"base_uri": "https://localhost:8080/", "height": 411} id="HHt_yf1b53g2" outputId="d377109d-ead2-4b79-8148-9c7b0ed48965"
# Histogram of the log1p-transformed price distribution.
plt.figure(figsize=(8,6))
airline_data.log_price.hist(bins=50)
plt.title('Distribution Price')
plt.show()
# NOTE(review): called after plt.show(), so 'Dist Price.png' is likely saved
# blank — move the savefig call before show().
plt.savefig('Dist Price.png')
# + [markdown] id="gOeBccuZd1Tn"
# Price Describe
# + colab={"base_uri": "https://localhost:8080/"} id="rqdqlVYIHwms" outputId="4bc5a6a2-0dab-4583-dad2-90f6891f2c14"
np.expm1(airline_data['log_price']).describe()
# + id="2Nib3zCPzxKJ"
# changing the total_stops column to int type
airline_data = (airline_data.replace("non-stop", 0).replace("1 stop", 1).replace("2 stops", 2).replace("3 stops", 3).replace("4 stops", 4))
# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="DWHOh7wVJ1ZB" outputId="31071866-2f5e-4c89-e825-d38a89d6c91a"
sns.pairplot(airline_data[['log_price','Total_Stops', 'Duration','dept_hour']], height=3)
# plt.savefig('pairplot Price.png')
plt.show()
# + [markdown] id="7vfD-q4nzGwa"
# Since the data contains flights from many different airlines, but not in a balanced way, we chose to categorize the companies into 3 categories and present the information - low cost, full service and business class.
# + id="kmSP0LKnXTxL"
def change_airline(row):
    """Return the budget tier for an airline name.

    Tiers are 'low_cost', 'full_service' or 'business'; an unrecognised
    airline yields None (same fall-through as the original).
    """
    tiers = {
        'low_cost': ('IndiGo', 'SpiceJet', 'Air Asia', 'GoAir', 'Trujet'),
        'full_service': ('Jet Airways', 'Vistara', 'Air India', 'Multiple carriers'),
        'business': ('Jet Airways Business', 'Multiple carriers Premium economy', 'Vistara Premium economy'),
    }
    for tier, carriers in tiers.items():
        if row in carriers:
            return tier
    return None
airline_data['Airline_budget'] = airline_data['Airline'].apply(change_airline)
# + [markdown] id="_j-Tj6w7J5fm"
# After splitting into the airline budget, most flights (60%) are full service, 30% low cost and the rest in business class.
#
# and here we can see the mean price by airline budget.
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="W_9uvZJNcXSA" outputId="d8bfed1c-3c97-4c28-db6f-4ca399aeaf90"
airline_data.groupby('Airline_budget')['log_price'].mean().sort_values(ascending=True).plot(kind='barh')
plt.title('Price by airline budget')
plt.xlabel('Log price')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="OH6VcHA3fkBJ" outputId="b8fe80bd-7508-4e56-f23c-d471c8cbb7b0"
plt.figure(figsize=(18 ,4))
sns.scatterplot(data=airline_data,
x="Airline", y="log_price", hue="Airline")
plt.title('Price VS Airlines', fontsize=18)
plt.xticks(rotation = 50)
plt.legend(loc='upper right')
plt.show()
# + [markdown] id="ve4sT6f7eSne"
# Price by month
#
# Our data set does not contain flights for every month of the year.
# In April there are fewer flights than in the other months (perhaps because of Indian holidays) and there is a small decrease in price for this month.
#
# + id="I-t-0A-mvUcX"
# adding separate columns of day, month and year
airline_data['day'] = pd.to_datetime(airline_data.Date_of_Journey).dt.day
airline_data['month'] = pd.to_datetime(airline_data.Date_of_Journey).dt.month
airline_data['year'] = pd.to_datetime(airline_data.Date_of_Journey).dt.year
airline_data.drop(columns=['Date_of_Journey','year'], axis='columns', inplace= True)
# + id="p118qLQYkyYT" colab={"base_uri": "https://localhost:8080/", "height": 301} outputId="66e67ca8-0e42-4bbc-eb66-c8f18022934a"
sns.set_theme(style="whitegrid")
ax = sns.lineplot(x='month', y='log_price', data=airline_data)
plt.title('Price by Month')
plt.show()
# + [markdown] id="mrVSghSrh8WM"
# Distribution flights by stops -
#
# More than 50% of the flights in our data have one stop,
#
# and 33% of the flights are direct.
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="ncolwBqbg7wB" outputId="f7b40ab0-f09e-4dd4-d780-f3b1e15d4ca4"
airline_data['Total_Stops'].value_counts(normalize=True).sort_index().plot(kind='bar')
plt.title('Distribution flights by stops')
plt.xlabel('Stops')
plt.ylabel('Amount')
plt.show()
# + id="r7zsa0XDWp2R"
# change Duration to minutes
airline_data['Duration'] = (pd.to_timedelta(airline_data['Duration']).dt.seconds // 60).astype(int)
# + colab={"base_uri": "https://localhost:8080/"} id="KUhdso8Oiknw" outputId="1807eeb5-fa55-429e-9395-db474589ead1"
airline_data.groupby('Total_Stops')['Duration'].mean()
# + [markdown] id="Jo5zXzZ5gzQC"
# ####**Detecting Outliers**
# + colab={"base_uri": "https://localhost:8080/", "height": 410} id="2jVGudKxNeVj" outputId="7e471062-8051-4ea0-84dc-935b97494046"
# check outliers fo price
plt.figure(figsize=(7,6))
ax = sns.boxplot(x=airline_data["log_price"], palette="Set3").set_title('Distribution price')
# + colab={"base_uri": "https://localhost:8080/", "height": 410} id="-4olxpqRhG0D" outputId="93100447-59fc-4b19-e93b-6f33022ce895"
# check outliers fo stops
plt.figure(figsize=(6,6))
ax = sns.boxplot(x=airline_data['Total_Stops'], palette="Set3").set_title('Distribution stops')
# + id="judnAB2ZjU2l"
# moving outliers for price
airline_data = airline_data[airline_data['log_price'] < 10.5]
# + colab={"base_uri": "https://localhost:8080/"} id="7xWNTYc1jp1m" outputId="2c6dbe6b-c41b-476d-d123-c5cccd8270d2"
a,b = airline_data.shape
print(f'The final data for the model contain {a} rows and {b} columns.')
# + [markdown] id="bBTWp6twrTrJ"
# ###**Feature engineering and Preparing data for the model**
# + id="2FV4QGuiVa2z"
# dropping irrelevant columns
airline_data.drop(columns=['Airline','Additional Info','Arrival_Time','dept_min'], axis='columns', inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="8sk8_bxqj6UZ" outputId="fefa249d-1b85-4eb8-c8b9-fc48cf7c7a8b"
a,b = airline_data.shape
print(f'The data for now is {a} rows and {b} columns')
# + id="w_xV6nX1-g7s"
# # option 1 for dealing with route column
airline_data[['R0','R1','R2','R3','R4','R5']] = airline_data['Route'].str.split('?', expand = True)
airline_data = airline_data.fillna('None')
airline_data.drop('Route', axis='columns', inplace= True)
# + id="o59JGEFpFP3f" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="90e8e17f-87b2-4fc3-91e0-22fb82f58e98"
airline_dummies = airline_data.copy()
airline_dummies.drop(columns=['R0','R5','R4'], axis='columns', inplace=True)
airline_dummies.head(3)
# + id="xFVfeQe4dinp"
# Dealing with category columns
# Columns to one-hot encode (the 'coulmns' typo is kept on purpose — the
# name is referenced again when these columns are dropped below).
category_coulmns = ['Airline_budget','Source','Destination','Dep_Time','R1','R2','R3']
def category_change(name) :
    """One-hot encode a column of the global `airline_dummies` frame.

    `drop_first=True` drops one level per category to avoid collinear
    dummy columns.  NOTE(review): the parameter is rebound to the result,
    shadowing the column name — works, but confirm before extending.
    """
    name = pd.get_dummies(airline_dummies[name], drop_first= True)
    return name
# Airline = category_change('Airline')
Destination = category_change('Destination')
Dep_Time = category_change('Dep_Time')
Airline_budget = category_change('Airline_budget')
Source = category_change('Source')
# R0 = category_change('R0')
R1 = category_change('R1')
R2 = category_change('R2')
R3 = category_change('R3')
# R4 = category_change('R4')
# R5 = category_change('R5')
airline_dummies.drop(columns=category_coulmns, inplace=True)
# + id="h0Td14tvgxUk"
airline_dummies = pd.concat([airline_dummies,Airline_budget,Source,Destination,Dep_Time,R1,R2,R3], axis=1)
airline_dummies.head()
# + [markdown] id="aXbiF-Avde3Y"
# ###**Linear Regression Model**
#
# In this model we used dummies for the categories columns.
#
#
#
#
#
# + id="wICCkKLhjgbl"
# Undo the log1p price transform and report RMSE on the real price scale.
def transrome_logprice(columnreal, columnpred):
    """Return the RMSE between true and predicted prices.

    Both inputs are log1p-transformed prices; expm1 maps them back to
    the original price scale before the error is computed.
    """
    actual = np.expm1(columnreal)
    predicted = np.expm1(columnpred)
    return mean_squared_error(y_true=actual, y_pred=predicted, squared=False)
# + colab={"base_uri": "https://localhost:8080/"} id="tO47wJTZKvAv" outputId="6ee8afc0-c569-467b-e983-eb524bca38d0"
fare_train, fare_test = split(airline_dummies, test_size=0.3,
random_state=123456)
def train_test(df):
    """Split the module-level fare_train/fare_test frames into X and y.

    Returns (X_train, X_test, y_train, y_test), where 'log_price' is the
    target and every remaining column is a feature.

    NOTE(review): the `df` parameter is ignored — the function closes
    over the global fare_train/fare_test created by `split` above;
    confirm this is intentional before refactoring.
    """
    X_train = fare_train.drop('log_price', axis=1)
    y_train = fare_train['log_price']
    X_test = fare_test.drop('log_price', axis=1)
    y_test = fare_test['log_price']
    return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test(airline_dummies)
print(f'Train size:\nInput-> {X_train.shape}, Output-> {y_train.shape[0]}')
print(f'Test size:\nInput-> {X_test.shape} Output-> {X_test.shape[0]}')
print("~"*40)
fare_lm = LinearRegression().fit(X_train, y_train)
# print(f'''The intercept is {fare_lm.intercept_:.3f}\nand the coefficient is {list(zip(X_train.columns, fare_lm.coef_))} ''')
def model_lr(X_train, y_train, df=fare_train):
    """Predict log prices with the fitted `fare_lm` model and print the RMSLE.

    Side effect: adds/overwrites a 'price pred.' column on `df`.

    NOTE(review): the default `df=fare_train` is bound once at definition
    time, and `y_train` is unused (the target is read from
    df['log_price']) — confirm both are intentional.
    """
    df['price pred.'] = fare_lm.predict(X_train)
    # print(f'Score of train is: {fare_lm.score(X_train, y_train):.3f}')
    # RMSE computed on the log1p scale, i.e. an RMSLE-style error
    rmse = mean_squared_error(y_true=df['log_price'],
                              y_pred=df['price pred.'],
                              squared=False)
    print(f"The RMSLE is = {rmse:.4f}")
    # return df['price pred.']
# function to transform from log price to real price
def transrome_logprice(columnreal, columnpred):
    """Return the RMSE between true and predicted prices on the real scale.

    Both inputs hold log1p-transformed prices (see the `log_price`
    column), so the correct inverse is expm1, not exp: exp(log1p(x))
    equals x + 1.  The constant offset cancels in the differences, so
    the RMSE value is unchanged, but expm1 keeps the intermediate
    values on the true price scale and matches the earlier definition
    of this helper.

    Parameters
    ----------
    columnreal, columnpred : array-like
        True and predicted log1p prices.

    Returns
    -------
    float
        Root-mean-squared error on the de-transformed prices.
    """
    a = np.expm1(np.asarray(columnreal, dtype=float))
    b = np.expm1(np.asarray(columnpred, dtype=float))
    # Equivalent to sklearn's mean_squared_error(..., squared=False).
    return float(np.sqrt(np.mean((a - b) ** 2)))
print("~"*40)
print('Result for train :')
model_lr(X_train, y_train)
train_rmse = transrome_logprice(y_train, fare_train['price pred.'] )
print(f'The rmse is = {train_rmse:.3f}')
print("~"*40)
print('Result for test :')
model_lr(X_test, y_test, df=fare_test)
test_rmse = transrome_logprice(y_test, fare_test['price pred.'] )
print(f'The rmse is = {test_rmse:.3f}')
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="YSW4AoLRkklQ" outputId="be15908b-b501-48b7-f037-443bb37a8edd"
# Predicted vs. actual (log scale) on the training split; the red line
# marks perfect prediction.
ax = sns.scatterplot(x=y_train, y=fare_train['price pred.'])
ax.plot(y_train, y_train, 'r')
plt.title('For train result')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 161} id="2ZQIuf8fJdeU" outputId="a3132024-7982-4c83-d268-90e27ac3b1bd"
# Sample price prediction
print('Sample price prediction- train')
pd.DataFrame({'Real_price':fare_train['log_price'],'Prdict_price':fare_train['price pred.']}).head(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="BPPjFoM9STHt" outputId="50f53878-ac6c-4165-8da1-fb9d09a16295"
ax = sns.scatterplot(x=y_test, y=fare_test['price pred.'])
ax.plot(y_test,y_test, 'r')
plt.title('For test result')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 161} id="CqnCJXTtqVLZ" outputId="55cf238a-4fbf-43c3-c263-ec172b0fe851"
# Sample price prediction
print('Sample price prediction- test')
pd.DataFrame({'Real_price':fare_test['log_price'],'Prdict_price':fare_test['price pred.']}).head(3)
# + [markdown] id="GTu65E-Q840a"
#
#
# > **Predicted vs. Actual**
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 471} id="FBN2Yee6G4ut" outputId="8060a864-61ab-4cb3-e403-090f18ed9b34"
# Square predicted-vs-actual plot with a y = x reference line.
plt.figure(figsize=(7,7))
plt.scatter(y_train, fare_train['price pred.'])
# plt.yscale('log')
# plt.xscale('log')
p1 = max(max(y_train), max(fare_train['price pred.']))
p2 = min(min(y_train), min(fare_train['price pred.']))
plt.plot([p1, p2], [p1, p2], 'r', linewidth=3)
plt.xlabel('Real Price', fontsize=15)
plt.ylabel('Price Predictions', fontsize=15)
plt.axis('equal')
plt.axis('square')
plt.title('Predicted vs. Actual', fontsize=20)
plt.show()
# + [markdown] id="T_s6YIpwoNML"
# ###**KNN Model**
# + id="eQXfx-zL3hHN"
# Working copy of the data for the KNN pipeline; drop route columns R0
# and R5.
knn_data = airline_data.copy()
knn_data.drop(columns=['R0','R5'], axis='columns', inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="BH0-ytqA3ydU" outputId="a1252f78-7397-490d-d6cd-6519a1204a70"
knn_data.head()
knn_data.drop('Dep_Time', axis='columns', inplace=True)
knn_data.head()
# + id="0fyg5faLxmQi" cellView="form"
#@title
# scaling the data
# Standardize features: k-NN distances are scale sensitive.
ml = StandardScaler()
# ml = MinMaxScaler()
ds = knn_data.drop('log_price', axis=1)
y = knn_data['log_price']
dataset = ml.fit_transform(ds)
k_data = pd.DataFrame(dataset,columns= ds.columns)
k_data.shape,y.shape
# + id="V4-wozRKG292" cellView="form"
#@title
# Alternative encoding (option): map city names to hand-picked
# integer codes.
conver_source = {'Banglore':1, 'Kolkata':2, 'Delhi':3, 'Chennai':4, 'Mumbai':5}
conver_Destination = {'New Delhi':6, 'Banglore':1, 'Cochin':8, 'Kolkata':9, 'Delhi':3,'Hyderabad':11}
knn_data['Source'] = knn_data['Source'].map(conver_source)
knn_data['Destination'] = knn_data['Destination'].map(conver_Destination)
# + cellView="form" id="iwFYoK9o810H"
#@title
# Alternative encoding (option): one-hot dummies.
def category_change(name) :
    # Returns dummy columns for column `name` of the global knn_data,
    # dropping the first level to avoid collinearity.
    name = pd.get_dummies(knn_data[name], drop_first= True)
    return name
Airline = category_change('Airline')
Dep_Time = category_change('Dep_Time')
Airline_budget = category_change('Airline_budget')
Destination = category_change('Destination')
Source = category_change('Source')
# NOTE(review): R5 is assigned twice, so the R2 dummies are overwritten
# by the R3 dummies, and neither is concatenated below even though the
# raw R2/R3 columns are dropped — confirm this is intentional.
R5 = category_change('R2')
R5 = category_change('R3')
knn_data.drop(columns=['Airline','Dep_Time','Airline_budget','Destination','Source','R2','R3'], inplace=True)
knn_data = pd.concat([knn_data,Airline,Dep_Time,Airline_budget,Destination,Source], axis=1)
knn_data.head()
# + id="69TDQuoi39dZ" cellView="form"
#@title
# Alternative encoding (option): label-encode categorical columns.
def encoding_columns(columnname):
    # Fits a LabelEncoder on the column's unique values and replaces
    # the column with integer codes (mutates the global knn_data).
    le = preprocessing.LabelEncoder()
    le.fit(list(knn_data[columnname].unique()))
    le.classes_
    knn_data[columnname] = le.transform(knn_data[columnname])
listis_encoding = ['Dep_Time','Airline_budget','Source','Destination']
for name in listis_encoding:
    encoding_columns(name)
# + id="nMiFeqtf5nxn" cellView="form"
#@title
# option 1 for dealing with route column
# airline_data[['0','1','2','3','4','5']] = airline_data['Route'].str.split('?', expand = True)
# airline_data = airline_data.fillna(0)
# Map every airport code seen in R1..R4 to a unique integer; 0 is
# reserved for missing legs ('None').
all_airports = {}
for ls in ['R1','R2','R3','R4']:
    for i in list(knn_data[ls].unique()):
        all_airports[i] = 0
l = list(all_airports.keys())
dict_air = dict(enumerate(l, start=1))
dict_air
new = {}
for k,v in dict_air.items() :
    new[v] = k
new['None'] = 0
print(new)
for ls in ['R1','R2','R3','R4']:
    knn_data[ls] = knn_data[ls].replace(new, regex= True)
# + id="XFmrOYDr24hL"
# 70/30 split of the scaled k-NN features.
X_train, X_test,y_train,y_test = split(k_data,y, test_size=0.3,
                                       random_state=123456)
# + id="OPhP5TTheUyD" cellView="form"
#@title
# # first option
# fare_train, fare_test = split(airline_dummies, test_size=0.3,
#                 random_state=123456)
# def train_test(df):
#   X_train = fare_train.drop('log_price', axis=1)
#   y_train = fare_train['log_price']
#   X_test = fare_test.drop('log_price', axis=1)
#   y_test = fare_test['log_price']
#   return X_train, X_test, y_train, y_test
# X_train, X_test, y_train, y_test = train_test(airline_dummies)
# + id="4w8iqdIyuJ2m"
# Fit a k-NN regressor with k=8 on the log fare.
knn_model = KNeighborsRegressor(n_neighbors=8).fit(X_train, y_train)
y_train_pred = knn_model.predict(X_train)
# fare_train['price pred.'] = knn_model_1.predict(X_train)
# + colab={"base_uri": "https://localhost:8080/"} id="wS3mtyIHuuCn" outputId="15d45ac6-1438-4c33-bb4a-717f790bda95"
# Training error on both the log scale (RMSLE) and price scale (RMSE).
price_error = transrome_logprice(y_train, y_train_pred)
RMSE = mean_squared_error(y_train, y_train_pred, squared=False)
print(f'The RMSLE is {RMSE:.3f}')
print(f"The RMSE is = {price_error:.3f}")
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="TeCl8G_vuRid" outputId="0b9e2c9a-2607-44bb-9d50-15f8f3c7f3fe"
ax = sns.scatterplot(y_train, y_train_pred)
ax.plot(y_train,y_train, 'r')
plt.show()
# + [markdown] id="YsIVxp24rKTQ"
# Checking k over a range of 15 neighbours to find the best validation
# result; k = 9 gives the lowest error.
# + colab={"base_uri": "https://localhost:8080/"} id="XEYmCvnBAKAz" outputId="d891a6ad-4cfb-452d-ad9f-ab8c2247cd1c"
# Search k = 1..14 for the lowest validation RMSE.
rmse_list = []
for K in range(1,15):
    model = KNeighborsRegressor(n_neighbors = K).fit(X_train, y_train)
    y_test_pred =model.predict(X_test)
    error = mean_squared_error(y_test,y_test_pred, squared=False)
    rmse_list.append(error)
    print(f'RMSE value for k = {K:^3} is: {error}')
# + colab={"base_uri": "https://localhost:8080/"} id="0H4iY230vkgR" outputId="3ae8befd-bc3e-4c12-c912-00d9ae6efd7c"
# Validation: run the k=8 model on the test data.
y_test_pred = knn_model.predict(X_test)
price_test_error = transrome_logprice(y_test, y_test_pred)
RMSE = mean_squared_error(y_test, y_test_pred, squared=False)
print(f'The RMSLE is {RMSE:.3f}')
score= knn_model.score(X_test, y_test)
print(f"The RMSE is = {price_test_error:.3f}")
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="6Pvt-y_Kvs-H" outputId="eeb39659-8dd2-46df-c7d8-e1d49c4c4d63"
ax = sns.scatterplot(y_test, y_test_pred)
ax.plot(y_test,y_test, 'r')
plt.show()
# + [markdown] id="a0saXb7tgupP"
# ###**Decision Tree Regressor Model**
# + id="pdY1F0UO2Imd"
# Recreate the dummy-encoded split (same seed as the linear model).
fare_train, fare_test = split(airline_dummies, test_size=0.3,
                              random_state=123456)
def train_test(df):
    # NOTE(review): `df` is ignored; the function reads the global
    # fare_train/fare_test frames.
    X_train = fare_train.drop('log_price', axis=1)
    y_train = fare_train['log_price']
    X_test = fare_test.drop('log_price', axis=1)
    y_test = fare_test['log_price']
    return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test(airline_dummies)
# + id="nGKSDNmLM_7I"
# NOTE(review): this immediately overwrites the split above with the
# scaled k-NN features, so the dummy split appears unused here.
X_train, X_test,y_train,y_test = split(k_data,y, test_size=0.3,
                                       random_state=123456)
# + id="JAOQNdn4RISx"
# Shallow tree: at most 40 leaves, each leaf holding >= 1% of samples.
dt_model= DecisionTreeRegressor(max_leaf_nodes=40,min_samples_leaf=0.01).fit(X_train, y_train)
# + id="DxKFy5-Cg0J-" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="c2507a82-1206-45e3-b61d-b94979c139b7"
def visualize_tree(model, md=5, fs=(12, 8)):
    """Draw a fitted decision tree, down to depth ``md``, on a new figure."""
    plt.figure(figsize=fs)
    tree_kwargs = dict(
        max_depth=md,
        feature_names=model.feature_names_in_,
        label='all',
        filled=True,
        rounded=True,
        fontsize=12,
    )
    plot_tree(model, **tree_kwargs)

# Show only the top three levels of the fitted tree.
visualize_tree(dt_model, md=3)
# + id="MyINldC5RQjV"
y_train_pred = dt_model.predict(X_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="e1f77ItcRXI_" outputId="05448d1e-9bb5-40bf-c8bb-21baf414ab78"
# Train fit: predicted vs. actual with a perfect-prediction line.
ax = sns.scatterplot(x=y_train, y=y_train_pred)
ax.plot(y_train, y_train, 'r')
plt.title('For train')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="Uyn4VZoWRarb" outputId="1f9929f2-df4e-4371-9242-e7f490db4e28"
# Training error on the log scale (RMSLE) and the price scale (RMSE).
price_error = transrome_logprice(y_train, y_train_pred)
RMSE = mean_squared_error(y_train, y_train_pred, squared=False)
print(f'The RMSLE is {RMSE:.3f}')
print(f"The RMSE is = {price_error:.3f}")
# + id="Dbm2dDOSRo0e" colab={"base_uri": "https://localhost:8080/"} outputId="8ea5f956-598b-4513-f947-5a60885451ff"
# validation model
y_test_pred = dt_model.predict(X_test)
price_test_error = transrome_logprice(y_test, y_test_pred)
RMSE = mean_squared_error(y_test, y_test_pred, squared=False)
print(f'The RMSLE is {RMSE:.3f}')
score= dt_model.score(X_test, y_test)
# print(f'The score is {score:.3f}')
print(f"The RMSE is = {price_test_error:.3f}")
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="0kMzai2kRt5R" outputId="f1442c0b-0901-4da4-f567-8d3a378a8bd6"
ax = sns.scatterplot(x=y_test, y=y_test_pred)
ax.plot(y_test, y_test, 'r')
plt.title('For test')
plt.show()
# + [markdown] id="eyJ6-iIwARrh"
# ###**Best Result we got**
# + colab={"base_uri": "https://localhost:8080/", "height": 143} cellView="form" id="6nZmexM3ESgn" outputId="feaedb81-4c55-4a35-b3aa-4e427b302dd0"
#@title
# Comparing all the models
# RMSE values are hard-coded copies of the results printed above.
models = pd.DataFrame({
    'Model': ['Linear Regression','Decision Tree Regressor(min_samples_leaf=0.01)', 'KNN (K=9)'],
    'Test RMSE': [ 2490,2241, 2466],
    'Train RMSE': [2571, 2255, 2381]}, index= ['LR','DT','KNN'])
models.sort_values(by='Test RMSE', ascending=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 268} cellView="form" id="7OB-6UHfHC9F" outputId="eafd027d-7945-4827-cb57-daaeae86b032"
#@title
models.plot(kind='barh',color = ('lightblue','grey'))
plt.legend(loc='upper left')
plt.show()
# + [markdown] id="zeHuW276efSu"
# ##**Summary of results:**
#
#
# > Linear Regression
#
# ```
# Result for train :
# Score = 0.769
# RMSE = 2571.193
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Result for test :
# Score = 0.765
# RMSE = 2490.582
# ```
#
# > KNeighbors Regressor
#
# ```
# Result for train :
# Score = 0.838
# RMSE = 2381.332
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Result for test :
# Score = 0.792
# RMSE = 2466.473
# ```
#
# > Decision Tree Regressor
#
# ```
#
# Result for train :
# Score = 0.805
# RMSE = 2255.295
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Result for test :
# Score = 0.792
# RMSE = 2241.257
#
# ```
#
#
| Airline_price _.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#import matplotlib.patches as mpatches
# Enable inline plotting
# %matplotlib inline
# +
# jedi 0.14.1 tab completion fails; will be fixed in 0.14.2
import jedi
if jedi.__version__ == '0.14.1':
    # %config Completer.use_jedi = False
    # The only statement in this branch is an IPython magic, which
    # jupytext stores as a comment; `pass` keeps the branch non-empty
    # so the .py form of this notebook stays valid Python.
    pass

# Progress bar
from tqdm.auto import trange, tqdm
# +
import pynrc
from pynrc import nrc_utils
from pynrc.nrc_utils import S, source_spectrum
pynrc.setup_logging('WARNING', verbose=False)
# +
# Observation Definitions
from pynrc.nb_funcs import make_key, obs_wfe, obs_optimize
from pynrc.nb_funcs import model_info, disk_rim_model
# Functions to run a series of operations
from pynrc.nb_funcs import do_opt, do_contrast, do_gen_hdus, do_sat_levels
# Plotting routines
from pynrc.nb_funcs import plot_contrasts, plot_contrasts_mjup, planet_mags, plot_planet_patches
from pynrc.nb_funcs import update_yscale, do_plot_contrasts, do_plot_contrasts2
from pynrc.nb_funcs import plot_hdulist, plot_images, plot_images_swlw
# -
# ## Define Sources and their Reference PSF Stars
# Various Bandpasses
bp_v = S.ObsBandpass('v')
bp_k = pynrc.bp_2mass('k')
bp_w1 = pynrc.bp_wise('w1')
bp_w2 = pynrc.bp_wise('w2')
# +
# source, dist, age, sptype, vmag kmag W1 W2
args_sources = [('PDS 70', 113, 5.4, 'K7IV', 12.2, 8.8, 8.0, 7.7)]
# The science target acts as its own PSF reference here.
ref_sources = args_sources
# +
# Directory housing VOTables
# http://vizier.u-strasbg.fr/vizier/sed/
votdir = 'votables/'
# Directory to save plots and figures
outdir = 'YSOs/'
# +
# List of filters
args_filter = [('F187N', None, None),
               ('F200W', None, None),
               ('F405N', None, None),
               ('F410M', None, None),]
# Build a lookup key for each (filter, mask, pupil) combination.
filt_keys = []
for filt,mask,pupil in args_filter:
    filt_keys.append(make_key(filt, mask=mask, pupil=pupil))
# -
# Save generated figures?
save_figs = True
# ## SED Fit
# +
# Fit spectrum to SED photometry
i=0
name_sci, dist_sci, age_sci, spt_sci, vmag_sci, kmag_sci, w1_sci, w2_sci = args_sources[i]
vot = votdir + name_sci.replace(' ' ,'') + '.vot'
mag_sci, bp_sci = vmag_sci, bp_v
args = (name_sci, spt_sci, mag_sci, bp_sci, vot)
src = source_spectrum(*args)
# Fit over 1-10 um, allowing an IR-excess component.
src.fit_SED(use_err=False, robust=False, wlim=[1,10], IR_excess=True)
# Final source spectrum
sp_sci = src.sp_model
# -
# Do the same for the reference source
# PDS 70 serves as its own reference, so reuse the science spectrum.
name_ref, spt_ref, sp_ref = name_sci, spt_sci, sp_sci
# +
cols = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Plot spectra
fig, axes = plt.subplots(1,2, figsize=(14,4.5))
# Left panel: photometry and fitted SED.
ax = axes[0]
src.plot_SED(ax=axes[0], xr=[0.5,30])
ax.set_title('Science SED -- {} ({})'.format(name_sci, spt_sci))
# ax.set_xscale('linear')
# ax.xaxis.set_minor_locator(AutoMinorLocator())
# Right panel: normalized spectrum with the filter bandpasses overlaid.
ax = axes[1]
xr = [1,6]
bp = pynrc.read_filter(*args_filter[-1])
sp = sp_sci
w = sp.wave / 1e4
o = S.Observation(sp, bp, binset=bp.wave)
# Work in photon flux for the normalized plot, then convert back.
sp.convert('photlam')
f = sp.flux / sp.flux[(w>xr[0]) & (w<xr[1])].max()
ind = (w>=xr[0]) & (w<=xr[1])
ax.plot(w[ind], f[ind], lw=1, label=sp.name)
ax.set_ylabel('Normalized Flux (ph/s/wave)')
sp.convert('flam')
ax.set_xlim(xr)
ax.set_xlabel(r'Wavelength ($\mu m$)')
ax.set_title('{} Spectrum and Bandpasses'.format(sp_sci.name))
# Overplot Filter Bandpass
ax2 = ax.twinx()
for i, af in enumerate(args_filter):
    bp = pynrc.read_filter(*af)
    ax2.plot(bp.wave/1e4, bp.throughput, color=cols[i+1], label=bp.name+' Bandpass')
ax2.set_ylim([0,ax2.get_ylim()[1]])
ax2.set_xlim(xr)
ax2.set_ylabel('Bandpass Throughput')
ax.legend(loc='upper left')
ax2.legend(loc='upper right')
fig.tight_layout()
if save_figs:
    fig.savefig(outdir+'{}_SED.pdf'.format(name_sci.replace(' ','')))
# -
# ### Generate observations
# +
from astropy.io import fits
hdul = fits.open(outdir + 'model_PDS70.fits')
# Data is in million Jy per Steradian
data = hdul[0].data
data_wave = 1.6 # microns
# Position angle offset applied to the model image (degrees).
pa_offset = 295
# Arcsec/pixel
pix_asec = 0.0045
# Steradians to square arcsec
sr_to_asec2 = (3600*180/np.pi)**2
# Convert MJy/sr -> mJy/pixel (1e9 mJy per MJy; pixel area in asec^2
# divided by asec^2 per steradian)
data *= (1e9 * pix_asec**2 / sr_to_asec2) # mJy / pixel
# Mask inner disk region
rho = nrc_utils.dist_image(data, pixscale=pix_asec)
data[rho<=0.075] = 0
# Rotate the masked model to the chosen position angle.
hdul[0].data = nrc_utils.rotate_offset(data, pa_offset, reshape=False)
# args_disk = (hdul, pix_asec, dist_sci, data_wave, 'mJy/pixel')
# #hdul_out = pynrc.obs_nircam.model_to_hdulist(args_disk, sp_sci, bp)
# extent = np.array([-1,1,-1,1]) * hdul[0].data.shape[0] * pix_asec / 2
# plt.imshow(hdul[0].data, extent=extent)
# plt.xlim([-1,1])
# plt.ylim([-1,1])
units = 'mJy/pixel'
args_disk = (hdul, pix_asec, dist_sci, data_wave, units)
#hdul_out = pynrc.obs_nircam.model_to_hdulist(args_disk, sp_sci, bp)
# Quick-look plot of the rotated, masked disk model.
extent = np.array([-1,1,-1,1]) * hdul[0].data.shape[0] * pix_asec / 2
vmax = hdul[0].data.max()
vmin = vmax/1e5
norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
fig, ax = plt.subplots(1,1)
im = ax.imshow(hdul[0].data, extent=extent)#, norm=norm)
fig.colorbar(im, ax=ax, label=units)#, extend='max')
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
fig.tight_layout()
# +
subsize = 160
# Create a dictionary that holds the obs_hci class for each filter
wfe_drift = 0
obs_dict = obs_wfe(wfe_drift, args_filter, sp_sci, dist_sci, sp_ref=sp_ref, args_disk=args_disk,
                   wind_mode='WINDOW', subsize=subsize, verbose=False)
# -
# if there's a disk input, then we want to remove disk
# contributions from stellar flux and recompute to make
# sure total flux counts matches what we computed for
# sp_sci in previous section to match real photometry
if args_disk is not None:
    for key in filt_keys:
        obs = obs_dict[key]
        star_flux = obs.star_flux(sp=sp_sci) # Pass original input spectrum
        disk_flux = obs.disk_hdulist[0].data.sum()
        # Scale the stellar spectrum down by the disk's share of the flux.
        obs.sp_sci = sp_sci * (1 - disk_flux / star_flux)
        obs.sp_sci.name = sp_sci.name
        print(disk_flux, star_flux, obs.star_flux())
        obs.sp_ref = obs.sp_sci
# +
# Update detector readout
# Choose readout pattern, ngroups, and nints per observing mode.
for i, key in enumerate(filt_keys):
    obs = obs_dict[key]
    if 'none' in key:
        pattern, ng, nint_sci, nint_ref = ('RAPID',10,480,480)
    elif ('MASK210R' in key) or ('MASKSWB' in key):
        pattern, ng, nint_sci, nint_ref = ('BRIGHT2',10,20,20)
    else:
        pattern, ng, nint_sci, nint_ref = ('MEDIUM8',10,15,15)
    obs.update_detectors(read_mode=pattern, ngroup=ng, nint=nint_sci)
    obs.nrc_ref.update_detectors(read_mode=pattern, ngroup=ng, nint=nint_ref)
    # print(key)
    # print(obs.multiaccum_times)
    # _ = obs.sensitivity(nsig=5, units='vegamag', verbose=True)
    # print('')
# -
# ### Saturation
# +
# Saturation radius (arcsec) per filter at 90% of full well.
sat_dict = {}
for k in filt_keys:
    print('\n{}'.format(k))
    obs = obs_dict[k]
    dsat_asec = do_sat_levels(obs, satval=0.9, plot=False)
    sat_dict[k] = dsat_asec
# -
# ### Roll Subtraction Curves
# +
# Determine contrast curves for various WFE drift values
wfe_list = [0, 1, 2, 5]
nsig = 5
roll = 10
# fk_contrast = ['F444W_none_none', 'F410M_none_none']
curves_dict = do_contrast(obs_dict, wfe_list, filt_keys,
                          nsig=nsig, roll_angle=roll, opt_diff=False, no_ref=True)
# +
# Shades of red/blue distinguish the different WFE drift values.
lin_vals = np.linspace(0.2,0.8,len(wfe_list))
c1 = plt.cm.Reds_r(lin_vals)
c2 = plt.cm.Blues_r(lin_vals)
key1, key2 = ('F410M_none_none', 'F200W_none_none') #filt_keys[-2:][::-1]
lab1 = '{}'.format(obs_dict[key1].filter)
lab2 = '' if key2 is None else '{}'.format(obs_dict[key2].filter)
fig, axes_all = do_plot_contrasts2(key1, key2, curves_dict, nsig, obs_dict, wfe_list, age_sci,
                                   sat_dict=sat_dict, label1=lab1, label2=lab2, c1=c1, c2=c2,
                                   xr=[0,5], yr=[25,10], yscale2='log', yr2=[3e-2, 100])
fig.subplots_adjust(top=0.8, bottom=0.1 , left=0.05, right=0.95)
fname = "{}_contrast.pdf".format(name_sci.replace(" ", ""))
if save_figs:
    fig.savefig(outdir+fname)
# -
# ### Images
# +
# Planet b:
# K = 8.7 + 8.0 = 16.7
# L = 8.2 + 6.9 = 15.1
# Planet c:
# K = 8.7 + 8.8 = 17.5
# L = 8.2 + 6.6 = 14.8
# Add known planets
dL_arr = np.array([6.9, 6.6]) # L-Band mag contrast
Lbp = pynrc.read_filter('F360M') # Approx L-Band
rth_arr = [(0.18,150), (0.25,280)] # sep (asec), PA
for key in filt_keys:
    obs = obs_dict[key]
    obs.kill_planets()
    Kobs = S.Observation(obs.sp_sci, bp_k, binset=bp_k.wave)
    Lobs = S.Observation(obs.sp_sci, Lbp, binset=Lbp.wave)
    # Apparent L-band magnitudes of planets b and c.
    Lmag_arr = Lobs.effstim('vegamag') + dL_arr
    # print(Kobs.effstim('vegamag'), Lobs.effstim('vegamag'))
    # print(Lobs.effstim('vegamag'), Lmag_arr)
    mass_arr = [10, 10]
    mdot_arr = [1e-8, 5e-8]
    av_arr = [1, 10]
    for i, Lmag in enumerate(Lmag_arr):
        obs.add_planet(rtheta=rth_arr[i], runits='asec', age=5, mass=mass_arr[i], entropy=9,
                       accr=True, mdot=mdot_arr[i], Av=av_arr[i],
                       renorm_args=(Lmag,'vegamag',Lbp))
    # Report each planet's magnitude through the current bandpass.
    pl_mags = []
    for pl in obs.planets:
        sp = obs.planet_spec(**pl)
        renorm_args = pl['renorm_args']
        sp_norm = sp.renorm(*renorm_args)
        sp_norm.name = sp.name
        sp = sp_norm
        o = S.Observation(sp, obs.bandpass, binset=obs.bandpass.wave)
        pl_mags.append(o.effstim('vegamag'))
    print('Planet Mags:', key, pl_mags)
# -
# Ideal
# No WFE drift and noise excluded: idealized subtracted images.
wfe_ref = 0
wfe_roll = 0
hdul_dict_ideal = do_gen_hdus(obs_dict, filt_keys, wfe_ref, wfe_roll, no_ref=False, opt_diff=False,
                              oversample=4, PA1=0, PA2=0, exclude_noise=True)
# Roll Subtracted
# 2 nm WFE drift between rolls, +/-5 deg roll angles.
wfe_ref = 0
wfe_roll = 2
hdul_dict = do_gen_hdus(obs_dict, filt_keys, wfe_ref, wfe_roll, no_ref=True, opt_diff=False,
                        oversample=4, PA1=-5, PA2=5)
# +
fk_images = [
    'F187N_none_none',
    'F200W_none_none',
    'F405N_none_none',
    'F410M_none_none',
]
# NOTE(review): the explicit list above is immediately overridden here.
fk_images = filt_keys
# +
from copy import deepcopy
fig, axes_arr = plt.subplots(2,4, figsize=(14,6.3))
xylim = 1.0
xr = yr = np.array([-1,1])*xylim
# Top row: ideal (noiseless) images.
axes = axes_arr[0]
for i, k in enumerate(fk_images):
    ax = axes[i]
    hdul = hdul_dict_ideal[k]
    vmax = np.nanmax(hdul[0].data)
    plot_hdulist(hdul, ax=ax, xr=xr, yr=yr, vmax=vmax, interpolation='bicubic', cb_label='')
    ax.set_title('Ideal ({})'.format(obs_dict[k].filter))
# Bottom row: roll-subtracted images with the saturated core masked.
axes = axes_arr[1]
for i, k in enumerate(fk_images):
    ax = axes[i]
    hdul = deepcopy(hdul_dict[k])
    # Saturation radius
    sat_rad = sat_dict[k]
    # Mask out inner region (at least 0.15 arcsec)
    mask_rad = np.max([sat_rad, 0.15])
    rho = nrc_utils.dist_image(hdul[0].data, pixscale=hdul[0].header['PIXELSCL'])
    vmax = np.nanmax(hdul[0].data[(rho>mask_rad) & (rho<xylim)])
    hdul[0].data[rho<=mask_rad] = 0
    plot_hdulist(hdul, ax=ax, xr=xr, yr=yr, vmin=-vmax/2, vmax=vmax, cb_label='')
    ax.set_title('Roll Sub (' + '$\Delta$' + 'WFE = {} nm)'.format(wfe_roll))
# Location of planet
# Circle the known planet positions on every panel.
for pl in obs.planets:
    loc = (np.array(pl['xyoff_pix'])) * obs.pix_scale
    for ax in axes_arr.flatten():
        circle = matplotlib.patches.Circle(loc, radius=xylim/10., lw=1, edgecolor='red', facecolor='none')
        ax.add_artist(circle);
# Tidy shared axis labels.
for axes in axes_arr:
    for i, ax in enumerate(axes):
        if i>0: ax.set_ylabel('')
for ax in axes_arr[0]:
    ax.set_xlabel('')
# Title
dist = obs.distance
age_str = 'Age = {:.0f} Myr'.format(age_sci)
dist_str = 'Distance = {:.1f} pc'.format(dist)
title_str = '{} ({}, {})'.format(name_sci,age_str,dist_str)
fig.suptitle(title_str, fontsize=16);
fig.tight_layout()
fig.subplots_adjust(top=0.92)
fname = "{}_images.pdf".format(name_sci.replace(" ", ""))
if save_figs:
    fig.savefig(outdir+fname)
# -
| notebooks/GTO/NRC_GTO_YSO_PDS70.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## loading Data
from PIL import Image
# Paths to the content and style source images.
path2content= "./content.jpg"
path2style= "./style.jpg"
content_img = Image.open(path2content)
style_img = Image.open(path2style)
# +
# content_img
# +
# style_img
# +
import torchvision.transforms as transforms
# Resize, tensorize, and normalize with ImageNet statistics (the
# statistics VGG was trained with).
h, w = 256, 384
mean_rgb = (0.485, 0.456, 0.406)
std_rgb = (0.229, 0.224, 0.225)
transformer = transforms.Compose([
    transforms.Resize((h,w)),
    transforms.ToTensor(),
    transforms.Normalize(mean_rgb, std_rgb)])
# -
content_tensor = transformer(content_img)
print(content_tensor.shape, content_tensor.requires_grad)
style_tensor = transformer(style_img)
print(style_tensor.shape, style_tensor.requires_grad)
# The optimized image starts as a copy of the content image and must
# carry gradients.
input_tensor = content_tensor.clone().requires_grad_(True)
print(input_tensor.shape, input_tensor.requires_grad)
# +
import torch

def imgtensor2pil(img_tensor):
    """Undo ImageNet normalization on ``img_tensor`` and return a PIL image.

    Reads the module-level ``mean_rgb``/``std_rgb`` stats and uses
    ``to_pil_image`` (imported elsewhere in the notebook before first use).
    """
    denorm = img_tensor.clone().detach()
    denorm *= torch.tensor(std_rgb).view(3, 1, 1)
    denorm += torch.tensor(mean_rgb).view(3, 1, 1)
    return to_pil_image(denorm.clamp(0, 1))
# +
import matplotlib.pylab as plt
# %matplotlib inline
from torchvision.transforms.functional import to_pil_image
plt.imshow(imgtensor2pil(content_tensor))
plt.title("content image");
# -
plt.imshow(imgtensor2pil(style_tensor))
plt.title("style image");
# +
import torchvision.models as models
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Frozen VGG19 feature extractor (convolutional part only), eval mode.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in
# favour of the `weights=` argument — confirm the installed version.
model_vgg = models.vgg19(pretrained=True).features.to(device).eval()
for param in model_vgg.parameters():
    param.requires_grad_(False)
print(model_vgg)
# -
def get_features(x, model, layers):
    """Run ``x`` through ``model``'s children and capture named activations.

    ``layers`` maps a child index (as a string) to a feature name; the
    returned dict maps each feature name to its activation tensor.
    """
    captured = {}
    for idx, layer in enumerate(model.children()):
        x = layer(x)
        key = str(idx)
        if key in layers:
            captured[layers[key]] = x
    return captured
def gram_matrix(x):
    """Return the (n*c) x (n*c) Gram matrix of the feature maps in ``x``."""
    n, c, h, w = x.size()
    flat = x.view(n * c, h * w)
    return flat @ flat.t()
# +
import torch.nn.functional as F

def get_content_loss(pred_features, target_features, layer):
    """Mean-squared error between predicted and target activations at ``layer``."""
    target_act = target_features[layer]
    pred_act = pred_features[layer]
    return F.mse_loss(pred_act, target_act)
# -
def get_style_loss(pred_features, target_features, style_layers_dict):
    """Weighted Gram-matrix MSE summed over the style layers.

    ``style_layers_dict`` maps layer name -> weight; each layer's loss
    is normalized by the number of feature-map elements (n*c*h*w).
    """
    total = 0
    for layer, weight in style_layers_dict.items():
        pred_act = pred_features[layer]
        n, c, h, w = pred_act.shape
        pred_gram = gram_matrix(pred_act)
        target_gram = gram_matrix(target_features[layer])
        layer_loss = weight * F.mse_loss(pred_gram, target_gram)
        total += layer_loss / (n * c * h * w)
    return total
# +
# Layer-name lookup for VGG19's features module: child index -> name.
feature_layers = {'0': 'conv1_1',
                  '5': 'conv2_1',
                  '10': 'conv3_1',
                  '19': 'conv4_1',
                  '21': 'conv4_2',
                  '28': 'conv5_1'}
con_tensor = content_tensor.unsqueeze(0).to(device)
sty_tensor = style_tensor.unsqueeze(0).to(device)
# Fixed targets: activations of the content and style images.
content_features = get_features(con_tensor, model_vgg, feature_layers)
style_features = get_features(sty_tensor, model_vgg, feature_layers)
# -
for key in content_features.keys():
    print(content_features[key].shape)
# +
from torch import optim
# Optimize the input image itself, starting from the content image.
input_tensor = con_tensor.clone().requires_grad_(True)
optimizer = optim.Adam([input_tensor], lr=0.01)
# +
num_epochs = 300
content_weight = 1e1
style_weight = 1e4
content_layer = "conv5_1"
# Per-layer weights for the style loss.
style_layers_dict = { 'conv1_1': 0.75,
                      'conv2_1': 0.5,
                      'conv3_1': 0.25,
                      'conv4_1': 0.25,
                      'conv5_1': 0.25}
for epoch in range(num_epochs+1):
    optimizer.zero_grad()
    input_features = get_features(input_tensor, model_vgg, feature_layers)
    content_loss = get_content_loss (input_features, content_features, content_layer)
    style_loss = get_style_loss(input_features, style_features, style_layers_dict)
    neural_loss = content_weight * content_loss + style_weight * style_loss
    # NOTE(review): retain_graph=True looks unnecessary — the graph is
    # rebuilt on every iteration; confirm before removing.
    neural_loss.backward(retain_graph=True)
    optimizer.step()
    if epoch % 100 == 0:
        print('epoch {}, content loss: {:.2}, style loss {:.2}'.format(
            epoch,content_loss, style_loss))
# -
# Show the stylized result.
plt.imshow(imgtensor2pil(input_tensor[0].cpu()));
| chapters/Chapter08/Chapter8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jakerenv
# language: python
# name: jakerenv
# ---
# # Initial Data Exploration
#
# Here I'll do some standard unpacking and exploring of the provided data.
#
# [Competition description](https://www.drivendata.org/competitions/63/genetic-engineering-attribution/page/165/)
# Standard imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import permutations
import sys
import os
sys.path.append('../')
from src.functions import *
# The data is given as `train_labels.csv` and `train_values.csv` and is located in the data folder
#
# The site also provides `test_values.csv` from which competitors can generate their submissions.
X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')
y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')
# Find a full overview of the labels and values in the [data readme](../data/README.md)
X.head()
# First I'm going to just do the same steps that Khuyen Tran did for the official [benchmark](https://www.drivendata.co/blog/genetic-attribution-benchmark) for this competition.
# Distinct characters appearing anywhere in the DNA sequences.
bases = set(''.join(X.sequence.values))
seq_length = 4
# NOTE(review): permutations() only yields 4-mers with *distinct*
# characters (e.g. 'AAAA' is never generated); itertools.product would
# enumerate all 4-mers. Confirm which the benchmark intends.
subsequences = [''.join(perm) for perm in permutations(bases, seq_length)]
len(subsequences), bases
# I'll use a function from the benchmark to get counts of every one of these (non-overlapping) subsequences in each gene sequence
subs = get_ngram_features(X, subsequences)
X = subs.join(X.drop('sequence', axis=1))
# Collapse the one-hot label matrix into a single lab_id column.
lab_ids = pd.DataFrame(y.idxmax(axis=1), columns=['lab_id'])
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.mixture import BayesianGaussianMixture
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
estimators = [BernoulliNB(), GaussianNB(), MultinomialNB(),
              BayesianGaussianMixture()]
# Fit each candidate on the full training set and report its top-10
# accuracy (the competition metric).
for estimator in estimators:
    name = estimator.__class__.__name__
    estimator.fit(X, lab_ids.values.ravel())
    print(f'{name}: {top10_accuracy_scorer(estimator, X, lab_ids.values.ravel())}')
# BernoulliNB: 0.6856
#
# GaussianNB: 0.4582
#
# MultinomialNB: 0.7007
| exploratory/01_jp_firststeps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ## Getting the data
#
# In this exercise, we work with a data set from [World Values Survey](http://www.worldvaluessurvey.org/WVSDocumentationWV6.jsp) who have extensive documentation on questions in the survey.
#
# We focus on question V105: "I'd like to ask you how much you trust people from various groups. Could you tell me for each whether you trust people from this group completely, somewhat, not very much or not at all? People you meet for the first time"
library(foreign)
# Read the Stata export of the Turkey 2012 wave.
dataset = read.dta("WV6_Data_Turkey_2012_Stata_v20180912.dta")
head( dataset )
# +
## here we dichotomize the variable to help in some analysis
## mark missing values NA
dataset$V105[ as.integer( dataset$V105 ) > 4 ] <- NA
dataset$V105[ as.integer( dataset$V105 ) <= 2 ] <- NA ## this helps us to work on binary data.
dataset$V105 <- droplevels( dataset$V105 )
## keep only rows that are complete across all variables
dataset <- dataset[ complete.cases( dataset ), ]
summary( dataset$V105 )
# -
# ## Splitting training and test data
#
# To control the quality of our data analysis, we split the classified data into two groups. The first, the **training data**, is used to develop and train the model. The second split is the **testing data**, which we use to evaluate how well we trained the model. We never use testing data when training the model, so that we can estimate the _accuracy_ of any model by showing it unseen data.
# Split `data` into train/test partitions by row position.
# `size` is the fraction of rows that form the training set; with
# train = TRUE the leading `size` share of rows is returned, otherwise
# the remainder. Note: rows are taken in order, not randomly sampled.
create_train_test <- function(data, size = 0.8, train = TRUE) {
    train_sample <- 1:(size * nrow(data))
    if (train) {
        data[train_sample, ]
    } else {
        data[-train_sample, ]
    }
}
# Positional (not random) 80/20 split.
train <- create_train_test( dataset, train = TRUE )
test <- create_train_test( dataset, train = FALSE )
# ## Decision trees
#
# [Decision trees](https://en.wikipedia.org/wiki/Decision_tree_learning) help in data classification by exploring what best predicts belonging to some category, step by step. It creates a nice tree-like visualization.
# They work best with binary variables of roughly equal group sizes.
library(rpart)
library(rpart.plot)
# Classification tree predicting trust in strangers (V105) from the
# V10..V100 items and age (V242).
model_rpart <- rpart( V105 ~ V10 + V20 + V30 + V40 + V50 + V60 + V70 + V80 + V90 + V100 + V242, data = train, method = "class")
rpart.plot( model_rpart )
# ## Support vector machines
#
# Support vector machines similarly are used to create a mechanism to classify content based on variables. Note how you can explore the importance of individual variables using `varImp`.
#
# You can also use advanced techniques to improve the model prediction by **cross-validating** even when doing data analysis -- not only in the end when comparing results from train and test data. This means that the model is created several times with different splits (folds) of the dataset and they are used together to create the best model.
library(caret)
## cross-validation setup: 2 folds, repeated twice
tc <- trainControl(
    method = "repeatedcv",
    returnResamp = "all",
    number = 2,
    repeats = 2
)
model_svm <- train( V105 ~ V10 + V20 + V30 + V40 + V50 + V60 + V70 + V80 + V90 + V100 + V242, data=train,
                    method="svmLinear", trControl = tc)
varImp( model_svm, scale=TRUE )
varImp( model_svm, scale=FALSE )
# ## Random forest
model_rf <- train( V105 ~ V10 + V20 + V30 + V40 + V50 + V60 + V70 + V80 + V90 + V100 + V242, data=train, method="rf")
plot( model_rf )
# ## Evaluating results
#
# Now let's examine how well the models work with unseen test data.
p <- predict( model_rf, test )
confusionMatrix( p, test$V105 )
# ## Continuous variable
#
# Above we worked with a dataset that was nominal, or classified. Let's move to work on a dataset that is continuous.
hist( train$V242, xlab = "Age", main = "Age of responders" )
# NOTE(review): method "lmStepAIC" is stepwise linear regression, not
# lasso — the variable name is misleading; confirm the intent.
model_lasso <- train( V242 ~ V10 + V20 + V30 , data=train, method="lmStepAIC")
summary( model_lasso )
test_lasso <- predict( model_lasso, test )
cor( test_lasso, test$V242 )
plot( test_lasso, test$V242 )
| 2019/istanbul/material/Quantitative analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch
# Toy training data for y = 2x: three (x, y) pairs as (3, 1) column tensors.
x_data=torch.Tensor([[1.],[2.],[3.]])
y_data=torch.Tensor([[2.],[4.],[6.]])
class LinearModel(torch.nn.Module):
    """Single-feature linear regression model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output feature.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to a batch of shape (N, 1)."""
        return self.linear(x)
model = LinearModel()
# BUGFIX: `size_average` is deprecated (and removal-slated) in modern PyTorch;
# reduction='sum' is the exact equivalent of size_average=False
# (sum of squared errors instead of the mean).
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Full-batch gradient descent on the toy y = 2x dataset.
for epoch in range(2500):
    y_pred = model(x_data)            # forward pass
    loss = criterion(y_pred, y_data)  # sum of squared errors
    # print(epoch,loss)
    optimizer.zero_grad()             # clear gradients from the previous step
    loss.backward()                   # backpropagate
    optimizer.step()                  # update w and b

# The learned weight should converge to ~2.0 for this data.
model.linear.weight.item()
# Persist the trained parameters.
torch.save(model.state_dict(),'./m2.pth')
print(2)
| p/getd/Linear_reg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hierarchical clustering with 2D toy datasets
# # [教學目標]
# - 將階層式聚類套用在 2D 樣板資料上, 來觀察幾種不同參數的結果有何不同
# - 因為非監督模型的效果, 較難以簡單的範例看出來
# 所以後續非監督偶數日提供的範例與作業, 主要目的在於觀察非監督模型的效果,
# 同學只要能感受到模型效果即可, 不用執著於搞懂程式的每一個部分
# # [範例重點]
# - 以幾種 2D 樣板資料, 觀察階層式聚類在 "linkage參數" 有所不同時, 分群的效果如何變化 (In[4], Out[4])
# +
# 載入套件
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
# +
# Build the 2D toy datasets.
n_samples = 1500
random_state = 100
# Concentric circles.
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05)
# Three anisotropic blobs (sheared through a linear transformation matrix).
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# Three blobs with unequal variances.
varied = datasets.make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state)
# -
# Clustering and plotting parameters.
plt.figure(figsize=(10, 8))
plot_num = 1
default_base = {'n_neighbors': 10, 'n_clusters': 3}
# NOTE(review): this rebinding shadows the imported sklearn `datasets` module;
# harmless here because the module is not used again below.
datasets = [(noisy_circles, {'n_clusters': 2}), (varied, {'n_neighbors': 2}), (aniso, {'n_neighbors': 2})]
# Loop over the toy datasets and fit three hierarchical clusterings on each.
for i_dataset, (dataset, algo_params) in enumerate(datasets):
    # Merge per-dataset overrides into the defaults and standardize features.
    params = default_base.copy()
    params.update(algo_params)
    X, y = dataset
    X = StandardScaler().fit_transform(X)
    # Three agglomerative clusterings differing only in the linkage criterion.
    ward = cluster.AgglomerativeClustering(n_clusters=params['n_clusters'], linkage="ward")
    complete = cluster.AgglomerativeClustering(n_clusters=params['n_clusters'], linkage="complete")
    average = cluster.AgglomerativeClustering(n_clusters=params['n_clusters'], linkage="average")
    clustering_algorithms = (
        ('Average Linkage', average),
        ('Complete Linkage', complete),
        ('Ward Linkage', ward))
    # Fit each algorithm, time it, and draw its scatter plot.
    for name, algorithm in clustering_algorithms:
        # t0 / t1 : record elapsed fitting time
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
            y_pred = algorithm.labels_.astype(int)
        else:
            y_pred = algorithm.predict(X)
        plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        # Cycle a fixed palette so there is one color per cluster label.
        colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
                                             '#f781bf', '#a65628', '#984ea3',
                                             '#999999', '#e41a1c', '#dede00']),
                                      int(max(y_pred) + 1))))
        plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
        plt.xlim(-2.5, 2.5)
        plt.ylim(-2.5, 2.5)
        plt.xticks(())
        plt.yticks(())
        # Annotate the fit time in the lower-right corner of each panel.
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'), transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
# # 觀察結果
# * 同心圓 : 三者效果中 Complete 的效果似乎是最差
# * 三群 : Average 與 Ward 效果都還不錯
# * 斜三群 : 三者效果也是以 Complete 較差
# * 同學可以試著替換不同的 random_state 觀察, 會有不同的結果
| homeworks/D058/Day_058_hierarchical_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import json
from scipy import stats
import pandas as pd
# Folder holding the per-dataset intrusion-test result JSON files.
t_test_folder = '../output/intrusion/'
# Embedding methods compared; significance is only reported vs. the EVE model.
eve_model = "eve"
methods = [eve_model,"word2vec_sg","word2vec_cbow","fasttext_cbow","fasttext_sg","glove"]
# Evaluation dataset identifiers.
dataset_ids = ["animal_classes", "european_cities", "movie_genres", "cuisine", "music_genres", "nobel_laureates",
               "country_continent"]
# +
def standard_t_test(a, b):
    """Independent two-sample t-test; returns (t statistic, p-value)."""
    outcome = stats.ttest_ind(a, b)
    return outcome[0], outcome[1]
def pair_t_test(a, b):
    """Paired-samples (related) t-test; returns (t statistic, p-value)."""
    outcome = stats.ttest_rel(a, b)
    return outcome[0], outcome[1]
def load_items(filepath):
    """Load and return the JSON content of *filepath*, logging progress.

    Raises the usual OSError / json.JSONDecodeError on a missing or
    malformed file.
    """
    print("Loading %s ..." % filepath)
    # Use a context manager so the file handle is closed promptly
    # (the original `json.load(open(filepath))` leaked the handle until GC).
    with open(filepath) as handle:
        return json.load(handle)
# -
# Load the per-dataset intrusion results, keyed by dataset id.
results = dict()
for dataset_id in dataset_ids:
    # Load the data
    print()
    results[dataset_id] =load_items(t_test_folder + "results-for-tests-%s.json" % dataset_id )
# Collect, per dataset and per method, the score values; 'all' pools the
# scores across every dataset.
distribution = dict()
distribution['all'] = dict()
for method in methods:
    distribution['all'][method] = list()
for dataset_id in dataset_ids:
    print('Processing', dataset_id)
    distribution[dataset_id] = dict()
    for method in methods:
        # zip(*...) transposes the result pairs; [1] keeps the score column.
        distribution[dataset_id][method] = list(zip(*results[dataset_id][method]))[1]
        distribution['all'][method] += distribution[dataset_id][method]
# +
# Pairwise significance tests between methods; only the comparisons that
# involve the EVE model are kept in the output table.
result_cols = ["Dataset","Method","Standard t-stats","Standard p-value","Pairwise t-stats", "Pairwise p-value"]
result_rows = list()
# Per-dataset comparisons over every unordered method pair.
for dataset_id in dataset_ids:
    for i in range(len(methods)):
        for j in range(i+1, len(methods)):
            dist_a = distribution[dataset_id][methods[i]]
            dist_b = distribution[dataset_id][methods[j]]
            s_t, s_pvalue = standard_t_test(dist_a, dist_b)
            p_t, p_pvalue = pair_t_test(dist_a, dist_b)
            if methods[i] == eve_model or methods[j] == eve_model:
                result_rows.append([dataset_id, methods[i] + ', ' + methods[j], s_t, s_pvalue, p_t, p_pvalue])
# Same comparisons on the scores pooled across all datasets.
for i in range(len(methods)):
    for j in range(i+1, len(methods)):
        dist_a = distribution['all'][methods[i]]
        dist_b = distribution['all'][methods[j]]
        s_t, s_pvalue = standard_t_test(dist_a, dist_b)
        p_t, p_pvalue = pair_t_test(dist_a, dist_b)
        if methods[i] == eve_model or methods[j] == eve_model:
            result_rows.append(['all', methods[i] + ', ' + methods[j], s_t, s_pvalue, p_t, p_pvalue])
print('preparing dataframe')
df_results = pd.DataFrame(result_rows, columns=result_cols)
df_results
# -
# Persist the significance table.
df_results.to_csv("intrusion_significance.csv")
| ipython/intrusion_p-value.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="BuQnCySLKLpU" colab_type="code" colab={}
# + id="MIKsolwIVSu4" colab_type="code" outputId="03322a3f-1826-4812-c737-a3d1cb1c537a" executionInfo={"status": "ok", "timestamp": 1583700525267, "user_tz": -60, "elapsed": 21106, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 698}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="F9WYbldYVaRy" colab_type="code" outputId="1f0481f9-1acb-4e7b-ed76-4812d2bc49d9" executionInfo={"status": "ok", "timestamp": 1583700528849, "user_tz": -60, "elapsed": 21413, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 177}
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
from hyperopt import hp, fmin, tpe, STATUS_OK
import eli5
from eli5.sklearn import PermutationImportance
# + id="zPSgjp2TaEEF" colab_type="code" outputId="8b6064d0-8352-4eb6-d8b6-ed04b6dd9a0b" executionInfo={"status": "ok", "timestamp": 1583700528854, "user_tz": -60, "elapsed": 18041, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"
# + id="n_Iq8o8jaK-w" colab_type="code" outputId="ccf9fb11-0058-447d-e380-2237017e1d9c" executionInfo={"status": "ok", "timestamp": 1583700532433, "user_tz": -60, "elapsed": 20865, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
df = pd.read_hdf('data/car.h5')
df.shape
# + id="lpdnhJi3aP6N" colab_type="code" colab={}
# Label-encode every column; factorized copies get a '__cat' suffix.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    # Skip list-valued columns, which cannot be factorized.
    if isinstance(df[feat][0], list): continue
    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values
# + id="h7P4AO_vad2J" colab_type="code" colab={}
# Parse numeric car attributes out of their raw strings; 'None' becomes -1.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
# Power: keep the leading number (e.g. "140 KM" -> 140).
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0]))
# Engine displacement: strip the "cm3" suffix and the internal spaces.
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')))
# + id="zYPsnZd1asxa" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate *model* on the global df with 3-fold negative MAE.

    Returns a (mean, std) tuple of the cross-validation scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(model, features, target, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="l95vc-kZa9Q3" colab_type="code" outputId="5e683ed7-4b75-406c-c5a8-0d9e6feeee26" executionInfo={"status": "ok", "timestamp": 1583700548022, "user_tz": -60, "elapsed": 31992, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 91}
# Feature list selected in earlier experiments ('__cat' encodings plus the
# parsed numeric columns above).
feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat','feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']
xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    # NOTE(review): random_state is a float here; it is normally an integer
    # seed -- this value may have been intended for learning_rate. Confirm.
    'random_state': 0.1,
    'seed': 0
}
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + [markdown] id="A6gd0YBjAFXn" colab_type="text"
# ## Hyperopt
# + id="yLb8vUF9bMEh" colab_type="code" outputId="0ea1e5a6-1e79-4f9e-d799-a7a9e9558c98" executionInfo={"status": "ok", "timestamp": 1583703936934, "user_tz": -60, "elapsed": 541702, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17502613523486444756"}} colab={"base_uri": "https://localhost:8080/", "height": 477}
def obj_func(params):
    """Hyperopt objective: train an XGBoost regressor, return the mean MAE as loss."""
    print("Training with params: ")
    print(params)
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    # run_model returns negative MAE, so the absolute value is the loss.
    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}
# space
# Hyperparameter search space.
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}
## run
# Run TPE search for 10 evaluations and report the best configuration found.
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals = 10)
best
# + id="pI7XCDmWEPZ7" colab_type="code" colab={}
| day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py35
# language: python
# name: py35
# ---
# # <center> Summer Load </center>
import os
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
# Emission / damage quantity columns produced by the factor estimates.
LABELS = ['so2_kg', 'nox_kg', 'pm25_kg', 'co2_kg', 'so2_dam_ap2', 'nox_dam_ap2',
          'pm25_dam_ap2', 'so2_dam_eas', 'nox_dam_eas', 'pm25_dam_eas',
          'co2_dam', 'dam_ap2', 'dam_eas']
LABELS.sort()
# Per-pollutant damage columns grouped by damage model (AP2 vs. EASIUR).
dam_cols_ap2 = ['co2_dam', 'so2_dam_ap2', 'nox_dam_ap2', 'pm25_dam_ap2']
dam_cols_eas = ['co2_dam', 'so2_dam_eas', 'nox_dam_eas', 'pm25_dam_eas']
# Temporal groupings and the CSV index columns each grouping uses.
GROUPING_NAMES = ['SeasonalTOD', 'MonthTOD', 'TOD', 'YearOnly', 'Month', 'Hour']
GROUPING_COLS = [['year', 'season', 'hour'], ['year', 'month', 'hour'],
                 ['year', 'hour'], ['year'], ['year', 'month'], ['DATE_UTC']]
GROUPING_NAMES_COLS = dict(zip(GROUPING_NAMES, GROUPING_COLS))
# ## Read in data
def get_factor_df(kind='MEF', time='MonthTOD', region='PJM', fuel_type='FossilOnly'):
    """Load one emission/damage factor table from the calculated_factors CSVs.

    kind : 'MEF' (marginal) or 'AEF' (average) factors.
    time : temporal grouping; a key of GROUPING_NAMES_COLS.
    region : 'PJM' or an NERC region code such as 'RFC'.
    fuel_type : 'FossilOnly' or 'FossilPlus' (available only for PJM).

    Returns a DataFrame indexed by the grouping columns for *time* with
    numeric factor columns. Raises NotImplementedError when FossilPlus is
    requested outside PJM.
    """
    kind_folder = 'mefs' if kind=='MEF' else 'aefs'
    # Read in file
    if fuel_type == 'FossilOnly':
        # Fossil-only files are split by ISO/RTO ('isorto') or NERC region.
        region_breakdown = 'isorto' if region == 'PJM' else 'nerc'
        df = pd.read_csv(os.path.join(os.pardir, os.pardir, 'factor_estimates', 'calculated_factors',
                                      kind_folder, time,
                                      '{}_{}.csv'.format(region_breakdown, kind_folder)),
                         index_col=GROUPING_NAMES_COLS[time])
        # Keep only this region's rows, then drop the region column itself.
        df = df[df[region_breakdown] == region].drop(region_breakdown, axis=1)
    else:
        if region != 'PJM':
            raise NotImplementedError('fossil-plus factors are only available for PJM')
        df = pd.read_csv(os.path.join(os.pardir, os.pardir, 'factor_estimates', 'calculated_factors',
                                      kind_folder, time,
                                      'pjm_fplus_{}.csv'.format(kind_folder)),
                         index_col=GROUPING_NAMES_COLS[time])
    # Filter MEF columns
    if kind == 'MEF':
        # Keep the point estimates only; drop r-values / intercepts and
        # strip the '-est' suffix from the remaining column names.
        df = df.drop([x for x in df.columns if '-r' in x or '-int' in x], axis=1)
        df.columns = [x.replace('-est', '') for x in df.columns]
    # Ensure columns have numeric type
    df = df.apply(pd.to_numeric, axis=1)
    return df
# Pre-load every available (kind, region, fuel_type, time) factor table.
all_dfs = {}
for kind in ['MEF', 'AEF']:
    for region in ['PJM', 'RFC']:
        for fuel_type in ['FossilOnly', 'FossilPlus']:
            for time in ['YearOnly', 'Month', 'MonthTOD', 'Hour']:
                # FossilPlus factors exist only for PJM ...
                if region == 'RFC' and fuel_type == 'FossilPlus':
                    continue
                # ... and marginal factors have no hourly estimates.
                if kind == 'MEF' and time == 'Hour':
                    continue
                print(kind, region, fuel_type, time)
                df = get_factor_df(kind=kind, time=time, region=region, fuel_type=fuel_type)
                all_dfs[(kind, region, fuel_type, time)] = df
# ## Get summer load
import matplotlib.pyplot as plt
# %matplotlib inline
# Read the 2017 hourly metered load for the PJM RTO.
metered_loads = pd.DataFrame(pd.read_csv(
    os.path.join(os.pardir, os.pardir, 'data', 'metered_loads', 'formatted_data', 'hourly_loads.csv'),
    index_col=0, parse_dates=[0]))['RTO-HrMeteredLoad']
metered_loads = pd.DataFrame(metered_loads.loc['2017-01-01':'2017-12-31'])
fontsize=13
fig, ax = plt.subplots(figsize=(8.5, 2))
# Plot in GWh (the CSV values appear to be MWh -- hence /1e3; confirm).
(metered_loads/1e3).plot(legend=False, ax=ax, fontsize=fontsize);
ax.set_ylabel('Hourly metered\nload (GWh)', fontsize=fontsize);
ax.set_xlabel('');
# if not os.path.exists(os.path.join('figures', 'summer_load')):
#     os.makedirs(os.path.join('figures', 'summer_load'))
# fig.savefig(os.path.join('figures', 'summer_load', 'load_profile.pdf'), bbox_inches='tight')
print('Peak load: {}'.format((metered_loads/1e3).max()[0]))
print('Peak load summer: {}'.format(((metered_loads/1e3).loc['2017-06-01':'2017-08-31']).max()[0]))
print('Peak load non summer: {}'.format(
    pd.concat([(metered_loads/1e3).loc[:'2017-05-31'], (metered_loads/1e3).loc['2017-09-01':]]).max()[0]))
# Measure load only in summer (June through August 2017, hourly).
hours_summer = pd.date_range('2017-06-01', '2017-09-01', freq='H', closed='left')
intervention_df = pd.DataFrame(metered_loads.loc[hours_summer])
# ## Get intervention effects
def get_hour_factors(df, time, hours, prev_year=False):
    """Expand a grouped factor table to one row per timestamp in *hours*.

    df : factor DataFrame indexed per GROUPING_NAMES_COLS[time].
    time : temporal grouping name ('YearOnly', 'Month', 'MonthTOD', 'Hour').
    hours : DatetimeIndex of timestamps to look up.
    prev_year : if True, look each hour up in the previous calendar year
        (used to apply 2016 factors to 2017 hours).

    Returns the factor rows re-indexed by *hours*.
    """
    # Index components for each requested hour.
    year_series = hours.map(lambda x: x.year-1) if prev_year else hours.map(lambda x: x.year)
    month_series = hours.map(lambda x: x.month)
    hour_series = hours.map(lambda x: x.hour)
    every_hour_series = hours.map(lambda x: x.replace(year=x.year-1)) if prev_year else hours
    if time == 'YearOnly':
        df2 = df.loc[year_series]
    elif time == 'Month':
        df2 = df.loc[list(zip(year_series, month_series))]
    elif time == 'MonthTOD':
        df2 = df.loc[list(zip(year_series, month_series, hour_series))]
    elif time == 'Hour':
        # df2 = df.loc[year_series]
        # Hourly tables are indexed by a UTC timestamp string.
        df2 = df.loc[every_hour_series.map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))]
    df2 = df2.set_index(hours)
    return df2
# Get factor for each point in time series
hours_summer_factors = {}
for key in all_dfs.keys():
# print(key)
df = all_dfs[key]
# 2017
df2 = get_hour_factors(df, key[-1], hours_summer)
hours_summer_factors[key + (2017,)] = df2
# 2016 for PJM fossil-only (but not for hourly)
if key[1] == 'PJM' and key[2] == 'FossilPlus' and key[-1] != 'Hour':
df3 = get_hour_factors(df, key[-1], hours_summer, True)
hours_summer_factors[key + (2016,)] = df3
# Calculate effects
intervention_effects = {}
for key in hours_summer_factors.keys():
reds = hours_summer_factors[key].multiply(intervention_df['RTO-HrMeteredLoad'], axis='index')
effects = reds[LABELS].sum() # total effect
# For MEFs, propagate error.
# When same factor is applied to multiple reductions
# (i.e. multiplied by their total reduction amount), this factor's SE should be summed across
# these reductions (i.e. also multiplied by the total reduction amount).
# Independent errors (from different factors) should then be combined by the sqrt of their
# sum of squares.
if key[0] == 'MEF':
# Per-factor (non-independent) errors
groupby_list = dict([('YearOnly', reds.index.year),
('Month', [reds.index.year, reds.index.month]),
('MonthTOD', [reds.index.year, reds.index.month, reds.index.hour]),
('Hour', [reds.index])])
per_factor_errors = reds[['{}-se'.format(x) for x in LABELS]].groupby(
groupby_list[key[-2]]).sum()
# Combine per-factor errors to get independent errors
ses = np.sqrt((per_factor_errors ** 2).sum())
# ses = np.sqrt((reds[['{}-se'.format(x) for x in LABELS]] ** 2).sum())
effects = pd.concat([effects, ses])
intervention_effects[key] = effects
intervention_effects_df = pd.DataFrame(intervention_effects).T
intervention_effects_df.index.names = ['kind', 'region', 'fuel_type', 'time', 'year']
# ## Some analysis
def get_summary(intervention_effects_df, pollutant):
    """Print min/max (with their index labels) of one pollutant column and return its transpose."""
    series = intervention_effects_df[pollutant]
    print('Min: {}, {}'.format(series.min(), series.idxmin()))
    print('Max: {}, {}'.format(series.max(), series.idxmax()))
    print(series.T)
    return series.T
# EASIUR total damages: express every estimate as a percentage of the
# baseline (PJM fossil-plus, average factors, hourly, 2017).
df = get_summary(intervention_effects_df, 'dam_eas')
baseline = df.loc[('AEF', 'PJM', 'FossilPlus', 'Hour', 2017)]
baseline
(100 * df/baseline).round(0).astype(int)
# Same comparison using AP2 damages.
df = get_summary(intervention_effects_df, 'dam_ap2')
baseline = df.loc[('AEF', 'PJM', 'FossilPlus', 'Hour', 2017)]
baseline
(100 * df/baseline).round(0).astype(int)
# ## Bar plots for SI
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# +
def get_dam_name(dam_abbr):
    """Map a damage-model abbreviation to its display name ('ap2' -> 'AP2', otherwise 'EASIUR')."""
    if dam_abbr == 'ap2':
        return 'AP2'
    return 'EASIUR'
# Columns holding the total damages summed over all pollutants.
FULL_DAMS = ['dam_ap2', 'dam_eas']
def format_title(label):
    """Build a human-readable plot title from an emissions/damages column name."""
    parts = label.split('_')
    if label in FULL_DAMS:
        title = 'Total damages ({})'.format('AP2' if parts[1] == 'ap2' else 'EASIUR')
    else:
        # e.g. 'so2_kg' -> 'SO$_{2}$ emissions', 'pm25_dam_eas' -> 'PM$_{25}$ damages (EASIUR)'
        title = '{0}$_{{{1}}}$ {2}'.format(parts[0][:2].upper(), parts[0][2:],
                                           'emissions' if parts[1] == 'kg' else 'damages')
    if len(parts) > 2:
        title += ' ({})'.format('AP2' if parts[2] == 'ap2' else 'EASIUR')
    return title
def format_axis(label):
    """Build the y-axis caption for a column name: damages in $, emissions in kg."""
    parts = label.split('_')
    if label in FULL_DAMS:
        return 'Total damages ($)'
    if len(parts) > 2 or parts[1] == 'dam':
        return 'Damages ($)'
    return 'Emissions (kg)'
# -
# Get formatted df with intervention effects for given label
def get_onelabel_formatted(label):
    """Return intervention effects for *label* as a tidy frame with ordered
    categorical 'kind' (Marginal/Average), 'time' and 'spat' columns, sorted
    for plotting. Reads the module-level intervention_effects_df.
    """
    kind_map = dict([('MEF', 'Marginal'), ('AEF', 'Average')])
    time_map = dict([('YearOnly', 'Annual'), ('MonthTOD', 'Monthly TOD'), ('Month', 'Monthly'), ('Hour', 'Hourly')])
    df = intervention_effects_df[label].reset_index()
    # Combine region / fuel type / year into one 'spatial scope' display label,
    # e.g. "PJM (fossil-only)" or "PJM (fossil+non-emit 2016)".
    df['spat'] = df.apply(
        lambda x: '{} ({}-{}{})'.format(
            x['region'], x['fuel_type'][:-4].lower(), x['fuel_type'][-4:].lower(),
            ' 2016' if x['year'] == 2016 else ''), axis=1)
    df['spat'] = df['spat'].str.replace('fossil-plus', 'fossil+non-emit')
    df = df.drop(['region', 'fuel_type', 'year'], axis=1)
    # Ordered categoricals fix the bar ordering in the seaborn plots below.
    df['kind'] = df['kind'].map(lambda x: kind_map[x]).astype(
        pd.CategoricalDtype(categories=['Marginal', 'Average'], ordered=True))
    df['time'] = df['time'].map(lambda x: time_map[x]).astype(
        pd.CategoricalDtype(categories=['Annual', 'Monthly', 'Monthly TOD', 'Hourly'], ordered=True))
    df['spat'] = df['spat'].astype(pd.CategoricalDtype(
        categories=['PJM (fossil-only)', 'PJM (fossil+non-emit 2016)', 'PJM (fossil+non-emit)', 'RFC (fossil-only)'],
        ordered=True))
    df = df.sort_values(['spat', 'kind', 'time'])
    return df
from IPython.core.debugger import set_trace
def get_stacked_plot(label):
    """Draw and save the SI bar plot for one emissions/damages column *label*.

    One bar-plot panel per spatial scope, bars grouped by Marginal/Average
    and colored by temporal scope; MEF bars get standard-error bars. Saves
    to plots/summer_load/si-<label>.pdf.
    """
    df = get_onelabel_formatted(label)
    df_se = get_onelabel_formatted('{}-se'.format(label))
    # Get bar plot
    sns.set(style="whitegrid")
    g = sns.catplot(x='kind', y=label, hue='time', col='spat', data=df,
                    kind='bar', palette=[sns.color_palette('muted')[x] for x in [0,2,3,4]], legend=False, ci=None,
                    height=3, aspect=1).set_titles('{col_name}')
    g.despine(left=True);
    # Adjust font size and add legend
    fontsize=18
    for i, ax in enumerate(g.axes.flatten()):
        ax.set_xlabel('')
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
        ax.title.set_fontsize(fontsize-2)
        if i == 0:
            # Legend and axis label only on the left-most panel.
            lgd = ax.legend(loc='center left', bbox_to_anchor=(0.75, -0.3), ncol=4, frameon=True, fontsize=fontsize)
            ax.set_ylabel(format_axis(label))
        # Annotate baseline -- PJM fossil-plus average monthly TOD
        if i == 2:
            baseline_x = 1.3
            patch_width = [p.get_width() for p in ax.patches][0]
            # Find the height of the bar nearest x == baseline_x to place the star.
            baseline_y = max([p.get_height() \
                for p in ax.patches if abs(p.get_xy()[0]+patch_width/2-baseline_x)<=patch_width/4])
            ax.text(s='*', x=baseline_x, y=1.05 * baseline_y,
                    horizontalalignment='center', verticalalignment='center',
                    fontsize=fontsize*2, fontweight='bold')
    # Hacky errorbars: manually place SE bars over the three Marginal bars.
    for i, ax in enumerate(g.axes[0]):
        spat = df['spat'].dtype.categories[i]
        df_slice = df.query('spat == @spat').query('kind == "Marginal"')
        df_se_slice = df_se.query('spat == @spat').query('kind == "Marginal"')
        ax.errorbar([-0.3,-0.1, 0.1], df_slice[label].values,
                    yerr=df_se_slice['{}-se'.format(label)].values, ms=20, color='black',
                    linewidth=0, elinewidth=2, capsize=2, capthick=2)
    # Plot title
    fig = plt.gcf()
    # sup=fig.text(0.5, 1, format_title(label),
    #              fontsize=fontsize, fontweight='bold', fontstyle='italic',
    #              transform=fig.transFigure, ha='center')
    plt.tight_layout()
    dirname = os.path.join('plots', 'summer_load')
    if not os.path.exists(dirname): os.makedirs(dirname)
    g.fig.savefig(os.path.join(dirname, 'si-{}.pdf'.format(label)),
                  bbox_extra_artists=(lgd,), #(lgd,sup),
                  bbox_inches='tight')
# Generate and save one SI bar plot per emissions/damages column.
for label in LABELS:
    get_stacked_plot(label)
# ## Stacked bar plots for total damages
import matplotlib.patches as mpatches
def plot_total_damages(dam_type, title=False):
    """Draw the stacked total-damages figure for one damage model.

    dam_type : 'EASIUR' or 'AP2' -- selects the per-pollutant damage columns.
    title : if True, add a super-title above the figure.

    Bars are stacked by pollutant (drawn as overlaid cumulative-sum bars with
    distinct hatches), grouped Marginal/Average, one panel per spatial scope.
    Saves to plots/summer_load/<dam_type>-stacked-with-error*.pdf.
    """
    fontsize=18
    plt.rcParams['hatch.linewidth'] = 0.5
    sns.set(style="whitegrid", color_codes=True)
    dam_cols = dam_cols_eas if dam_type == 'EASIUR' else dam_cols_ap2
    se_col = 'dam_{}-se'.format('eas' if dam_type == 'EASIUR' else 'ap2')
    df = get_onelabel_formatted(dam_cols)
    df = (df.set_index(['spat', 'kind', 'time'])/1e9).reset_index() # billions
    # Cumulative sums across pollutants drive the "stacked" effect below.
    df_cum = df.set_index(['spat', 'kind', 'time']).cumsum(axis=1).reset_index()
    # Stacked bar plot: draw the largest cumulative total first, then overlay
    # progressively smaller cumulative sums with different hatches.
    g = sns.FacetGrid(data=df_cum, col='spat', size=3, aspect=1)
    hatches = ['||', '///', '', '\\\\\\']
    g.map(sns.barplot, 'kind', dam_cols[-1], 'time',
          hue_order=['Annual', 'Monthly', 'Monthly TOD', 'Hourly'], order=['Marginal', 'Average'],
          palette=[sns.color_palette('muted')[x] for x in [0,2,3,4]], edgecolor='black', hatch=hatches[0])
    g.map(sns.barplot, 'kind', dam_cols[-2], 'time',
          hue_order=['Annual', 'Monthly', 'Monthly TOD', 'Hourly'], order=['Marginal', 'Average'],
          palette=[sns.color_palette('muted')[x] for x in [0,2,3,4]], edgecolor='black', hatch=hatches[1])
    g.map(sns.barplot, 'kind', dam_cols[-3], 'time',
          hue_order=['Annual', 'Monthly', 'Monthly TOD', 'Hourly'], order=['Marginal', 'Average'],
          palette=[sns.color_palette('muted')[x] for x in [0,2,3,4]], edgecolor='black', hatch=hatches[2])
    g.map(sns.barplot, 'kind', dam_cols[-4], 'time',
          hue_order=['Annual', 'Monthly', 'Monthly TOD', 'Hourly'], order=['Marginal', 'Average'],
          palette=[sns.color_palette('muted')[x] for x in [0,2,3,4]], edgecolor='black', hatch=hatches[3]).set_titles('{col_name}')
    # NOTE(review): left='true' is a truthy string; probably meant left=True
    # (the effect is the same).
    g.despine(left='true')
    # Legend, fontsize, and other formatting
    xoffset=0.035
    for i, ax in enumerate(g.axes.flatten()):
        ax.set_xlabel('') # No x-label
        if i == 0:
            # y label on left plot
            ax.set_ylabel('Total damages\n(\$ billions)'.format(dam_type))
            # pollutants legend
            leg_dict = dict(zip(dam_cols, ['CO$_2$', 'SO$_2$', 'NO$_x$', 'PM$_{{2.5}}$']))
            dam_patches = []
            for dam, hatch in zip(dam_cols, hatches[::-1]):
                patch = mpatches.Patch(facecolor='white', label=leg_dict[dam], edgecolor='black', hatch=hatch)
                dam_patches.append(patch)
            lgd = ax.legend(handles=dam_patches, loc='center left',
                            bbox_to_anchor=(0.3+xoffset, -0.15), ncol=4, frameon=True, fontsize=fontsize,
                            bbox_transform=plt.gcf().transFigure)
            plt.text(0.16+xoffset, -0.175, 'Pollutants:', transform=plt.gcf().transFigure,
                     fontsize=fontsize, fontweight='bold')
            lgd.get_frame().set_edgecolor('white')
        if i == 1:
            # temporal scope legend (same length as pollutants legend for alignment)
            blank_patch = mpatches.Patch(color='white', label='')
            ann_patch = mpatches.Patch(color=sns.color_palette('muted')[0], label='Annual', edgecolor='black')
            month_patch = mpatches.Patch(color=sns.color_palette('muted')[2], label='Monthly', edgecolor='black')
            tod_patch = mpatches.Patch(color=sns.color_palette('muted')[3], label='Monthly TOD', edgecolor='black')
            hr_patch = mpatches.Patch(color=sns.color_palette('muted')[4], label='Hourly', edgecolor='black')
            time_patches = [ann_patch, month_patch, tod_patch, hr_patch]
            lgd2 = ax.legend(handles=time_patches, loc='center left',
                             bbox_to_anchor=(0.27+xoffset, -0.025), ncol=4, frameon=True, fontsize=fontsize-1,
                             bbox_transform=plt.gcf().transFigure)
            plt.text(0.09+xoffset, -0.045, 'Temporal scopes:', transform=plt.gcf().transFigure,
                     fontsize=fontsize, fontweight='bold')
            lgd2.get_frame().set_edgecolor('white')
        # Annotate baseline -- PJM fossil-plus average monthly TOD
        if i == 2:
            baseline_x = 1.3
            patch_width = [p.get_width() for p in ax.patches][0]
            baseline_y = max([p.get_height() \
                for p in ax.patches if abs(p.get_xy()[0]+patch_width/2-baseline_x)<=patch_width/4])
            ax.text(s='*', x=baseline_x, y=1.05 * baseline_y,
                    horizontalalignment='center', verticalalignment='center',
                    fontsize=fontsize*2, fontweight='bold')
        # Set font size
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
        ax.title.set_fontsize(fontsize-2)
    # Hacky errorbars
    # Note/TODO: in billions
    df_tot = get_onelabel_formatted(se_col[:-3])
    df_tot_se = get_onelabel_formatted(se_col)
    for i, ax in enumerate(g.axes[0]):
        spat = df_tot['spat'].dtype.categories[i]
        df_tot_slice = df_tot.query('spat == @spat').query('kind == "Marginal"')
        df_tot_se_slice = df_tot_se.query('spat == @spat').query('kind == "Marginal"')
        ax.errorbar([-0.3, -0.1, 0.1], df_tot_slice[se_col[:-3]].values/1e9,
                    yerr=df_tot_se_slice[se_col].values/1e9, ms=20, color='black',
                    linewidth=0, elinewidth=2, capsize=2, capthick=2)
    # Line around legend
    fig = plt.gcf()
    leg_line = \
        mpatches.Rectangle(
            (0.073+xoffset, -0.2), 0.875, 0.24, facecolor='none', edgecolor='lightgray',
            transform=fig.transFigure, figure=fig)
    fig.patches.extend([leg_line])
    extra_artists = (lgd, lgd2)
    if title:
        sup=fig.text(0.5, 0.9, 'Total damages ({})\n'.format(dam_type),
                     fontsize=fontsize, fontweight='bold', fontstyle='italic',
                     transform=fig.transFigure, ha='center')
        extra_artists = extra_artists + (sup,)
    plt.tight_layout()
    dirname = os.path.join('plots', 'summer_load')
    if not os.path.exists(dirname): os.makedirs(dirname)
    g.fig.savefig(os.path.join(dirname,
                               '{}-stacked-with-error{}.pdf'.format(dam_type, '-titled' if title else '')),
                  bbox_extra_artists=extra_artists, bbox_inches='tight')
# Render the stacked total-damages figure for both damage models.
plot_total_damages('EASIUR', title=False)
plot_total_damages('AP2', title=False)
| interventions/notebooks/SummerLoad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://mail.google.com/mail/u/0/#inbox/QgrcJHrnscJGjsRzFLPzmGMZSlGWKMsntRB
# -
# %load_ext autoreload
# %autoreload 2
from childes_mi.utils.paths import DATA_DIR, ensure_dir, DROSOPHILA_DIR
import urllib.request
from tqdm.autonotebook import tqdm
import h5py
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import pandas as pd
# Locate the single MATLAB file holding fly behavioral transition states.
mat_file = list(DROSOPHILA_DIR.glob('*.mat'))[0]
mat_file
ds = scipy.io.loadmat(mat_file.as_posix())
ds.keys()
ds['transition_states'][0]
# Flatten the nested MATLAB cell arrays into one state sequence per individual.
states = [np.concatenate(np.concatenate(i)) for i in ds['transition_states']]
states[0]
len(states[0])
state_lens = [len(i) for i in states]
plt.hist(state_lens)
# Prefix each state with the individual's index so state symbols are unique per fly.
bouts_indv = [["{}_{}".format(bi, element) for element in bout] for bi, bout in enumerate(tqdm(states))]
# ### Mutual information
from childes_mi.information_theory import mutual_information as mi
# Compute sequential mutual information at distances 1..1000 on permuted
# per-individual sequences (shuffling destroys temporal structure, so this
# run provides a chance-level baseline).
distances = np.arange(1,1001)
(MI, MI_var), (shuff_MI, shuff_MI_var) = mi.sequential_mutual_information(
    [np.random.permutation(i) for i in bouts_indv], distances=distances, n_jobs=-1
)
# Store estimates and variances in a single-row frame and persist it.
MI_DF = pd.DataFrame(
    [[MI, MI_var, shuff_MI, shuff_MI_var, distances]],
    columns=["MI", "MI_var", "shuff_MI", "shuff_MI_var", "distances"],
)
MI_DF.to_pickle(DATA_DIR / "mi" / "drosophila_mi_1000_shuffled.pickle")
row = MI_DF.iloc[0]
# +
# Quick look: MI above the shuffled baseline with a variance band, log-log axes.
fig, ax = plt.subplots(figsize=(10,5))
MI = row.MI-row.shuff_MI
MI_var = row.MI_var
ax.scatter(distances, MI)
ax.fill_between(distances, MI-MI_var, MI+MI_var, alpha = 0.25, color= 'k')
ax.set_ylim([1e-2, 3])
ax.set_yscale('log')
ax.set_xscale('log')
# -
# Raw vs. shuffled MI curves for comparison.
plt.loglog(row.MI)
plt.loglog(row.shuff_MI)
from matplotlib import gridspec
from childes_mi.utils.paths import DATA_DIR, FIGURE_DIR
from childes_mi.utils.general import flatten,save_fig
# +
# Final figure: decay of mutual information with distance for the
# shuffled Drosophila state sequences (baseline-corrected).
yoff=-.20
ncols = 4
zoom = 5
hr = [1, 0.5, 0.5, 0.5]
nrows = np.ceil(len(MI_DF)/ncols).astype(int)
fig = plt.figure(figsize=(len(MI_DF)*zoom*1.0,np.sum(hr)*zoom))
gs = gridspec.GridSpec(ncols=len(MI_DF), nrows=4, height_ratios=hr)
axi = 0
row = MI_DF.iloc[0]
color = 'k'#LCOL_DICT[row.language]
ax0 = plt.subplot(gs[0,axi])
ax = ax0
# Signal of interest: MI above the shuffled-sequence baseline.
sig = np.array(row.MI-row.shuff_MI)
distances = row.distances
# get signal limits in log space, padded by a tenth of the range
sig_lims = np.log([np.min(sig[sig>0]), np.nanmax(sig)])
sig_lims = [sig_lims[0] - (sig_lims[1]-sig_lims[0])/10,
            sig_lims[1] + (sig_lims[1]-sig_lims[0])/10]
if axi==0:
    ax.set_ylabel('MI (bits)', labelpad=5, fontsize=24)
    ax.yaxis.set_label_coords(yoff,0.5)
# plot real data
ax.scatter(distances, sig, alpha = 1, s=60, color=color)
ax.set_xlabel('Distance (states)', labelpad=5, fontsize=24)
#print(row.language, distances[peak_of_interest])
for ax in [ax0]:
    ax.set_xlim([distances[0], distances[-1]])
    sig_lims[0] = np.log(10e-6)
    ax.set_ylim([1e-6, 0.5])
    ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
    ax.tick_params(which='major', length=10, width =3)
    ax.tick_params(which='minor', length=5, width =2)
    # BUGFIX: the basex/basey keywords were removed in Matplotlib 3.5;
    # both axes now take `base`.
    ax.set_xscale( "log" , base=10)
    ax.set_yscale( "log" , base=10)
    ax.set_xticks([])
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(3)
        ax.spines[axis].set_color('k')
    ax.set_xticks([1,10,100, 1000])
    ax.set_xticklabels(['1','10','100', '1000'])
    ax.set_xlim([0.9, 1000])
if axi !=0:
    # NOTE(review): dead branch (axi is always 0 above); ax1/ax2 are never
    # defined, so this would raise NameError if it ever executed.
    for ax in [ax0,ax1,ax2]:
        ax.yaxis.set_ticklabels([])
gs.update(wspace=0.075, hspace=0.1)
ax0.set_title("Drosophila", fontsize=24)
save_fig(FIGURE_DIR/'drosophila_mi-1000-shuffled')
# -
| notebooks/drosophila2/1.0-load-drosophila-compute-mi-shuffled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="nobzk-U4Dmfi" colab_type="code" colab={}
## <NAME> (<EMAIL>)
## June, 2020
## Using Kaggle's 1.6 million tweets dataset and various online tutorials (including snippets from StackOverflow)
# + id="lx53EX2S6mvC" colab_type="code" colab={}
# %tensorflow_version 2.x # This line is necessary because I'm using a notebook
# + id="u9iarPA-7bv7" colab_type="code" colab={}
# Necessary imports:
import pandas as pd
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.preprocessing.text import Tokenizer
import gensim
import re
import time
import numpy as np
import os
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM
from keras import utils
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from collections import Counter
import itertools
# + id="r8DhzJi27dXG" colab_type="code" colab={}
# Defining constants
W2V_SIZE = 300
SEQUENCE_LENGTH = 300
# + id="MF-OEdprOzCx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="51837ff0-8cfd-4a71-c6b8-d6cc1fd7324f"
# Reading in data
# Using google colab's google drive mounting option:
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/input.csv', encoding ="ISO-8859-1" , names=["target", "ids", "date", "flag", "user", "text"])
df.head()
# + id="ZbtaWP4AMC9T" colab_type="code" colab={}
# Data preprocessing
# + id="PlBB1fjW7kfp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 516} outputId="efcac984-c6fe-442a-aa27-a3d13bc53565"
# Maps the dataset's raw integer target codes onto readable sentiment labels.
int_to_sentiment_map = {0: "NEGATIVE", 2: "NEUTRAL", 4: "POSITIVE"}

def map_sentiments(label):
    """Return the sentiment label for a raw target code (int, float or numeric string)."""
    code = int(float(label))
    return int_to_sentiment_map[code]
# Convert the numeric target codes to text labels, then plot their frequencies.
df.target = df.target.apply(map_sentiments)  # pass the function directly; the lambda wrapper was redundant
target_cnt = Counter(df.target)
plt.figure(figsize=(8, 8))
plt.bar(target_cnt.keys(), target_cnt.values(), width=0.8, color='pink')
plt.title("Dataset labels distribution")  # fixed typo: "distribuition"
# + id="CD-4Mawk7naZ" colab_type="code" colab={}
# Download the NLTK stop-word corpus (no-op if it is already present).
nltk.download('stopwords')
# A set gives O(1) membership tests inside preprocess(); the original list was O(n) per token.
stop_words = set(stopwords.words("english"))
stemmer = SnowballStemmer("english")

# Matches @mentions, http/https URLs, and any run of non-alphanumeric characters.
# Raw string avoids invalid-escape warnings for \S on modern Python. The original
# third alternative "http?:\S" (optional 'p', single char after ':') was a typo and
# is already subsumed by "https?:\S+", so it was dropped.
TEXT_CLEANUP_RE = re.compile(r"@\S+|https?:\S+|[^A-Za-z0-9]+")

def preprocess(text, stem=False):
    """Lower-case *text*, strip mentions/URLs/punctuation and stop words.

    Parameters
    ----------
    text : Any
        Raw tweet text; coerced to ``str`` (handles NaN floats from pandas).
    stem : bool, optional
        When True, reduce each surviving token with the Snowball stemmer.

    Returns
    -------
    str
        Space-joined cleaned tokens.
    """
    text = TEXT_CLEANUP_RE.sub(' ', str(text).lower()).strip()
    tokens = []
    for token in text.split():
        if token not in stop_words:
            tokens.append(stemmer.stem(token) if stem else token)
    return ' '.join(tokens)
# + id="QcnQ7SY3VMrP" colab_type="code" colab={}
# Building models
# + id="gZ3bA9ES7oiI" colab_type="code" colab={}
# Clean every tweet, then split 80/20 into train/test sets.
df.text = df.text.apply(lambda x: preprocess(x))
df_train, df_test = train_test_split(df, test_size=0.2, random_state=42)
# Tokenized training corpus for word2vec: one list of tokens per tweet.
documents = [_text.split() for _text in df_train.text]
# NOTE(review): `size=` and `wv.vocab` are gensim < 4.0 APIs; gensim 4+ renamed
# them to `vector_size=` and `wv.key_to_index` -- confirm the pinned gensim version.
w2v_model = gensim.models.word2vec.Word2Vec(size=W2V_SIZE, window=7, min_count=10, workers=8)
w2v_model.build_vocab(documents)
words = w2v_model.wv.vocab.keys()
vocab_size = len(words)  # word2vec vocabulary size; rebound below to the tokenizer's count
# + id="9PRX7HGF7s3M" colab_type="code" colab={}
# Train the word2vec embeddings on the cleaned training tweets.
w2v_model.train(documents, total_examples=len(documents), epochs=32)
# + id="NTBcqVuv7uGn" colab_type="code" colab={}
# Fit a Keras tokenizer and convert tweets to fixed-length integer sequences.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df_train.text)
vocab_size = len(tokenizer.word_index) + 1  # +1: index 0 is reserved for padding
x_train = pad_sequences(tokenizer.texts_to_sequences(df_train.text), maxlen=SEQUENCE_LENGTH)
x_test = pad_sequences(tokenizer.texts_to_sequences(df_test.text), maxlen=SEQUENCE_LENGTH)
# + id="Go0Mm6KS7vYp" colab_type="code" colab={}
# Label inventory (display only; `labels` itself is not used for training below).
labels = df_train.target.unique().tolist()
labels.append('NEUTRAL')
labels
# Integer-encode the string labels and shape them as column vectors for Keras.
encoder = LabelEncoder()
encoder.fit(df_train.target.tolist())
y_train = encoder.transform(df_train.target.tolist())
y_test = encoder.transform(df_test.target.tolist())
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
# + id="CTDEhXEh7x2X" colab_type="code" colab={}
# Frozen embedding layer whose rows are the trained word2vec vectors; words the
# word2vec model never saw keep all-zero vectors.
embedding_matrix = np.zeros((vocab_size, W2V_SIZE))
for word, i in tokenizer.word_index.items():
    if word in w2v_model.wv:
        embedding_matrix[i] = w2v_model.wv[word]
embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False)
# + id="qJqD8rfn7ziJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="f29b8240-1505-4e41-972d-891d3745f8e2"
# Sequential LSTM classifier: frozen word2vec embeddings -> dropout -> LSTM -> sigmoid.
model = Sequential()
model.add(embedding_layer)
model.add(Dropout(0.5))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))  # single sigmoid unit: sentiment score in [0, 1]
model.summary()
# + id="a1WKCWQP72Jt" colab_type="code" colab={}
model.compile(loss='binary_crossentropy',
              optimizer="adam", # Can replace with any other optimizer
              metrics=['accuracy'])
# + id="t4H_kubW74cN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 360} outputId="405ec883-8e9a-4012-9f38-826b42122554"
history = model.fit(x_train, y_train,
                    batch_size=1024,
                    epochs = 8, # Took too long with higher epochs
                    validation_split = 0.1,
                    verbose = 1,
                    # Fix: the metric is compiled as 'accuracy', so the history key is
                    # 'val_accuracy' (as read by the plotting cell below). The old
                    # monitor='val_acc' never matched a logged metric, so early
                    # stopping silently did nothing.
                    callbacks=[ReduceLROnPlateau(monitor='val_loss', patience=5, cooldown=0),
                               EarlyStopping(monitor='val_accuracy', min_delta=1e-4, patience=5)])
# + id="5N0BCO9UVTV9" colab_type="code" colab={}
# Presenting results
# + id="F-moPFvH77qL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="61096d96-7ca9-430a-d471-697ef83857f7"
# Evaluate on the held-out test set; `score` is [loss, accuracy].
score = model.evaluate(x_test, y_test, batch_size=1024)
print()
print("ACCURACY:",score[1])
print("LOSS:",score[0])
# + id="9F6IlV8178Nn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 545} outputId="2412f211-4924-4dc6-83d5-243680d74545"
# Plot the per-epoch training/validation accuracy and loss curves recorded by fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# + id="3-yz5JIe8AKA" colab_type="code" colab={}
def label_sentiment(score, include_neutral=True):
    """Map a sigmoid score in [0, 1] to a sentiment label.

    With include_neutral=True, scores in the open band (0.4, 0.7) are NEUTRAL;
    otherwise the 0.5 midpoint splits NEGATIVE from POSITIVE.
    """
    if not include_neutral:
        return 'POSITIVE' if score >= 0.5 else 'NEGATIVE'
    if score <= 0.4:
        return 'NEGATIVE'
    if score >= 0.7:
        return 'POSITIVE'
    return 'NEUTRAL'
# + id="8Tex4GON8BTu" colab_type="code" colab={}
def predict(text, include_neutral=True):
    """Run the trained model on one raw string; return label, score and latency."""
    started = time.time()
    padded = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH)
    raw_score = model.predict([padded])[0]
    return {
        "label": label_sentiment(raw_score, include_neutral=include_neutral),
        "score": float(raw_score),
        "elapsed_time": time.time() - started,
    }
# + id="NutxaGMB8Crv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="2e59d473-09f6-4155-fd44-08ae3824928f"
predict("I hate the weather")
# + id="o61Lr-mT8DHC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e6a20ca1-b269-4c8f-8048-5d66389967c1"
# Binary (no-neutral) predictions over the full test split.
# NOTE: y_test is deliberately rebound here from the integer-encoded vector to the
# raw string labels so it matches the string output of label_sentiment().
y_test = list(df_test.target)
scores = model.predict(x_test, verbose=1, batch_size=8000)
# (The dead `y_prediction = []` initializer was removed; the list below replaces it.)
y_prediction = [label_sentiment(score, include_neutral=False) for score in scores]
# + id="ThgtPOdx8Gyp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="32cfe3af-7fd7-4b0e-859e-d75e939470aa"
print(classification_report(y_test, y_prediction))
# + id="VegU1tOe8H6x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="976c346f-90fa-40f9-99ed-3b5dabe71bed"
accuracy_score(y_test, y_prediction)
| lstm_sentiment_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Sakshi1007/30-DaysOfCode-March-2021/blob/main/3_1_EDA%26Univariate.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="q_569ugy6lyY"
# > This notebook was created for code illustration of the `ML.now()` course
#
#
# **`Univariate Linear Regression`**
#
# [Link to Download the dataset](https://www.kaggle.com/hesh97/titanicdataset-traincsv)
#
# **Date Created**: June 8, 2021
#
# **Author**:
# <NAME>
#
# [[Course Repository](https://https://github.com/Sakshi1007/MLnow_2.0)]
#
#
# + [markdown] id="KmW9S6zjaauQ"
# ## Setup
# + colab={"base_uri": "https://localhost:8080/"} id="oX_QibhWsnnX" outputId="8e34d963-0bee-4a85-c136-3a378461446f"
# Mount Google Drive so the notebook can read the Titanic CSV from it (Colab-only).
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="wYuNn17Isvml" outputId="2c7f77a5-aeee-4150-8c4e-399fd9d4d88b"
# cd '/content/drive/MyDrive/ColabNotebooks/MLnow()'
# + colab={"base_uri": "https://localhost:8080/"} id="oR69jwALs-uP" outputId="ab8f6cfa-983c-42b0-b9a2-fec1e5b8dc11"
# ls
# + id="0rYwvbeMtPJg"
import os
# + id="cnGQRbmes_tZ"
import pandas as pd #working with csv or excel files
import numpy as np #working with numbers/ arrays/ tensors
import tensorflow as tf #framework
from tensorflow import keras #API / library
import os #using os commands between the python language
# + id="VV-Ps6dJtX6B"
# Load the Titanic training data. (pd.read_csv already returns a DataFrame,
# so the extra pd.DataFrame() wrapper is a no-op.)
mathData = pd.DataFrame(pd.read_csv('train.csv'))#load the csv file as dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="2_KId_bptr05" outputId="db3586ef-8077-46f9-9b41-03f72454708d"
mathData.head(10) #print the first ten rows of the dataframe
# + colab={"base_uri": "https://localhost:8080/"} id="Uzuy7kS1u6nI" outputId="4f481a7e-ff8c-4066-8f92-8d7407e2a2fc"
mathData.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Jj7DReukvCp3" outputId="ef9f1664-0bf3-4db7-a89c-67812cc32d5c"
mathData.isnull().sum()
# + [markdown] id="bHW7RIq9vPUj"
# **Drop the columns that are not needed for prediction, as well as those with more than 35% null values**
# + colab={"base_uri": "https://localhost:8080/"} id="2Y2qJkiuvJ6C" outputId="eeaa0eb6-51cc-4259-d77c-ab083111188c"
# Columns whose null count exceeds 35% of the row count.
drop_column=mathData.isnull().sum()[mathData.isnull().sum()>(35/100*mathData.shape[0])]
drop_column
# + [markdown] id="JA-r0RKfv6jy"
# **The Cabin column does not help determine whether a passenger survived, so we drop it**
# + colab={"base_uri": "https://localhost:8080/"} id="cOpT55IcwIjT" outputId="89173771-5690-429a-efed-e2dad7e89f35"
drop_column.index
# + colab={"base_uri": "https://localhost:8080/"} id="yWyYNsY0wVji" outputId="ea327044-b9b3-4b6c-8a5f-69c333021dc5"
mathData.drop(drop_column.index,axis=1,inplace=True)
mathData.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="Uz9rjL7dBSGG" outputId="03e15e13-bdd5-431c-d89e-ce7edb7eb2ce"
# Impute remaining numeric nulls (Age) with the column mean.
# NOTE(review): on pandas >= 2.0, DataFrame.mean() on a frame that still has
# object columns raises; this may need numeric_only=True -- confirm the pandas version.
mathData.fillna(mathData.mean(),inplace=True)
mathData.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="jKC9JUqVBSDF" outputId="38cd6eb6-7aed-4a5e-ca9e-d1dffc3f4018"
mathData['Embarked'].describe()
# + id="Ih7Ad-msCATU"
# 'S' is the most frequent port (see describe() above), so use it for missing values.
mathData['Embarked'].fillna('S',inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="WIt8jj3LCP3s" outputId="8d3ae7d0-afbe-44c2-c7e7-907291e29f71"
mathData.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="tZAwtLATEI2M" outputId="93fe9ea2-6e9c-4567-cef8-55e8a20f4bb8"
#unique classes/categories in every column
# variable.column.unique() --> all {column} values from the {variable} dataframe and print out the unique ones.
print(f'Survived: {mathData.Survived.unique()}')
print(f'SEX: {mathData.Sex.unique()}')
print(f'Pclass: {mathData.Pclass.unique()}')
print(f'SibSP: {mathData.SibSp.unique()}')
print(f'Parch: {mathData.Parch.unique()}')
print(f'Embarked: {mathData.Embarked.unique()}')
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="mhJDmq_PCi2u" outputId="a43dcab8-306b-441e-ebcc-bbc4e9133974"
mathData.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="fg3auyuUC308" outputId="678da961-a82f-4355-c037-0acf5eb893b7"
# Combine siblings/spouses and parents/children into one family-size feature.
mathData['FamilySize']=mathData['SibSp']+mathData['Parch']
mathData.drop(['SibSp','Parch'],axis=1,inplace=True)
mathData.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="7i9zpPSdUWHM" outputId="a367836b-488a-4a10-fb6a-245e2945c048"
# Binarize FamilySize: 0 stays 0, any positive size maps to 1.
# NOTE(review): dict.get returns None for sizes not listed (e.g. 8, 9 if present
# in the data) -- presumably the dict covers all observed values; verify.
mathData['FamilySize'] = mathData['FamilySize'].apply({1:1, 0:0, 4:1, 2:1, 6:1, 5:1, 3:1, 7:1, 10:1}.get)
mathData.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="IFxN-58tTRwi" outputId="6b3d60f5-66f2-4c10-8dd1-0833c4417caa"
# NOTE(review): the flag semantics are inverted relative to the name -- Alone == 1
# means the passenger HAS family aboard (FamilySize > 0). Also, since FamilySize
# was already binarized above, Alone duplicates FamilySize exactly.
mathData['Alone']=[0 if mathData['FamilySize'][i]<=0 else 1 for i in mathData.index]
mathData.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="MzJRzr1VDZhh" outputId="dc783b69-a2f1-4e54-ebc0-194f37af1a60"
mathData.groupby(['Alone'])['Survived'].mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="F3OwOChXDc9G" outputId="47ab5628-8e86-4cb9-c010-cbcd82eac73b"
mathData[['Alone','Fare']].corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="4eTIkqTzJy3Q" outputId="f948de33-3864-4623-8e2c-349eaed0c53a"
# Integer-encode the remaining categorical columns.
mathData['Sex'] = mathData['Sex'].apply({'male':1, 'female':0}.get)
mathData['Embarked'] = mathData['Embarked'].apply({'S':1, 'C':2, 'Q':3}.get)
mathData.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="1-FXv97xDtge" outputId="473d2c8a-c8e1-4122-a501-832543cfa403"
mathData.groupby(['Sex'])['Survived'].mean()
# + colab={"base_uri": "https://localhost:8080/"} id="STeUsHahD0in" outputId="9b2c5d91-e5d2-494c-d06e-796c58aa2932"
mathData.groupby(['Embarked'])['Survived'].mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="bVG1B4HmD5zJ" outputId="4cf27e67-c235-408b-8101-9cb0ff00be3f"
mathData.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="g_8U4QXtG2nt" outputId="4f6b0bea-c53e-41c8-c545-ff2be798f0cd"
print(f'Survived: {mathData.Survived.unique()}')
print(f'SEX: {mathData.Sex.unique()}')
print(f'Pclass: {mathData.Pclass.unique()}')
print(f'Alone: {mathData.Alone.unique()}')
print(f'FamilySize: {mathData.FamilySize.unique()}')
print(f'Embarked: {mathData.Embarked.unique()}')
# + [markdown] id="gAIZTqWPwINV"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="mW997avtsetr" outputId="406e199c-9fe3-43cf-b542-b0749ab37130"
#columns to be considered
mathData=mathData[['PassengerId','Survived','Pclass','Name','Sex','Fare','FamilySize','Alone']]
mathData.head(10)
# + id="fqYvemxUXEiq"
# NOTE(review): the output filename is misspelled ("Claen"); downstream readers
# must use the same spelling, or the name should be fixed in both places.
mathData.to_csv('ClaenMathData.csv') #exporting the file
# + colab={"base_uri": "https://localhost:8080/"} id="_Rv8EuE2XVTU" outputId="2efb15b7-9264-4a87-9eff-3aa6eeb086f0"
# ls
# + [markdown] id="g9elFaqSXYH3"
# **VISUALIZATION**
# + colab={"base_uri": "https://localhost:8080/", "height": 153} id="eNFZZvHXXVQY" outputId="e4980880-c88e-470d-9d42-768cb2075c5e"
# Configure a Purple-to-Orange seaborn palette for all plots below.
import seaborn as sns
sns.palplot(sns.color_palette('PuOr')) #Purple to Orange colors
pal = sns.color_palette('PuOr', 6) #print 6 color shades from Purple to Orange
pal.as_hex() #set hex code values for colors
import matplotlib.pyplot as plt
plt.style.use(['seaborn'])
sns_colors = ['#c6690c', '#664697'] #orange Purple hex codes
sns.set_palette(sns_colors) #set the palette as sns_colors
sns.palplot(sns.color_palette(sns_colors)) #plot the color codes
# + colab={"base_uri": "https://localhost:8080/"} id="kW8slP-9Xxh9" outputId="8763ae10-e6b5-46d1-d33d-abbcc0de2167"
print(f'''There are {len(mathData.columns)} columns in the dataframe mathData namely,
{mathData.columns}''') #columns in the dataframe
# + [markdown] id="r2ZxB4rBYCpf"
# A pairplot is a plot where the `x` axis and `y` axis will have all the columns you specify it with
# + colab={"base_uri": "https://localhost:8080/", "height": 567} id="eCKtPd1GaRfo" outputId="96e15d53-9172-4731-df1a-9a814af8d6c3"
# Pairplot of a feature subset, colored by survival.
sns.pairplot(mathData,
             x_vars = [ 'Sex','Alone','Fare', 'Pclass' ],
             y_vars = [ 'Survived','Sex','Alone'],
             diag_kind='kde', hue='Survived'
             )
# + colab={"base_uri": "https://localhost:8080/", "height": 988} id="oSUcHtQNXxup" outputId="11178451-849c-4d69-f927-6573b8070e10"
# Full symmetric pairplot over the five main features.
sns.pairplot(mathData,
             x_vars = [ 'Survived','Sex','Alone','Fare', 'Pclass' ],
             y_vars = [ 'Survived','Sex','Alone','Fare', 'Pclass'],
             diag_kind='kde', hue='Survived'
             )
# + colab={"base_uri": "https://localhost:8080/", "height": 378} id="5L_UwtnbYDV9" outputId="ba3a362e-ae55-4fd3-8f12-338946f7719d"
sns.stripplot(y='Fare', x='Survived', data=mathData)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Ek88ETGiXx0o" outputId="a0a7ef7a-ce5b-40c3-89b8-18c16c0d050a"
fig, axarr = plt.subplots(2,2, figsize=(20,20))
# a 2x2 grid of axes (the bottom-right axes, axarr[1][1], is intentionally left empty)
sns.stripplot(y='Fare', x='Survived', data=mathData, hue=None, ax=axarr[0][0])
sns.stripplot(y='Sex', x='Survived', data=mathData, hue=None, ax=axarr[1][0])
sns.stripplot(y='Alone', x='Survived', data=mathData, hue=None, ax=axarr[0][1])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="x_EGw1qxDOpS" outputId="742a58f1-2a4c-4960-fc21-0f7f0e6ec761"
# Two-column frame used for the univariate regression below.
uniMathData = mathData[['Fare', 'Survived']]
uniMathData.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="gcfCSInuDViP" outputId="71c5ef11-df78-4f9e-b78b-08689098d298"
sns.pairplot(uniMathData,
             x_vars = ['Fare', 'Survived'],
             y_vars = ['Fare', 'Survived'],
             diag_kind='kde'
             )
# + id="mFSxVaeJDVdM"
uniMathData.to_csv('univariate_MathData_2.csv')
# + colab={"base_uri": "https://localhost:8080/"} id="WixTa--pEBNo" outputId="65416a0c-9026-46a4-8679-22c7b37dd448"
# ls
# + [markdown] id="Bi2I84b9Xx4H"
# **DATA SPLIT**
#
# + colab={"base_uri": "https://localhost:8080/"} id="an0q1DK2XVKi" outputId="adb8609e-6ebe-4c66-a20d-bf9a457b3ebc"
# 80/20 split: sample the train rows, drop them from the frame to get the test rows.
trainDataset = uniMathData.sample(frac=0.8, random_state=0)
testDataset = uniMathData.drop(trainDataset.index)
print(trainDataset.head())
print(testDataset.head())
# + colab={"base_uri": "https://localhost:8080/"} id="BZ-Kj5tHpFJs" outputId="0c8a2424-8c97-4834-aba3-5772e4663387"
print(trainDataset.shape)
print(testDataset.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="tvAD3_q7pFUJ" outputId="9be70290-81ec-42cd-8656-646f4e16c6a2"
trainFeatures = trainDataset.copy()
testFeatures = testDataset.copy()
print(trainFeatures.head())
print(testFeatures.head())
# + colab={"base_uri": "https://localhost:8080/"} id="81EyMI6kpFYi" outputId="9ca6666a-c1e0-48d0-f36a-8626cf2d37dc"
# pop() removes 'Fare' from the feature frames and returns it as the label series.
# NOTE(review): this makes Fare the regression TARGET and Survived the sole input
# feature -- i.e. the model predicts fare from survival. Confirm this is intended
# and not the reverse.
trainLabels = trainFeatures.pop('Fare')
testLabels = testFeatures.pop('Fare')
print(trainLabels.head())
print(testLabels.head())
# + colab={"base_uri": "https://localhost:8080/"} id="2NerNgOkpFcY" outputId="add1fd0d-0ecd-4127-8f55-0b45c078c768"
print(trainFeatures.head())
print(testFeatures.head())
# + colab={"base_uri": "https://localhost:8080/"} id="xO8EkHCzpp45" outputId="e836b476-7898-4a5c-c5f0-fd119092430b"
print(testFeatures.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="UGPyjgz5pq8m" outputId="7ab80317-6f9d-4a37-80e2-535dec3e2356"
uniMathData.columns
# + id="ysx02DxXqPes"
# Univariate linear regression: a single Dense unit (weight + bias).
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
# + id="0tEd9s0qqP0n"
model.compile(
    loss='mean_absolute_error',
    optimizer= tf.keras.optimizers.SGD(learning_rate=0.01,momentum=0.1,nesterov=False,name="SGD"),
    metrics=['mae','mse']
)
# + colab={"base_uri": "https://localhost:8080/"} id="3LSqh-O5HANO" outputId="20f324d7-dcf2-44c0-a946-542e722f5cf7"
uniMathData.shape
# + colab={"base_uri": "https://localhost:8080/"} id="p_wWlx-dqQIP" outputId="3b3a2025-b358-464f-a706-ef42ad2acdb2"
# Train for 600 epochs, validating on the held-out test split each epoch.
numEpochs = 600
history = model.fit(x = trainFeatures, y = trainLabels, validation_data = (testFeatures, testLabels), epochs = numEpochs)
# + colab={"base_uri": "https://localhost:8080/"} id="LHpqjmYHqQrM" outputId="52fcfd57-86b8-4def-9ad7-dc997a0ea2bd"
print(history)
# + colab={"base_uri": "https://localhost:8080/"} id="h68ztHzQvlaD" outputId="746c8330-2127-4769-c500-17c0ceac4bea"
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="7T17dFesvmsN" outputId="8f38b6b5-a18d-4e12-d67e-d7229cd40270"
def curvePlots(tempString):
    """Plot the training and validation curves for metric *tempString* from the global `history`."""
    for key in (tempString, f'val_{tempString}'):
        plt.plot(history.history[key])
    plt.xlabel('NumEpochs')
    plt.ylabel(tempString)
    plt.legend([tempString, f'val_{tempString}'])
    plt.show()

# Visualize every tracked metric plus the loss.
for metric_name in ('mse', 'mae', 'loss'):
    curvePlots(metric_name)
# + colab={"base_uri": "https://localhost:8080/"} id="IItRJdO0vnAI" outputId="96c5b53c-2cff-4b9e-ce23-dd9617077c55"
model.predict([1])
# + colab={"base_uri": "https://localhost:8080/"} id="zjFQ0VSNJr1N" outputId="1f8098d6-9b4c-4cf4-ae25-a3b89f63cc77"
print(f'Prediction for input value 1: {model.predict([1])}')
tempListforPreds = [1,2,3,4,5]
print(f'''
input List = {tempListforPreds}
List of Predictions:
{model.predict(tempListforPreds)}
List of Predictions (flattened out):
{model.predict(tempListforPreds).flatten()}
''')
# + colab={"base_uri": "https://localhost:8080/"} id="t6Z1g8RwJ1eI" outputId="979c4bb3-b067-464c-9efd-a29664b3c397"
print(testFeatures)
# + id="QXsJ53pHJ1qI"
testPreds = model.predict(testFeatures).flatten()
# + colab={"base_uri": "https://localhost:8080/"} id="DZVzMGLcJ1z_" outputId="8256a464-95a0-49f3-c684-d0cc26db2c19"
print(len(testPreds))
print(testPreds)
# + colab={"base_uri": "https://localhost:8080/", "height": 361} id="Q7_3XxJjJ1-4" outputId="ad032aef-b99b-42e6-d919-86edfae641c3"
def predPlot(labels, predictions):
    """Scatter model predictions against ground truth on a square plot anchored at the origin."""
    plt.scatter(labels, predictions)
    plt.xlabel('True Value or Labels')
    plt.ylabel('Predictions')
    plt.axis('equal')
    plt.axis('square')
    _, x_hi = plt.xlim()
    plt.xlim([0, x_hi])
    _, y_hi = plt.ylim()
    plt.ylim([0, y_hi])
    plt.show()

# Compare the held-out labels against the model's predictions.
predPlot(testLabels, testPreds)
# + colab={"base_uri": "https://localhost:8080/", "height": 361} id="NFFbEW0CKJ8W" outputId="b2aa8818-0789-4063-e649-98af726a496d"
def errorPlot(preds, labels, counts):
    """Histogram the prediction errors (preds - labels) using *counts* bins."""
    plt.hist(preds - labels, counts)
    plt.xlabel('Error')
    plt.ylabel('Counts')
    plt.show()

# Matches the original call: numEpochs (600) doubles as the bin count here.
errorPlot(testPreds, testLabels, numEpochs)
| 3_1_EDA&Univariate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Improving Variational Quantum Optimization using CVaR
# ## Introduction
#
# This notebook shows how to use the Conditional Value at Risk (CVaR) objective function introduced in [1] within the variational quantum optimization algorithms provided by Qiskit. Particularly, it is shown how to setup the `MinimumEigenOptimizer` using `VQE` accordingly.
# For a given set of shots with corresponding objective values of the considered optimization problem, the CVaR with confidence level $\alpha \in [0, 1]$ is defined as the average of the $\alpha$ best shots.
# Thus, $\alpha = 1$ corresponds to the standard expected value, while $\alpha=0$ corresponds to the minimum of the given shots, and $\alpha \in (0, 1)$ is a tradeoff between focusing on better shots, but still applying some averaging to smoothen the optimization landscape.
#
# ## References
#
# [1] [<NAME> al., *Improving Variational Quantum Optimization using CVaR,* Quantum 4, 256 (2020).](https://quantum-journal.org/papers/q-2020-04-20-256/)
# +
from qiskit.circuit.library import RealAmplitudes
from qiskit.algorithms.optimizers import COBYLA
from qiskit.algorithms import NumPyMinimumEigensolver, VQE
from qiskit.opflow import PauliExpectation, CVaRExpectation
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.converters import LinearEqualityToPenalty
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit import execute, Aer
from qiskit.utils import algorithm_globals
import numpy as np
import matplotlib.pyplot as plt
from docplex.mp.model import Model
# -
# Fix the global seed so simulator shots and optimizer runs are reproducible.
algorithm_globals.random_seed = 123456
# ## Portfolio Optimization
# In the following we define a problem instance for portfolio optimization as introduced in [1].<br>
# prepare problem instance
n = 6 # number of assets
q = 0.5 # risk factor
budget = n // 2 # budget
penalty = 2*n # scaling of penalty term
# +
# instance from [1]
mu = np.array([0.7313, 0.9893, 0.2725, 0.8750, 0.7667, 0.3622])
sigma = np.array([
    [ 0.7312, -0.6233, 0.4689, -0.5452, -0.0082, -0.3809],
    [-0.6233, 2.4732, -0.7538, 2.4659, -0.0733, 0.8945],
    [ 0.4689, -0.7538, 1.1543, -1.4095, 0.0007, -0.4301],
    [-0.5452, 2.4659, -1.4095, 3.5067, 0.2012, 1.0922],
    [-0.0082, -0.0733, 0.0007, 0.2012, 0.6231, 0.1509],
    [-0.3809, 0.8945, -0.4301, 1.0922, 0.1509, 0.8992]
])
# or create random instance
# mu, sigma = portfolio.random_model(n, seed=123) # expected returns and covariance matrix
# +
# create docplex model: maximize expected return minus risk, subject to the budget.
mdl = Model('portfolio_optimization')
x = mdl.binary_var_list('x{}'.format(i) for i in range(n))
objective = mdl.sum([mu[i]*x[i] for i in range(n)])
objective -= q * mdl.sum([sigma[i,j]*x[i]*x[j] for i in range(n) for j in range(n)])
mdl.maximize(objective)
mdl.add_constraint(mdl.sum(x[i] for i in range(n)) == budget)
# cast the docplex model into a Qiskit QuadraticProgram
# NOTE(review): newer qiskit-optimization releases deprecate QuadraticProgram.from_docplex
# in favor of the translators module (from_docplex_mp) -- confirm the pinned version.
qp = QuadraticProgram()
qp.from_docplex(mdl)
# -
# solve classically as reference
opt_result = MinimumEigenOptimizer(NumPyMinimumEigensolver()).solve(qp)
opt_result
# we convert the problem to an unconstrained problem for further analysis,
# otherwise this would not be necessary as the MinimumEigenSolver would do this
# translation automatically
linear2penalty = LinearEqualityToPenalty(penalty=penalty)
qp = linear2penalty.convert(qp)
# `offset` is the constant shift between the Ising energies and the QUBO objective;
# the callback below uses it to map VQE energies back to problem values.
_, offset = qp.to_ising()
# ## Minimum Eigen Optimizer using VQE
# +
# set classical optimizer
maxiter = 100
optimizer = COBYLA(maxiter=maxiter)
# set variational ansatz (one-repetition hardware-efficient circuit on n qubits)
ansatz = RealAmplitudes(n, reps=1)
m = ansatz.num_parameters  # parameter count (informational; not used further below)
# set backend
backend_name = 'qasm_simulator' # use this for QASM simulator
# backend_name = 'statevector_simulator' # use this for statevector simulator
backend = Aer.get_backend(backend_name)
# run variational optimization for different values of alpha
alphas = [1.0, 0.50, 0.25] # confidence levels to be evaluated
# +
# Containers tracking the optimization, keyed by confidence level alpha.
objectives = {alpha: [] for alpha in alphas}  # per-alpha trace of objective values
results = {}  # final minimum-eigensolver result per alpha

def callback(i, params, obj, stddev, alpha):
    """Record one intermediate objective value, translated from the internal
    Ising representation back to the original optimization problem."""
    objectives[alpha].append(-(obj + offset))
# loop over all given alpha values
for alpha in alphas:

    # initialize CVaR_alpha objective
    cvar_exp = CVaRExpectation(alpha, PauliExpectation())
    cvar_exp.compute_variance = lambda x: [0]  # to be fixed in PR #1373

    # initialize VQE using CVaR. The lambda closes over `alpha` late-bound, but
    # solve() runs inside this same iteration, so the binding is still correct;
    # binding it as a default arg (alpha=alpha) would be the defensive form.
    vqe = VQE(expectation=cvar_exp, optimizer=optimizer, ansatz=ansatz, quantum_instance=backend,
              callback=lambda i, params, obj, stddev: callback(i, params, obj, stddev, alpha))

    # initialize optimization algorithm based on CVaR-VQE
    opt_alg = MinimumEigenOptimizer(vqe)

    # solve problem
    results[alpha] = opt_alg.solve(qp)

    # print results
    print('alpha = {}:'.format(alpha))
    print(results[alpha])
    print()
# + tags=["nbsphinx-thumbnail"]
# plot resulting history of objective values, with the classical optimum as a dashed line
plt.figure(figsize=(10, 5))
plt.plot([0, maxiter], [opt_result.fval, opt_result.fval], 'r--', linewidth=2, label='optimum')
for alpha in alphas:
    plt.plot(objectives[alpha], label='alpha = %.2f' % alpha, linewidth=2)
plt.legend(loc='lower right', fontsize=14)
plt.xlim(0, maxiter)
plt.xticks(fontsize=14)
plt.xlabel('iterations', fontsize=14)
plt.yticks(fontsize=14)
plt.ylabel('objective value', fontsize=14)
plt.show()
# +
# evaluate and sort all objective values over the full 2^n bitstring space
objective_values = np.zeros(2**n)
for i in range(2**n):
    # binary string of i, little-endian (bit j of the reversed string is x_j)
    x_bin = ('{0:0%sb}' % n).format(i)
    x = [0 if x_ == '0' else 1 for x_ in reversed(x_bin)]
    objective_values[i] = qp.objective.evaluate(x)
ind = np.argsort(objective_values)
# evaluate final optimal probability for each alpha
# NOTE(review): `probabilities` is not zeroed between alphas, so in the
# qasm branch entries from the previous alpha persist for states the current
# counts do not contain. The printed optimal-state probability is still set
# each iteration (when that state is sampled), but the full array is stale.
probabilities = np.zeros(len(objective_values))
for alpha in alphas:
    if backend_name == 'qasm_simulator':
        # shot counts: convert each sampled bitstring to its state index
        counts = results[alpha].min_eigen_solver_result.eigenstate
        shots = sum(counts.values())
        for key, val in counts.items():
            i = int(key, 2)
            probabilities[i] = val / shots
    else:
        # statevector backend: amplitudes squared give exact probabilities
        probabilities = np.abs(results[alpha].min_eigen_solver_result.eigenstate)**2
    print('optimal probabilitiy (alpha = %.2f): %.4f' % (alpha, probabilities[ind][-1:]))
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
| docs/tutorials/08_cvar_optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# Load the raw F1 CSVs (header=None: files ship without a header row) and build
# slim id->attribute lookup frames for each entity.
# NOTE(review): the positional column indices below (race id at 0, circuit id at 3,
# date at 5, etc.) presumably follow the Ergast data dump layout -- verify against
# the actual files.
races = pd.read_csv("../data/races.csv", header=None)
races_ = pd.DataFrame()
races_["race_id"] = pd.to_numeric(races[0], errors="coerce")
races_["circuit_id"] = pd.to_numeric(races[3], errors="coerce")
races_["date"] = pd.to_datetime(races[5], errors="coerce", format="%Y-%m-%d")
circuits = pd.read_csv("../data/circuits.csv", header=None)
circuits_ = pd.DataFrame()
circuits_["circuit_id"] = pd.to_numeric(circuits[0], errors="coerce")
circuits_["circuit"] = circuits[1]
drivers = pd.read_csv("../data/driver.csv", header=None)
drivers_ = pd.DataFrame()
drivers_["driver_id"] = pd.to_numeric(drivers[0], errors="coerce")
drivers_["driver"] = drivers[1]
drivers_["driver_birth_date"] = pd.to_datetime(drivers[6], errors="coerce", format="%Y-%m-%d")
# birth year as a numeric column (derived from the parsed birth date)
drivers_["driver_yob"] = pd.to_numeric(drivers_.driver_birth_date.dt.strftime("%Y"), errors="coerce")
constructors = pd.read_csv("../data/constructors.csv", header=None)
constructors_ = pd.DataFrame()
constructors_["constructor_id"] = pd.to_numeric(constructors[0], errors="coerce")
constructors_["constructor"] = constructors[1]
results = pd.read_csv("../data/results.csv", header=None)
# +
df = pd.DataFrame()
# Pull every raw column out of `results` BEFORE merging: a merge can drop rows
# (inner join on missing ids) and does not guarantee row order, so the original
# pattern of positionally assigning results[...] columns *after* a merge risked
# misaligned data. The original also overwrote the circuit_id obtained from
# `races` with results column 3 -- which is the constructor id (it is used as
# constructor_id below) -- so circuit names were joined on the wrong key.
df["race_id"] = pd.to_numeric(results[1], errors="coerce")
df["driver_id"] = pd.to_numeric(results[2], errors="coerce")
df["constructor_id"] = pd.to_numeric(results[3], errors="coerce")
df["grid"] = pd.to_numeric(results[5], errors="coerce")
df["position"] = pd.to_numeric(results[6], errors="coerce")

# Resolve the ids to human-readable attributes.
df = df.merge(races_.loc[:, ["race_id", "circuit_id", "date"]], on="race_id")
df = df.merge(circuits_.loc[:, ["circuit_id", "circuit"]], on="circuit_id")
df = df.merge(drivers_.loc[:, ["driver_id", "driver", "driver_birth_date"]], on="driver_id")
df = df.merge(constructors_.loc[:, ["constructor_id", "constructor"]], on="constructor_id")

# Driver age in (approximate) years at the date of the race.
df["driver_age"] = pd.to_numeric((df["date"] - df["driver_birth_date"]).dt.days / 365, errors="coerce")

# A missing finishing position (DNF) is imputed with the worst observed position.
df["position"].fillna(np.max(df.position), inplace=True)

# Cap grid and position at 20. The original `df[df.grid > 20] = 20` assigned 20
# to EVERY column of the matching rows (including the string columns); .loc
# restricts the assignment to the intended column only.
df.loc[df.grid > 20, "grid"] = 20
df.loc[df.position > 20, "position"] = 20

# Keep the same output columns (and order) as before.
df = df[["circuit", "driver", "driver_age", "constructor", "grid", "position"]]
df
# -
df.to_csv("../data/data.csv", index=False)
| notebooks/clean_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# #%%appyter init
from appyter import magic
# Standard appyter bootstrap: the lambda (defaulting to the builtin
# `globals` function) lets appyter lazily resolve this notebook's globals.
magic.init(lambda _=globals: _())
# # General RNA-seq Data and Metadata Viewer
# This notebook template provides a flexible and generalized pipeline for the visualization and analysis of RNA-seq profiles from any source.
#
# ### Analysis Overview
#
# The RNA-seq data first undergoes normalization and dimensionality reduction via Principal Component Analysis (PCA) and Uniform Manifold Approximation and Projection (UMAP). Samples are then clustered based on their most-associated highly-variable genes and metadata features. The number of clusters is determined based on a modified silhouette score which prioritizes having more clusters over having larger clusters. Clusters are visualized using the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package.
#
# The most up-regulated and down-regulated genes are also identified for each cluster. These genes are used to perform enrichment analysis via the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The enrichment results are visualized with the [React-GSEA](https://github.com/MaayanLab/react-GSEA/tree/simplified) package.
#
# Finally, similar and opposite drug/small molecule signatures are queried using the [L1000FWD](https://maayanlab.cloud/L1000FWD/) API.
#
# *Note: If using GTEx data or other healthy tissue sample data for which querying drug signatures is not relevant, the [GTEx Tissue-Specific RNA-seq Analysis Appyter](https://appyters.maayanlab.cloud/#/GTEx_Tissue_RNA_Analysis) may be more useful instead. If using data from The Cancer Genome Atlas (TCGA), please use the more specific [TCGA Patient Cohorts Viewer Appyter](https://appyters.maayanlab.cloud/#/Patient_Cohorts_RNASeq_Viewer).*
# ## 0. Notebook Setup
# Import packages and set appropriate file names.
import os
import numpy as np
import pandas as pd
import requests
import time
from matplotlib import pyplot as plt
import fastcluster
import seaborn as sns
from umap import UMAP
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, silhouette_samples, silhouette_score, plot_roc_curve
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.cm as cm
from maayanlab_bioinformatics.dge import characteristic_direction
from maayanlab_bioinformatics.normalization import log2_normalize, filter_by_var
import qnorm
from scipy.stats import zscore
from maayanlab_bioinformatics.utils import merge
import math
from collections import OrderedDict
import json
from react_scatter_board.jupyter_compat import ScatterBoard
from IPython.display import display, IFrame, Markdown, HTML
from textwrap import wrap
from react_gsea import dataFromResult
from react_gsea.jupyter_compat import ReactGSEA
# +
# Notebook display util functions
def download_button(content, label, filename):
    """Render an HTML button that downloads `content` as `filename`.

    The content is stashed in a hidden <textarea>; the injected JS turns it
    into a Blob and clicks the hidden download link when the button is pressed.
    """
    # Add download button
    outname = filename.split('.')[0]
    # BUGFIX: the anchor's download attribute was hard-coded to "(unknown)",
    # so every download was saved under that name; use the real filename.
    display(HTML('<textarea id="textbox_{outname}" style="display: none;">{content}</textarea> <button style="margin:10px 0;" id="create_{outname}">{label}</button> <a download="{filename}" id="downloadlink_{outname}" style="display: none">Download</a>'.format(**locals())))
    display(HTML('<script type="text/javascript">!function(){{var e=null,t=document.getElementById("create_{outname}"),n=document.getElementById("textbox_{outname}");t.addEventListener("click",function(){{var t,l,c=document.getElementById("downloadlink_{outname}");c.href=(t=n.value,l=new Blob([t],{{type:"text/plain"}}),null!==e&&window.URL.revokeObjectURL(e),e=window.URL.createObjectURL(l)),c.click()}},!1)}}();</script>'.format(**locals())))
def make_clickable(link):
    """Wrap a URL in an HTML anchor that opens in a new browser tab."""
    anchor_template = '<a target="_blank" href="{0}">{0}</a>'
    return anchor_template.format(link)
def figure_header(label, title):
    """Display a bold figure/table header line above a plot or table."""
    markup = f"<div style='font-size:1rem; padding:1rem 0;'><b>{label}</b>: {title}</div>"
    display(HTML(markup))
def figure_legend(label, title, content=""):
    """Display a figure/table legend: bold label, italic title, optional detail text."""
    markup = f"<div style='font-size:1rem;'><b>{label}</b>: <i>{title}</i>. {content} </div>"
    display(HTML(markup))
# +
# %%appyter hide
{% do SectionField(
name = 'DATASETS',
title = 'Dataset Selection',
subtitle = 'Upload datasets for visualization and analysis. Both file uploads are required to run the analysis.',
img = 'rna.png'
) %}
{% do SectionField(
name = 'PARAMETERS',
title = 'Analysis Parameters',
subtitle = 'Set parameters for analysis.',
img = 'analysis.png'
) %}
{% do SectionField(
name = "ENRICHR_LIBS",
title = "Enrichment Analysis Library Selection",
subtitle = "Choose Enrichr geneset libraries for comparison against input genes. Multiple libraries can be selected from each section. If nothing is selected, default libraries will be used.",
img = 'enrichr-logo.png'
) %}
{% set data_filename = FileField(
name='data_filename',
label='RNA-seq data file',
description='TSV or CSV file containing RNA-seq read counts. Index should be Entrez gene symbols, and columns should be individual samples.',
default='',
examples = {
'GSE159266 Data': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
},
section='DATASETS'
) %}
{% set metadata_filename = FileField(
name='metadata_filename',
label='Sample metadata file',
description='TSV or CSV file containing sample metadata. Index should be sample IDs corresponding to columns of RNA-seq data file, and columns should be different sample attributes.',
default='',
examples = {
'GSE159266 Metadata': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
},
section='DATASETS'
) %}
{% set n_neighbors = IntField(
name = 'n_neighbors',
label = 'Number of neighbors to use for UMAP calculations',
description = 'Smaller values preserve local structure, while larger values emphasize global structure.',
default = 40,
min = 2,
max = 200,
section = 'PARAMETERS'
) %}
{% set min_cluster_dist = FloatField(
name = 'min_cluster_dist',
label = 'Minimum distance between UMAP-projected points',
description = 'Determines how close/distant points belonging to different clusters are from each other.',
default = 0.3,
min = 0.1,
max = 1,
section = 'PARAMETERS'
) %}
{% set top_n_genes = IntField(
name = 'top_n_genes',
label = 'Number of genes to analyze',
description = 'Number of top variable genes to use in analysis.',
default = 2500,
section = 'PARAMETERS'
) %}
{% set top_n_genes_enrichment = IntField(
name = 'top_n_genes_enrichment',
label = 'Number of genes to use for enrichment analysis',
description = 'Number of top variable genes to use for enrichment analysis; must be less than top_n_genes.',
default = 250,
section = 'PARAMETERS'
) %}
{% set do_l1000 = BoolField(
name = 'do_l1000',
label = 'Query L1000 signatures?',
description = 'Option to query opposite and similar L1000 signatures to input data using L1000FWD.',
default = True,
section = 'PARAMETERS'
) %}
{% set use_weighted_score = BoolField(
name = 'use_weighted_score',
label = 'Use weighted silhouette score?',
description = 'Option to prioritize more clusters over fewer.',
default = True,
section = 'PARAMETERS'
) %}
{% set transcription_libraries = MultiChoiceField(
name = 'transcription_libraries',
label = 'Transcription Libraries',
description = 'Default library is ENCODE_TF_ChIP-seq_2015',
choices = [
'ARCHS4_TFs_Coexp',
'ChEA_2016',
'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',
'ENCODE_Histone_Modifications_2015',
'ENCODE_TF_ChIP-seq_2015',
'Epigenomics_Roadmap_HM_ChIP-seq',
'Enrichr_Submissions_TF-Gene_Coocurrence',
'Genome_Browser_PWMs',
'lncHUB_lncRNA_Co-Expression',
'miRTarBase_2017',
'TargetScan_microRNA_2017',
'TF-LOF_Expression_from_GEO',
'TF_Perturbations_Followed_by_Expression',
'Transcription_Factor_PPIs',
'TRANSFAC_and_JASPAR_PWMs',
'TRRUST_Transcription_Factors_2019'
],
default = [
'ENCODE_TF_ChIP-seq_2015'
],
section = 'ENRICHR_LIBS'
) %}
{% set pathway_libraries = MultiChoiceField(
name = "pathway_libraries",
label = "Pathway Libraries",
description = 'Default libraries are KEGG_2019_Human and KEGG_2019_Mouse',
choices = [
'ARCHS4_Kinases_Coexp',
'BioCarta_2016',
'BioPlanet_2019',
'BioPlex_2017',
'CORUM',
'Elsevier_Pathway_Collection',
'HMS_LINCS_KinomeScan',
'HumanCyc_2016',
'huMAP',
'KEA_2015',
'KEGG_2019_Human',
'KEGG_2019_Mouse',
'Kinase_Perturbations_from_GEO_down',
'Kinase_Perturbations_from_GEO_up',
'L1000_Kinase_and_GPCR_Perturbations_down',
'L1000_Kinase_and_GPCR_Perturbations_up',
'NCI-Nature_2016',
'NURSA_Human_Endogenous_Complexome',
],
default = [
'KEGG_2019_Human',
'KEGG_2019_Mouse'
],
section = 'ENRICHR_LIBS'
) %}
{% set ontology_libraries = MultiChoiceField(
name = 'ontology_libraries',
label = 'Ontology Libraries',
description = 'Default libraries are GO_Biological_Process_2018 and MGI_Mammalian_Phenotype_Level_4_2019',
choices = [
'GO_Biological_Process_2018',
'GO_Cellular_Component_2018',
'GO_Molecular_Function_2018',
'Human_Phenotype_Ontology',
'Jensen_COMPARTMENTS',
'Jensen_DISEASES',
'Jensen_TISSUES',
'MGI_Mammalian_Phenotype_Level_4_2019'
],
default = [
'GO_Biological_Process_2018',
'MGI_Mammalian_Phenotype_Level_4_2019'],
section = 'ENRICHR_LIBS'
) %}
{% set disease_drug_libraries = MultiChoiceField(
name = 'disease_drug_libraries',
label = 'Disease Drug Libraries',
description = 'Default library is GWAS_Catalog_2019',
choices = [
'Achilles_fitness_decrease',
'Achilles_fitness_increase',
'ARCHS4_IDG_Coexp',
'ClinVar_2019',
'dbGaP',
'DepMap_WG_CRISPR_Screens_Broad_CellLines_2019',
'DepMap_WG_CRISPR_Screens_Sanger_CellLines_2019',
'DisGeNET',
'DrugMatrix',
'DSigDB',
'GeneSigDB',
'GWAS_Catalog_2019',
'LINCS_L1000_Chem_Pert_down',
'LINCS_L1000_Chem_Pert_up',
'LINCS_L1000_Ligand_Perturbations_down',
'LINCS_L1000_Ligand_Perturbations_up',
'MSigDB_Computational',
'MSigDB_Oncogenic_Signatures',
'Old_CMAP_down',
'Old_CMAP_up',
'OMIM_Disease',
'OMIM_Expanded',
'PheWeb_2019',
'Rare_Diseases_AutoRIF_ARCHS4_Predictions',
'Rare_Diseases_AutoRIF_Gene_Lists',
'Rare_Diseases_GeneRIF_ARCHS4_Predictions',
'Rare_Diseases_GeneRIF_Gene_Lists',
'UK_Biobank_GWAS_v1',
'Virus_Perturbations_from_GEO_down',
'Virus_Perturbations_from_GEO_up',
'VirusMINT'
],
default = [
'GWAS_Catalog_2019'
],
section = 'ENRICHR_LIBS'
) %}
{% set cell_type_libraries = MultiChoiceField(
name = 'cell_type_libraries',
label = 'Cell Type Libraries',
description = 'No libraries selected by default',
choices = [
'Allen_Brain_Atlas_down',
'Allen_Brain_Atlas_up',
'ARCHS4_Cell-lines',
'ARCHS4_Tissues',
'Cancer_Cell_Line_Encyclopedia',
'CCLE_Proteomics_2020',
'ESCAPE',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_down',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_up',
'Human_Gene_Atlas',
'Mouse_Gene_Atlas',
'NCI-60_Cancer_Cell_Lines',
'ProteomicsDB_2020',
'Tissue_Protein_Expression_from_Human_Proteome_Map'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set misc_libraries = MultiChoiceField(
name = 'misc_libraries',
label = 'Miscellaneous Libraries',
description = 'No libraries selected by default',
choices = [
'Chromosome_Location_hg19',
'Data_Acquisition_Method_Most_Popular_Genes',
'Enrichr_Libraries_Most_Popular_Genes',
'Genes_Associated_with_NIH_Grants',
'HMDB_Metabolites',
'HomoloGene',
'InterPro_Domains_2019',
'NIH_Funded_PIs_2017_AutoRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_GeneRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_Human_AutoRIF',
'NIH_Funded_PIs_2017_Human_GeneRIF',
'Pfam_Domains_2019',
'Pfam_InterPro_Domains',
'Table_Mining_of_CRISPR_Studies'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set legacy_libraries = MultiChoiceField(
name = 'legacy_libraries',
label = 'Legacy Libraries',
description = 'No libraries selected by default',
choices = [
'BioCarta_2013',
'BioCarta_2015',
'ChEA_2013',
'ChEA_2015',
'Chromosome_Location',
'Disease_Signatures_from_GEO_down_2014',
'Disease_Signatures_from_GEO_up_2014',
'Drug_Perturbations_from_GEO_2014',
'ENCODE_Histone_Modifications_2013',
'ENCODE_TF_ChIP-seq_2014',
'GO_Biological_Process_2013',
'GO_Biological_Process_2015',
'GO_Biological_Process_2017',
'GO_Biological_Process_2017b',
'GO_Cellular_Component_2013',
'GO_Cellular_Component_2015',
'GO_Cellular_Component_2017',
'GO_Cellular_Component_2017b',
'GO_Molecular_Function_2013',
'GO_Molecular_Function_2015',
'GO_Molecular_Function_2017',
'GO_Molecular_Function_2017b',
'HumanCyc_2015',
'KEA_2013',
'KEGG_2013',
'KEGG_2015',
'KEGG_2016',
'MGI_Mammalian_Phenotype_2013',
'MGI_Mammalian_Phenotype_2017',
'MGI_Mammalian_Phenotype_Level_3',
'MGI_Mammalian_Phenotype_Level_4',
'NCI-Nature_2015',
'Panther_2015',
'Reactome_2013',
'Reactome_2015',
'TargetScan_microRNA',
'Tissue_Protein_Expression_from_ProteomicsDB',
'WikiPathways_2013',
'WikiPathways_2015',
'WikiPathways_2016'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set crowd_libraries = MultiChoiceField(
name = 'crowd_libraries',
label = 'Crowd Libraries',
description = 'No libraries selected by default',
choices = [
'Aging_Perturbations_from_GEO_down',
'Aging_Perturbations_from_GEO_up',
'Disease_Perturbations_from_GEO_down',
'Disease_Perturbations_from_GEO_up',
'Drug_Perturbations_from_GEO_down',
'Drug_Perturbations_from_GEO_up',
'Gene_Perturbations_from_GEO_down',
'Gene_Perturbations_from_GEO_up',
'Ligand_Perturbations_from_GEO_down',
'Ligand_Perturbations_from_GEO_up',
'MCF7_Perturbations_from_GEO_down',
'MCF7_Perturbations_from_GEO_up',
'Microbe_Perturbations_from_GEO_down',
'Microbe_Perturbations_from_GEO_up',
'RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO',
'SysMyo_Muscle_Gene_Sets'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
# +
# %%appyter code_exec
data_filename = {{ data_filename }}
metadata_filename = {{ metadata_filename }}
n_neighbors = {{ n_neighbors }}
min_cluster_dist = {{ min_cluster_dist }}
top_n_genes = {{ top_n_genes }}
top_n_genes_enrichment = {{ top_n_genes_enrichment }}
do_l1000 = {{ do_l1000 }}
use_weighted_score = {{ use_weighted_score }}
transcription_libraries = {{ transcription_libraries }}
pathway_libraries = {{ pathway_libraries }}
ontology_libraries = {{ ontology_libraries }}
disease_drug_libraries = {{ disease_drug_libraries }}
cell_type_libraries = {{ cell_type_libraries }}
misc_libraries = {{ misc_libraries }}
legacy_libraries = {{ legacy_libraries }}
crowd_libraries = {{ crowd_libraries }}
# -
# Fall back to the bundled GSE159266 example files when either upload is missing.
if '' in (data_filename, metadata_filename):
    print("One or both user-uploaded files missing, use example GEO data.")
    data_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
    metadata_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
print(data_filename + '\n' + metadata_filename)
# ## 1. Import Datasets
# Load RNA-seq gene read counts and associated sample metadata into dataframes.
def load_dataframe(file):
    """Load a TSV/TXT or CSV table into a DataFrame, indexed by its first column.

    Parameters
    ----------
    file : str
        Path or URL; the extension selects the delimiter.

    Returns
    -------
    pandas.DataFrame with string index and string column labels.

    Raises
    ------
    ValueError
        If the file extension is not .tsv, .txt, or .csv.
    """
    ext = os.path.splitext(file)[1]
    if ext in {'.tsv', '.txt'}:
        df = pd.read_csv(file, sep='\t', index_col=0)
    elif ext == '.csv':
        df = pd.read_csv(file, index_col=0)
    else:
        # ValueError is more precise than a bare Exception, and remains
        # catchable by any existing `except Exception` callers.
        raise ValueError('Unrecognized file format', ext)
    # Fix any type coersion on identifiers (gene symbols / sample IDs must be str)
    df.index = df.index.astype(str)
    df.columns = df.columns.astype(str)
    return df
# +
data_index = "symbol"
metadata_index = "sample_id"
print(f"Loading user-uploaded data...")
# Load both tables; sorting the index gives deterministic row order downstream.
df_data = load_dataframe(data_filename).sort_index()
df_metadata = load_dataframe(metadata_filename).sort_index()
# Reuse the constants defined above rather than repeating the literals.
df_data.index.name = data_index
df_metadata.index.name = metadata_index
print("Data loaded!")
# -
# ### 1a. RNA-seq Data
figure_legend("Table 1", "RNA-seq data", "The RNA-seq data contains a row per gene and a column per sample.")
display(df_data.head())
# ### 1b. Metadata
figure_legend("Table 2","Metadata", "The column indices are sample metadata attributes, while the row indices are sample IDs corresponding to the columns of the RNA-seq data.")
display(df_metadata.head())
# Listed below are all the metadata categories with >1 unique value and at least 1 repeated value. These categories will be used to cluster samples later in the analysis.
# Keep metadata columns that are useful for clustering: more than one unique
# value (not constant) and fewer unique values than rows (at least one value
# repeats, so samples can actually be grouped).
# NOTE: len(unique()) counts NaN as a distinct value, matching the original loop.
features = [
    col
    for col in df_metadata.columns
    if 1 < len(df_metadata[col].unique()) < len(df_metadata[col])
]
# features = df_metadata.columns.values
features
df_metadata = df_metadata[features]
# ## 2. Normalize Data
# Given the highly variable nature of expression level between different genes, it is necessary to normalize the read counts before proceeding.
# +
# Per-sample library statistics: expressed-gene count and (log) read depth.
total_reads = df_data.sum()
df_library_size = pd.DataFrame({
    'n_expressed_genes': df_data[df_data > 0].count(),
    'log_n_reads': np.log2(total_reads),
    'n_reads': total_reads,
}).sort_values('n_reads', ascending=False)
df_library_size.index.name = "sample_id"
# -
figure_legend("Table 3","Library size", "By default, the first five entries are shown. A gene read is counted toward n_reads for a single sample if its value is greater than 0.")
display(df_library_size.head())
# Below, the library distribution is shown.
sns.displot(df_library_size["log_n_reads"])
plt.show()
figure_legend("Figure 1","Library size distribution")
# Two versions of the dataset are normalized: one with just the `top_n_genes` most variable genes and one with all genes. The former will be used to compute clusters after dimensionality reduction, and the latter to compute the characteristic direction (up or down) of each gene in a cluster.
# +
# # copy full dataset for computing characteristic directions later
df_data_norm_all_genes = df_data.copy()
# compute log2(x+1)
df_data_norm = log2_normalize(df_data, offset=1)
df_data_norm_all_genes = log2_normalize(df_data_norm_all_genes, offset=1)
# quantile normalize each sample
df_data_norm = qnorm.quantile_normalize(df_data_norm, axis=1)
df_data_norm_all_genes = qnorm.quantile_normalize(df_data_norm_all_genes, axis=1)
# +
# take top_n_genes most variable rows
# BUGFIX: this previously filtered the raw `df_data`, silently discarding the
# log2/quantile normalization computed just above; filter the normalized frame.
df_data_norm = filter_by_var(df_data_norm, top_n = top_n_genes)
# convert to zscores
df_data_norm = pd.DataFrame(zscore(df_data_norm, axis=1), index=df_data_norm.index, columns=df_data_norm.columns)
df_data_norm_all_genes = pd.DataFrame(zscore(df_data_norm_all_genes, axis=1), index=df_data_norm_all_genes.index, columns=df_data_norm_all_genes.columns)
# -
figure_legend("Table 4","Normalized RNA-seq data for most variable genes", "Counts are filtered for the most variable genes. The resulting dataset is log transformed and normalized, then converted to z-scores.")
display(df_data_norm.head())
# +
# plot the first gene distribution
gene1 = df_data_norm.index.values[0]
gene1_plt = sns.displot(df_data_norm.iloc[0, :])
gene1_plt.set(xlabel='Z-score', ylabel='Number of samples', title=f'Z-score distribution of {gene1}')
plt.show()
# FIX: the user-facing legend previously misspelled "distribution".
figure_legend("Figure 2", f"Sample gene expression distribution for {gene1}", f"In this dataset, {gene1} is the most variably expressed across all samples.")
# plot a single RNA-seq sample distribution
sample_plt = sns.displot(df_data_norm.iloc[:, 0])
sample_plt.set(xlabel='Z-score', ylabel='Number of genes', title=f'Z-score distribution of all genes in {df_data_norm.columns[0]}')
plt.show()
# NOTE(review): figure numbering jumps from 2 to 4 here (no Figure 3 anywhere);
# renumbering would still leave a gap, so the label is left unchanged.
figure_legend("Figure 4", f"RNA-seq profile distribution for sample {df_data_norm.columns[0]}")
# -
# ## 3. Reduce Data Dimensionality
# Now that the data has been loaded and normalized, the most variable genes across the dataset can be identified and visualized with hierarchical clustering and heatmaps. Dimensionality reduction facilitates the differentiation of the data in a more efficient manner by reducing the number of attributes to be considered.
# ### 3a. Principal Component Analysis
# PCA is used first to reduce the dimensionality of the dataset, while still maintaining most of the variability. In PCA, a large number of dimensions -- in this case, the different sample metadata attributes -- can be reduced to a few new dimensions that capture the relevant information of the original attributes.
#
# First, all data values are scaled to (0, 1).
# Scale every value into (0, 1) prior to PCA.
pca_scaler = MinMaxScaler()
scaled_cols = df_data_norm.columns.tolist()
df_data_norm[scaled_cols] = pca_scaler.fit_transform(df_data_norm[scaled_cols])
df_data_norm.head()
# Instead of manually setting the number of PCA components, the number of components is chosen automatically so that the retained components explain at least 95% of the variance.
# +
# PCA: keep as many components as needed to explain >= 95% of the variance.
data_norm_pca = PCA(
    random_state=42,
    n_components=0.95
)
data_norm_pca.fit(df_data_norm.values.T)
df_data_norm_pca = pd.DataFrame(
    data_norm_pca.transform(df_data_norm.values.T),
    index=df_data_norm.T.index
)
df_data_norm_pca.columns = [
    f'PCA-{c}' # ({r:.3f})'
    for c, r in zip(df_data_norm_pca.columns, data_norm_pca.explained_variance_ratio_)
]
df_data_norm_pca.index.name = "sample_id"
# -
# FIX: the user-facing legend previously misspelled "Principal".
figure_legend("Table 5","Principal components of RNA-seq data", "The top principal components are the projections of each datapoint onto the axes along which there is the most variation in the dataset.")
display(df_data_norm_pca.head())
# The data can now be plotted with the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package. The points can be shaped and colored by various metadata categories, with the default being the first two metadata columns. They can also be individually searched by sample_id.
# +
# combine metadata with RNA-seq data; note this will fail if sample_ids are
# not exactly matched between both datasets
pca_data = merge(
    df_data_norm_pca[["PCA-0", "PCA-1"]],
    df_library_size,
    df_metadata
)
# rename for plotting purposes and expose the index as a searchable column
pca_data = pca_data.rename(columns={'PCA-0': 'x', 'PCA-1': 'y'})
pca_data['sample_id'] = pca_data.index
# rescale both plotting axes onto the fixed (-10, 10) square
lo, hi = -10, 10
for axis in ('x', 'y'):
    a_min, a_max = pca_data[axis].min(), pca_data[axis].max()
    pca_data[axis] = (pca_data[axis] - a_min) / (a_max - a_min) * (hi - lo) + lo
# +
pca_scatter_data = pca_data.to_dict('records')
# default encodings: first two informative metadata columns
color_def, shape_def = features[0], features[1]
board_kwargs = dict(
    id='pca-scatterboard',
    is3d=False,
    data=pca_scatter_data,
    shapeKey=shape_def,
    colorKey=color_def,
    labelKeys=['sample_id'],
    searchKeys=['sample_id'],
    width=600,
    height=600,
)
ScatterBoard(**board_kwargs)
# -
# **Figure 5:** *First two PCA components of RNA-seq data.* Points are labeled by Sample ID and can be color- or shape-coded by any of the metadata categories using the dropdown menus. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.
# ### 3b. Uniform Manifold Approximation and Projection
#
# The dimensionality of the dataset is further reduced by performing UMAP on the PCA components. Parameters such as `n_neighbors` and `min_dist` are set according to defaults used by the Seurat R Package for single cell genomics analysis.
# +
# Project the PCA components down to two dimensions with UMAP.
data_norm_umap = UMAP(
    random_state=42,
    n_components=2,
    # NOTE(review): this caps n_neighbors by the number of PCA *components*
    # (shape[1]); UMAP's n_neighbors is normally bounded by the number of
    # *samples* (shape[0]) — confirm shape[1] is intentional here.
    n_neighbors=n_neighbors if df_data_norm_pca.shape[1] > n_neighbors else df_data_norm_pca.shape[1]-1,
    metric='cosine',
    min_dist=min_cluster_dist,
)
n_pca_components = df_data_norm_pca.shape[1]
data_norm_umap.fit(df_data_norm_pca.iloc[:, :n_pca_components].values)
# keep only first two UMAP components
df_data_norm_umap = pd.DataFrame(
    data_norm_umap.transform(df_data_norm_pca.iloc[:, :n_pca_components].values),
    columns=['UMAP-0', 'UMAP-1'],
    index=df_data_norm_pca.index,
)
# +
# project data onto first two UMAP components for visualization
umap_data = merge(
    df_data_norm_umap[["UMAP-0", "UMAP-1"]],
    df_library_size,
    df_metadata
)
umap_data = umap_data.rename(columns={'UMAP-0': 'x', 'UMAP-1': 'y'})
umap_data['sample_id'] = umap_data.index
# rescale both plotting axes onto the fixed (-10, 10) square
lo, hi = -10, 10
for axis in ('x', 'y'):
    a_min, a_max = umap_data[axis].min(), umap_data[axis].max()
    umap_data[axis] = (umap_data[axis] - a_min) / (a_max - a_min) * (hi - lo) + lo
# +
umap_scatter_data = umap_data.to_dict('records')
# default encodings: first two informative metadata columns
color_def, shape_def = features[0], features[1]
board_kwargs = dict(
    id='umap-scatterboard',
    is3d=False,
    data=umap_scatter_data,
    shapeKey=shape_def,
    colorKey=color_def,
    labelKeys=['sample_id'],
    searchKeys=['sample_id'],
    width=600,
    height=600,
)
ScatterBoard(**board_kwargs)
# -
# **Figure 6:** *First two UMAP components of RNA-seq data.* The datapoints are again labeled by sample ID, and can be color- or shape-coded by any of the metadata categories using the dropdown menu. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.
# ## 4. Clustering
# The first two UMAP components will be used from here on out.
#
# To compute sample clusters, the k-means method is used. The total number of clusters must be determined, by first testing a range for the number of total clusters, and then computing silhouette scores, which are a measure of how similar an entry is to its own cluster versus other clusters. The goal is to maximize both the similarity within a cluster and the differences between clusters, so the ideal number of clusters is that which produces the highest silhouette score.
# +
silhouette_scores = []
# set max clusters to half the number of samples
max_clusters = math.ceil(df_data_norm_umap.shape[0]/2)
# function for weighting results with more clusters
def calc_weighted_score(sil_score, k, max_k):
    """Blend silhouette quality (70%) with a reward for more clusters (30%)."""
    return sil_score*0.7 + k/max_k*0.3
cluster_range = range(2, max_clusters)
# Hoisted out of the loop: the UMAP coordinates do not change per k.
X = df_data_norm_umap.values
for n in cluster_range:
    # apply k-means clustering for each possible k
    clusterer = KMeans(n_clusters=n, random_state=42).fit(X)
    y_pred = clusterer.predict(X)
    # The silhouette_score gives the average value for all the samples
    silhouette_avg = silhouette_score(X, y_pred, metric='cosine')
    # Compute a weighted score that rewards higher numbers of clusters
    weighted_score = calc_weighted_score(silhouette_avg, n, max_clusters)
    silhouette_scores.append({
        "N Clusters": n,
        "Silhouette Score": silhouette_avg,
        "Weighted Score": weighted_score
    })
    # Labeling the clusters (kept for parity; holds the centers of the last fit)
    centers = clusterer.cluster_centers_
# +
# use unweighted or weighted scores
threshold = 0.3
score_key = "Weighted Score" if use_weighted_score else "Silhouette Score"
points = {s["N Clusters"]: s[score_key] for s in silhouette_scores}
# +
silhouette_scores = pd.DataFrame(silhouette_scores)
# BUGFIX: previously `.head().sort_values(...)` sorted only the FIRST five
# rows, so the displayed "top" scores could miss the true best; sort the
# whole table first, then take the head.
if use_weighted_score:
    figure_legend("Table 6", "Weighted silhouette scores by number of clusters", "Values are sorted by the highest weighted score.")
    display(silhouette_scores.sort_values(["Weighted Score"], ascending=False).head().reset_index().drop(columns=['index']))
else:
    figure_legend("Table 6", "Silhouette scores by number of clusters", "Values are sorted by the highest silhouette score.")
    display(silhouette_scores.sort_values(["Silhouette Score"], ascending=False).head().reset_index().drop(columns=['index']))
# +
# Best row (highest score) under each scoring scheme.
best_unweighted = silhouette_scores.sort_values('Silhouette Score').iloc[-1].to_dict()
best_weighted = silhouette_scores.sort_values('Weighted Score').iloc[-1].to_dict()
best = {"Silhouette Score": best_unweighted, "Weighted Score": best_weighted}
chosen_metric = 'Weighted Score' if use_weighted_score else 'Silhouette Score'
k = int(best[chosen_metric]['N Clusters'])
print(f"Ideal k: {k} clusters")
# +
# plot the weighted and unweighted scores as a function of # of clusters
colors = {"Silhouette Score": "#7C88FB", "Weighted Score": "#00CC96"}
for score_type, line_color in colors.items():
    plt.plot(silhouette_scores['N Clusters'], silhouette_scores[score_type], label=score_type, color=line_color)
    best_n = best[score_type]['N Clusters']
    plt.scatter([best_n], [best[score_type][score_type]], label=f"Best {score_type}: {int(best_n)} clusters", color=line_color)
plt.axvline(k, label = f"Ideal k: {k} clusters", color ="#EF553B", alpha=0.8,dashes=(3,3))
plt.legend()
plt.ylabel('Score')
plt.xlabel('Number of Clusters')
plt.show()
figure_legend("Figure 7", "Cluster size selection", "The dotted line indicates the value of the 'ideal' <i>k</i> as chosen by the selected scoring method. This value will be used in subsequent clustering.")
# +
# Compute the k-means dataframe using the ideal number of clusters
km = KMeans(n_clusters=k, random_state=42)
km_clusters = km.fit_predict(df_data_norm_umap.values)
# store cluster assignments as strings (categorical labels for plotting)
cluster_labels = [str(c) for c in km_clusters]
df_data_norm_km = pd.DataFrame(
    {'Cluster': cluster_labels},
    index=df_data_norm_umap.index,
)
print(f'Computed {len(df_data_norm_km["Cluster"].unique())} clusters')
# +
# Map each cluster to a color for later plots (colors repeat past 10 clusters)
clusters = df_data_norm_km["Cluster"].unique()
plotly_colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
# enumerate() replaces the original manual counter loop
cluster_colors = {
    c: plotly_colors[i % len(plotly_colors)]
    for i, c in enumerate(clusters)
}
def cluster_heading(cluster):
    """Render a full-width colored banner announcing *cluster* in the notebook."""
    # '98' appended to the hex color adds alpha (semi-transparent background)
    banner = f'''
    <center>
    <div style='background-color:{cluster_colors[cluster] + '98'};
    width:100%;height:3rem;display:flex;align-items:center;
    justify-content:center;color:white;font-size:2rem'>
    <center>Cluster {cluster}</center>
    </div>
    </center>'''
    display(HTML(banner))
# -
# ## 5. Differential Expression
#
# Next, the differential expression for each cluster is computed. The <a href="http://www.maayanlab.net/CD/">Characteristic Direction method</a> is used for identifying differentially expressed genes among the different clusters.
# +
# Get differential expression for each cluster, using the dataset containing all genes
# (each cluster's samples are contrasted against all samples outside the cluster).
diff_expr = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
    diff_expr[f"Cluster {cluster} CD"] = characteristic_direction(
        # expression outside of this cluster
        df_data_norm_all_genes.loc[:, df_data_norm_all_genes.columns.difference(samples.index)],
        # expression in this cluster
        df_data_norm_all_genes.loc[:, samples.index],
    )['CD-coefficient']
df_diff_expr = pd.DataFrame(diff_expr)
# NOTE(review): rows are ordered by Cluster 0's coefficients only; the other
# clusters' columns are not individually sorted here.
df_diff_expr = df_diff_expr.sort_values(by='Cluster 0 CD',ascending=True)
df_diff_expr['Symbol'] = df_diff_expr.index.values
# -
figure_legend("Table 7", "Differential expression of genes by cluster", "By default, the top 5 most differentially expressed genes are shown, along with the corresponding characteristic directions for each cluster.")
display(df_diff_expr.head())
# Logistic regression is performed for each metadata category to determine which categories most accurately predict cluster designations for each data point. ROC curves are also plotted for categories with the top two highest AUC scores.
# +
# LR
# For every (cluster, metadata category) pair, fit a one-vs-rest logistic
# regression predicting membership in that cluster from the single category,
# record its AUC, and keep the fitted model for later ROC plotting.
aucs = {}
rocs = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
    aucs[cluster] = {}
    rocs[cluster] = []
    for feature in features:
        lr = LogisticRegression()
        X = df_metadata.copy()
        X = X[feature]
        X = pd.merge(X, df_data_norm_km, left_index = True, right_index = True)
        # drop NAs, and move on if dataset is empty
        # FIX: replace() is not in-place — the result was previously discarded,
        # so "not reported" rows survived dropna(). Assign the result and map
        # to NaN so dropna() actually removes them.
        X = X.replace("not reported", np.nan)
        X = X.dropna()
        if (X.shape[0] == 0): continue
        cluster_data = X["Cluster"]
        X = X.drop(columns= ["Cluster"])
        # one-hot encode non numerical data
        # FIX: use positional .iloc[0] instead of label-based [0] — after the
        # merge/dropna the index holds sample IDs, so label 0 may not exist.
        if (not isinstance(X[feature].iloc[0], (int, float, complex))):
            X = pd.get_dummies(X[feature], prefix=feature)
        y_true = (cluster_data == cluster)
        if (len(y_true.unique()) < 2): # if there is only one class in the dataset
            print(f"Not enough data to classify cluster {cluster} based on category {feature}")
            aucs[cluster][feature] = np.nan
            continue
        lr.fit(X, y_true)
        y_score = lr.predict_proba(X)[:, 1]
        auc_score = roc_auc_score(y_true, y_score)
        aucs[cluster][feature] = auc_score
        # save the ROCs
        rocs[cluster].append({"auc":auc_score, "lr": lr, "X": X, "y_true":y_true, "title": f'Predictions of cluster {cluster} by category {feature}'})
df_cluster_aucs = pd.DataFrame(aucs)
df_cluster_aucs.index.name="Category"
# sort features by avg AUC across all clusters
df_cluster_aucs["avg"] = [ np.mean(df_cluster_aucs.T[f]) for f in df_cluster_aucs.index.values ]
df_cluster_aucs = df_cluster_aucs.sort_values(by = "avg", ascending=False)
df_cluster_aucs = df_cluster_aucs.drop(columns = "avg")
cols = [('Cluster', col) for col in df_cluster_aucs.columns ]
df_cluster_aucs.columns = pd.MultiIndex.from_tuples(cols)
# -
figure_legend("Table 8", "Average AUC scores for top predictive metadata categories, by cluster", "Scores for the top 5 metadata categories for predicting clusters, as determined by the average AUC score across all clusters, are shown. Higher AUC scores correspond to better classifiers for distinguishing whether or not a datapoint belongs to a certain cluster.")
display(df_cluster_aucs.head(5))
# +
# plot top 2 ROCs for each cluster
plt.rc('font', size=16)
for cluster, plots in rocs.items():
    # best-AUC first
    plots.sort(reverse=True, key=lambda x: x["auc"])
    cluster_heading(cluster)
    if len(plots) < 2:
        best_rocs = plots
    else:
        best_rocs = plots[:2]
    num_plots = len(best_rocs)
    figure,axes = plt.subplots(int(math.ceil(num_plots / 2.)), 2, figsize=(15,(len(best_rocs)*3.5)))
    axes = axes.flatten()
    # remove unused axes beyond the number of plots actually drawn
    for i in range(len(axes)):
        if i >= len(best_rocs):
            axes[i].remove()
        else:
            plot = best_rocs[i]
            # NOTE(review): sklearn's plot_roc_curve was removed in 1.2;
            # RocCurveDisplay.from_estimator is the replacement — TODO confirm
            # the sklearn version pinned for this appyter.
            fig = plot_roc_curve(plot["lr"], plot["X"], plot["y_true"], ax=axes[i])
            axes[i].set_title('\n'.join(wrap(plot["title"], 40)))
    figure.tight_layout(pad=2)
    plt.show()
figure_legend("Figure 8", "ROCs for top cluster-predicting metadata categories")
plt.rcdefaults()
# -
# ## 6. Identify Up- and Down-Regulated Genes
# Find the most up- and down-regulated genes for each cluster for visualization in heatmap, and for enrichment analysis.
# Merge data
# NOTE(review): merging on "sample_id" assumes both frames' indexes carry that
# level name — TODO confirm the index name set when the data was loaded.
df_clustered_umap = pd.merge(left=df_data_norm_km, left_on="sample_id", right=df_data_norm_umap, right_on="sample_id")
# Get top Genes for each cluster
top_genes = {}
all_top_genes = []
heatmap_top_n = 100
for cluster in df_clustered_umap['Cluster'].unique():
    cd_col = f'Cluster {cluster} CD'
    if cd_col in df_diff_expr.columns:
        # top up genes
        up_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=False).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
        # top down genes
        dn_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=True).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
    else:
        raise Exception('Cant find col for cluster')
    # only the first heatmap_top_n of each direction feed the heatmap
    all_top_genes.append(up_genes[:heatmap_top_n])
    all_top_genes.append(dn_genes[:heatmap_top_n])
    # save results
    top_genes[cluster] = (up_genes, dn_genes)
all_top_genes = [item for sublist in all_top_genes for item in sublist] # flatten all genes to one list
# Data corresponding to only the top 100 up- and down-regulated genes for each cluster is selected for visualization in a heatmap, with log-transformation and normalization proceeding as before.
# +
df_data_norm_heatmap_f = df_data.loc[all_top_genes, :]
# compute log normalization of matrix
df_data_norm_heatmap_f = log2_normalize(df_data_norm_heatmap_f, offset=1)
# convert to zscores
# df_data_norm_heatmap_f = zscore_normalize(df_data_norm_heatmap_f)
df_data_norm_heatmap_f = pd.DataFrame(zscore(df_data_norm_heatmap_f, axis=1), index=df_data_norm_heatmap_f.index, columns=df_data_norm_heatmap_f.columns)
# Plot heatmap
cases = df_data_norm_heatmap_f.columns
heatmap_cluster_colors = [ cluster_colors[x] for x in df_clustered_umap.loc[cases, :]["Cluster"] ]
# -
sns.clustermap(df_data_norm_heatmap_f,xticklabels=False,col_colors = heatmap_cluster_colors); plt.show()
figure_legend("Figure 9", "Heatmap of most differentially expressed genes", "Color coding along the top edge indicates cluster designation of the corresponding sample.")
# ## 7. Enrichment Analysis with Enrichr
#
# Perform enrichment analysis for each cluster by querying the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The background libraries are the default libraries from Enrichr. A link is provided to download the results.
# +
# enrichment analysis libraries
enrichr_libraries = OrderedDict([
    ('Diseases/Drugs', disease_drug_libraries),
    ('Ontologies', ontology_libraries),
    ('Cell Type', cell_type_libraries),
    ('Pathways', pathway_libraries),
    ('Transcription', transcription_libraries),
    ('Legacy', legacy_libraries),
    ('Crowd', crowd_libraries)
])
# Fall back to Enrichr's default libraries when the user selected none at all.
if not any(len(libs) > 0 for libs in enrichr_libraries.values()):
    enrichr_libraries = OrderedDict([
        ('Diseases/Drugs', ['GWAS_Catalog_2019']),
        ('Ontologies', ['GO_Biological_Process_2018', 'MGI_Mammalian_Phenotype_Level_4_2019']),
        ('Pathways', ['KEGG_2019_Human', 'KEGG_2019_Mouse']),
        ('Transcription', ['ENCODE_TF_ChIP-seq_2015'])
    ])
# +
# Util functions
def enrichr_link_from_genes(genes, description='', enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
    ''' Functional access to Enrichr API
    '''
    # rate-limit before hitting the API
    time.sleep(1)
    payload = {
        'list': (None, '\n'.join(genes)),
        'description': (None, description),
    }
    resp = requests.post(enrichr_link + '/addList', files=payload)
    if resp.status_code != 200:
        raise Exception('Enrichr failed with status {}: {}'.format(
            resp.status_code,
            resp.text,
        ))
    # wait a tinybit before returning link (backoff)
    time.sleep(3)
    result = resp.json()
    # reuse the parsed body instead of calling resp.json() a second time
    return dict(result, link=enrichr_link + '/enrich?dataset=' + result['shortId'])
def enrichr_get_top_results(userListId, bg, enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
    """Fetch enrichment results for an uploaded list against one background library."""
    time.sleep(1)
    url = enrichr_link + '/enrich?userListId={}&backgroundType={}'.format(userListId, bg)
    resp = requests.get(url)
    if resp.status_code != 200:
        raise Exception('Enrichr failed with status {}: {}'.format(
            resp.status_code,
            resp.text,
        ))
    # brief backoff so repeated calls do not hammer the API
    time.sleep(3)
    # the two trailing unnamed columns mirror Enrichr's raw row layout
    columns = ['rank', 'term', 'pvalue', 'zscore', 'combinedscore', 'overlapping_genes', 'adjusted_pvalue', '', '']
    return pd.DataFrame(resp.json()[bg], columns=columns)
# +
# Get Enrichr links for each cluster
# Submit each cluster's top up/down gene lists to Enrichr; failures are
# tolerated per-direction so one bad request doesn't stop the loop.
enrichr_links = {}
for cluster, (up_genes, dn_genes) in top_genes.items():
    up_link, dn_link = None, None
    if up_genes.size:
        try:
            up_link = enrichr_link_from_genes(up_genes, f'cluster {cluster} up')
        except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
            print(f'Enrichr failed for cluster {cluster} up genes')
    else:
        print(f'cluster {cluster} up: empty')
    if dn_genes.size:
        try:
            dn_link = enrichr_link_from_genes(dn_genes, f'cluster {cluster} down')
        except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
            print(f'Enrichr failed for cluster {cluster} down genes')
    else:
        print(f'cluster {cluster} down: empty')
    enrichr_links[cluster] = (up_link, dn_link)
# Grab top results for each cluster
# For every cluster / direction / library, keep only the five lowest p-values.
all_enrichr_results = []
for cluster, (up_link, dn_link) in enrichr_links.items():
    for link_type, link in [('up', up_link), ('down', dn_link)]:
        if link is None:
            continue
        for category, libraries in enrichr_libraries.items():
            for library in libraries:
                try:
                    results = enrichr_get_top_results(link['userListId'], library).sort_values('pvalue').iloc[:5]
                    results['library'] = library
                    results['category'] = category
                    results['direction'] = link_type
                    results['cluster'] = cluster
                    all_enrichr_results.append(results)
                except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
                    print('{}: {} {} {} cluster {} failed, continuing'.format(link, library, category, link_type, cluster))
df_enrichr_results = pd.concat(all_enrichr_results).reset_index()
# -
# Enrichment results are organized in table format below (Table 10). A description of each of the scores reported can be found on the [Enrichr help page](https://maayanlab.cloud/Enrichr/help#background&q=4). The full table can also be downloaded as a CSV.
# Display a dataframe with clickable enrichr links
figure_legend("Table 10","Enrichment analysis results from Enrichr", "Results are grouped by expression direction (up/down) and gene set library. Within groups, results are sorted by lowest p-value (highest rank) first.")
# drop the unnamed placeholder columns before rendering the HTML table
df_clickable = df_enrichr_results.copy().drop(columns=[''])
table_html = df_clickable.to_html(escape=False)
display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
download_button(df_enrichr_results.to_csv(), 'Download Enrichr results', 'Enrichr results.csv')
# To view the full Enrichr results for the directional gene sets of each cluster, please use the links below. On the webpage that opens, you can explore and visualize how the selected input gene set compares against each background libraries available in Enrichr.
# NOTE(review): if Enrichr failed for a cluster above, the stored link is None
# and the ['link'] subscript below raises TypeError — consider guarding.
for cluster in enrichr_links.keys():
    up_link = enrichr_links[cluster][0]['link']
    dn_link = enrichr_links[cluster][1]['link']
    display(Markdown(f"[Full Enrichr results for Cluster {cluster} up-regulated genes]({up_link})"))
    display(Markdown(f"[Full Enrichr results for Cluster {cluster} down-regulated genes]({dn_link})"))
# ### 7a. Enrichr Result Barplots
# Horizontal barplots are used to display the top Enrichr results for each cluster, by library and characteristic expression direction.
# +
# Make horizontal barplots to visualize top Enrichr results
# One figure per cluster; one subplot per (library, direction) pair.
clusters = df_enrichr_results["cluster"].unique()
for cluster in clusters:
    cluster_results = df_enrichr_results.loc[df_enrichr_results["cluster"] == cluster, :]
    libraries = cluster_results["library"].unique()
    num_rows = len(libraries)
    count = 1 # keep track of which subplot we're on
    fig = plt.figure(figsize=(15,5*num_rows))
    for library in cluster_results["library"].unique():
        library_results = cluster_results.loc[cluster_results["library"] == library, :]
        for direction in library_results["direction"].unique():
            # FIX: build the boolean mask from library_results (not
            # cluster_results) so it aligns with the frame being indexed;
            # an unaligned mask is rejected by newer pandas.
            plot_results = library_results.loc[library_results["direction"] == direction, :]
            plot_results = plot_results.sort_values("pvalue",ascending=False)
            labels = plot_results["term"]
            labels = [ '\n'.join(wrap(l, 20)) for l in labels ]
            values = plot_results["pvalue"]
            values = -np.log(values)
            # normalize values to map from 0-1 -> color, with opacity also based on normalized pvalue
            cmap = plt.get_cmap('cool')
            vmin = min(values)
            vrange = max(values) - vmin
            # FIX: guard against a zero range (all p-values equal), which
            # previously divided by zero.
            norm_values = [ 0.3 + (x - vmin)/vrange*0.7 if vrange else 1.0 for x in values ]
            colors = [ [*cmap(val)[:3], 0.4 + 0.2*val] for val in norm_values]
            # plot result
            ax = fig.add_subplot(num_rows,2,count)
            ax.barh(labels,values,color = colors)
            ax.set_title(f'{library}\n{direction} genes')
            ax.set_xlabel(' – log(pvalue)')
            count += 1
    cluster_heading(cluster)
    fig.tight_layout(pad=3, w_pad=2, h_pad=6)
    plt.show()
    display(HTML("<br><br>"))
figure_legend("Figure 11", "Enrichment results by cluster", "Bar plots indicate the negative log of the p-value for the specified term. One plot is presented per cluster, per gene-set library, per expression direction (up/down).")
# -
# ### 7b. GSEA Running Sum Visualizations
# While the above barplots display the top enriched terms for each cluster in each direction, individual enriched terms can also be compared to the tissue data using a random walk [GSEA running sum visualization](https://github.com/MaayanLab/react-GSEA/tree/master).
#
# First, each of the four default background libraries from Enrichr can be queried and saved as a JSON object which maps terms to their complete genesets.
# Download each referenced library's full term -> geneset mapping from Enrichr.
libresp = {}
for lib in df_enrichr_results['library'].unique():
    resp = requests.get('https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=json&libraryName=' + lib)
    if resp.status_code == 200:
        libresp[lib] = resp.json()[lib]['terms']
    else:
        print(f"Failed to access library {lib}, continuing")
# For each cluster, the most enriched term for that cluster from each library can then be compared against the most up-regulated genes in the cluster. Below, GSEA plots display the overlap between the genes from each cluster and their most enriched genesets.
#
# The x-axis of each plot is a list of genes in the tissue sample or uploaded data, ranked by expression level. The y-axis measures the running enrichment score: the score increases when a gene is in both the input gene set and the library gene set, and decreases otherwise. The peak of the plot gives the enrichment score for the library gene set when compared to the input.
# iterate through each cluster
for cluster in clusters:
    cluster_heading(cluster)
    # iterate through each library for each cluster
    for lib in libresp.keys():
        # obtain the most enriched library term for the cluster in the up direction
        up_df = df_enrichr_results[
            df_enrichr_results.direction.eq('up')
            & df_enrichr_results.cluster.eq(cluster)
            & df_enrichr_results.library.eq(lib)]
        try:
            top_up_term = up_df[up_df['rank'] == 1]['term'].iloc[0]
        # FIX: was a bare except; only "no rank-1 row / missing column" is the
        # expected failure here, and a bare except also traps KeyboardInterrupt.
        except (IndexError, KeyError):
            display(HTML(f"<div style='font-size:1rem;'>Results unavailable for cluster {cluster} {lib}</div>"))
            continue
        # store the geneset for the most enriched term
        top_up_set = libresp[lib][top_up_term].keys()
        display(HTML(f"<div style='font-size:1.25rem;'><b>{top_up_term}</b> <br></div><div style='font-size:1rem;'>Most enriched term from {lib} for samples in Cluster {cluster}</div>"))
        # display the GSEA plot comparing the enriched genes and the top up-regulated cluster genes
        # (input ranked list = top half of genes by this cluster's CD coefficient)
        display(ReactGSEA(
            data=dataFromResult(
                input_set=top_up_set,
                ranked_entities=df_diff_expr['Cluster ' + cluster + ' CD']
                .sort_values(ascending=False)
                .iloc[:math.ceil((df_diff_expr.shape[0]/2))]
                .index.tolist()
            )
        ))
# ## 8. L1000 Analysis
#
# If selected during user input, the most up- and down-regulated genes from each cluster, as identified from above, can be input into the [L1000FWD](https://amp.pharm.mssm.edu/L1000FWD/) API, which will then return the most similar and opposite gene expression signatures from the L1000 database. Links are provided to the interactive L1000FWD projections for each set of results.
# +
def l1000fwd_results_from_genes(up_genes, down_genes, description='', l100fwd_link='http://amp.pharm.mssm.edu/L1000FWD/'):
    ''' Functional access to L1000FWD API
    '''
    # rate-limit before hitting the API
    time.sleep(1)
    resp = requests.post(l100fwd_link + 'sig_search', json={
        'up_genes': list(up_genes),
        'down_genes': list(down_genes),
    })
    if resp.status_code != 200:
        raise Exception('L1000FWD failed with status {}: {}'.format(
            resp.status_code,
            resp.text,
        ))
    results = {}
    if 'KeyError' in resp.text:
        # the API reports unrecognized gene identifiers as a KeyError in the body
        results['result_url'] = None
    else:
        # Get ID and URL
        rid = resp.json()['result_id']
        results['result_url'] = 'https://amp.pharm.mssm.edu/l1000fwd/vanilla/result/' + rid
        results['result_id'] = rid
        # Get Top
        results['signatures'] = requests.get(l100fwd_link + 'result/topn/' + rid).json()
    # wait a tinybit before returning link (backoff)
    time.sleep(1)
    return results
def l1000fwd_sig_link(sig_id):
    """Return the DMOA detail-page URL for an L1000FWD signature id."""
    base = 'https://amp.pharm.mssm.edu/dmoa/sig/'
    return base + sig_id
def get_signature_by_id(sig_id):
    """Fetch the full signature record for *sig_id* from the L1000FWD API."""
    resp = requests.get("http://amp.pharm.mssm.edu/L1000FWD/sig/" + sig_id)
    if resp.status_code != 200:
        raise Exception('L1000FWD signature query failed with status {}: {}'.format(
            resp.status_code,
            resp.text,
        ))
    return resp.json()
# -
def display_l1000fwd_results(l1000fwd_results, plot_counter,cluster_id,nr_drugs=7, height=300):
    """Render L1000FWD search results for one cluster: a banner, a link to the
    interactive results page, per-direction signature tables, and CSV
    download buttons.

    l1000fwd_results: dict returned by l1000fwd_results_from_genes.
    plot_counter, nr_drugs, height: unused; kept for interface compatibility.
    cluster_id: cluster label used in headings and download file names.
    """
    # Check if results
    if l1000fwd_results['result_url']:
        # Display cluster title
        display(HTML('<br><br>'))
        # FIX: was cluster_heading(cluster), which read a leftover global
        # `cluster` from an earlier notebook loop instead of this parameter.
        cluster_heading(cluster_id)
        # Display IFrame link
        display(HTML(f"<a href='{l1000fwd_results['result_url']}' target='_blank'> View L1000FWD for cluster {cluster_id}</a>"))
        # Display tables
        for direction, signature_list in l1000fwd_results['signatures'].items():
            # Fix dataframe
            rename_dict = {'sig_id': 'Signature ID', 'pvals': 'P-value', 'qvals': 'FDR', 'zscores': 'Z-score', 'combined_scores': 'Combined Score'}
            signature_dataframe = pd.DataFrame(signature_list)[list(rename_dict.keys())].rename(columns=rename_dict).sort_values('P-value').rename_axis('Rank')
            signature_dataframe.index = [x + 1 for x in range(len(signature_dataframe.index))]
            signature_csv = signature_dataframe.to_csv(sep=",")
            # Display table
            # FIX: spell out the full option name — 'max.colwidth' only worked
            # through pandas' regex-based option-name matching.
            pd.set_option('display.max_colwidth', None)
            signature_dataframe['Signature ID'] = [f'<a href={l1000fwd_sig_link(x)} target="_blank">{x}</a>' for x in signature_dataframe['Signature ID']]
            table_html = signature_dataframe.to_html(escape=False, classes='w-100')
            display(HTML(f'<h3>{direction.title()} Signatures: </h3>'))
            display(HTML(f'<style>.w-100{{width: 100% !important;}}</style><div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
            # Display download button
            download_button(signature_csv, f'Download {direction.title()} Signatures', f'Cluster {cluster_id} L1000FWD {direction.title()} signatures.csv')
        # Link
        display(HTML('Full results available at: <a href="{result_url}" target="_blank">{result_url}</a>.'.format(**l1000fwd_results)))
    # Display error
    else:
        display(Markdown('### No results were found.\n This is likely due to the fact that the gene identifiers were not recognized by L1000FWD. Please note that L1000FWD currently only supports HGNC gene symbols (https://www.genenames.org/). If your dataset uses other gene identifier systems, such as Ensembl IDs or Entrez IDs, consider converting them to HGNC. Automated gene identifier conversion is currently under development.'))
if do_l1000:
    plot_counter = 0
    all_l1000fwd_results = {}
    figure_header("Figure 14", "Most similar and opposite L1000 signatures, by cluster")
    # Query L1000FWD once per cluster with that cluster's top up/down genes.
    for cluster, (up_genes, dn_genes) in top_genes.items():
        try:
            results = l1000fwd_results_from_genes(up_genes,dn_genes)
            all_l1000fwd_results[cluster] = results
            display_l1000fwd_results(results,plot_counter,cluster)
            plot_counter += 1
        except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
            print(f'L1000FWD API failed for cluster {cluster}, continuing')
    figure_legend("Figure 14", "Most similar and opposite L1000 signatures, by cluster", "Results are sorted by smallest p-value.")
# In the case of disease state RNA-seq data, the reverse signatures provide a potential set of drugs that could perturb the cells/tissues towards a "healthy" direction. These may present novel treatments for patients whose samples belong to a certain cluster.
if do_l1000:
    df_drugs = pd.read_csv("https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv")
    # Load top drug suggestions for each cluster based on the drugs used to produce the top five opposite signatures
    drug_results = {}
    for cluster, results in all_l1000fwd_results.items():
        opposite_sigs = results["signatures"]["opposite"][:5]
        sig_ids = [sig["sig_id"] for sig in opposite_sigs]
        pert_ids = []
        for sig_id in sig_ids:
            try:
                signature = get_signature_by_id(sig_id)
                pert_ids.append(signature["pert_id"])
            except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
                print(f'L1000FWD API failed for cluster {cluster}, sig_id {sig_id}, continuing')
        df_cluster_drugs = df_drugs[df_drugs["pert_id"].isin(pert_ids)].copy()
        df_cluster_drugs["cluster"] = cluster
        # move the cluster column to the front for readability
        df_cluster_drugs = df_cluster_drugs[["cluster", *list(filter(lambda x: x!="cluster", df_cluster_drugs.columns))]]
        drug_results[cluster] = df_cluster_drugs
    df_all_drugs = pd.concat(drug_results).reset_index()
if do_l1000:
    figure_legend("Table 13", "Drugs used to produce most opposite signatures for each cluster", "Each entry is a drug/chemical used for perturbation in the L1000 experiments that resulted in a gene-expression signature most opposite to that of the specified cluster.")
    df_clickable = df_all_drugs.copy()
    df_clickable['pert_url'] = df_clickable["pert_url"].apply(make_clickable)
    table_html = df_clickable.to_html(escape=False)
    display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
    download_button(df_all_drugs.to_csv(), 'Download L1000FWD drug results', 'L1000FWD drugs.csv')
| appyters/RNAseq_Data_Metadata_Analysis/RNAseq_Data_Metadata_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi']= 120
import seaborn as sns
sns.set(style="whitegrid")
# +
#hyperparameters
input_size = 7    # number of input features per time step
output_size = 6   # number of predicted pollutant series
seq_len = 3       # input window length (time steps)
learning_rate = 0.0080409
num_epochs = 100
dropout = 0
seed = 7123515    # fixed seed for the reproducible train/val/test split
load_model = True # True: load a saved checkpoint instead of training
# -
#data importing
df = pd.read_excel('ALL VAR cleaned.xlsx')
df.Date = pd.to_datetime(df.Date, format = '%m/%d/%Y')
df = df.set_index('Date')
df.head()
# +
#data scaling
# z-score every column against its own mean/std (computed on the full dataset)
df_scaled = (df - df.mean())/ df.std()
df_scaled.head()
#storing mean and std (used later to un-scale predictions)
df_np_mean = df.mean().to_numpy()
df_np_std = df.std().to_numpy()
#dropping date column
df_scaled.reset_index(inplace = True)
# FIX: drop('Date', 1) passed `axis` positionally, which was deprecated in
# pandas 1.0 and removed in pandas 2.0; use the explicit columns= keyword.
df_scaled = df_scaled.drop(columns = 'Date')
df_scaled.head()
# +
def split_sequences(sequences, n_steps):
    """Window a 2-D array into model inputs and one-step-ahead targets.

    For each window start, the input is the next n_steps rows of columns 0:7
    (features) and the target is columns 7:13 of the row one step past the
    window's end. Returns (inputs, targets) as lists of array slices.
    """
    inputs, targets = [], []
    for start in range(len(sequences)):
        stop = start + n_steps
        # stop once the target row (stop + 1) would fall outside the data
        if stop + 1 >= len(sequences):
            break
        inputs.append(sequences[start:stop, 0:7])
        targets.append(sequences[stop + 1, 7:13])
    return inputs, targets
array = df_scaled.iloc[:, :].values
print ('shape of the datset array: {}'.format(array.shape))
# Window the scaled data: X holds seq_len rows of features per sample, y the
# target row one step beyond each window (see split_sequences).
X, y = split_sequences(array, seq_len)
X_array = np.array(X, dtype = np.float32)
y_array = np.array(y)
print ('sequenced X array shape: {}'.format(X_array.shape))
print ('sequenced y array shape: {}'.format(y_array.shape))
# +
#output mask preparation
# The mask marks which target observations are valid (assumes 1 = observed,
# 0 = missing — TODO confirm against the spreadsheet's encoding).
df_mask = pd.read_excel('COMBINED CAMS MASK.xlsx')
#print(df_mask.head())
mask_array = df_mask.iloc[:, :].values
#print(mask_array.shape)
#sequencing
def mask_sequence(sequence, n_steps):
    """Collect the one-step-ahead mask rows aligned with split_sequences targets.

    For each window of length n_steps, returns columns 0:6 of the row one
    step past the window's end, mirroring the target indexing used for y.
    """
    targets = []
    for start in range(len(sequence)):
        stop = start + n_steps
        # stop once the mask row (stop + 1) would fall outside the data
        if stop + 1 >= len(sequence):
            break
        targets.append(sequence[stop + 1, 0:6])
    return targets
mask_list = mask_sequence(mask_array, seq_len)
mask_array = np.array(mask_list)
print(mask_array.shape)
# +
#making dataset and subsets
class AirMeteoroDataset(Dataset):
    """Dataset of (input window, target row, target mask, index) tuples.

    NOTE(review): reads the module-level X_array / y_array / mask_array
    globals instead of taking them as constructor arguments.
    """
    def __init__(self):
        self.len = X_array.shape[0]
        self.X_data = torch.from_numpy(X_array)
        self.y_data = torch.from_numpy(y_array)
        self.y_mask = torch.from_numpy(mask_array)
    def __getitem__(self, index):
        # index is returned too so downstream code can map predictions back
        # to their position in the original time series
        return self.X_data[index], self.y_data[index], self.y_mask[index], index
    def __len__(self):
        return self.len
dataset = AirMeteoroDataset()
#dataset_random_split
# 70% train, ~15% validation, ~15% test; reproducible via the fixed seed
train_size = round(len(X_array) * 0.7)
val_size = round((len(X_array) - train_size)/2)
test_size = len(X_array) - train_size - val_size
train_set, val_set, test_set = torch.utils.data.random_split(dataset,[train_size, val_size, test_size], generator = torch.Generator().manual_seed(seed))
# +
#merge train_val
# Recombine the train and validation subsets into one dataset for final training.
train_val_set = torch.utils.data.ConcatDataset([train_set, val_set])
# -
class Model(nn.Module):
    """Multiple linear regression over a flattened input window.

    Expects inputs of shape (batch, input_size * seq_len) and produces
    (batch, output_size); dropout is applied to the inputs before the
    linear layer.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 seq_len,
                 dropout):
        super(Model, self).__init__()
        self.input_size = input_size * seq_len  # flattened window width
        self.output_size = output_size
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(self.input_size, self.output_size)

    def forward(self, X):
        dropped = self.dropout(X)
        return self.linear(dropped)
class modsmoothl1(nn.SmoothL1Loss):
    """Smooth-L1 loss averaged only over masked (valid) target entries.

    The prediction is zeroed wherever mask == 0, and the element-wise loss
    sum is divided by the number of valid entries (sum of the mask).
    """

    def __init__(self, size_average=None, reduce=None, reduction = 'none'):
        super(modsmoothl1, self).__init__(size_average, reduce, reduction)

    def forward(self, observed, predicted, mask):
        masked_pred = predicted * mask
        elementwise = F.smooth_l1_loss(observed, masked_pred, reduction=self.reduction)
        return torch.sum(elementwise) / torch.sum(mask)
# NOTE(review): this class is an exact duplicate of the modsmoothl1 defined
# immediately above (likely a copy-paste artifact); it harmlessly rebinds the
# same name and can be deleted.
class modsmoothl1(nn.SmoothL1Loss):
    def __init__(self, size_average=None, reduce=None, reduction = 'none'):
        super(modsmoothl1, self).__init__(size_average, reduce, reduction)
    def forward(self, observed, predicted, mask):
        # zero out predictions at masked (invalid) positions
        predicted_masked = mask*predicted
        loss = F.smooth_l1_loss(observed, predicted_masked, reduction=self.reduction)
        # average over the number of valid entries only
        avg_loss = torch.sum(loss)/torch.sum(mask)
        return avg_loss
# +
# Instantiate the model (moved to GPU), the masked loss, and the optimizer.
forecast_model = Model(input_size,
                       output_size,
                       seq_len,
                       dropout).cuda().float()
criterion = modsmoothl1()
optimizer = torch.optim.RMSprop(forecast_model.parameters(), lr = learning_rate)
def save_checkpoint(state, filename="MLR Final model.pth.tar"):
    """Persist a checkpoint dict (model/optimizer state) to *filename*."""
    print("Saving checkpoint...")
    torch.save(state, filename)
def load_checkpoint(checkpoint, model, optimizer):
    """Restore model and optimizer state from a checkpoint dict in place."""
    print("Loading checkpoint...")
    # the two restores are independent; order is not significant
    optimizer.load_state_dict(checkpoint["optimizer"])
    model.load_state_dict(checkpoint["state_dict"])
# -
#model train
if load_model == False:
    all_train_loss = []
    all_val_loss = []
    total_iter = 0
    # NOTE(review): train_loader and batch_size are not defined in this file —
    # presumably created in another cell; confirm before running.
    for epoch in range(num_epochs):
        forecast_model.train()
        epoch_total_loss = 0.0
        for i, (X_data, y_data, y_mask, index) in enumerate(train_loader):
            optimizer.zero_grad()
            X_data = X_data.cuda().float()
            y_data = y_data.cuda().float()
            y_mask = y_mask.cuda().float()
            # NOTE(review): view(batch_size, ...) fails on a final partial
            # batch; X_data.view(X_data.size(0), -1) would be safer.
            y_pred = forecast_model(X_data.view(batch_size, input_size*seq_len))
            loss = criterion(y_data, y_pred, y_mask)
            loss.backward()
            optimizer.step()
            epoch_total_loss = epoch_total_loss + loss.item()
        epoch_avg_loss = epoch_total_loss/len(train_loader)
        # log roughly ten times over the whole run
        if (epoch +1) % round(num_epochs/10) == 0:
            print (f'Train loss after Epoch [{epoch+1}/{num_epochs}]: {epoch_avg_loss:.6f}')
        all_train_loss.append(epoch_avg_loss)
#saving test and validation losses
if load_model == False:
    print("Saving losses...")
    df_train_loss = pd.DataFrame(all_train_loss, columns = ["Values"])
    df_train_loss.to_csv('train_loss.csv', index = False)
    # NOTE(review): all_val_loss is never appended to in the loop above, so
    # this CSV is always empty — validation was likely removed from the loop.
    df_val_loss = pd.DataFrame(all_val_loss, columns = ["Values"])
    df_val_loss.to_csv('Validation_loss.csv', index = False)
    checkpoint = {"state_dict": forecast_model.state_dict(), "optimizer": optimizer.state_dict()}
    save_checkpoint(checkpoint)
# NOTE(review): these plot calls run even when load_model is True, in which
# case all_train_loss is undefined and this raises NameError.
plt.plot(list(range(1, num_epochs + 1)), all_train_loss, label = 'Train')
plt.legend(loc="upper right")
plt.xlabel('No. of epochs')
plt.ylabel('Loss')
import winsound
frequency = 2000 # beep frequency in Hz
duration = 100 # beep duration in ms
# audible "run finished" notification (Windows-only)
winsound.Beep(frequency, duration)
winsound.Beep(frequency, duration)
winsound.Beep(frequency, duration)
winsound.Beep(frequency, duration)
# Evaluate on the test split, accumulating sample indices, observations and
# predictions for later un-scaling and export.
all_index = torch.empty(0).cuda()
all_obs = torch.empty(0, output_size).cuda()
all_pred = torch.empty(0, output_size).cuda()
with torch.no_grad():
    total_test_loss = 0.0
    # NOTE(review): test_loader is not defined in this file — confirm it is
    # built elsewhere alongside train_loader.
    for i, (X_test, y_test, y_mask_test, index) in enumerate(test_loader):
        X_test = X_test.cuda().float()
        y_test = y_test.cuda().float()
        y_mask_test = y_mask_test.cuda().float()
        index = index.cuda().float()
        test_pred = forecast_model(X_test.view(batch_size, input_size*seq_len))
        test_loss = criterion(y_test, test_pred, y_mask_test)
        total_test_loss = total_test_loss + test_loss.item()
        all_index = torch.cat((all_index, index),0)
        all_obs = torch.cat((all_obs, y_test), 0)
        all_pred = torch.cat((all_pred, test_pred), 0)
    avg_test_loss = total_test_loss/len(test_loader)
print(avg_test_loss)
pred_out_np = all_pred.cpu().numpy()
obs_out_np = all_obs.cpu().numpy()
# Un-scale back to physical units using the stored column means/stds
# (columns 7:13 of the original frame are the pollutant targets).
df_out_mean = df_np_mean[7:13]
df_out_std = df_np_std[7:13]
final_pred = pred_out_np * df_out_std + df_out_mean
final_observed = obs_out_np * df_out_std + df_out_mean
all_index = all_index.cpu().numpy()
# export observed values (with original sequence indices) for plotting
out_obs_data = pd.DataFrame({'SO2 ': final_observed[:, 0],
                             'NO2': final_observed[:, 1],
                             'CO': final_observed[:, 2],
                             'O3': final_observed[:, 3],
                             'PM2.5': final_observed[:, 4],
                             'PM10': final_observed[:, 5],
                             'Index': all_index})
filename_obs = 'MLR plot_obs.xlsx'
out_obs_data.to_excel(filename_obs, index=True)
# export model predictions in the same layout
out_pred_data = pd.DataFrame({'SO2 ': final_pred[:, 0],
                              'NO2': final_pred[:, 1],
                              'CO': final_pred[:, 2],
                              'O3': final_pred[:, 3],
                              'PM2.5': final_pred[:, 4],
                              'PM10': final_pred[:, 5],
                              'Index': all_index})
filename_pred = 'MLR plot_pred.xlsx'
out_pred_data.to_excel(filename_pred, index=True)
| MLR model/Final Prediction/.ipynb_checkpoints/MLR final prediction - Copy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Asymptotic solutions in short-times
# Projectile motion in a linear potential field with images is described by the equation
#
# $$y_{\tau \tau} + \alpha \frac{1}{(1 + \epsilon y)^2} + 1= 0,$$
#
# with $y(0) = \epsilon$ and $y_{\tau}(0)=1$, and where $\epsilon \ll 1$ is expected.
import sympy as sym
from sympy import init_printing
init_printing(order='rev-lex')
# Symbols: eps is the small expansion parameter, alpha the field strength
# from the ODE; y0..y4 are the unknown series-coefficient functions of t.
y, eps, a, b, t, alpha = sym.symbols('y, epsilon, a, b, t, alpha')
y0 = sym.Function('y0')(t)
y1 = sym.Function('y1')(t)
y2 = sym.Function('y2')(t)
y3 = sym.Function('y3')(t)
y4 = sym.Function('y4')(t)
# +
# NOTE(review): single-argument sym.Eq(expr) (shorthand for Eq(expr, 0)) is
# deprecated in modern SymPy — confirm the SymPy version this targets.
y = sym.Eq(y0 + eps*y1 + eps**2*y2 + eps**3*y3 + eps**4*y4) # naive expansion
class f(sym.Function):
    """Residual of the projectile ODE evaluated on the series ansatz y."""
    @classmethod
    def eval(cls, y):
        return y.lhs.diff(t,t) + alpha*1/(1 + eps*y.lhs)**2 + 1
        #return y.lhs.diff(tau, tau) + eps/y.lhs**2
# -
# Expand the residual in powers of eps and collect the terms order by order.
the_series = sym.series(f(y), eps, x0=0, n=5)
by_order = sym.collect(the_series, eps, evaluate=False)
the_series
# ### $\mathcal{O} \left( 1 \right) \mbox{Solution}$
sym.Eq(by_order[1].removeO())  # display the leading-order equation
eqn = sym.Eq(by_order[1].removeO()) #1 + y0(tau).diff(tau, tau))
soln0 = sym.dsolve(eqn, y0)
# Fix the integration constants from the ICs: y0(0) = 0, y0'(0) = 1.
constants = sym.solve([soln0.rhs.subs(t,0) - 0, \
                       soln0.rhs.diff(t).subs(t,0) - 1])
C1, C2 = sym.symbols('C1 C2')
soln0 = soln0.subs(constants)
print(sym.latex(soln0))
soln0
# ### $\mathcal{O} \left( \epsilon \right) \mbox{Solution}$
by_order[eps]
# +
# Substitute the O(1) solution (when available) into the O(eps) equation.
try:
    eqn = sym.Eq(by_order[eps].replace(y0, soln0.rhs))
except NameError:
    eqn = sym.Eq(by_order[eps])
soln1 = sym.dsolve(eqn, y1)
# Homogeneous ICs at this order: y1(0) = 0, y1'(0) = 0.
constants = sym.solve([soln1.rhs.subs(t,0) - 0, \
                       soln1.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln1 = soln1.subs(constants)
soln1
# -
# ### $\mathcal{O} \left( \epsilon^2 \right) \mbox{Solution}$
by_order[eps**2]
# +
# Substitute the lower-order solutions, solve for y2, apply homogeneous ICs.
try:
    eqn = sym.Eq(by_order[eps**2].replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
    eqn = sym.Eq(by_order[eps**2].replace(y1, soln1.rhs))
soln2 = sym.dsolve(eqn, y2)
constants = sym.solve([soln2.rhs.subs(t,0) - 0, \
                       soln2.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln2 = soln2.subs(constants)
sym.factor(soln2)
# -
# ### $\mathcal{O} \left( \epsilon^3 \right) \mbox{Solution}$
by_order[eps**3]
# +
# Same pattern at O(eps^3): substitute y0..y2, solve for y3, zero ICs.
try:
    eqn = sym.Eq(by_order[eps**3].replace(y2, soln2.rhs).replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
    eqn = sym.Eq(by_order[eps**3].replace(y2, soln2.rhs))
soln3 = sym.dsolve(eqn, y3)
constants = sym.solve([soln3.rhs.subs(t,0) - 0, \
                       soln3.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln3 = soln3.subs(constants)
sym.factor(soln3)
# -
# ### $\mathcal{O} \left( \epsilon^4 \right) \mbox{Solution}$
by_order[eps**4]
# +
# Same pattern at O(eps^4).
try:
    eqn = sym.Eq(by_order[eps**4].replace(y3, soln3.rhs).replace(
        y2, soln2.rhs).replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
    eqn = sym.Eq(by_order[eps**4].replace(y3, soln3.rhs))
soln4 = sym.dsolve(eqn, y4)
constants = sym.solve([soln4.rhs.subs(t,0) - 0, \
                       soln4.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln4 = soln4.subs(constants)
sym.factor(soln4)
# -
# ### $\mbox{Composite Solution}$
# +
y_comp = sym.symbols('y_{comp}', cls=sym.Function)
# Assemble the composite (summed) expansion through O(eps^4).
try:
    y_comp = sym.Eq(y_comp, soln0.rhs + eps*soln1.rhs + eps**2*soln2.rhs + eps**3*soln3.rhs + eps**4*soln4.rhs) # + eps**2*soln2.rhs)
except NameError:
    y_comp = sym.Eq(y_comp, eps*soln1.rhs + eps**2*soln2.rhs + eps**3*soln3.rhs + eps**4*soln4.rhs) # + eps**2*soln2.rhs)
#print(sym.latex(y_comp))
y_comp
print(str(y_comp.rhs.subs(t, 1)))  # composite solution evaluated at t = 1
# -
# ### $\mbox{The Trajectory}$
def savefig(filename, pics):
    """Save the current matplotlib figure to ../doc/figures/<filename>.pgf.

    Parameters
    ----------
    filename : str
        Base name of the output file (extension .pgf is appended).
    pics : bool
        Master switch; nothing is written when falsy.
    """
    if pics:  # idiomatic truthiness test instead of `== True`; dead else removed
        plt.savefig('../doc/figures/{}.pgf'.format(filename), bbox_inches='tight', dpi=400)
pics = True
# +
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import scipy as sp
# %config InlineBackend.figure_format = 'retina'
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
#plt.rcParams['figure.dpi'] = 300
# %matplotlib inline
# Force black text/ticks/labels regardless of the active matplotlib theme.
matplotlib.rcParams.update(
    {'text.color': 'k',
     'xtick.color': 'k',
     'ytick.color': 'k',
     'axes.labelcolor': 'k'
     })
plt.rc('font', size=14)
# Sweep the perturbation parameter, largest first so legend order matches.
eps_val = [.1, .5, 1.][::-1]
linestyle = ['rs--', 'bo-', 'cv-.', 'k+:', 'm']
# FIX: scipy.arange was a re-exported NumPy alias that has been removed from
# SciPy; use numpy.arange directly.
tt = np.arange(0, 1.2, 0.001)
al = [2, 1., .5, .01]
fig, axs = plt.subplots(2, 2, figsize=(10, 8), sharex='col', sharey='row')
fig.subplots_adjust(hspace=.2, wspace=.2)
axs = axs.ravel()
i = 0
for aas in al:
    # Substitute the current alpha into the composite asymptotic solution.
    yc = y_comp.rhs.subs(alpha, aas)
    #plt.figure(figsize=(6, 4), dpi=100)
    for keys, vals in enumerate(eps_val):
        y_compP = sym.lambdify(t, yc.subs(eps, vals), 'numpy')
        if aas == 2:
            # Only label the curves in the first panel.
            label = r'$\mathbf{E}\mbox{u}=$' + ' {}'.format(vals).rstrip('0').rstrip('.')
        else:
            label = None
        axs[i].plot(tt, y_compP(tt), linestyle[keys], label=label,
                    markevery=100)
    axs[i].set_ylim(ymin=0., ymax=0.5)
    axs[i].set_xlim(xmax=1.05)
    axs[i].tick_params(axis='both', which='major', labelsize=16)
    leg = axs[i].legend(title=r'$\mathbf{I}\mbox{g} = $' + ' {:1.2f}'.format(aas).rstrip('0').rstrip('.'), loc=2)
    leg.get_frame().set_linewidth(0.0)
    i += 1
fig.text(0.5, -0.01, r'$t^*$', ha='center', fontsize=20)
fig.text(-0.03, 0.5, r'$y^*$', va='center', rotation='vertical', fontsize=20)
fig.tight_layout()
savefig('short_times', pics)
plt.show()
# +
eps_val = [.01, .1, 1.][::-1]
linestyle = ['rs--', 'bo-', 'cv-.', 'k+:', 'm']
# FIX: scipy.arange (a NumPy alias) was removed from SciPy; use numpy.arange.
tt = np.arange(0, 2.5, 0.001)
# Empirical linear fit alpha(eps) = 0.0121*eps + 0.2121.
yc = y_comp.rhs.subs(alpha, eps*0.0121 + 0.2121)
plt.figure(figsize=(6, 4))#, dpi=100)
for keys, vals in enumerate(eps_val):
    y_compP = sym.lambdify(t, yc.subs(eps, vals), 'numpy')
    plt.plot(tt, y_compP(tt), linestyle[keys],
             label=r'$\mathbf{E}\mbox{u} =$' + ' {}'.format(vals).rstrip('0').rstrip('.'),
             markevery=100)
plt.ylim(ymin=0., ymax=0.5)
plt.xlim(xmax=2.05)
plt.legend()
plt.xlabel(r'$t^*$')
plt.ylabel(r'$y^*$')
#savefig('short_times_better', pics)
plt.show()
# -
# ## Time aloft
y2 = sym.symbols('y2', cls=sym.Function)  # NOTE(review): immediately overwritten below
y2 = sym.Function('y2')(t)
# Truncated O(eps^2) composite solution used to locate the landing time.
try:
    y2 = sym.Eq(y2, soln0.rhs + eps*soln1.rhs + eps**2*soln2.rhs) # + eps**2*soln2.rhs)
except NameError:
    y2 = sym.Eq(y2, eps*soln1.rhs + eps**2*soln2.rhs)
y2.rhs
#y2.diff(t)
# Expand the landing time itself in powers of eps: tau = tau0 + eps*tau1 + ...
tau0, tau1, tau2 = sym.symbols('tau0 tau1 tau2')
tau = sym.Eq(tau0 + eps*tau1 + eps**2*tau2)
# Evaluate the trajectory at the expanded landing time and re-expand in eps.
y3 = y2.rhs.subs(t, tau.lhs).series(eps)
col = sym.collect(y3, eps, evaluate=False)
# ### $\mathcal{O} \left( 1 \right) \mbox{Solution}$
#tau0 = 2
sym.Eq(col[1].removeO())
# ### $\mathcal{O} \left( \epsilon \right) \mbox{Solution}$
# Leading-order landing time is tau0 = 2; solve each order for the correction.
order_eps = col[eps].subs(tau0, 2)
order_eps
soln_eps = sym.solve(order_eps, tau1)
# ### $\mathcal{O} \left( \epsilon^2 \right) \mbox{Solution}$
order_eps2 = col[eps**2].subs(tau0, 2).subs(tau1, soln_eps[0])
order_eps2
soln_eps2 = sym.solve(order_eps2, tau2)
# ### Composite Solution
# Using the linear regression for Im.
tau0, tau1, tau2 = sym.symbols('tau0 tau1 tau2')
tau = sym.Eq(tau0 + eps*tau1 + eps**2*tau2)
tau = tau.subs(tau0, 2).subs(tau1, soln_eps[0]).subs(tau2, soln_eps2[0])
print(str(tau.subs(alpha, eps*0.0121 + 0.2121)))
tau.subs(alpha, eps*0.0121 + 0.2121)
# +
ttt = np.arange(0.01, 2.,0.001)
#betas = [bet]
linestyle = ['k','rs--', 'bo-', 'cv-.', 'k+:', 'm']
plt.figure(figsize=(6, 4), dpi=100)
#taun = tau.subs(beta, vals)
# Landing time as a function of the expansion parameter, with the empirical
# fit alpha(eps) = 0.0121*eps + 0.2121 substituted in.
tau_soln = sym.lambdify(eps, tau.lhs.subs(alpha, eps*0.0121 + 0.2121), 'numpy')
plt.semilogx(ttt, tau_soln(ttt), 'k', markevery=100)
plt.xlabel(r'$\mathbf{E}\mbox{u}$')
plt.ylabel(r'$t_f$')
#plt.legend()
#savefig('drag', pics)
plt.show();
# -
| src/asymptotic-short.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Binary-labelled tumour dataset; target column is benign_0__mal_1.
df = pd.read_csv("../archive/DATA/cancer_classification.csv")
# exploratory data analysis
# df.info()
df.describe().transpose()
sns.countplot(x="benign_0__mal_1", data=df)
# correlation between features
df.corr()
df.corr()['benign_0__mal_1'].sort_values(ascending=False).plot(kind='bar')
plt.figure(figsize=(10,10))
sns.heatmap(df.corr(), cmap='RdYlGn')
# train test split
X = df.drop(['benign_0__mal_1'], axis=1).values
y = df['benign_0__mal_1'].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=101)
from sklearn.preprocessing import MinMaxScaler
# +
# Scale features to [0, 1]; fit on the training set only to avoid leakage.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# -
# creating the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
X_train.shape
# +
# Feedforward binary classifier: 30 -> 15 -> 1 (sigmoid output).
model = Sequential()
model.add(Dense(units=30, activation='relu'))
model.add(Dense(units=15, activation='relu'))
# binary classification
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
# -
# 600 epochs with no regularisation -- deliberately overfits (see loss plot).
model.fit(x=X_train, y=y_train, epochs=600, validation_data=(X_test, y_test), verbose=2)
losses = pd.DataFrame(model.history.history)
# example of overfitting
losses.plot(figsize=(10,10))
# +
# redefine the model (fresh weights for the early-stopping run below)
model = Sequential()
model.add(Dense(units=30, activation='relu'))
model.add(Dense(units=15, activation='relu'))
# binary classification
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
# -
# add callback -> earlystopping
from tensorflow.keras.callbacks import EarlyStopping
# Stop once validation loss has not improved for 25 consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss', mode = 'min', patience=25, verbose=2)
# fit the new model with new params
model.fit(x=X_train, y=y_train, epochs=600, validation_data=(X_test, y_test), verbose=2,
          callbacks=[early_stop])
model_loss = pd.DataFrame(model.history.history)
model_loss.plot(figsize=(10,10))
# another attempt - adding dropout layer to prevent overfitting
from tensorflow.keras.layers import Dropout
# +
# redefine the model, now with 50% dropout after each hidden layer
model = Sequential()
model.add(Dense(units=30, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=15, activation='relu'))
model.add(Dropout(0.5))
# binary classification
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
# -
# fit the new model with existing early_stop
model.fit(x=X_train, y=y_train, epochs=600, validation_data=(X_test, y_test), verbose=2,
          callbacks=[early_stop])
model_loss_2 = pd.DataFrame(model.history.history)
model_loss_2.plot(figsize=(10,10))
# model prediction
# model.predict_classes(x=X_test)
# predict_classes was removed from Keras; threshold the sigmoid output instead.
predictions = (model.predict(X_test) > 0.5).astype("int32")
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
| practical_ai/02_keras/keras_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Repeating Actions with Loops
# In the last lesson,
# we wrote some code that plots some values of interest from our first inflammation dataset,
# and reveals some suspicious features in it, such as from `inflammation-01.csv`
#
# 
#
# We have a dozen data sets right now, though, and more on the way.
# We want to create plots for all of our data sets with a single statement.
# To do that, we'll have to teach the computer how to repeat things.
#
# An example task that we might want to repeat is printing each character in a
# word on a line of its own.
word = 'lead'
# We can access a character in a string using its index. For example, we can get the first
# character of the word `'lead'`, by using `word[0]`. One way to print each character is to use
# four `print` statements:
# (string indices are zero-based)
print(word[0])
print(word[1])
print(word[2])
print(word[3])
# This is a bad approach for two reasons:
#
# 1. It doesn't scale:
# if we want to print the characters in a string that's hundreds of letters long,
# we'd be better off just typing them in.
#
# 1. It's fragile:
# if we give it a longer string,
# it only prints part of the data,
# and if we give it a shorter one,
# it produces an error because we're asking for characters that don't exist.
# Deliberately broken: 'tin' has only 3 characters, so word[3] raises an
# IndexError -- this demonstrates how fragile the hard-coded approach is.
word = 'tin'
print(word[0])
print(word[1])
print(word[2])
print(word[3])
# Here's a better approach:
word = 'lead'
for char in word:
    print(char)
# This is shorter --- certainly shorter than something that prints every character in a
# hundred-letter string --- and more robust as well:
word = 'oxygen'
for char in word:
    print(char)
# The improved version uses a [for loop]({{ page.root }}/reference/#for-loop)
# to repeat an operation --- in this case, printing --- once for each thing in a sequence.
# The general form of a loop is:
#
# ```python
# for variable in collection:
# do things with variable
# ```
# Using the oxygen example above, the loop might look like this:
# 
#
# where each character (`char`) in the variable `word` is looped through and printed one character
# after another. The numbers in the diagram denote which loop cycle the character was printed in (1
# being the first loop, and 6 being the final loop).
#
# We can call the [loop variable]({{ page.root }}/reference/#loop-variable) anything we like, but
# there must be a colon at the end of the line starting the loop, and we must indent anything we
# want to run inside the loop. Unlike many other languages, there is no command to signify the end
# of the loop body (e.g. `end for`); what is indented after the `for` statement belongs to the loop.
#
# ## What's in a name?
# In the example above, the loop variable was given the name `char` as a mnemonic;
# it is short for 'character'.
# We can choose any name we want for variables. We might just as easily have chosen the name
# `banana` for the loop variable, as long as we use the same name when we invoke the variable inside
# the loop:
word = 'oxygen'
# The loop variable name is arbitrary; 'banana' works just as well as 'char'.
for banana in word:
    print(banana)
# It is a good idea to choose variable names that are meaningful, otherwise it would be more
# difficult to understand what the loop is doing.
# Here's another loop that repeatedly updates a variable:
length = 0
for vowel in 'aeiou':
    length = length + 1  # count characters by incrementing once per pass
print('There are', length, 'vowels')
# It's worth tracing the execution of this little program step by step.
# Since there are five characters in `'aeiou'`,
# the statement on line 3 will be executed five times.
# The first time around,
# `length` is zero (the value assigned to it on line 1)
# and `vowel` is `'a'`.
# The statement adds 1 to the old value of `length`,
# producing 1,
# and updates `length` to refer to that new value.
# The next time around,
# `vowel` is `'e'` and `length` is 1,
# so `length` is updated to be 2.
# After three more updates,
# `length` is 5;
# since there is nothing left in `'aeiou'` for Python to process,
# the loop finishes
# and the `print` statement on line 4 tells us our final answer.
#
# Note that a loop variable is just a variable that's being used to record progress in a loop.
# It still exists after the loop is over,
# and we can re-use variables previously defined as loop variables as well:
letter = 'z'
for letter in 'abc':
    print(letter)
# The loop variable keeps its final value ('c') after the loop finishes.
print('after the loop, letter is', letter)
# Note also that finding the length of a string is such a common operation
# that Python actually has a built-in function to do it called `len`:
print(len('aeiou'))
# `len` is much faster than any function we could write ourselves,
# and much easier to read than a two-line loop;
# it will also give us the length of many other things that we haven't met yet,
# so we should always use it when we can.
# ## From 1 to N
#
# Python has a built-in function called `range` that creates a sequence of numbers. `range` can
# accept 1, 2, or 3 parameters.
#
# * If one parameter is given, `range` creates an array of that length,
# starting at zero and incrementing by 1.
# For example, `range(3)` produces the numbers `0, 1, 2`.
# * If two parameters are given, `range` starts at
# the first and ends just before the second, incrementing by one.
# For example, `range(2, 5)` produces `2, 3, 4`.
# * If `range` is given 3 parameters,
# it starts at the first one, ends just before the second one, and increments by the third one.
#   For example `range(3, 10, 2)` produces `3, 5, 7, 9`.
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge:</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Using <code>range</code>,
# write a loop that uses <code>range</code> to print the first 3 natural numbers:</p>
# <div class="codehilite"><pre><span></span>1
# 2
# 3
# </pre></div>
#
# </div>
#
# </section>
#
#
# <section class="solution panel panel-primary">
# <div class="panel-heading">
# <h2><span class="fa fa-eye"></span> Solution</h2>
# </div>
#
# </section>
#
# Print the first three natural numbers; range(1, 4) yields 1, 2, 3.
for i in range(1, 4):
    print(i)
# ## Computing Powers With Loops
#
# Exponentiation is built into Python:
print(5 ** 3)
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge:</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Write a loop that calculates the same result as <code>5 ** 3</code> using
# multiplication (and without exponentiation).</p>
#
# </div>
#
# </section>
#
#
# <section class="solution panel panel-primary">
# <div class="panel-heading">
# <h2><span class="fa fa-eye"></span> Solution</h2>
# </div>
#
# </section>
#
# Multiply by 5 three times to reproduce 5 ** 3 without exponentiation.
result = 1
for i in range(0, 3):
    result = result * 5
print(result)
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge: Reverse a String</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Knowing that two strings can be concatenated using the <code>+</code> operator,
# write a loop that takes a string
# and produces a new string with the characters in reverse order,
# so <code>'Newton'</code> becomes <code>'notweN'</code>.</p>
#
# </div>
#
# </section>
#
#
# <section class="solution panel panel-primary">
# <div class="panel-heading">
# <h2><span class="fa fa-eye"></span> Solution</h2>
# </div>
#
# </section>
#
# Build the reversed string by walking the original back-to-front.
newstring = ''
oldstring = 'Newton'
for char in reversed(oldstring):
    newstring = newstring + char
print(newstring)
# ## Computing the Value of a Polynomial
#
# The built-in function `enumerate` takes a sequence (e.g. a list) and generates a
# new sequence of the same length. Each element of the new sequence is a pair composed of the index
# (0, 1, 2,...) and the value from the original sequence:
#
# ```python
# for i, x in enumerate(xs):
# # Do something with i and x
# ```
#
# The code above loops through `xs`, assigning the index to `i` and the value to `x`.
# Suppose you have encoded a polynomial as a list of coefficients in
# the following way: the first element is the constant term, the
# second element is the coefficient of the linear term, the third is the
# coefficient of the quadratic term, etc.
#
# +
# Evaluate the polynomial 2 + 4*x + 3*x^2 at x = 5 (coefficients are stored
# constant-term first in cc).
x = 5
cc = [2, 4, 3]
y = cc[0] + cc[1] * x + cc[2] * x ** 2
y
# -
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge:</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Write a loop using <code>enumerate(cc)</code> which computes the value <code>y</code> of any polynomial, given <code>x</code> and <code>cc</code>.</p>
#
# </div>
#
# </section>
#
#
# <section class="solution panel panel-primary">
# <div class="panel-heading">
# <h2><span class="fa fa-eye"></span> Solution</h2>
# </div>
#
# </section>
#
# Accumulate coefficient * x**power for every (power, coefficient) pair.
y = 0
for power, coeff in enumerate(cc):
    y += coeff * x ** power
y
# ---
# The material in this notebook is derived from the Software Carpentry lessons
# © [Software Carpentry](http://software-carpentry.org/) under the terms
# of the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
| 03-fundamentals-of-python/02-repeating-actions_instructor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Word Embeddings
# Below we look at how to use word vectors in PyTorch.
# ## PyTorch implementation
# Word embeddings are simple in PyTorch: call `torch.nn.Embedding(m, n)`, where m is the total number of words and n is the embedding dimension. An embedding is effectively one big matrix with one row per word.
import torch
from torch import nn
from torch.autograd import Variable
# define an embedding
embeds = nn.Embedding(2, 5) # 2 words, dimension 5
# inspect the embedding matrix
embeds.weight
# `weight` exposes the whole embedding matrix. It is a trainable parameter that gets updated during training, and its values can also be set directly -- e.g. to load a pre-trained embedding.
# manually overwrite the embedding values
embeds.weight.data = torch.ones(2, 5)
embeds.weight
# look up the word vector for word index 50
embeds = nn.Embedding(100, 10)
single_word_embed = embeds(Variable(torch.LongTensor([50])))
single_word_embed
# To look up a single word's vector, call the embedding module directly; the input must be a Variable of type LongTensor.
# NOTE(review): torch.autograd.Variable is deprecated -- modern PyTorch accepts plain tensors, e.g. embeds(torch.LongTensor([50])).
# The next lesson covers what word embeddings are actually useful for.
| 06.循环神经网络(进阶)/nlp/word-embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (live_camera_effects)
# language: python
# name: pycharm-5f4d0b09
# ---
# + pycharm={"name": "#%%\n", "is_executing": true}
from __future__ import print_function
import tensorflow as tf
# Load a JPEG from disk, decode it to a 3-channel tensor, then convert to float32.
img=tf.io.read_file('a.jpg')
img=tf.image.decode_image(img, channels=3)
img=tf.image.convert_image_dtype(img, tf.float32)
print("ASdad")  # NOTE(review): placeholder debug output -- consider removing
# + pycharm={"name": "#%%\n"}
| best_practices_tf_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supplementary - check leakage
#
# In this supplementary notebook, we check how different classification models would perform if we included the leaked features in our analysis (especially the last fico range)
# +
# Load general utilities
# ----------------------
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
import pickle
import time
import seaborn as sns
import os
# Load sklearn utilities
# ----------------------
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score, roc_curve, brier_score_loss, mean_squared_error, r2_score
from sklearn.calibration import calibration_curve
# Load classifiers
# ----------------
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegressionCV
# Load debugger, if required
#import pixiedust
pd.options.mode.chained_assignment = None #'warn'
# -
# ## Load the raw data
# load the data from the file
directory = '../Data/1805_download/'
all_files = os.listdir(directory)
output = {}
for i in all_files:
    print(" Reading file " + i)
    # skiprows=1 drops the banner row; read every column as str for now
    output[i] = pd.read_csv(directory + i, dtype = str, skiprows = 1)
# +
# Stack all files; join='inner' keeps only columns common to every file.
data = pd.concat([output[i] for i in output.keys()], join='inner')
data = data[['id','loan_amnt','funded_amnt','funded_amnt_inv','term','int_rate',
             'installment','grade','sub_grade','emp_title','emp_length',
             'home_ownership','annual_inc','verification_status','issue_d',
             'loan_status','purpose','title','zip_code','addr_state','dti','total_pymnt',
             'delinq_2yrs','earliest_cr_line','open_acc','pub_rec','last_pymnt_d',
             'last_pymnt_amnt','fico_range_high','fico_range_low','last_fico_range_high',
             'last_fico_range_low','application_type','revol_bal','revol_util','recoveries']]
# Drop rows missing any field needed downstream for features or labels.
data.dropna(subset=['annual_inc','loan_status','issue_d','last_pymnt_d','loan_amnt',
                    'int_rate','earliest_cr_line','open_acc','pub_rec','delinq_2yrs','recoveries',
                    'grade','fico_range_high','fico_range_low','installment', 'last_fico_range_high',
                    'last_fico_range_low','funded_amnt','dti','funded_amnt_inv','revol_bal','revol_util']
            ,inplace=True)
data.shape
# +
# Identify the type of each of these column
float_cols = ['loan_amnt', 'funded_amnt', 'installment', 'annual_inc',
              'dti', 'revol_bal', 'delinq_2yrs', 'open_acc', 'pub_rec',
              'fico_range_high', 'fico_range_low','last_fico_range_low',
              'last_fico_range_high','total_pymnt', 'recoveries']
cat_cols = ['term', 'grade', 'emp_length', 'home_ownership',
            'verification_status', 'loan_status', 'purpose']
perc_cols = ['int_rate', 'revol_util']
date_cols = ['issue_d', 'earliest_cr_line', 'last_pymnt_d']
# Everything was read as str; cast numeric columns back.
for j in float_cols:
    data[j] = pd.to_numeric(data[j])
# Percentage columns arrive as strings like '13.5%'; strip the sign and scale to a fraction.
for j in perc_cols:
    data[j] = data[j].str.strip('%')
    data[j] = pd.to_numeric(data[j])
    data[j] = data[j]/100
for j in date_cols:
    data[j] = pd.to_datetime(data[j])
# -
# ## Engineer the features and generate the training/testing set
# +
default_seed = 1
np.random.seed(default_seed)
# select only terminated loans
data = data[data.loan_status.isin(['Fully Paid','Charged Off','Default'])]
# downsample
data = data.sample(n=50000)
# create labels for the dataset: 1 = charged off / defaulted, 0 = fully paid
data['label'] = (data.loan_status.str.contains('Charged Off') |
                 data.loan_status.str.contains('Default'))
# credit-history length in months at issue time
data['cr_hist'] = (data.issue_d - data.earliest_cr_line) / np.timedelta64(1, 'M')
data.label = data.label.astype(int)
# clean and get training/testing data
temp = pd.get_dummies(data[['term','grade','emp_length','home_ownership',
                            'verification_status','purpose']],dummy_na=True)
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 -- use .to_numpy().
# The leaked features (last_fico_*, recoveries, total_pymnt) are included on
# purpose: this notebook demonstrates how leakage inflates performance.
X = data[['loan_amnt','funded_amnt','int_rate','installment',
          'annual_inc','dti','delinq_2yrs','open_acc','pub_rec',
          'fico_range_high','fico_range_low','cr_hist','revol_bal',
          'recoveries','last_fico_range_high','last_fico_range_low',
          'revol_util', 'total_pymnt']].to_numpy()
X = np.concatenate((X, temp.to_numpy()), axis=1)
y = data.label.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# scale features to [0, 1]; fit on the training split only to avoid leakage
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.transform(X_test)
print(X_train.shape)
print(X_test.shape)
# -
# ## Classification models
# ### $l_2$ penalized logistic regression
# 10-fold cross-validated, L2-penalised logistic regression
logisticModel = LogisticRegressionCV(cv=10,penalty='l2')
logisticModel.fit(X_train,y_train)
y_pred = logisticModel.predict(X_test)
print('accuracy: ',accuracy_score(y_test,y_pred))
target_names = ['Non-Defaulted Loan','Defaulted Loan']
print(classification_report(y_test,y_pred,target_names=target_names,digits=4))
print('AUC: ',roc_auc_score(y_test,logisticModel.predict_proba(X_test)[:,1]))
# ROC curve for the positive (defaulted) class
fpr, tpr, thresholds = roc_curve(y_test, logisticModel.predict_proba(X_test)[:,1],
                                 pos_label=1)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',
         label='AUC = %0.2f'% roc_auc_score(y_test,logisticModel.predict_proba(X_test)[:,1]))
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# ### Random forest
# In addition to logistic regression, let's also take a quick look at random forest
# Random forest; min_samples_leaf=100 limits tree depth/overfitting.
random_forest = RandomForestClassifier(min_samples_leaf=100,n_estimators=50)
random_forest.fit(X_train,y_train)
y_pred = random_forest.predict(X_test)
print('accuracy: ',accuracy_score(y_test,y_pred))
target_names = ['Non-Defaulted Loan','Defaulted Loan']
print(classification_report(y_test,y_pred,target_names=target_names,digits=4))
print('AUC: ',roc_auc_score(y_test,random_forest.predict_proba(X_test)[:,1]))
# As we can see, with leaked features the AUC of the models is ridiculously high
| files/research/lc/modeling_leakage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #Intro into IPython notebooks
# %pylab inline
from IPython.display import YouTubeVideo
YouTubeVideo("qb7FT68tcA8", width=600, height=400, theme="light", color="blue")
# You can ignore this, it's just for aesthetic purposes
# (matplotlib and rcParams are in scope via the %pylab magic above)
matplotlib.rcParams['figure.figsize'] = (8,5)
rcParams['savefig.dpi'] = 100
# #Fitting Lines to Data
#
# We'll cover very basic line fitting, largely ignoring the subtleties of the statistics in favor of showing you *how* to perform simple fits of models to data.
# +
# NOTE: this notebook targets Python 2 (see the `print x` statements below).
# These import commands set up the environment so we have access to numpy and pylab functions
import numpy as np
import pylab as pl
# Data Fitting
# First, we'll generate some fake data to use
x = np.linspace(0,10,50)   # 50 x points from 0 to 10
# Remember, you can look at the help for linspace too:
# help(np.linspace)
# -
# y = m x + b
y = 2.5 * x + 1.2
# let's plot that
pl.clf()
pl.plot(x,y)
# looks like a simple line. But we want to see the individual data points
pl.plot(x,y,marker='s')
# We need to add noise first
noise = pl.randn(y.size)
# Like IDL, python has a 'randn' function that is centered at 0 with a standard deviation of 1.
# IDL's 'randomu' is 'pl.rand' instead
# What's y.size?
print y.size
print len(y)
# `y.size` is the number of elements in y, just like `len(y)` or, in IDL, `n_elements(y)`
# We can add arrays in python just like in IDL
noisy_flux = y + noise
# We'll plot it too, but this time without any lines
# between the points, and we'll use black dots
# ('k' is a shortcut for 'black', '.' means 'point')
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.')
# We need labels, of course
pl.xlabel("Time")
pl.ylabel("Flux")
pl.ylabel("Flux")
# Now we're onto the fitting stage.
# We're going to fit a function of the form
# $$y = m*x + b$$
# which is the same as
# $$f(x) = p[1]*x + p[0]$$
# to the data.
# This is called "linear regression", but it is also a special case of a more
# general concept: this is a first-order polynomial.
# "First Order" means that the highest exponent of x in the equation is 1
#
# We'll use polyfit to find the values of the coefficients. The third
# parameter is the "order"
p = np.polyfit(x,noisy_flux,1)
# help(polyfit) if you want to find out more
# print our fit parameters. They are not exact because there's noise in the data!
# note that this is an array! (highest-order coefficient first: [slope, intercept])
print p
print type(p) # you can ask python to tell you what type a variable is
# Great! We've got our fit. Let's overplot the data and the fit now
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,p[0]*x+p[1],'r-') # A red solid line
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
# Cool, but there's another (better) way to do this. We'll use the polyval
# function instead of writing out the m x + b equation ourselves
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,np.polyval(p,x),'r-') # A red solid line
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
# +
# help(polyval) if you want to find out more
# -
# Let's do the same thing with a noisier data set. I'm going to leave out most of the comments this time.
# Same exercise with 10x larger noise amplitude.
noisy_flux = y+noise*10
p = polyfit(x,noisy_flux,1)
print p
# plot it
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,np.polyval(p,x),'r-',label="Best fit") # A red solid line
pl.plot(x,2.5*x+1.2,'b--',label="Input") # a blue dashed line showing the REAL line
pl.legend(loc='best') # make a legend in the best location
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
# Despite the noisy data, our fit is still pretty good! One last plotting trick, then we'll move on.
pl.clf() # clear the figure
pl.errorbar(x,noisy_flux,yerr=10,marker='.',color='k',linestyle='none') # errorbar requires some extras to look nice
pl.plot(x,np.polyval(p,x),'r-',label="Best fit") # A red solid line
pl.plot(x,2.5*x+1.2,'b--',label="Input") # a blue dashed line showing the REAL line
pl.legend(loc='best') # make a legend in the best location
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
# #Curve Fitting
#
# We'll now move on to more complicated curves. What if the data looks more like a sine curve? We'll create "fake data" in basically the same way as above.
# this time we want our "independent variable" to be in radians
x = np.linspace(0,2*np.pi,50)
y = np.sin(x)
pl.clf()
pl.plot(x,y)
# We'll make it noisy again (unit-variance Gaussian noise)
noise = pl.randn(y.size)
noisy_flux = y + noise
pl.plot(x,noisy_flux,'k.') # no clear this time
# That looks like kind of a mess. Let's see how well we can fit it.
# The function we're trying to fit has the form:
# $$f(x) = A * sin(x - B)$$
# where $A$ is a "scale" parameter and $B$ is the side-to-side offset (or the "delay" if the x-axis is time). For our data, they are $A=1$ and $B=0$ respectively, because we made $y=sin(x)$
# curve_fit is the function we need for this, but it's in another package called scipy
from scipy.optimize import curve_fit
# we need to know what it does:
help(curve_fit)
# Look at the returns:
#
#
# Returns
# -------
# popt : array
# Optimal values for the parameters so that the sum of the squared error
# of ``f(xdata, *popt) - ydata`` is minimized
# pcov : 2d array
# The estimated covariance of popt. The diagonals provide the variance
# of the parameter estimate.
#
#
# So the first set of returns is the "best-fit parameters", while the second set is the "covariance matrix"
def sinfunc(x, a, b):
    """Model curve for the fit: a scaled, phase-shifted sine.

    Evaluates a * sin(x - b), where ``a`` is the amplitude and ``b``
    the horizontal (phase) offset.
    """
    shifted = x - b
    return np.sin(shifted) * a
# Fit the sine model to the noisy samples; curve_fit returns the best-fit
# parameters and their covariance matrix.
fitpars, covmat = curve_fit(sinfunc,x,noisy_flux)
# The diagonals of the covariance matrix are variances
# variance = standard deviation squared, so we'll take the square roots to get the standard devations!
# You can get the diagonals of a 2D array easily:
variances = covmat.diagonal()
std_devs = np.sqrt(variances)
print(fitpars, std_devs)  # was a Python 2 print statement; kernel is Python 3
# Let's plot our best fit, see how well we did
# These two lines are equivalent:
pl.plot(x, sinfunc(x, fitpars[0], fitpars[1]), 'r-')
pl.plot(x, sinfunc(x, *fitpars), 'r-')
# Again, this is pretty good despite the noisiness.
# #Fitting a Power Law
#
# Power laws occur all the time in physics, so it's a good idea to learn how to use them.
#
# What's a power law? Any function of the form:
# $$f(t) = a t^b$$
# where $t$ is your independent variable, $a$ is a scale parameter, and $b$ is the exponent (the power).
#
# When fitting power laws, it's very useful to take advantage of the fact that "a power law is linear in log-space".
# That means, if you take the log of both sides of the equation (which is allowed) and change variables, you get a
# linear equation!
# $$\ln(f(t)) = \ln(a t^b) = \ln(a) + b \ln(t)$$
# We'll use the substitutions $y=\ln(f(t))$, $A=\ln(a)$, and $x=\ln(t)$, so that
# $$y=a+bx$$
# which looks just like our linear equation from before (albeit with different letters for the fit parameters).
#
# We'll now go through the same fitting exercise as before, but using powerlaws instead of lines.
# Start at 0.1 (not 0) so log(t) below stays finite
t = np.linspace(0.1,10)
a = 1.5
b = 2.5
z = a*t**b
pl.clf()
pl.plot(t,z)
# Change the variables
# np.log is the natural log
y = np.log(z)
x = np.log(t)
pl.clf()
pl.plot(x,y)
pl.ylabel("log(z)")
pl.xlabel("log(t)")
# It's a straight line. Now, for our "fake data", we'll add the noise *before* transforming from "linear" to "log" space
noisy_z = z + pl.randn(z.size)*10
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')
# NOTE: where the noise pushed noisy_z below zero, np.log produces NaN
# (with a runtime warning) - handled by masking further down.
noisy_y = np.log(noisy_z)
pl.clf()
pl.plot(x,y)
pl.plot(x,noisy_y,'k.')
pl.ylabel("log(z)")
pl.xlabel("log(t)")
# Note how different this looks from the "noisy line" we plotted earlier. Power laws are much more sensitive to noise! In fact, there are some data points that don't even show up on this plot because you can't take the log of a negative number. Any points where the random noise was negative enough that the curve dropped below zero ended up being "NAN", or "Not a Number". Luckily, our plotter knows to ignore those numbers, but `polyfit` doesnt.
print(noisy_y)  # Python 3 print function (was Python 2 syntax)
# try to polyfit a line
pars = np.polyfit(x,noisy_y,1)
print(pars)  # NaNs in the input propagate into the fit parameters
# In order to get around this problem, we need to *mask the data*. That means we have to tell the code to ignore all the data points where `noisy_y` is `nan`.
#
# My favorite way to do this is to take advantage of a curious fact: $1=1$, but `nan`!=`nan`
print(1 == 1)
print(np.nan == np.nan)  # False: NaN never compares equal, even to itself
# So if we find all the places were `noisy_y` != `noisy_y`, we can get rid of them. Or we can just use the places where `noisy_y` equals itself.
OK = noisy_y == noisy_y
print(OK)
# This `OK` array is a "boolean mask". We can use it as an "index array", which is pretty neat.
print("There are %i OK values" % (OK.sum()))
masked_noisy_y = noisy_y[OK]
masked_x = x[OK]
print("masked_noisy_y has length", len(masked_noisy_y))
# now polyfit again
pars = np.polyfit(masked_x,masked_noisy_y,1)
print(pars)
# cool, it worked. But the fit looks a little weird!
# np.polyval, not the bare `polyval` (which is undefined without `from pylab import *`)
fitted_y = np.polyval(pars,x)
pl.plot(x, fitted_y, 'r--')
# The noise seems to have affected our fit.
# Convert back to linear space to see what it "really" looks like
# exp() undoes the log transform, mapping the fitted line back to a power law
fitted_z = np.exp(fitted_y)
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')
pl.plot(t,fitted_z,'r--')
pl.xlabel('t')
pl.ylabel('z')
# That's pretty bad. A "least-squares" approach, as with `curve_fit`, is probably going to be the better choice. However, in the absence of noise (i.e., on your homework), this approach *should* work
def powerlaw(x, a, b):
    """Power-law model used by curve_fit.

    Returns a * x**b, where ``a`` is the overall scale and ``b`` is the
    exponent (the "power").
    """
    scaled = x ** b
    return a * scaled
# Least-squares fit directly in linear space - no log transform, so the
# negative noisy points are no problem here.
pars,covar = curve_fit(powerlaw,t,noisy_z)
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')
pl.plot(t,powerlaw(t,*pars),'r--')
pl.xlabel('t')
pl.ylabel('z')
# #Tricks with Arrays
#
# We need to cover a few syntactic things comparing IDL and python.
#
# In IDL, if you wanted the maximum value in an array, you would do:
# `maxval = max(array, location_of_max)`
#
#
# In python, it's more straightforward:
# `location_of_max = array.argmax()`
# or
# `location_of_max = np.argmax(array)`
#
# Now, say we want to determine the location of the maximum of a number of different functions. The functions we'll use are:
# `sin(x)`
# `sin`$^2$`(x)`
# `sin`$^3$`(x)`
# `sin(x)cos(x)`
#
# We'll define these functions, then loop over them.
# sin(x) is already defined
def sin2x(x):
    """Return sin(x) squared."""
    s = np.sin(x)
    return s * s
def sin3x(x):
    """Return sin(x) cubed."""
    s = np.sin(x)
    return s ** 3
def sincos(x):
    """Return the product sin(x) * cos(x)."""
    return np.cos(x) * np.sin(x)
list_of_functions = [np.sin, sin2x, sin3x, sincos]
# we want 0-2pi for these functions
t = np.linspace(0,2*np.pi)
# this is the cool part: we can make a variable function
for fun in list_of_functions:
    # the functions know their own names (in a "secret hidden variable" called __name__)
    print("The maximum of ", fun.__name__, " is ", fun(t).max())
# OK, but we wanted the location of the maximum....
for fun in list_of_functions:
    print("The location of the maximum of ", fun.__name__, " is ", fun(t).argmax())
# well, that's not QUITE what we want, but it's close
# We want to know the value of t, not the index!
for fun in list_of_functions:
    print("The location of the maximum of ", fun.__name__, " is ", t[fun(t).argmax()])
# Finally, what if we want to store all that in an array?
# Well, here's a cool trick: you can sort of invert the for loop
# This is called a "list comprehension":
maxlocs = [ t[fun(t).argmax()] for fun in list_of_functions ]
print(maxlocs)
# Confused? OK. Try this one:
print(list(range(6)))  # range is lazy in Python 3; list() shows the values
print([ii**2 for ii in range(6)])
# #Further info on IPython Notebooks
#
# | Overview | link |
# |--------------------------------------|------------------------------------------------------------------------------------|
# | Blog of IPython creator | http://blog.fperez.org/2012/09/blogging-with-ipython-notebook.html |
# | Blog of an avid IPython user | http://www.damian.oquanta.info/index.html |
# | Turning notebook into a presentation | https://www.youtube.com/watch?v=rBS6hmiK-H8 |
# | Tutorial on IPython & SciPy | https://github.com/esc/scipy2013-tutorial-numpy-ipython |
# | IPython notebooks gallery | https://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks |
#
from IPython.display import YouTubeVideo
# NOTE(review): extra keyword args (theme=, color=) are forwarded to the embed
# URL as query parameters; current YouTube players may ignore them - confirm.
YouTubeVideo("xe_ATRmw0KM", width=600, height=400, theme="light", color="blue")
from IPython.display import YouTubeVideo
YouTubeVideo("zG8FYPFU9n4", width=600, height=400, theme="light", color="blue")
| howto/00-intro_ipython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Images are not shown in notebook intentionally
#
# Can do plt.imshow() but I've to
# - change the channels from BGR to RGB to display or
# - if displaying grayscale image - set cmap to gray
#
# Not displaying here as
# - I've done it in previous notebook - counting objects notebook; Not interested any more
# - I'm developing code in Atom using cv2.imshow() directly
#
#
import cv2
import imutils
from skimage.filters import threshold_local
import numpy as np
def ordered_rectangle_points(pts):
    '''
    Order four quadrilateral corner points consistently.

    Returns a float32 array of shape (4, 2) with the points arranged as:
    top-left, top-right, bottom-right, bottom-left.

    The ordering relies on two observations:
    - x + y is smallest at the top-left corner and largest at the
      bottom-right corner.
    - y - x (np.diff along axis 1) is smallest at the top-right corner
      and largest at the bottom-left corner.
    '''
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)
    ordered = np.array(
        [
            pts[np.argmin(coord_sums)],   # top-left
            pts[np.argmin(coord_diffs)],  # top-right
            pts[np.argmax(coord_sums)],   # bottom-right
            pts[np.argmax(coord_diffs)],  # bottom-left
        ],
        dtype="float32",
    )
    return ordered
def four_point_perspective_transform(image, pts):
    '''
    Warp the quadrilateral described by ``pts`` into a top-down
    ("birds eye view") image.

    ``pts`` may list the four ROI corners in any order; they are first
    sorted into top-left, top-right, bottom-right, bottom-left order.
    Returns the warped image.
    '''
    src = ordered_rectangle_points(pts)
    (tl, tr, br, bl) = src
    # Output width: the longer of the two horizontal edges.
    top_width = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    bottom_width = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    out_w = max(int(top_width), int(bottom_width))
    # Output height: the longer of the two vertical edges.
    right_height = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    left_height = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    out_h = max(int(right_height), int(left_height))
    # Destination corners in the same TL, TR, BR, BL order; float32 because
    # cv2.getPerspectiveTransform requires it.
    dst = np.array(
        [[0, 0], [out_w - 1, 0], [out_w - 1, out_h - 1], [0, out_h - 1]],
        dtype="float32",
    )
    # Compute the homography and apply it to get the top-down view.
    transformation_matrix = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, transformation_matrix, (out_w, out_h))
# +
# cv2.getPerspectiveTransform??
# -
# To actually obtain the top-down, “birds eye view” of the image we’ll utilize the cv2.getPerspectiveTransform function.
# - This function requires two arguments, rect and dst
# - rect is Coordinates of quadrangle vertices in the source image.
# - dst is Coordinates of the corresponding quadrangle vertices in the destination image.
# - getPerspectiveTransform requires float32
#
# The cv2.getPerspectiveTransform function returns a matrix , which is the actual transformation matrix.
#
# We apply the transformation matrix using the cv2.warpPerspective function. We pass in the image , our transform matrix , along with the width and height of our output image.
# ### Edge Detection
#
# The first step to building our document scanner app using OpenCV is to perform edge detection. Let’s take a look:
# +
file='scan_4.jpg'
# NOTE(review): cv2.imread returns None (no exception) when the file is
# missing - the .copy() below would then raise AttributeError. Confirm the
# image path before running.
image = cv2.imread(file)
original = image.copy()
#In order to speedup image processing, as well as make our edge detection step more accurate resizing image
image = imutils.resize(image, height=500)
# To scale back the image, if required when displaying the output
aspect_ratio = original.shape[0] / 500.0
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# perform Gaussian blurring to remove high frequency noise (aiding in contour detection), and performing Canny edge detection.
gray = cv2.GaussianBlur(gray,(5,5),0)
# Canny hysteresis thresholds: 5 (low) and 100 (high)
edged = cv2.Canny(gray, 5,100)
##print("Edge detection ")
##cv2.imshow("Blurred Gray", gray)
#cv2.imshow("Edged", edged)
#cv2.waitKey(1000)
# -
# ### Finding contours
# - sorting the contours by area and keep only the largest ones. This allows us to only examine the largest of the contours, discarding the rest.
#
# #### Contour Area
# - cv2.contourArea
# - Contour area is given by the function cv2.contourArea() or from moments, M['m00'].
#
# #### Contour Perimeter
# - cv2.arcLength(contour, True)
# - It is also called arc length. It can be found out using cv2.arcLength() function. Second argument specify whether shape is a closed contour (if passed True), or just a curve
#
# #### Contour Approximation
# - cv2.approxPolyDP(c, 0.02 * perimeter, True)
# - It approximates a contour shape to another shape with less number of vertices depending upon the precision we specify. It is an implementation of Douglas-Peucker algorithm. Check the wikipedia page for algorithm and demonstration.
#
# - To understand this, suppose you are trying to find a square in an image, but due to some problems in the image, you didn't get a perfect square, but a "bad shape". Now you can use this function to approximate the shape.
# - ***In this, second argument is called epsilon, which is maximum distance from contour to approximated contour. It is an accuracy parameter. A wise selection of epsilon is needed to get the correct output. *** Here 0.02*perimeter implies it is 2% of the arclength
# - Third argument specifies whether curve is closed or not.
#
# +
# Finding contours
cnts_mat = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# imutils.grab_contours extracts the contour list from the return value
# regardless of OpenCV version (3.x returns a 3-tuple, 4.x a 2-tuple),
# instead of the previous hard-coded cnts_mat[1] that only worked on 3.x.
cnts = imutils.grab_contours(cnts_mat)
#sorting the contours by area and keep only the largest ones. This allows us to only examine the largest of the contours, discarding the rest.
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
op_Cnt = None  # stays None when no 4-sided contour is found
for c in cnts:
    perimeter = cv2.arcLength(c, True)
    # approximate each contour; epsilon = 2% of the arc length
    approx = cv2.approxPolyDP(c, 0.02 * perimeter, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our surface
    if len(approx) == 4:
        op_Cnt = approx
        break
if op_Cnt is None:
    # Fail with a clear message here instead of a NameError further down.
    raise ValueError("No 4-sided contour found - edge detection likely failed for this image.")
##print("Finding contours of object")
#cv2.drawContours(image, [op_Cnt], -1, (0, 255, 0), 2)
#cv2.imshow("Outline", image)
#cv2.waitKey(1000)
# -
# #### threshold function
# The scikit-image adaptive threshold function is more powerful than the OpenCV one. It includes more than just Gaussian and mean, it includes support for custom filtering along with median (Although I only use Gaussian for this example). I also found it substantially easier to use than the OpenCV variant. In general, I just (personally) like the scikit-image version more.
#
#
# If somebody wants to use the opencv threshold I think this is an equivalent substitute:
#
# warped = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 251, 11)
#
# +
# Warp the detected 4-point contour (scaled image coordinates) to a top-down view
warped = four_point_perspective_transform(image, op_Cnt.reshape(4, 2))
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
# Adaptive (local) threshold: block size 11, gaussian weighting, offset 10
T = threshold_local(warped, 11, offset = 10, method = "gaussian")
# Binarize to 0/255 for the classic "scanned document" look
warped = (warped > T).astype("uint8") * 255
##print("Applying perspective transform")
#cv2.imshow("Scanned", warped)
#cv2.waitKey(3000)
#cv2.destroyAllWindows()
# -
# If We are getting _NameError: name 'op_Cnt' is not defined_ It means that there is no contour with 4 sides.
#
# That is most probably our code is not able to detect edges correctly or the image passed does not fit our current code.
# - Can add a try and except block
#
# ## Try,
# given the transformation matrix M you can calculate the mapped location of any source image pixel (x,y) (or a range of pixels) using:
# dest(x) = [M11x + M12y + M13]/[M31x + M32y + M33]
# dest(y) = [M21x + M22y + M23]/[M31x + M32y + M33]
#
# Why bother?
# I used this method to map a laser pointer from a keystoned camera image of a large screen image back onto the original image…allowing me to “draw” on the large screen.
#
#
#
# # Comments
#
#
# In Live feed camera -
# You can eliminate motion blur using a really good camera sensor, and manual control of white balance. This is critical since a lot of sensors come with a controller which by default will try to increase the brightness of the image by capturing multiple frames in succession and adding them together. This process is what creates motion blur so you need to simply disable automatic white balance in the controller, and you’ll get clean frames every time. However this also means that in some situations it will be too dark for the sensor to see anything. One way to solve this is to put a large amount of powerful infrared LED lights around or behind the sensor, and remove the infrared filter from the sensor so it becomes sensitive to infrared light. The sensor will not see colors, but for reading text from a page you don’t need colors. This way your sensor will see images even in total “darkness” without blinding the non-blind with a potentially strong white light. Reach out to me if you’re interested and I will send you information about such a sensor that we use in my company.
| Document scanner/.ipynb_checkpoints/Scanner_practice and notes-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/local/lib/python3.9
#Libraries
# #%conda install matplotlib
# #%conda install seaborn
import sys
# #!{sys.executable} -m pip install --user matplotlib==3.4.3
# #!pip install matplotlib==3.4.3
# import pkg_resources
# pkg_resources.require("matplotlib==`3.4.3") # modified to use specific numpy
import pandas as pd
import matplotlib.pyplot as plot
import matplotlib.ticker as mtick
from functools import reduce
import seaborn as sns
from textwrap import wrap
# %matplotlib inline
# -
# ## Import survey data
# Link to the Mobile Payment & Currency Survey - https://docs.google.com/forms/d/1jP8PFZMUfEO6nppEGKtfeooYeYBipu16hGsrcqCuIg/edit
# NOTE(review): absolute, user-specific path - this will not run on another
# machine; consider a relative path or a config value.
survey_data = pd.read_csv('/Users/manjaripokala/Documents/Software_Engineering_Masters/Mobile Computing/CourseProject/survey_response.csv')
print(survey_data.shape);
print(list(survey_data.columns))
# ## Update columns names to readable names
# Map the verbose Google-Forms question headers to short snake_case names
column_names = {'Timestamp': 'timestamp',
    'Your Mobile OS': 'mobile-os',
    'Do you use mobile payments apps ? (e.g Venmo, Apple pay, Google Pay)': 'payment-apps',
    'What do you believe mobile wallets could replace in the future? (Select all that apply)': 'replace',
    'Do you believe Mobile Wallets is a safer form of transaction ?': 'safer_form',
    'What concerns you about Mobile Payments ? (Select all that apply)': 'concerns',
    'How do you use Mobile Payments ? (Select all that apply)': 'usage',
    'How often do you use Mobile Wallets (transactions per week) ?': 'freqency_per_week',
    'Do you own crypto currency ?': 'crypto',
    'Your Age Group': 'age'
    }
survey_data.rename(columns=column_names, inplace=True)
print(list(survey_data.columns))
# ## Categorize 'Age'
survey_data['age'].unique()
# +
#Group ages function
def age_group(row):
    """Map a survey age bracket (row['age']) to its generational cohort.

    Any bracket not in the mapping yields 'Other'.
    """
    cohorts = {
        '18-22': 'GenZ',
        '23-30': 'Millennials',
        '31-38': 'Millennials',
        '39-45': 'GenX',
        '46-54': 'GenX',
        '55-60': 'Boomers',
        '60-65': 'Boomers',
    }
    return cohorts.get(row['age'], 'Other')
# Derive each respondent's generational cohort (row-wise apply); the previous
# `lambda row: age_group(row)` wrapper was redundant.
survey_data['age_category'] = survey_data.apply(age_group, axis=1)
# -
survey_data[['age','age_category']].drop_duplicates()
# ## Plot parameters set up
# +
# Reset any previous rc overrides before applying the notebook's defaults
plot.rcParams.update(plot.rcParamsDefault)
# %matplotlib inline
#plot.style.use('seaborn-pastel')
# NOTE(review): the 'seaborn-poster' style name was renamed to
# 'seaborn-v0_8-poster' in newer matplotlib releases - confirm the pinned
# matplotlib version supports it.
plot.style.use('seaborn-poster')
plot.rcParams['figure.figsize'] = [12, 5]
#plot.rcParams['axes.titlesize'] = 'large'
print(plot.style.available)
# -
# ## Age & Age Category
print(survey_data['age'].value_counts(normalize=True))
# Take legend labels from the value_counts index so they line up with the
# wedge order: the wedges are drawn in descending-frequency order, while
# drop_duplicates() returns first-seen order, so the legend was mislabeled.
labels = survey_data['age'].value_counts().index
ax = survey_data['age'].value_counts().plot(kind='pie', autopct='%1.1f%%', labels=None)
plot.legend(labels=labels)
ax.set_ylabel(None)
plot.savefig('age.png', bbox_inches='tight')
# +
print(survey_data['age_category'].value_counts(normalize=True))
labels = survey_data['age_category'].value_counts().index  # same ordering fix as above
ax = survey_data['age_category'].value_counts().plot(kind='pie', autopct='%1.1f%%', labels=None)
ax.set_ylabel(None)
plot.legend(labels=labels, loc='center left', bbox_to_anchor=(1.0, 0.5))
plot.savefig('age_category.png', bbox_inches='tight')
# -
#print(survey_data.head(5))
df = survey_data
# ## Evaluation of responses and plotting key metrics
# Split the responses by delimiter ';' if it is multiple options based <br>
# Get the proportions of the responses by 'Age' <br>
# Combine all the age specific dataframes into a single one 'xxx_survey_summary' <br>
# Create a bar plot on the responses w.r.t Age <br>
# ## What do you believe mobile wallets could replace in the future?
# #### Replacement options provided in the survey :
# * Debit Cards
# * Credit Cards
# * Cash
# * Passports
# * Driver License
# +
# Explode the multi-select 'replace' answers (';'-separated) into one column
# per selected option, keeping the respondent's cohort alongside.
#column3_long_list = df['replace'].str.split(';', expand=True)
column3_long_list = pd.concat([df[['age_category']], df['replace'].str.split(';', expand=True)], axis=1)
#print(column3_long_list)
# df.loc[df['A'] == 'foo']
# Per-cohort response frames (cohort column dropped after filtering).
response_boomers_replace = column3_long_list \
    .loc[column3_long_list['age_category'] == 'Boomers'] \
    .loc[:, column3_long_list.columns != 'age_category']
response_millennials_replace = column3_long_list \
    .loc[column3_long_list['age_category'] == 'Millennials'] \
    .loc[:, column3_long_list.columns != 'age_category']
response_genZ_replace = column3_long_list \
    .loc[column3_long_list['age_category'] == 'GenZ'] \
    .loc[:, column3_long_list.columns != 'age_category']
response_genX_replace = column3_long_list \
    .loc[column3_long_list['age_category'] == 'GenX'] \
    .loc[:, column3_long_list.columns != 'age_category']
print('response_boomers_replace: ', len(response_boomers_replace))
print('response_millennials_replace: ', len(response_millennials_replace))
print('response_genZ_replace: ', len(response_genZ_replace))
print('response_genX_replace: ', len(response_genX_replace), '\n')
# stack() flattens the per-option columns so value_counts gives each option's
# share of all selections within the cohort.
summary_boomers_replace = pd.DataFrame(response_boomers_replace.stack().value_counts(normalize=True))
summary_millennials_replace = pd.DataFrame(response_millennials_replace.stack().value_counts(normalize=True))
summary_genZ_replace = pd.DataFrame(response_genZ_replace.stack().value_counts(normalize=True))
summary_genX_replace = pd.DataFrame(response_genX_replace.stack().value_counts(normalize=True))
summary_boomers_replace.insert(loc=0, column='type', value=summary_boomers_replace.index)
summary_millennials_replace.insert(loc=0, column='type', value=summary_millennials_replace.index)
summary_genZ_replace.insert(loc=0, column='type', value=summary_genZ_replace.index)
summary_genX_replace.insert(loc=0, column='type', value=summary_genX_replace.index)
# +
dfs_to_join = [summary_genZ_replace, summary_millennials_replace, summary_genX_replace, summary_boomers_replace]
# NOTE(review): this uses pd.merge's default inner join, while the 'concerns'
# and 'usage' sections below use how='outer' - an option missing from any one
# cohort is silently dropped here. Confirm whether inner join is intended.
replace_survey_summary = reduce(lambda left,right: pd.merge(left,right,on='type'), \
    dfs_to_join)
replace_survey_summary.columns = ['replace_type', 'genZ', 'millennials', 'genX', 'boomers']
#print(replace_survey_summary)
# Wrap long option labels to 12 characters per line for the x-axis
replace_survey_summary['replace_type'] = ['\n'.join(wrap(x, 12)) for x in replace_survey_summary['replace_type']]
ax = replace_survey_summary.plot(x='replace_type', \
    kind='bar', \
    stacked=True,\
    title='What do you believe mobile wallets could replace in the future?', \
    mark_right=True)
ax.set(xlabel=None)
ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol='%'))
# Annotate each stacked segment with its percentage (skip zero-width patches)
for p in ax.patches:
    width, height = p.get_width(), p.get_height()
    x, y = p.get_xy()
    if(width == 0):
        continue
    #print(width, height)
    ax.text(x+width/2,
        y+height/2,
        '{:.0f} %'.format(height*100),
        ha='center',
        va='center')
ax.set_yticklabels([])
plot.xticks(fontsize=14, rotation=0)
plot.savefig('replace.png', bbox_inches='tight')
# -
# ## What concerns you about Mobile Payments ?
# #### Concerns options provided in the survey :
# * Setup & Ease of Use
# * Identity Protection
# * Card Security
# * Transaction Security
# * Cash Transaction Traceability
# * Fraudulent Payments
# * Merchant Acceptance
# * None of the above
# +
# Same explode-and-summarize pattern as the 'replace' section, applied to the
# multi-select 'concerns' question.
column5_long_list = pd.concat([df[['age_category']], df['concerns'].str.split(';', expand=True)], axis=1)
#print(column5_long_list)
response_boomers_concerns = column5_long_list \
    .loc[column5_long_list['age_category'] == 'Boomers'] \
    .loc[:, column5_long_list.columns != 'age_category']
response_millennials_concerns = column5_long_list \
    .loc[column5_long_list['age_category'] == 'Millennials'] \
    .loc[:, column5_long_list.columns != 'age_category']
response_genZ_concerns = column5_long_list \
    .loc[column5_long_list['age_category'] == 'GenZ'] \
    .loc[:, column5_long_list.columns != 'age_category']
response_genX_concerns = column5_long_list \
    .loc[column5_long_list['age_category'] == 'GenX'] \
    .loc[:, column5_long_list.columns != 'age_category']
print('response_boomers_concerns: ', len(response_boomers_concerns))
print('response_millennials_concerns: ', len(response_millennials_concerns))
print('response_genZ_concerns: ', len(response_genZ_concerns))
print('response_genX_concerns: ', len(response_genX_concerns), '\n')
summary_boomers_concerns = pd.DataFrame(response_boomers_concerns.stack().value_counts(normalize=True))
summary_millennials_concerns = pd.DataFrame(response_millennials_concerns.stack().value_counts(normalize=True))
summary_genZ_concerns = pd.DataFrame(response_genZ_concerns.stack().value_counts(normalize=True))
summary_genX_concerns = pd.DataFrame(response_genX_concerns.stack().value_counts(normalize=True))
summary_boomers_concerns.insert(loc=0, column='type', value=summary_boomers_concerns.index)
summary_millennials_concerns.insert(loc=0, column='type', value=summary_millennials_concerns.index)
summary_genZ_concerns.insert(loc=0, column='type', value=summary_genZ_concerns.index)
summary_genX_concerns.insert(loc=0, column='type', value=summary_genX_concerns.index)
# print('summary_boomers_concerns:\n', summary_boomers_concerns, '\n')
# print('summary_millennials_concerns:\n', summary_millennials_concerns, '\n')
# print('summary_genZ_concerns:\n', summary_genZ_concerns, '\n')
# print('summary_genX_concerns:\n', summary_genX_concerns, '\n')
# +
dfs_to_join = [summary_genZ_concerns, summary_millennials_concerns, summary_genX_concerns, summary_boomers_concerns]
# Outer merge keeps concerns that only some cohorts selected (NaN elsewhere)
concerns_survey_summary = reduce(lambda left, right: left.merge(right, how='outer', on='type'), dfs_to_join)
concerns_survey_summary.columns = ['concerns_type', 'genZ', 'millennials', 'genX', 'boomers']
#print(concerns_survey_summary)
concerns_survey_summary = concerns_survey_summary. set_index ('concerns_type', drop = False )
fig, ax = plot.subplots(1, 1, figsize = (12, 5))
# colorbar tick formatter: fraction -> percentage
fmt = lambda x,pos: '{:.0%}'.format(x)
sns.heatmap(concerns_survey_summary.loc[:, concerns_survey_summary.columns != 'concerns_type'], \
    annot=True,\
    fmt = '.1%',\
    cbar_kws={'format': mtick.FuncFormatter(fmt)},\
    cmap = sns.color_palette("Blues"))
#for t in ax.texts: t.set_text(t.get_text() + " %")
ax.set_ylabel(None)
ax.set_title('What concerns you about Mobile Payments ?')
plot.savefig('concerns.png', bbox_inches='tight')
# -
# ## How do you use Mobile Payments ?
# #### Usage options provided in the survey :
# * Identity Verification (for DL or other ID)
# * Send Cash
# * Mobile App Payments
# * Merchant Transactions (in store)
# * Online Payment Transactions
# * None of the above
# +
# Same explode-and-summarize pattern again, for the multi-select 'usage' question.
column6_long_list = pd.concat([df[['age_category']], df['usage'].str.split(';', expand=True)], axis=1)
#print(column6_long_list)
response_boomers_usage = column6_long_list \
    .loc[column6_long_list['age_category'] == 'Boomers'] \
    .loc[:, column6_long_list.columns != 'age_category']
response_millennials_usage = column6_long_list \
    .loc[column6_long_list['age_category'] == 'Millennials'] \
    .loc[:, column6_long_list.columns != 'age_category']
response_genZ_usage = column6_long_list \
    .loc[column6_long_list['age_category'] == 'GenZ'] \
    .loc[:, column6_long_list.columns != 'age_category']
response_genX_usage = column6_long_list \
    .loc[column6_long_list['age_category'] == 'GenX'] \
    .loc[:, column6_long_list.columns != 'age_category']
print('response_boomers_usage: ', len(response_boomers_usage))
print('response_millennials_usage: ', len(response_millennials_usage))
print('response_genZ_usage: ', len(response_genZ_usage))
print('response_genX_usage: ', len(response_genX_usage), '\n')
summary_boomers_usage = pd.DataFrame(response_boomers_usage.stack().value_counts(normalize=True))
summary_millennials_usage = pd.DataFrame(response_millennials_usage.stack().value_counts(normalize=True))
summary_genZ_usage = pd.DataFrame(response_genZ_usage.stack().value_counts(normalize=True))
summary_genX_usage = pd.DataFrame(response_genX_usage.stack().value_counts(normalize=True))
summary_boomers_usage.insert(loc=0, column='type', value=summary_boomers_usage.index)
summary_millennials_usage.insert(loc=0, column='type', value=summary_millennials_usage.index)
summary_genZ_usage.insert(loc=0, column='type', value=summary_genZ_usage.index)
summary_genX_usage.insert(loc=0, column='type', value=summary_genX_usage.index)
# +
dfs_to_join = [summary_genZ_usage, summary_millennials_usage, summary_genX_usage, summary_boomers_usage]
# Outer merge keeps usage types that only some cohorts reported
usage_survey_summary = reduce(lambda left, right: left.merge(right, how='outer', on='type'), dfs_to_join)
usage_survey_summary.columns = ['usage_type', 'genZ', 'millennials', 'genX', 'boomers']
#print(usage_survey_summary)
# Wrap long option labels to 12 characters per line for the x-axis
usage_survey_summary['usage_type'] = ['\n'.join(wrap(x, 12)) for x in usage_survey_summary['usage_type']]
ax = usage_survey_summary.plot(x='usage_type', \
    kind='bar', \
    # cmap='rocket', \
    # stacked=True,\
    edgecolor='black',\
    title='How do you use Mobile Payments ?', \
    mark_right=True)
ax.set(xlabel=None)
ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol='%'))
ax.set(ylabel=None)
plot.xticks(fontsize=14, rotation=0)
#ax.xaxis.label.set_size(5)
#ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol='%'))
plot.savefig('usage.png', bbox_inches='tight')
# -
| CourseProject/SurveyDataAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Question 4
# +
def isUpper_Or_isLower(letter):
no_of_uppercase = 0
no_of_lowercase = 0
for letters in letter:
if letters.isupper():
no_of_uppercase +=1
elif letters.islower():
no_of_lowercase +=1
return f"No of Uppercase: {no_of_uppercase}, No of Lowercase: {no_of_lowercase}"
# Interactive driver: prompt for a word and report its case counts
Letter = input("Input a word: ")
print( isUpper_Or_isLower(Letter) )
# -
# ## Question 3
# +
def cube(number):
    """Return ``number`` raised to the third power."""
    return number ** 3

def by_three(number):
    """Return cube(number) if ``number`` is divisible by 3, else False."""
    if number % 3 != 0:
        return False
    return cube(number)
# Interactive driver: prompt for an integer and apply the divisible-by-3 cube rule
Number = int(input("Input a number: "))
print( by_three(Number) )
# -
# ## Question 1
# +
def shut_down(keyboard_input):
    """Map a yes/no answer to a shutdown status message.

    Any answer other than exactly "yes" or "no" yields the rejection message.
    """
    responses = {
        "yes": "Shutting down",
        "no": "Shutdown aborted",
    }
    return responses.get(keyboard_input, "Sorry, such argument is not welcomed here")
# Interactive driver: prompt for a yes/no answer and print the shutdown status
Keyboard_input = input("Do you want to shutdown? : ")
print( shut_down(Keyboard_input) )
# -
# ## Question 2
# +
import pandas as pd
from termcolor import colored
def showEmployee(name, salary):
    """Build a two-column DataFrame ("Name", "Salary") from parallel lists."""
    return pd.DataFrame({"Name": name, "Salary": salary})
# Interactive driver: collect six name/salary pairs and build the DataFrame
Name = list()
Salary = list()
for a in range(6):
    Name.append(input("Input your name: "))
    Salary.append(int(input("Input your salary: ")))
Frame = showEmployee(Name,Salary)
# NOTE(review): the Styler returned here is discarded (not the cell's last
# expression), so the null highlighting is never displayed - confirm intent.
Frame.style.highlight_null(null_color="red")
#print(Frame)
def color(val):
    """Return a CSS color style string for a salary value.

    Brackets: > 1,000,000 -> green; 500,001..1,000,000 -> blue;
    everything else -> red.

    Fix: the original tested `val > 500000 and val < 1000000`, so a value of
    exactly 1,000,000 fell through to red while 999,999 was blue and
    1,000,001 was green — a clearly unintended gap. The strict upper bound
    was also redundant inside an elif.
    """
    if val > 1000000:
        Color = "green"
    elif val > 500000:
        Color = "blue"
    else:
        Color = "red"
    return "color: %s" % Color
def highlight(value):
    """Row styler for use with DataFrame.style.apply(..., axis=1).

    Colors both cells of the row (Name and Salary) by salary bracket:
    > 1,000,000 green; 500,001..1,000,000 blue; otherwise red.
    Returns one CSS string per column in the row.

    Fix: the original left exactly 1,000,000 in the red fallback (same gap
    as `color` above) and carried a duplicated dead branch — both the final
    `elif value.Salary < 500000` and the `else` returned red.
    """
    if value.Salary > 1000000:
        row_color = "green"
    elif value.Salary > 500000:
        row_color = "blue"
    else:
        row_color = "red"
    return ["color: %s" % row_color] * 2
#print(Frame.style.applymap(color,subset=["Salary"]))
Frame.style.apply(highlight,axis=1)
#Frame.to_csv("Employee_register.csv", index=False)
# -
| Exercise(06_05_2021).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pré-processamento de dados e validação
#
# Oi pessoal!
#
# Nessa aula, vamos falar um pouco sobre pré-processamento de dados e validação de modelos! Vamos aprender:
# - Como processar seus dados para entregá-los da melhor forma para seu modelo.
# - A prever quem vive e quem morre em um acidente marítimo.
# - Métricas de avaliação.
# - Estratégias de validação.
# - Como submeter em uma competição do Kaggle.
#
# Nós vamos passar por diversas técnicas e colocá-las em prática em um dos datasets mais clássicos: Sobreviventes do Titanic!
#
# Espero que vocês gostem, não esqueçam de deixar o like e adicionar o notebook aos seus favoritos.
#
# ### *IMPORTANTE BAIXAR OS DADOS*
#
# Para baixar os dados você vai precisar se cadastrar no Kaggle - explico no próximo tópico o que é esse site.
#
# Os dados podem ser baixados em: https://www.kaggle.com/c/titanic/data
# Nesse link você também encontra uma descrição sobre cada uma das features do dataset.
#
# Três arquivos serão fornecidos:
#
# - Dados de treino: conjunto de dados dos quais sabemos a resposta.
# - Dados de teste: conjunto de dados para o qual faremos nossas previsões.
# - Exemplo de submissão: Arquivo que nos mostra o formato de uma submissão para essa competição.
# ## O que é o Kaggle?
#
# O Kaggle é a maior comunidade de Ciência de Dados do mundo. Nele, você encontra diversos datasets, estudos de pessoas do mundo todo e ainda pode participar de competições organizadas pelo site - muitas delas valendo dinheiro.
#
# Nosso objetivo é que, ao fim dessa aula, você faça sua primeira submissão em uma competição do Kaggle. A competição é a do [Titanic](https://www.kaggle.com/c/titanic/overview), e ela foi criada apenas para fins educacionais.
# ## Importando os dados
#
# Vamos começar importando os dados que acabamos de baixar e dando uma primeira olhada neles. Nós vamos assumir que vocês ja sabem Pandas mas, qualquer dúvida que surgir, não deixem de dar uma olhada na aula específica do assunto, pesquisar no Google (a documentação dessas bibliotecas é bem feita) ou perguntar pra gente!
import numpy as np
import pandas as pd
# +
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
combine = [train, test]
# -
train.head()
# ## Tipos de variáveis
#
# Vamos começar entendendo melhor sobre o nosso material de estudo: os dados. Um dataset pode ser visto como um conjunto de *data objects*, normalmente chamados de instâncias, observações, vetores, linhas ou exemplos. Esses são cada uma das linhas do nosso conjunto de dados.
#
# Cada um desses *data objects* é definido por um conjunto de varíaveis - ou *features*. Que expressam, de diferentes maneiras, as características da nossa instância. Ao descrever uma sala de aula, por exemplo, podemos ter as seguintes features: localização, capacidade, quadro negro ou branco, quantidade de tomadas, número de portas, se possui um ar condicionado. A ideia é que essas variáveis sejam capazes de descrever as caracteristicas básicas das salas.
#
# Existem diferentes tipos de variáveis, eles são:
#
# - Numéricas
# - Discretas (representam contagens) e.g. número de cadeiras em uma sala
# - Contínuas (assumem valores em um intervalo contínuo) e.g preço de uma passagem
# - Categóricas
# - Ordinais (a ordem importa) e.g. tamanho de roupa (P < M < G)
# - Nominais (não existe ordem entre os valores) e.g. cor da roupa
#
# Vamos identificar agora os tipos de variáveis que temos no dataset do Titanic. Todo *DataFrame* possui um atributo chamado **dtypes**, que nos mostra o tipo de cada variável.
train.dtypes
# Agora nós sabemos de que tipo cada variável é. O tipo *object* indica que a variável é uma string, que trataremos como variáveis categóricas.
#
# É importante perceber que nem toda variável numérica é **realmente** numérica. Pegue a variável *Pclass*, por exemplo, seus valores são: 1a, 2a ou 3a classe. Esses valores já foram processados utilizando uma das técnicas que veremos nesse notebook e, por isso, ela agora é tratada como um número pelo código, embora permaneça sendo uma feature categórica.
#
# Para entender de verdade o tipo de cada variável, o melhor que podemos fazer é ler a documentação que é normalmente fornecida com os dados. Abaixo, separaremos as variáveis nos tipos que mencionamos anteriormente.
#
# **Quais variáveis são numéricas?**
# Contínuas: Age, Fare.<br> Discretas: SibSp, Parch.
#
# **Quais variáveis são categóricas?**
# Nominais: Survived, Sex, and Embarked.<br> Ordinais: Pclass.
# ## Valores faltantes
#
# Antes de começarmos a criar nossas features - ou fazer qualquer coisa - é bom sempre dar uma olhada no nosso conjunto de dados. Podemos ver se nenhum dado está faltando, verificar se os dados fazem sentido, se estão consistentes e coisas do tipo.
#
# Além disso, é uma boa ideia fazer perguntas e buscar as respostas no seu dataset, assim como criar gráficos que te façam entender melhor seus dados e que forneçam insights sobre eles. Essa etapa do processo é chamada de Análise Exploratória de Dados (EDA).
#
# Como o foco da aula é no pré-processamento, vamos apenas tratar dos valores faltantes pois eles podem nos trazer alguns problemas mais pra frente. Utilizando os comandos mostrados abaixo, você pode descobrir quantos valores estão faltando em cada coluna do seu dataset.
train.isna().sum()
test.isna().sum()
# Existem diversas formas de preencher os *missing values*, por exemplo:
# - Média ou mediana de toda a coluna.
# - Agrupar os dados por outras features e pegar média ou mediana depois. Por exemplo, digamos que queremos preencher valores vazios de idade. É possível que pessoas de diferentes classes tenham médias de idades diferentes, então, na hora de preencher podemos olhar para a média ou mediana da idade daqueles que estão na mesma classe que nosso exemplo com idade vazia está.
# - Preencher com 0.
# - Excluir as linhas que contém valores nulos.
#
# Todas essas formas são válidas e tem seus prós e contras. Por motivo de simplicidade, vamos preencher os valores faltantes em Age e Fare com a mediana da coluna e em Embarked com o valor mais frequente. Não vamos preencher Cabin pois não usaremos essa feature.
# +
# Embarked: fill missing ports with the most frequent value from the TRAIN
# set, and reuse that same value for the test set (fill statistics come from
# train only, so no test information leaks into preprocessing).
embarked_mode = train['Embarked'].mode()[0]
train['Embarked'] = train['Embarked'].fillna(embarked_mode)
test['Embarked'] = test['Embarked'].fillna(embarked_mode)
# Fare: fill missing values with the train-set median.
train_median_fare = train['Fare'].dropna().median()
train['Fare'] = train['Fare'].fillna(train_median_fare)
test['Fare'] = test['Fare'].fillna(train_median_fare)
# Age: same strategy — train-set median applied to both datasets.
train_median_age = train['Age'].dropna().median()
train['Age'] = train['Age'].fillna(train_median_age)
test['Age'] = test['Age'].fillna(train_median_age)
# -
# ## Feature engineering
#
# Prontinho, agora podemos começar a criar algumas features básicas.
#
# Uma das partes mais importantes do pré-processamento de dados é a criação de features que ajudem seu modelo a encontrar padrões mais facilmente. Nessa parte do notebook, nós vamos criar algumas novas variáveis e analisar como elas se relacionam com a sobrevivência dos passageiros.
#
# As features criadas aqui foram retiradas de: https://www.kaggle.com/startupsci/titanic-data-science-solutions
# ### Título do passageiro
#
# Se pensarmos bem, o nome de um indivíduo não tem relação alguma com suas chances de sobrevivência. Podemos então retirar essa coluna?<br>Bom, talvez. Vamos dar uma olhada em alguns dos nomes.
train['Name'].head()
# Os nomes em nossos conjuntos de dados são únicos, mas os títulos dos indivíduos (Mr., Mrs., Miss) se repetem. Vamos isolar os títulos de cada nome para analisarmos essa fato. A tabela abaixo mostra, para cada título, quantos homens e quantas mulheres o possuem.
#
# O método *Series.str.extract()* extrai texto se baseando na regex utilizada.
# +
# Pull the honorific out of each name: the first word ending in a period
# (e.g. "Braund, Mr. Owen Harris" -> "Mr"). The pattern is a raw string;
# the original '\.'  inside a plain string literal is an invalid escape
# sequence and raises a SyntaxWarning on modern Python.
for df in combine:
    df['Title'] = df['Name'].str.extract(r' ([A-Za-z]+)\.', expand=False)
# How many passengers of each sex carry each title?
pd.crosstab(train['Title'], train['Sex'])
# -
# Antes de ver a relação dos títulos com sobrevivência, vamos agrupá-los. Talvez seja uma boa ideia deixar todos os mais raros em um único grupo, então vamos fazer isso.
# +
for df in combine:
df['Title'] = df['Title'].replace(['Lady', 'Countess','Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Sir',
'Jonkheer', 'Dona'], 'Rare')
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# -
# Muito interessante. O título de um passageiro realmente influencia sua chance de sobrevivência. Podemos manter essa feature que criamos e excluir a feature *Name*.
# +
train = train.drop(['Name'], axis=1)
test = test.drop(['Name'], axis=1)
combine = [train, test]
# -
# ### O passageiro está sozinho?
#
# Uma pessoa que está sozinha tem mais chances de sobreviver? Abaixo criaremos uma feature que indica o tamanho da família do passageiro que está a bordo, e uma outra feature para indicar se um indivíduo está sozinho ou não.
# +
# FamilySize = siblings/spouses + parents/children + the passenger themself.
for df in combine:
    df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
    # IsAlone is a binary flag: 1 when the passenger has no family aboard.
    df['IsAlone'] = 0
    df.loc[df['FamilySize'] == 1, 'IsAlone'] = 1
# Survival rate of passengers traveling alone vs. with family.
train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# -
# Aparentemente não. Você consegue imaginar um motivo para isso?
#
# Vou excluir as features SibSp e Parch pois acho que as features que criamos já nos dão bastante informação. Você pode escolher mantê-las se preferir ou até mesmo testar posteriormente de que forma seu modelo foi melhor.
# +
train = train.drop(['SibSp', 'Parch'], axis=1)
test = test.drop(['SibSp', 'Parch'], axis=1)
combine = [train, test]
# -
# ### Como ficaram nossos datasets?
#
# Vamos dar uma olhada nos nossos dois datasets (treino e teste) após a criação das features.
print(train.shape, test.shape)
train.head()
test.head()
# Algumas dessas variáveis não nos dão muita informação, então podemos excluí-las. São elas *Ticket* e *Cabin*.
# +
train = train.drop(['Ticket', 'Cabin'], axis=1)
test = test.drop(['Ticket', 'Cabin'], axis=1)
combine = [train, test]
# -
train.head()
# ## Feature encoding
#
# O modelo que usaremos só aceita variáveis numéricas, por isso, precisaremos transformar nossos dados de alguma forma. Existem várias maneiras de fazer isso e nós vamos dar uma olhada em duas delas.
# ### Label encoding
#
# Método que mapeia cada categoria em um número. É mais utilizado com métodos de árvore.
#
# OBS: O For não é necessário aqui pois estamos encodando apenas uma variável, mas preferi deixar uma forma mais geral.
from sklearn.preprocessing import LabelEncoder
# +
# Columns to label-encode (each category mapped to an integer code).
label_encoder_columns = ['Embarked']
for column in label_encoder_columns:
    # create a fresh encoder per column
    label_encoder = LabelEncoder()
    # Fit on the union of train and test values: fitting on train alone
    # makes transform(test) raise ValueError whenever the test set holds a
    # category the train set never saw. (For Embarked both sets share the
    # same three ports, so the resulting codes are unchanged.)
    label_encoder.fit(pd.concat([train[column], test[column]]))
    # apply the learned mapping to both datasets
    train[column] = label_encoder.transform(train[column])
    test[column] = label_encoder.transform(test[column])
# -
train.head()
# Como podemos ver, *Embarked* agora possui apenas valores numéricos. Caso seja necessário voltar aos valores iniciais em algum ponto do código, basta usar o método *LabelEncoder.inverse_transform()*. Abaixo temos um exemplo de como isso funciona.
label_encoder.inverse_transform(train['Embarked'])[0:5]
# ### One-hot encoding
#
# É um método que cria, para cada categoria, uma coluna com valores binários indicando a presença ou não da categoria na instância.
#
# - Normalmente utilizado com métodos lineares, kNN e redes neurais.
# - É necessário tomar cuidado caso existam muitos valores diferentes de categoria.
#
# Evita que o modelo pense algo como: Mrs. > Miss > Mr. Deixa mais clara para o modelo a independência das categorias. Vamos aplicar em algumas colunas apenas para exemplificar. Para fazer isso podemos usar a função *pd.get_dummies()*
#
# #### Exemplo de retorno da função
pd.get_dummies(train['Title'])
# #### Aplicação
# +
# One-hot encode the categorical columns, then replace the originals with
# the generated indicator columns.
one_hot_columns = ['Sex', 'Title']
one_hot = pd.get_dummies(train[one_hot_columns])
train = pd.concat([train, one_hot], axis=1).drop(one_hot_columns, axis=1)
one_hot = pd.get_dummies(test[one_hot_columns])
test = pd.concat([test, one_hot], axis=1).drop(one_hot_columns, axis=1)
# NOTE(review): encoding train and test separately can produce mismatched
# dummy columns if a category appears in only one of the sets — verify, or
# run get_dummies on the concatenated frame and split afterwards.
# -
train.head()
# ## Feature scaling
#
# Alguns algorítmos são sensíveis a escala dos dados e, por essa razão, precisamos colocar todos os dados na mesma escala para que esses modelos funcionem da melhor forma possível. Vamos dar uma olhada em dois dos métodos mais comuns para fazer isso.
#
# É importante saber que uma Árvores de Decisão não é um desses modelos sensíveis, então só aplicaremos esses métodos aqui para exemplificação.
# Como queremos aplicar essas transformações nos nossos dados de treino e teste, é necessário que o Scaler tenha uma visão geral dos dados na hora de se adequar a eles, por isso, criaremos um dataset que concatena train e test.
df_concat = pd.concat([train.drop(['Survived'], axis=1), test], axis=0)
# ### Normalização
#
# Faz com que todos os valores fiquem no intervalo [0, 1]:
#
# $
# \begin{align}
# {x_{i}}' = \frac{x_{i} - min(x)}{max(x) - min(x)}
# \end{align}
# $
#
# Podemos aplicar na coluna *Fare*.
from sklearn.preprocessing import MinMaxScaler
# +
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(df_concat[['Fare']])
train[['Fare']] = scaler.transform(train[['Fare']])
test[['Fare']] = scaler.transform(test[['Fare']])
# -
# ### Padronização
#
# Faz com que os valores tenham média 0 e desvio padrão 1:
#
# $
# \begin{align}
# {x_{i}}' = \frac{x_{i} -\mu}{\sigma}
# \end{align}
# $
#
# Vamos utilizar a padronização na feature *Age* para exemplificar.
#
# Explicação (em inglês, infelizmente) da importância de normalizar os padronizar os dados: https://humansofdata.atlan.com/2018/12/data-standardization/
from sklearn.preprocessing import StandardScaler
# +
scaler = StandardScaler()
scaler.fit(df_concat[['Age']])
train[['Age']] = scaler.transform(train[['Age']])
test[['Age']] = scaler.transform(test[['Age']])
# -
# ## Resultado final
#
# Vamos dar uma olhada final nos nossos dados antes de seguirmos em frente!
train.head()
test.head()
# ## Métricas de avaliação
# Antes de começarmos a utilizar modelos para prever o futuro dos nossos passageiros, precisamos estudar um pouco sobre como avaliar o modelo que criarmos.
#
# Métricas de avaliação são super importantes pois nos dão um valor que representa o quão bem nosso modelo está se saindo.
#
# A métrica utilizada depende se estamos tratando de um problema de classificação ou regressão. Vamos ensinar algumas para cada um dos casos:
#
# - Classificação
# - Acurácia
# - Precision e Recall
# - F-measure
# - AUC-ROC Curve
# - Regressão
# - Erro quadrático médio (MSE)
# - R²
# ### Matriz de confusão
#
# É uma medida de performance para modelos de classificação. É uma tabela com a combinação de valores previstos pelo modelo e valores reais:
#
# <img src="https://miro.medium.com/max/712/1*Z54JgbS4DUwWSknhDCvNTQ.png" alt="confusion matrix">
#
# Vamos entender o que cada uma das siglas representa fazendo uma analogia com um teste de alguma doença:
#
# - TP (True Positive): Seu modelo previu positivo e estava correto. Esse caso seria equivalente a você **estar** doente e receber um **teste positivo**.
#
# - TN (True Negative): Seu modelo previu negativo e estava correto. Esse caso é equivalente a você **não estar** doente e receber um **teste negativo**.
#
# - FP (False Positive): Seu modelo previu positivo e estava incorreto: Esse caso é equivalente a você **não estar** doente e receber um **teste positivo**.
#
# - FN (False Negative): Seu modelo previu negativo e estava incorreto. Esse caso é equivalente a você **estar** doente e receber um **teste negativo**.
#
# Nossas previsões corretas ficam todas na diagonal principal da matriz.
#
# É importante perceber que a matriz de confusão pode ser usada para problemas de classificação de mais de 2 classes. Abaixo, temos um exemplo da matriz de confusão sendo usada em um problema de classificação de 3 classes:
#
# <img src="https://scikit-learn.org/stable/_images/sphx_glr_plot_confusion_matrix_001.png" alt="confusion matrix">
#
#
# ### Precision e Recall
#
# **Precision**<br>
# Qual a proporção de previsões positivas realmente corretas?
#
# $
# \begin{align}
# Precision = \frac{TP}{TP+FP}
# \end{align}
# $
#
# Um modelo sem falso positivo tem uma precisão de 1.0.
#
# **Recall**<br>
# Qual a proporção de observações realmente positivas foi identificada corretamente?
#
# $
# \begin{align}
# Recall = \frac{TP}{TP+FN}
# \end{align}
# $
#
# Um modelo sem falso negativo tem recall de 1.0.
# ### Acurácia
#
# Indica a porcentagem de predições corretas que o modelo realizou.
# Pode ser expressa da seguinte forma:
#
# $
# \begin{align}
# Accuracy = \frac{TP+TN}{Total}
# \end{align}
# $
#
# Pode enganar caso o número de ocorrências de cada classe esteja desbalanceado.
# ### F-measure
#
# É uma medida de acurácia que leva em conta precision e recall ao mesmo tempo. Ela é dada por uma média harmônica das duas medidas:
#
# $
# \begin{align}
# F_{1} = \frac{1}{recall^{-1} + precision^{-1}} = 2 \cdot \frac{recall \cdot precision}{recall+precision}
# \end{align}
# $
# ### AUC (Area under the curve)
#
# Área abaixo de qual curva? Da curva **ROC** (Receiver operating characteristic).
#
# E como funciona essa curva?
#
# - Essa curva indica a relação entre as taxas de falsos positivos e verdadeiros positivos
# - Nosso modelo terá como saída a probabilidade de classificar os dados como da classe 1
# - Vamos supor que se P(Y=1) for > 0.5, dizemos que aquela instância é da classe 1, caso contrário classe 0.
# - Isso irá nos dar uma taxa de falsos positivos e uma taxa de verdadeiros positivos e isso é um ponto na curva ROC.
# - Agora escolhemos outros thresholds - diferentes de 0.5. Cada um desses gera um novo ponto na curva ROC.
#
# <img src="https://miro.medium.com/max/751/1*RqK5DjVxcj4qZsCdN4FOSQ.png" alt="auc-roc">
#
# Essa metrica não é sensível a distribuição das classes e, por essa razão, é comumente utilizada como métrica de diversas competições.
#
# É importante notar que um **classificador aleatório** possui uma ROC-AUC de 0.5.
# ### Mean Squared Error
#
# Método mais simples para avaliação de modelos de regressão. Basicamente, para cada instância, calculamos o quadrado da diferença entre o que deveríamos prever e o que previmos e então tiramos a média:
#
# $
# \begin{align}
# MSE = \frac{1}{N} \sum_{i=0}^{N}(f(x_{i}) - y_{i})^{2}
# \end{align}
# $
# ### R² Score
#
# Outro método para avaliar modelos de regressão. É também chamado de coeficiente de determinação e tem todos seus valores no intervalo [0,1].
#
# $
# \begin{align}
# SStot = \sum_{i=0}(y_{i} - \bar{y}_{i})^{2}
# \end{align}
# $
#
# $
# \begin{align}
# SSres = \sum_{i=0}(y_{i} - \hat{y}_{i})^{2}
# \end{align}
# $
#
# $
# \begin{align}
# R^{2} = 1 - \frac{SSres}{SStot}
# \end{align}
# $
# ## Estratégias de validação
#
# Com nossa métrica escolhida em mãos, chegou a hora de criar e validar nosso modelo!
#
# Vamos aprender agora dois métodos de validação e, depois de validar nosso modelo, faremos uma submissão no Kaggle para ver o quanto nossa validação se aproxima do nosso resultado real no conjunto de teste.
#
# Antes disso, vamos só preparar nossos dados para os processos.
# +
train = train.drop(['PassengerId'], axis=1)
pass_ids = test['PassengerId']
test = test.drop(['PassengerId'], axis=1)
# -
X = train.drop(['Survived'], axis=1)
y = train['Survived']
X.shape
test.shape
# Perceba que agora temos o X e o test com as mesmas dimensões, isso vai facilitar as coisas para nós.
# ### Hold Out
#
# Nessa estratégia, nós dividiremos nosso conjunto de dados em dois conjuntos. Usaremos um deles para treinar nosso modelo (conjunto de treino) e o outro para calcular a métrica de avaliação (conjunto de validação).
#
# Nós sabemos a resposta correta para todo o nosso conjunto de dados mas vamos fingir que não sabemos para uma parte dele, usar nosso modelo pra prever essas respostas e depois comparar as predições com as respostas verdadeiras.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
# Hold-out split: 80% train / 20% validation.
# NOTE(review): no random_state is set, so the split (and the accuracy
# printed below) changes on every run — pin one for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
# +
# Fit a decision tree on the training split and score it on the held-out set.
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
preds = model.predict(X_val)
print(f'Accuracy = {accuracy_score(y_val, preds)}')
# confusion matrix of our model
confusion_matrix(y_val, preds)
# -
# ### k-Fold Cross Validation
#
# Nessa estratégia vamos dividir nossos dados em K conjuntos. Em cada iteração, utilizaremos K-1 desses conjuntos para treinamento e um deles para validação (variando qual conjunto é utilizado para validação em cada iteração). Depois disso, calculamos a média das iterações.
from sklearn.model_selection import KFold
# +
# Number of cross-validation folds.
NFOLDS = 5
# NOTE(review): KFold here does not shuffle, so folds follow row order;
# pass shuffle=True (with a random_state) if the row ordering is not random.
folds = KFold(n_splits=NFOLDS)
columns = X.columns
score = 0
# creating the folds
splits = folds.split(X, y)
# for each fold, grab the train indices and the validation indices
for fold_n, (train_index, valid_index) in enumerate(splits):
    X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    # a fresh tree per fold, trained on the K-1 remaining folds
    model = DecisionTreeClassifier(max_depth=3)
    model.fit(X_train, y_train)
    y_pred_valid = model.predict(X_valid)
    print(f"Fold {fold_n + 1} | Accuracy: {accuracy_score(y_valid, y_pred_valid)}")
    # accumulate per-fold accuracy to average at the end
    score += accuracy_score(y_valid, y_pred_valid)
print(f"\nMean accuracy = {score/NFOLDS}")
# -
# A ideia é utilizar a validação para escolher o melhor modelo e seus respectivos parâmetros.
# ## Prevendo para o teste e submetendo no Kaggle
#
# Chegou a hora de usar o modelo que criamos e prever a resposta para os dados que não temos resposta!
# train the final model on ALL labeled data (no hold-out) before predicting
model = DecisionTreeClassifier(max_depth=3)
model.fit(X, y)
# use the sample-submission file as a template for the required format
sub = pd.read_csv('gender_submission.csv')
test_preds = model.predict(test)
sub['Survived'] = test_preds
sub.head()
# write the predictions in Kaggle's expected PassengerId,Survived format
sub.to_csv('submission.csv', index=False)
# Uma vez que você tem seu arquivo de predições, basta seguir o tutorial [nessa página](https://www.kaggle.com/c/titanic/overview) para aprender a submeter no site! E pronto!
#
# ### *Parabéns!!!*
#
# Infelizmente, você vai perceber que nossa pontuação ainda não foi muito alta. Na verdade, ela foi só um pouco melhor que a submissão fornecida como exemplo. Mas tudo bem, nós ainda podemos melhorar muito! E isso nos leva ao nosso penúltimo tópico...
# ## É hora de ser criativo e botar a mão na massa!
#
# Crie um novo notebook e faça as coisas do seu jeito! Esse notebook serviu apenas para te ensinar algumas técnicas, mas existe muito mais coisas para fazer. Crie novas features, processe os dados de forma diferente, teste e aprenda.
#
# Uma boa fonte de aprendizado é a [sessão de Notebooks](https://www.kaggle.com/c/titanic/notebooks) dessa competição no Kaggle. Lá, competidores vão mostrar toda a sua solução e você pode aprender muito com eles. Não deixe de dar uma olhada.
# ## Considerações finais
#
# Essa aula abordou diversos tópicos de pré-processamento dos dados e validação dos modelos, levando a uma solução, não ótima, mas completa do dataset do Titanic. É importante ter sempre em mente que cada tipo de modelo prefere os dados de uma certa forma quando realizando um pré-processamento. As técnicas ensinadas aqui podem te ajudar a processar os dados pra vários modelos.
#
# Espero que essa aula tenha sido útil pra você. Não deixe de nos dar seu feedback, nós também estamos aprendendo. Além disso, se você tiver qualquer dúvida, é só chamar no Telegram ou no Slack!
#
# Obrigado pela atenção!
| Aula4/Pré-processamento e validação.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Querying a data frame
#vamos começar com um exemplo
import pandas as pd
df = pd.read_csv('resources/week-1/datasets/Admission_Predict.csv', index_col = 0)
df.head()
df.columns = [x.lower().strip() for x in df.columns]
df.head()
# +
#mascaras booleanas são criadas aplicando operadores diretamente nos objetos dataframe ou series do pandas
#por exemplo, no dataset graduate_admission poderíamos estar interessados em ver apenas estudantes
#que tem chance de admissão maior que 0.7
#para construir uma mascara booleana para essa query, queremos projetar a chance de admissão usando o operador
#indexado e aplicando o operador maior que > como comparação do valor de 0.7.
#a série resultante é indexada onde o valor de cada célular é Verdadeiro ou Falso dependendo se o estudane
#tem chance de admissão maior que 0.7
# Boolean mask: True for rows whose admission chance exceeds 0.7.
admit_mask = df['chance of admit'] > 0.7
admit_mask.head()
# +
#e agora oque fazemos com o resultado da mascara booleana?
#nós colocamos por cima dos dados para 'esconder' os dado que nós não queremos, que são representados como Falso
#isso é feito usando a função .where() no dataframe original
df.where(admit_mask).head()
# +
#notamos que os valores representados por True aparecem normais, os que são False vêm acompanhado com NaN
#se não quisermos esses dados NaN usamos a função dropna()
df.where(admit_mask).dropna().head()
# +
#o where é pratico mas não muito usado, daí os devs do pandas criaram outro operador para fazer isso
df[df['chance of admit'] > 0.7].head()
# -
| Querying a data frame.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Exercise: A Small Example for Linear Regression with Numpy
#
# ** A small scale example **
#
#
# by
#
# [__<NAME>__ (<EMAIL>)]( http://www.mendeley.com/profiles/michael-granitzer/)
#
#
# __License__
#
# This work is licensded under a [Creative Commons Attribution 3.0 Unported License](http://creativecommons.org/licenses/by/3.0/)
#
# The table below describes four cars by their age and stopping distance for a full braking at 100km/h till stop
#
# |Car |Age | Mileage |Stopping Distance|
# |------------|-----|------------|-----------------|
# |Wartburg |5 |30530 |50|
# |Moskvich |7 |90000 |79|
# |Lada |15 | 159899 |124|
# |Trabi |28 | 270564 |300|
# |Average |13,75|137748,25 |138,25|
#
#
# ## Question 1: Determine the weights $w_i$ for the linear regression for the age variable.
#
# We first create the data, than take the equation from the [lecture slides, pg 58-59](http://www.uni-weimar.de/medien/webis/teaching/lecturenotes/machine-learning/unit-en-regression.pdf). The derivation from the equation can be found at the lecture notes from [Prof. Kirchner](http://seismo.berkeley.edu/~kirchner/eps_120/Toolkits/Toolkit_10.pdf)
#
#Creating the data and python setup
# %pylab inline
#the data
x=np.array([[5,30530],[7,90000],[15,159899],[28,270564]])
y=np.array([50,79,124,300])
print "Independent variables:"
print x, type(x)
print "Dependent variable:"
print y, type(y)
# Estimating $w_0$ and $w_1$ is done as
#
# $w_0=\bar{x}-w_1\bar{y}$
#
# $w_1 =\frac{Cov(x,y)}{Var(x)}=\frac{\frac{1}{n} \sum_{i=1}^{n} (x_i-\bar{x})(y_i-\bar{y})}{\frac{1}{n}\sum_{i=1}^{n} (x_i-\bar{x})^2}$
#
# +
def w0_estimate1D(x, y, w1):
    """Intercept of the 1-D least-squares line: w0 = mean(y) - w1 * mean(x)."""
    return np.average(y) - np.average(x) * w1


def w1_estimate1D(x, y):
    """Slope of the 1-D least-squares line: Cov(x, y) / Var(x)."""
    x_centered = x - np.average(x)
    y_centered = y - np.average(y)
    # summed element-wise product == inner product of the centered vectors
    return np.sum(x_centered * y_centered) / np.sum(x_centered ** 2)
w1=w1_estimate1D(x[:,0],y)
w0=w0_estimate1D(x[:,0],y,w1)
print w0, w1
# -
# ## Question 2: Draw a scatter plot of the data and the linear regression for a variable of your choice.
#
#
# Now lets plot the results and the parameters. Did we do a correct calculation?
#
xlim((0,35))
ylim((-10,310))
plot(x[:,0],y,'o')
plot(np.arange(0,35,1),np.arange(0,35,1)*w1+w0,'-r')
show()
# **Some Numpy details**
# To see a little bit more of numpy's behaviour lets investigate the numpy details on that simple example:
# +
#Calculating w1 covariance requires the sum of two vectors
x1d = x[:,0]
x_bar = np.average(x1d)
y_bar=np.average(y)
np.sum((x1d-x_bar)*(y-y_bar))
#we first calculate a new list containtin (x1d-x_bar)*(y-y_bar)
l = (x1d-x_bar)*(y-y_bar)
print 'pointwise multiplication',l.shape, l
#From this we take the sum.
print 'sum from pointwise multiplication',sum(l)
# A valid second approach would be using the inner product
# Note: inner product is multiplication and sum at once.
print 'inner product',(x1d-x_bar).dot((y-y_bar))
# -
# ## Question 3: Consider the mileage of the cars as an additional variable and estimate the regression parameters
#
# So this requires us to use the matrix equation derived in [slide 60](http://www.uni-weimar.de/medien/webis/teaching/lecturenotes/machine-learning/unit-en-regression.pdf):
#
# $$ w=(X^TX)^{-1}X^Ty $$
#
# Note that we have to include a bias term below
import numpy.linalg as la
#include the bias term in the data: the leading column of ones yields the intercept w0
x=np.array([[1, 5,30530],[1,7,90000],[1,15,159899],[1,28,270564]])
y=np.array([50,79,124,300])
#now estimate parameters via the normal equations: w = (X^T X)^-1 X^T y
# NOTE(review): la.lstsq(x, y) is numerically preferable to forming the
# explicit inverse — confirm before reusing this on ill-conditioned data.
w = la.inv(x.T.dot(x)).dot(x.T).dot(y)
print w
# ## Question 4: What are the pitfalls of the extrapolation
#
# The model is linear, which means that extrapolation is not constrained. So given the example of a 40 year old car, it is unlikely that the distance to stop will increase beyond 300 meters.
#
# Moreover, there may not be a linear relationship at all between the age and the stopping distance. For example, can a newly bought Trabi stop in 3 meters (according to our first model) when it is new (1st year)? Most likely not, since not even a Formula 1 car could do it.
#
# Note however, that linear models can approximate non-linear ones in a small interval, but not far beyond those intervals (think of a Taylor series expansion of a function)
#
#
#car in its first year stops at
print "Car in its year %f stops at %f meters"%(1,1*w1+w0)
| II.ML-and-DM/II.ML-DM-Simple-Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 4
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path="stud loan.csv"
data=pd.read_csv(path)
data.shape
data.head()
plt.scatter(data['age'],data['terms'],c='crimson')
plt.xlabel('Age in years')
plt.ylabel('Terms')
plt.show()
plt.hist(data['Principal'])
plt.hist(data['age'],
color = 'green',
edgecolor = 'orange',
bins = 5)
plt.title('Histograms of age')
plt.xlabel('Age in years')
plt.ylabel('No of customers')
plt.show()
# NOTE(review): these hard-coded fuel-type counts look copied from a car
# dataset example and are unrelated to the student-loan data loaded above —
# confirm this cell is intentional.
counts = [979, 120, 12]
fuelType = ('Petrol', 'Diesel', 'CNG')
index = np.arange(len(fuelType))
plt.bar(index, counts, color=['red', 'blue', 'cyan'])
plt.title('Bar plot of fuel types')
plt.xlabel('Fuel Types')
plt.ylabel('Frequency')
plt.xticks(index, fuelType, rotation = 90)
plt.show()
import seaborn as sns
sns.set(style="darkgrid")
sns.regplot(x=data['age'],y=data['terms'])
sns.distplot(data['age'])
sns.distplot(data['age'],kde=False)
sns.distplot(data['age'],kde=False,bins=5)
sns.countplot(x="terms", data=data)
sns.countplot(x="terms", data=data, hue = "Gender")
#box plot diagram
sns.boxplot(y=data["age"])
#muliple data representation on box plot
sns.boxplot(x=data['Principal'], y=data["terms"])
sns.boxplot(x = "Principal", y=data["terms"], hue = "age",data=data)
f,(ax_box, ax_hist)=plt.subplots(2, gridspec_kw={"height_ratios":(.20,.80)})
| 18CSE010-Assignment 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PythonData]
# language: python
# name: conda-env-PythonData-py
# ---
# # Econmic Model 3: Predict Retail with Year, Brent, ICO Composite
# ## Random Forest Decision Tree
# This model employs a Random Forest Regressor Decision Tree algorithm to predict retail coffee price based on economic metrics such as supply, trade, inventories, consumption, energy costs, and commodity pricing data. Data was obtained from International Coffee Organization (The ICO Historic Data).
#
# ### Goal
# - Train model with Time and ICO spot price to predict retail price
# - Three imputs: Year, Brent crude, ICO SPOT
# - User input: Year + Brent crude + ICO Spot = Retail price (for year)
# Example
#
# Enter 2021 (Year)
#
# Enter 100 (Annual Brent Crude)
#
# Enter 1.09 (ICO composite (USD $/lb))
#
# Output: Average global retail price
#
# ### Scoring Metrics:
# - Training Data Score: 0.9597823614739235
# - Testing Data Score: 0.8959434042614792
# - MSE: 0.12347079250000448
# - RMSE: ~0.3514 (square root of MSE; the 0.0617 previously reported here was MSE/2, a formula error)
# Dependencies
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
from sklearn import tree
import os
import matplotlib.pyplot as plt
# Machine learning libraries
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# import processed data
path = "Trends/"
file = "finalFeatures.csv"
path_file = path + file
df = pd.read_csv(path_file)
df
df.shape
# Keep only Year, Brent crude, and ICO composite as predictors of retail price
# by dropping every other economic column.
three_features_df = df.drop(columns= ['Coffe Imports (In 1K 60-kg bags) Calendar Year 137c',
                                      're-exports (In 1K 60-kg bags) Calendar Year 35c',
                                      'Inventories (In 1K 60-kg bags)',
                                      'Coffe Export (In 1K 60-kg bags) crop year',
                                      'Coffe Produciton (In 1K 60-kg bags) crop year',
                                      'Domestic Consumption (In 1K 60-kg bags)',
                                      'WTI ave closing price USD'])
three_features_df.head()
Years = df["Year"]
# needed for ploting final graph
# Years
# +
# Create dataframe for running decision tree
# This was done to upload existing code with similar variables
# Main Data set: data_df
x = three_features_df.drop("retail", axis=1)
y = three_features_df["retail"]
target = y
data_df = x
print(x.shape)
print(y.shape)
# -
# ### RandomForestRegressor
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=42)
#from sklearn.ensemble import RandomForestRegressor
Econ_model_Three_Features = RandomForestRegressor(n_estimators=200)
Econ_model_Three_Features = Econ_model_Three_Features.fit(X_train, y_train)
Econ_model_Three_Features.score(X_test, y_test)
print('Test Acc: %.3f' % Econ_model_Three_Features.score(X_test, y_test))
print(f"Training Data Score: {Econ_model_Three_Features.score(X_train, y_train)}")
print(f"Testing Data Score: {Econ_model_Three_Features.score(X_test, y_test)}")
# +
#sorted(zip(Econ_model_twoFeatures.feature_importances_, feature_names), reverse=True)
# +
# Model stats and needed for ploting first graph
# +
ypred = Econ_model_Three_Features.predict(X_test)
mse = mean_squared_error(y_test, ypred)
print("MSE: ", mse)
# BUG FIX: RMSE is the square root of MSE; the original computed mse*(1/2.0),
# i.e. half the MSE, which is not a root-mean-squared error.
print("RMSE: ", mse ** 0.5)
# Taken from reference (note: the reference RMSE below was produced with the
# same mistaken MSE/2 formula)
# Target MSE: 0.130713987032462
# Target RMSE: 0.065356993516231
# -
# BUG FIX: the first label said "train set" while printing y_test's shape.
print(f"Test set shape: {y_test.shape}")
print(f"Prediction shape: {ypred.shape}")
# Overlay actual vs predicted retail prices on the held-out test rows.
x_ax = range(len(y_test))
plt.plot(x_ax, y_test, linewidth=2, label="original", color ="green")
plt.plot(x_ax, ypred, linewidth=3.0, label="predicted", color = "red")
plt.title("y-test and y-predicted data")
plt.xlabel('X-axis (# Retail data points)')
plt.ylabel('Y-axis (Retail values: USD/lb)')
plt.legend(loc='best',fancybox=True, shadow=True)
plt.grid(True)
plt.show()
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'z3_Economic_web_model_3_inputs.sav'
joblib.dump(Econ_model_Three_Features, filename)
# +
import pickle
filename = 'pickel3_Economic_web_model_3_inputs.pickle'
with open(filename, "wb") as f:
    pickle.dump(Econ_model_Three_Features, f)
# +
# Set up for desired graph
# -
# Predict over the full dataset to plot the in-sample fit across all years.
y_pred_bs = Econ_model_Three_Features.predict(data_df)
data_df.head(1)
# +
x_ax = Years
plt.plot(x_ax, target, linewidth=2, label="Original", color ="green")
plt.plot(x_ax, y_pred_bs, linewidth=3.0, label="Predicted", color = "red")
plt.title("Predicted Retail Price: Inputs (Year, Brent ICO composite)")
plt.xlabel('1990-2018')
plt.ylabel('Retail values (USD/lb)')
plt.legend(loc='best',fancybox=True, shadow=True)
plt.grid(True)
plt.savefig('Econimic_model.png')
plt.show()
# -
print(f"Training Data Score: {Econ_model_Three_Features.score(X_train, y_train).round(decimals=4)}")
print(f"Testing Data Score: {Econ_model_Three_Features.score(X_test, y_test).round(decimals=4)}")
# # web model development
three_features_df.head(10)
input_test_1 = three_features_df.head(1)
User_Test_df = (input_test_1).drop("retail", axis=1)
input_test_1
# ### Enter data below to test model
# Enter data here to test model
#print(User_Test_df.dtypes)
test_model_data = (1999, 17.90, 0.86)
test_model_data
# BUG FIX: the prediction below used to run on User_Test_df (the first row of
# the training frame) while printing test_model_data as the input, and the
# user array was built with an extra nesting level (shape (1, 1, 3)) and then
# never used. Build a one-row frame with the model's feature columns and
# predict on the actual user-entered values.
user_test_data = pd.DataFrame([test_model_data], columns=User_Test_df.columns)
test_output = Econ_model_Three_Features.predict(user_test_data)
print(f"Input (Year: Brent: ICO spot): {test_model_data}")
print(f"Predicted Retail Price: {test_output} USD/LB")
| Paul_coffee/00_3_Econ_model_3features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/patrickjwolf/DS-Unit-2-Kaggle-Challenge/blob/master/module2/Patrick_Wolf_assignment_kaggle_challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uAId8HrAgrCv" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 2*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Random Forests
#
# ## Assignment
# - [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.
# - [ ] Continue to participate in our Kaggle challenge.
# - [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features.
# - [ ] Try Ordinal Encoding.
# - [ ] Try a Random Forest Classifier.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
# ## Stretch Goals
#
# ### Doing
# - [ ] Add your own stretch goal(s) !
# - [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.
# - [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).
# - [ ] Get and plot your feature importances.
# - [ ] Make visualizations and share on Slack.
#
# ### Reading
#
# Top recommendations in _**bold italic:**_
#
# #### Decision Trees
# - A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_
# - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)
# - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)
# - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)
# - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU)
#
# #### Random Forests
# - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods
# - [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)
# - _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_
#
# #### Categorical encoding for trees
# - [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)
# - [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)
# - _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_
# - _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_
# - [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)
# - [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)
#
# #### Imposter Syndrome
# - [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)
# - [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)
# - ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html)
# - _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_
#
#
# ### More Categorical Encodings
#
# **1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:
#
# - **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.
# - **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).
# - **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).
# - **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).
#
#
# **2.** The short video
# **[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.
#
# Category Encoders has multiple implementations of this general concept:
#
# - [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)
# - [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)
# - [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)
# - [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)
# - [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)
# - [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)
#
# Category Encoder's mean encoding implementations work for regression problems or binary classification problems.
#
# For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:
#
# ```python
# encoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting
# X_train_encoded = encoder.fit_transform(X_train, y_train=='functional')
# X_val_encoded = encoder.transform(X_train, y_val=='functional')
# ```
#
# For this reason, mean encoding won't work well within pipelines for multi-class classification problems.
#
# **3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.
#
# ```python
# dirty_cat.TargetEncoder(clf_type='multiclass-clf')
# ```
# It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).
#
# However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.
#
# **4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals.
#
# _**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categoricals. It’s an active area of research and experimentation! Maybe you can make your own contributions!**_
# + [markdown] id="0JpnV48cgrDM" colab_type="text"
# ### Setup
#
# You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# + colab_type="code" id="QJBD4ruICm1m" outputId="d8818afb-63ab-46e3-b02a-d9fb01bc8ce7" colab={"base_uri": "https://localhost:8080/", "height": 34}
import pandas as pd
from sklearn.model_selection import train_test_split
# Join features with their labels (pd.merge joins on all shared columns,
# presumably the waterpump id -- confirm against the CSV headers).
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
# + id="umluXn_bgrEM" colab_type="code" colab={}
# NOTE(review): this cell re-imports and re-reads exactly the same files as
# the cell above; the duplication is harmless but redundant.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val, stratified so each split keeps the same
# class balance of the status_group target.
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
                              stratify=train['status_group'], random_state=42)
def wrangle(X):
    """Apply identical cleaning/feature engineering to train, val, and test.

    Returns a new DataFrame; the caller's frame is left untouched.
    """
    # Work on a copy so we never trigger SettingWithCopyWarning upstream.
    X = X.copy()
    # Roughly 3% of latitudes are tiny values near zero (outside Tanzania);
    # collapse them to an exact zero so the next step treats them as missing.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)
    # In these columns a zero is really an unrecorded value. Convert zeros to
    # NaN for later imputation, and keep a boolean *_MISSING indicator since
    # missingness itself may carry predictive signal.
    for column in ['longitude', 'latitude', 'construction_year',
                   'gps_height', 'population']:
        X[column] = X[column].replace(0, np.nan)
        X[column + '_MISSING'] = X[column].isnull()
    # quantity_group / payment_type duplicate other columns; recorded_by is
    # constant and id is a unique random identifier -- none of them help.
    X = X.drop(columns=['quantity_group', 'payment_type'])
    X = X.drop(columns=['recorded_by', 'id'])
    # Parse the recording date and expand it into year/month/day features.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    X['year_recorded'] = X['date_recorded'].dt.year
    X['month_recorded'] = X['date_recorded'].dt.month
    X['day_recorded'] = X['date_recorded'].dt.day
    X = X.drop(columns='date_recorded')
    # Pump age at recording time; NaN when construction_year was missing.
    X['years'] = X['year_recorded'] - X['construction_year']
    X['years_MISSING'] = X['years'].isnull()
    return X
# Apply the same cleaning to all three splits.
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# + id="Pob-awfPyerw" colab_type="code" colab={}
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all categorical features with cardinality <= 50
# (keeps one-hot encoding tractable by excluding high-cardinality columns)
categorical_features = cardinality[cardinality <= 50].index.tolist()
# Combine the lists
features = numeric_features + categorical_features
# + id="1utAy95JyepF" colab_type="code" colab={}
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# + id="hEksrgNOyemc" colab_type="code" outputId="f16ffcc4-71a2-4971-ae43-30e9b4d1dbae" colab={"base_uri": "https://localhost:8080/", "height": 68}
# %%time
# WARNING: the %%time command sometimes has quirks/bugs
# TODO
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
# One-hot encode categoricals, median-impute the NaNs created by wrangle(),
# then fit a 100-tree random forest.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="hYdvOyenyeju" colab_type="code" outputId="28d74831-e9e2-4850-e833-afab9ede42db" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Show how much one-hot encoding widens the feature matrix.
print('X_train shape before encoding', X_train.shape)
encoder = pipeline.named_steps['onehotencoder']
encoded = encoder.transform(X_train)
print('X_train shape after encoding', encoded.shape)
# + id="-PZjlIMxyegh" colab_type="code" outputId="3061b2fc-653f-438b-ebcd-01a132de3c74" colab={"base_uri": "https://localhost:8080/", "height": 607}
# %matplotlib inline
import matplotlib.pyplot as plt
# Get feature importances
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, encoded.columns)
# Plot top n feature importances
n = 20
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
# + id="cy8KIZSsyeYR" colab_type="code" outputId="665cf3ca-1b36-46b8-dc9c-cd549534284a" colab={"base_uri": "https://localhost:8080/", "height": 68}
# %%time
# Arrange data into X features matrix and y target vector
# so we use *all* features, including the high-cardinality categoricals
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# The pipeline is identical to the example cell above,
# except we're replacing one-hot encoder with "ordinal" encoder
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=0, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="ej5JukcM09p3" colab_type="code" outputId="15ed8a3f-0bb0-4472-9dab-f6765a6cecc5" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Ordinal encoding keeps one column per feature, so the width is unchanged.
print('X_train shape before encoding', X_train.shape)
encoder = pipeline.named_steps['ordinalencoder']
encoded = encoder.transform(X_train)
print('X_train shape after encoding', encoded.shape)
# + id="Rypxtbtn0-AW" colab_type="code" outputId="0c9e0c16-f3c3-4830-996a-e503da5b0df3" colab={"base_uri": "https://localhost:8080/", "height": 607}
# Get feature importances
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, encoded.columns)
# Plot top n feature importances
n = 20
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
# + id="kH1c49iy099q" colab_type="code" outputId="4acd441c-907a-4c98-fa2e-ee80c6549a11" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Inspect a high-cardinality column before and after encoding.
X_train['wpt_name'].describe()
# + id="G7kUMLyL097H" colab_type="code" outputId="a27ff907-8c65-4639-9351-1e9c9f90cd2b" colab={"base_uri": "https://localhost:8080/", "height": 221}
X_train['wpt_name'].value_counts()
# + id="q9EgvzHa094D" colab_type="code" outputId="8dcae716-aa9d-4327-82ac-ef4c26243490" colab={"base_uri": "https://localhost:8080/", "height": 221}
encoded['wpt_name'].value_counts()
# + id="Z5TiFhG309zJ" colab_type="code" colab={}
# Single feature used for the encoder comparisons below.
feature = 'extraction_type_class'
# + id="K-Wk98671UiY" colab_type="code" outputId="d60a5ef0-51e2-427c-9d7e-590b02bbc1e4" colab={"base_uri": "https://localhost:8080/", "height": 153}
X_train[feature].value_counts()
# + id="wtHZoCP71U7y" colab_type="code" outputId="5b785a0e-ea94-4b96-aaa5-d39c411111d8" colab={"base_uri": "https://localhost:8080/", "height": 552}
import seaborn as sns
# Mean of the boolean target per category = share of functional pumps.
plt.figure(figsize=(16,9))
sns.barplot(
    x=train[feature],
    y=train['status_group']=='functional',
    color='grey'
);
# + id="izHf4Nln1U1z" colab_type="code" outputId="7d0abb88-6fad-4081-97b9-3ecd17eaece5" colab={"base_uri": "https://localhost:8080/", "height": 374}
X_train[feature].head(20)
# + id="ZEexoQyj1Uw4" colab_type="code" outputId="6ddcfe3b-bb0f-4f90-e60e-fcabc7b9a1fc" colab={"base_uri": "https://localhost:8080/", "height": 723}
# One-hot encoding of the single feature: one column per category.
encoder = ce.OneHotEncoder(use_cat_names=True)
encoded = encoder.fit_transform(X_train[[feature]])
print(f'{len(encoded.columns)} columns')
encoded.head(20)
# + [markdown] id="SFIGEEdp27Bq" colab_type="text"
# **One-Hot Encoding, Logistic Regression, Validation Accuracy**
# + id="TqLZofxs1Uto" colab_type="code" outputId="bf68d382-9d42-4232-cf09-d21159a07bc6" colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
# Logistic regression on the single one-hot-encoded feature.
lr = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(),
    StandardScaler(),
    LogisticRegressionCV(multi_class='auto', solver='lbfgs', cv=5, n_jobs=-1)
)
lr.fit(X_train[[feature]], y_train)
score = lr.score(X_val[[feature]], y_val)
print('Logistic Regression, Validation Accuracy', score)
# + [markdown] id="xgP6Ix_52-mR" colab_type="text"
# **One-Hot Encoding, Decision Tree, Validation Accuracy**
# + id="ozyuv3OP1Uo4" colab_type="code" outputId="0cd990da-1165-40eb-ba88-7d3dbe05059f" colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.tree import DecisionTreeClassifier
# Same single feature, same encoding, tree model for comparison.
dt = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(),
    DecisionTreeClassifier(random_state=42)
)
dt.fit(X_train[[feature]], y_train)
score = dt.score(X_val[[feature]], y_val)
print('Decision Tree, Validation Accuracy', score)
# + [markdown] id="y9b6P3wW3Fll" colab_type="text"
# **One-Hot Encoding, Logistic Regression, Model Interpretation**
# + id="RZFEgs8A3F8Y" colab_type="code" outputId="01024255-44a5-4916-8577-5c8235a6255e" colab={"base_uri": "https://localhost:8080/", "height": 265}
# Plot the first class's coefficients per encoded column.
model = lr.named_steps['logisticregressioncv']
encoder = lr.named_steps['onehotencoder']
encoded_columns = encoder.transform(X_val[[feature]]).columns
coefficients = pd.Series(model.coef_[0], encoded_columns)
coefficients.sort_values().plot.barh(color='grey');
# + [markdown] id="yRY5eh353Ord" colab_type="text"
# **One-Hot Encoding, Decision Tree, Model Interpretation**
# + id="yyDaXVVA3GOA" colab_type="code" outputId="dd8ba69c-6901-44ad-bd99-6bac8c0717f8" colab={"base_uri": "https://localhost:8080/", "height": 954}
# Plot tree
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html
import graphviz
from sklearn.tree import export_graphviz
model = dt.named_steps['decisiontreeclassifier']
encoder = dt.named_steps['onehotencoder']
encoded_columns = encoder.transform(X_val[[feature]]).columns
dot_data = export_graphviz(model,
                           out_file=None,
                           max_depth=7,
                           feature_names=encoded_columns,
                           class_names=model.classes_,
                           impurity=False,
                           filled=True,
                           proportion=True,
                           rounded=True)
display(graphviz.Source(dot_data))
# + [markdown] id="WL_gvh7W3X9j" colab_type="text"
# **Ordinal Encoding**
#
# Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed in; in this case, we use the knowledge that there is some true order to the classes themselves. Otherwise, the classes are assumed to have no true order and integers are selected at random.
# + id="qctJegl83Gbc" colab_type="code" outputId="f05d3387-e0fe-4397-afd6-a8432fe86d97" colab={"base_uri": "https://localhost:8080/", "height": 686}
# Ordinal encoding of the same feature: one integer column.
encoder = ce.OrdinalEncoder()
encoded = encoder.fit_transform(X_train[[feature]])
print(f'1 column, {encoded[feature].nunique()} unique values')
encoded.head(20)
# + [markdown] id="-onlX0Dl3fss" colab_type="text"
# **Ordinal Encoding, Logistic Regression, Validation Accuracy**
# + id="x42tukD93gEs" colab_type="code" outputId="e414a7c3-c498-45e3-b2f5-4ba8eb4f41cf" colab={"base_uri": "https://localhost:8080/", "height": 34}
lr = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(),
    StandardScaler(),
    LogisticRegressionCV(multi_class='auto', solver='lbfgs', cv=5, n_jobs=-1)
)
lr.fit(X_train[[feature]], y_train)
score = lr.score(X_val[[feature]], y_val)
print('Logistic Regression, Validation Accuracy', score)
# + [markdown] id="2R3hrYqh3qpl" colab_type="text"
# **Ordinal Encoding, Decision Tree, Validation Accuracy**
# + id="8qNfbCG53gZr" colab_type="code" outputId="ac06aaf9-8294-43f5-c4fa-13255960246c" colab={"base_uri": "https://localhost:8080/", "height": 34}
dt = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(),
    DecisionTreeClassifier(random_state=42)
)
dt.fit(X_train[[feature]], y_train)
score = dt.score(X_val[[feature]], y_val)
print('Decision Tree, Validation Accuracy', score)
# + [markdown] id="ysv5IMK63wTd" colab_type="text"
# **Ordinal Encoding, Logistic Regression, Model Interpretation**
# + id="Nnokf3z83gqn" colab_type="code" outputId="785a82a2-c6ce-42d5-db75-e113654909b4" colab={"base_uri": "https://localhost:8080/", "height": 265}
model = lr.named_steps['logisticregressioncv']
encoder = lr.named_steps['ordinalencoder']
encoded_columns = encoder.transform(X_val[[feature]]).columns
coefficients = pd.Series(model.coef_[0], encoded_columns)
coefficients.sort_values().plot.barh(color='grey');
# + [markdown] id="oTFi3fV-35I4" colab_type="text"
# **Ordinal Encoding, Decision Tree, Model Interpretation**
# + id="DdGav9uU3g5O" colab_type="code" outputId="89d48679-8c58-4487-fe59-bb851805df6a" colab={"base_uri": "https://localhost:8080/", "height": 816}
model = dt.named_steps['decisiontreeclassifier']
encoder = dt.named_steps['ordinalencoder']
encoded_columns = encoder.transform(X_val[[feature]]).columns
dot_data = export_graphviz(model,
                           out_file=None,
                           max_depth=5,
                           feature_names=encoded_columns,
                           class_names=model.classes_,
                           impurity=False,
                           filled=True,
                           proportion=True,
                           rounded=True)
display(graphviz.Source(dot_data))
# + [markdown] id="llI82vQy4DJz" colab_type="text"
# **Understand how tree ensembles reduce overfitting compared to a single decision tree with unlimited depth**
#
# **Overview**
#
# **What's "random" about random forests?**
# Each tree trains on a random bootstrap sample of the data. (In scikit-learn, for RandomForestRegressor and RandomForestClassifier, the bootstrap parameter's default is True.) This type of ensembling is called Bagging. (Bootstrap AGGregatING.)
# Each split considers a random subset of the features. (In scikit-learn, when the max_features parameter is not None.)
#
#
# For extra randomness, you can try "extremely randomized trees"!
#
# In extremely randomized trees (see ExtraTreesClassifier and ExtraTreesRegressor classes), randomness goes one step further in the way splits are computed. As in random forests, a random subset of candidate features is used, but instead of looking for the most discriminative thresholds, thresholds are drawn at random for each candidate feature and the best of these randomly-generated thresholds is picked as the splitting rule. This usually allows to reduce the variance of the model a bit more, at the expense of a slightly greater increase in bias
# + [markdown] id="HBkTEMFB4O6-" colab_type="text"
# **Example: predicting golf putts**
# + id="QQrjJYDu3hFt" colab_type="code" colab={}
# Golf putting data: attempts and successes at each distance
# (distance presumably in feet -- confirm with the data source).
putts = pd.DataFrame(
    columns=['distance', 'tries', 'successes'],
    data = [[2, 1443, 1346],
            [3, 694, 577],
            [4, 455, 337],
            [5, 353, 208],
            [6, 272, 149],
            [7, 256, 136],
            [8, 240, 111],
            [9, 217, 69],
            [10, 200, 67],
            [11, 237, 75],
            [12, 202, 52],
            [13, 192, 46],
            [14, 174, 54],
            [15, 167, 28],
            [16, 201, 27],
            [17, 195, 31],
            [18, 191, 33],
            [19, 147, 20],
            [20, 152, 24]]
)
# Empirical success probability at each distance -- the regression target.
putts['rate of success'] = putts['successes'] / putts['tries']
putts_X = putts[['distance']]
putts_y = putts['rate of success']
# + id="zrk47Fyj4VrE" colab_type="code" outputId="a82cc5e1-b23c-421a-8a4b-37fed0daddae" colab={"base_uri": "https://localhost:8080/", "height": 637, "referenced_widgets": ["1a91e438640f4f59a16e2ab31b4e2a8c", "<KEY>", "f043940b3a364d8294c7e0c191dcd69b", "495728a733254b2d96fbe4d49705df9e", "7e943f269b4e4234ba9f7146129f0be3", "733ee2b187ac4572abe1aed407564e42", "9f58fcc96a9943d8bdc8226b451a5c07", "<KEY>", "<KEY>"]}
# %matplotlib inline
import matplotlib.pyplot as plt
from ipywidgets import interact
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
def putt_trees(max_depth=1, n_estimators=1):
    """Fit a single decision tree and a random forest on the putts data,
    then plot each model's step-function predictions over the raw scatter.
    Interactive sliders (below) drive max_depth and n_estimators."""
    estimators = [DecisionTreeRegressor(max_depth=max_depth),
                  RandomForestRegressor(max_depth=max_depth, n_estimators=n_estimators)]
    for estimator in estimators:
        estimator.fit(putts_X, putts_y)
        # Title each chart with the model's class name to tell the two apart.
        axis = putts.plot('distance', 'rate of success', kind='scatter',
                          title=type(estimator).__name__)
        axis.step(putts_X, estimator.predict(putts_X), where='mid')
        plt.show()
interact(putt_trees, max_depth=(1,6,1), n_estimators=(10,40,10));
# + [markdown] id="bQXudycQ4dvE" colab_type="text"
# **Bagging demo, with golf putts data**
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html
# + id="YV-kMCuw4V3P" colab_type="code" outputId="a81faf8d-c39d-4a10-85c9-279ed8c97ece" colab={"base_uri": "https://localhost:8080/", "height": 915, "referenced_widgets": ["0da69dc153384a678a9a9b010b17900f", "<KEY>", "f43a4d54519b43e398aa9939c3c25e87", "f8674ca1962540508d74bd20f1950f30", "<KEY>", "bc0e82ffc13940e99b75045abe34ca40", "<KEY>", "d22dbd202017401ea4c6d279d80aafd2", "913e0257fc4e4f3f94bdd175f677e44f"]}
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
def diy_bagging(max_depth=1, n_estimators=1):
    """Hand-rolled bagging: fit one tree per bootstrap resample of the putts
    data, plot each tree, then plot the averaged ensemble prediction."""
    y_preds = []
    for i in range(n_estimators):
        title = f'Tree {i+1}'
        # Bootstrap = sample n rows with replacement; sorted so the step plot is monotone in x.
        bootstrap_sample = putts.sample(n=len(putts), replace=True).sort_values(by='distance')
        bootstrap_X = bootstrap_sample[['distance']]
        bootstrap_y = bootstrap_sample['rate of success']
        tree = DecisionTreeRegressor(max_depth=max_depth)
        tree.fit(bootstrap_X, bootstrap_y)
        y_pred = tree.predict(bootstrap_X)
        y_preds.append(y_pred)
        ax = bootstrap_sample.plot('distance', 'rate of success', kind='scatter', title=title)
        ax.step(bootstrap_X, y_pred, where='mid')
        plt.show()
    # NOTE(review): each y_pred was evaluated on its own bootstrap sample's x
    # values, yet the average is plotted against putts_X — the rows only line
    # up because every resample has len(putts) rows; confirm this is intended.
    ensembled = np.vstack(y_preds).mean(axis=0)
    title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
    ax = putts.plot('distance', 'rate of success', kind='scatter', title=title)
    ax.step(putts_X, ensembled, where='mid')
    plt.show()
interact(diy_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
# + [markdown] id="SNwR-kjn4o7W" colab_type="text"
# **Go back to Tanzania Waterpumps ...**
#
# **Helper function to visualize predicted probabilities**
# + id="5WNOSaOg4WHt" colab_type="code" colab={}
import itertools
import seaborn as sns
def pred_heatmap(model, X, features, class_index=-1, title='', num=100):
    """
    Visualize predicted probabilities, for classifier fit on 2 numeric features
    Parameters
    ----------
    model : scikit-learn classifier, already fit
    X : pandas dataframe, which was used to fit model
    features : list of strings, column names of the 2 numeric features
    class_index : integer, index of class label
    title : string, title of plot
    num : int, number of grid points for each feature
    Returns
    -------
    y_pred_proba : numpy array, predicted probabilities for class_index
    """
    feature1, feature2 = features
    min1, max1 = X[feature1].min(), X[feature1].max()
    min2, max2 = X[feature2].min(), X[feature2].max()
    x1 = np.linspace(min1, max1, num)
    # x2 is built descending (max -> min) so that, since seaborn draws the
    # first row at the top, feature2 increases toward the top of the heatmap.
    x2 = np.linspace(max2, min2, num)
    # Cartesian grid of (feature1, feature2) pairs; x1 varies slowest.
    combos = list(itertools.product(x1, x2))
    y_pred_proba = model.predict_proba(combos)[:, class_index]
    # reshape gives one row per x1 value; the transpose puts feature2 on rows
    # (heatmap y-axis) and feature1 on columns (x-axis).
    pred_grid = y_pred_proba.reshape(num, num).T
    table = pd.DataFrame(pred_grid, columns=x1, index=x2)
    sns.heatmap(table, vmin=0, vmax=1)
    plt.xticks([])
    plt.yticks([])
    plt.xlabel(feature1)
    plt.ylabel(feature2)
    plt.title(title)
    plt.show()
    return y_pred_proba
# + [markdown] id="5nW-7pmJ42GW" colab_type="text"
# **Compare Decision Tree, Random Forest, Logistic Regression**
# + id="MDmqcv0W4WBc" colab_type="code" outputId="12296dcc-78a6-48d6-eeb8-dc03aba75c9c" colab={"base_uri": "https://localhost:8080/", "height": 864, "referenced_widgets": ["864d024394ee4401a86d4dddd05a6342", "f2615afd5f454ff6b2ccf3b32a1de88f", "47b4d3c510dd4db399e1268c9e1f04a7", "00e4b4af4ba14fabb8c7ef317e0a89a7", "<KEY>", "2295116939de4f409d6ecc695399269d", "776829f0987a49a2947aefa75af03c1d", "d599b654ed524eea8e398952da409811", "7be04891bea949e7bff3049011a436a8"]}
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'longitude'
feature2 = 'quantity'
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
def get_X_y(df, feature1, feature2, target):
    """Build a 2-feature design matrix and target from df: median-impute,
    then ordinal-encode (`ce` is category_encoders, imported in an earlier cell)."""
    features = [feature1, feature2]
    X = df[features]
    y = df[target]
    X = X.fillna(X.median())
    X = ce.OrdinalEncoder().fit_transform(X)
    return X, y
def compare_models(max_depth=1, n_estimators=1):
    """Fit tree / forest / logistic regression on the module-level X, y and
    show each model's predicted-probability heatmap.

    NOTE(review): reads globals X and y (set below) rather than parameters,
    and RandomForestClassifier must come from an earlier notebook cell.
    """
    models = [DecisionTreeClassifier(max_depth=max_depth),
              RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators),
              LogisticRegression(solver='lbfgs', multi_class='auto')]
    for model in models:
        name = model.__class__.__name__
        model.fit(X, y)
        pred_heatmap(model, X, [feature1, feature2], class_index=0, title=name)
# `train` comes from an earlier cell of this notebook.
X, y = get_X_y(train, feature1, feature2, target='status_group')
interact(compare_models, max_depth=(1,6,1), n_estimators=(10,40,10));
# + [markdown] id="N0uYZC0F4_Rv" colab_type="text"
# **Bagging**
# + id="opg6nml9448r" colab_type="code" outputId="60d764e0-2c15-4826-e86b-43b3bfacfb31" colab={"base_uri": "https://localhost:8080/", "height": 864, "referenced_widgets": ["77f201c0ec3d497eac6b04960ce2358d", "791fec9f12874b5bb59d03691afa2ebb", "b42c7503f7df4504985f2173f4be964a", "cffecd4e01184e07bce51c0cd9f5dd1a", "6a13a68be4e6485f98d84352650e65c4", "<KEY>", "d846ff0b0e1546e6b9e8cee32cbcf608", "83b425e00f754041be9ffe032fcfd248", "2402d379f52b493b907f3f42d17a6c29"]}
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'quantity'
feature2 = 'extraction_type_group'
def waterpumps_bagging(max_depth=1, n_estimators=1):
    """DIY bagging on the waterpumps data: one tree per bootstrap resample of
    `train` (from an earlier cell), then a heatmap of the averaged probabilities."""
    predicteds = []
    for i in range(n_estimators):
        title = f'Tree {i+1}'
        bootstrap_sample = train.sample(n=len(train), replace=True)
        X, y = get_X_y(bootstrap_sample, feature1, feature2, target='status_group')
        tree = DecisionTreeClassifier(max_depth=max_depth)
        tree.fit(X, y)
        # pred_heatmap both plots and returns the flat probability grid.
        predicted = pred_heatmap(tree, X, [feature1, feature2], class_index=0, title=title)
        predicteds.append(predicted)
    ensembled = np.vstack(predicteds).mean(axis=0)
    title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
    # 100 matches pred_heatmap's default num; the .T mirrors its layout.
    sns.heatmap(ensembled.reshape(100, 100).T, vmin=0, vmax=1)
    plt.title(title)
    plt.xlabel(feature1)
    plt.ylabel(feature2)
    plt.xticks([])
    plt.yticks([])
    plt.show()
interact(waterpumps_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
# + id="S3OopZva45N-" colab_type="code" colab={}
# + id="HgJRiO-f45J-" colab_type="code" colab={}
# + id="-074ZrN945Fz" colab_type="code" colab={}
| module2/Patrick_Wolf_assignment_kaggle_challenge_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lmcanavals/algorithmic_complexity/blob/main/notebooks/bp_greg_graph.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="sgbUFtlKZ58s"
# # [Greg and Graph 295B](https://codeforces.com/problemset/problem/295/B)
# + id="UbSLswvoZ5LD"
import numpy as np
# + id="Lbl5L241Z6ek"
def relax(cost, cost_uv, u, v):
    """Edge relaxation: lower cost[v] in place if the path through u is shorter."""
    candidate = cost[u] + cost_uv
    if candidate < cost[v]:
        cost[v] = candidate
def floydWarshall(G):
    """Floyd-Warshall all-pairs shortest paths on the dense matrix G.

    Parameters: G — square numpy array of edge weights (0 on the diagonal).
    Returns: the sum of all pairwise shortest distances (numpy scalar).
    G itself is not modified; the relaxation runs on a copy.

    Fixes vs. the original:
    - the guard `cost[i, k] != 0` wrongly skipped relaxation through k whenever
      the current i->k distance was 0, so zero-weight edges were never used as
      intermediates; for strictly positive off-diagonal weights (as in the
      Codeforces inputs above) results are unchanged.
    - the relaxation is inlined instead of routed through `relax` on a row
      view, making the in-place update explicit.
    """
    n = len(G)
    cost = G.copy()
    for k in range(n):
        for i in range(n):
            if i == k:
                continue
            for j in range(n):
                if j == i or j == k:
                    continue
                through_k = cost[i, k] + cost[k, j]
                if through_k < cost[i, j]:
                    cost[i, j] = through_k
    return np.sum(cost)
# + colab={"base_uri": "https://localhost:8080/"} id="Jnul1G7caW6-" outputId="4873f5d7-304e-4674-aa85-d3f8f411a0fb"
# %%file 1.in
2
0 5
4 0
1 2
# + colab={"base_uri": "https://localhost:8080/"} id="BE1sSAhybZ7g" outputId="d420daaa-e446-4868-9364-a15227dbb9db"
# %%file 2.in
4
0 3 1 1
6 0 400 1
2 4 0 1
1 1 1 0
4 1 2 3
# + colab={"base_uri": "https://localhost:8080/"} id="7rmU_ckyaa0Y" outputId="98126a04-0b83-4906-f735-c9ce232d5d32"
# Read the test case: n, an n x n weight matrix, then the deletion order x.
with open("2.in") as f:
    n = int(f.readline())
    # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the supported dtype spelling.
    G = np.zeros((n, n), dtype=int)
    for i in range(n):
        G[i] = list(map(int, f.readline().split()))
    x = list(map(int, f.readline().split()))
# Print the all-pairs distance sum, then again after each vertex removal.
# NOTE(review): after a deletion the remaining rows shift, but xi - 1 still
# indexes by the ORIGINAL vertex number — confirm the indices are meant to be
# applied to the shrinking matrix (problem 295B processes deletions in reverse).
print(floydWarshall(G), end=" ")
for xi in x[:-1]:
    G = np.delete(G, xi - 1, 0)
    G = np.delete(G, xi - 1, 1)
    print(floydWarshall(G), end=" ")
print()
| notebooks/bp_greg_graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
# NOTE(review): a..e are assigned but never used in this cell — confirm they
# are leftovers from an earlier draft.
a=0
b=0
c=0
d=0
e=0
# Single test sample; note every column carries the SAME label 0 — duplicate
# column names are legal in pandas but almost certainly unintended.
X_Test = pd.DataFrame([[32,0,2,8,0]],columns = [0,0,0,0,0])
print(X_Test)
# NOTE(review): the file handle is never closed — a `with open(...)` block
# would be safer.  (sklearn.externals.joblib was removed in scikit-learn 0.23.)
naivebayes_model = open("F:/Final Project/Flask/models/NBClassifier.pkl","rb")
celf = joblib.load(naivebayes_model)
# NOTE(review): fitting a fresh StandardScaler on the single test row zeroes
# the features (mean == the row itself); the scaler fitted on the TRAINING
# data should be loaded and reused instead — confirm.
sc = StandardScaler()
X_Test = sc.fit_transform(X_Test)
mypred = celf.predict(X_Test)
print(mypred)
| Test Files/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import codecs
import csv
import time
import os
import re
import gzip
import pandas as pd
import numpy as np
PATH_TO_DATA = 'Data/'
PATH_TO_DATA_EN = PATH_TO_DATA+"enwiki/"
PATH_TO_DATA_UK = PATH_TO_DATA+"ukwiki/"
# Collect the per-chunk English-wiki article dumps matching the expected naming scheme.
ENWIKI_ART_FNMS = [
    fname
    for fname in os.listdir(PATH_TO_DATA_EN)
    if re.match(r"enwiki-20180620-pages-meta-current\d{2}-p\d+p\d+.xml_art.csv.gz", fname)
]
def unpack(file_name):
    """Decompress a .gz file next to the original and return the new file's path."""
    target = file_name.replace(".gz", "")
    with gzip.open(file_name, 'rb') as src:
        with open(target, 'wb') as dst:
            dst.writelines(src)
    return target
def pack_and_remove(file_name):
    """Gzip-compress file_name, delete the uncompressed original, and return the .gz path."""
    compressed = file_name + '.gz'
    with open(file_name, 'rb') as src:
        with gzip.open(compressed, 'wb') as dst:
            dst.writelines(src)
    os.remove(file_name)
    return compressed
# +
# Concatenate every English-wiki chunk into one CSV: the first chunk writes
# the header, the rest append without one; the result is gzipped at the end.
first = True
for fn in ENWIKI_ART_FNMS:
    fn = PATH_TO_DATA_EN+fn
    print(fn)
    fn_new = unpack(fn)
    df_articles = pd.read_csv(fn_new, encoding='ISO-8859-1', quotechar="'")
    if first:
        df_articles.to_csv('enwiki-20180620-pages-links.csv', index = False, encoding='ISO-8859-1', quotechar="'", escapechar ="\\")
        first = False
    else:
        df_articles.to_csv('enwiki-20180620-pages-links.csv', mode='a', header=False, index = False,
                           encoding='ISO-8859-1', quotechar="'", escapechar ="\\")
# NOTE(review): the intermediate unpacked chunk files (fn_new) are left on disk.
pack_and_remove('enwiki-20180620-pages-links.csv')
# -
# Collect the Ukrainian-wiki chunk files (same scheme as the English list above).
UKWIKI_ART_FNMS = []
for file in os.listdir(PATH_TO_DATA_UK):
    if re.match(r"ukwiki-20180620-pages-meta-current\d{2}-p\d+p\d+.xml_art.csv.gz", file):
        UKWIKI_ART_FNMS.append(file)
# +
# Same concatenation as for enwiki, but UTF-8 with double-quote quoting.
first = True
for fn in UKWIKI_ART_FNMS:
    fn = PATH_TO_DATA_UK+fn
    print(fn)
    fn_new = unpack(fn)
    df_articles = pd.read_csv(fn_new, encoding='UTF-8', quotechar="\"")
    if first:
        df_articles.to_csv('ukwiki-20180620-pages-links.csv', index = False, encoding='UTF-8', quotechar="\"")
        first = False
    else:
        df_articles.to_csv('ukwiki-20180620-pages-links.csv', mode='a', header=False, index = False,
                           encoding='UTF-8', quotechar="\"")
pack_and_remove('ukwiki-20180620-pages-links.csv')
| data/preprocessing/data-combine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dados dos números, el primero ‘a’ y el segundo ‘b’, se pide:
# 1) La suma de los dos números:
#
# 2) La diferencia de los dos números:
# 3) El producto de los dos números:
# 4) El cociente del primero entre el segundo:
# 5) El cociente del segundo entre el primero:
# 6) El doble del primero:
# 7) El doble del primero por el segundo:
# 8) El triple del segundo por el primero:
# 9) El doble de la suma de los dos números:
# 10) El triple de la diferencia de los dos números:
# +
# Basic arithmetic exercise on the pair (a, b); the printed labels are
# Spanish: sum, difference, product, the two quotients, doubles and triples.
a, b = 254, -111
print('La suma de los dos numeros: ', a+b)
print('La diferencia de los dos numeros: ', a-b)
print('El producto de los dos numeros: ', a*b)
print('El cociente del primero entre el segundo: ', a/b)
print('El cociente del segundo entre el primero: ', b/a)
print('El doble del primero: ', 2*a)
print('El doble del primero por el segundo: ', (2*a)*b)
print('El triple del segundo por el primero: ', (3*b)*a)
print('El doble de la suma de los dos numeros: ', 2*(a+b))
print('El triple de la diferencia de los dos numeros: ', 3*(a-b))
| 01_algebra/modulo_I_comceptos_basicos/ejercicios/ejercicios06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yohanesnuwara/python-bootcamp-for-geoengineers/blob/master/demo_starting_python_E%26P_1hour.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="u1NwL3fQ_-PS"
# # Demo: Starting Python for Exploration and Production in 1 Hour
#
# **Created by:** *<NAME>*
#
# This notebook is used for personal demonstration in my technical talk and knowledge sharing sessions.
#
#
# + [markdown] id="_2IZDnx5BVap"
# ## Demo 0: Kickstarting
#
# This session demonstrate how to use Google Colab (switching from CPU to GPU vice versa, connecting to runtime, scripting, and running the cell)
# + id="dmRoI54fbH12" outputId="a52c409f-cbcf-4fe6-d549-0d9d7679a169" colab={"base_uri": "https://localhost:8080/", "height": 34}
print('hello world')
# + id="EAlK4Y6CbPLh" outputId="26e1d4be-9e6f-44de-a568-1926815299d7" colab={"base_uri": "https://localhost:8080/", "height": 34}
a = 10
if a > 5:
b = a**2
print(b)
if a <= 5:
b = 1 / a
print(b)
# + [markdown] id="1bzdzcjfAuEs"
# ## Demo 1: Python and its Effectiveness
#
# This session demonstrate how to apply list comprehension to solve problems in engineering fast, easily, and effectively i.e. to reduce the lines of codes.
#
# In the example below, we want to compute the difference between two consecutive elements in an array
# + id="FiQvGV7xX3Od"
# create array
array = [13, 56, 78, 23, 45, 67, 82, 34, 25, 66, 23, 12, 45, 56, 78, 89]
# + id="GmNtQpP1X5pf" outputId="951cff86-08c4-4e3b-ecde-a06a1995c96c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# NOT using LIST COMPREHENSION
difference = []
for i, j in zip(range(0, len(array)-1), range(1, len(array))):
diff = array[j] - array[i]
# append result of each iter to the blank array
difference.append(diff)
print(difference)
# + id="VhAIIw5LYC9B" outputId="396bdeea-dc1a-4642-b4d1-4a47755f186d" colab={"base_uri": "https://localhost:8080/", "height": 34}
# using LIST COMPREHENSION
difference = [j-i for i, j in zip(array[:-1], array[1:])]
print(difference)
# + [markdown] id="K-O7IZY1bQ1x"
# ## Demo 2: Numpy, Matplotlib, and Pandas
#
# This session demonstrate how to use Numpy, Matplotlib, and Pandas libraries, and to guarantee that if you master these libraries, you will master how to solve engineering problems with Python.
#
# In the example below, we use Numpy to create array of x and y trigonometric function, Matplotlib to visualize the function, and Pandas to convert the data into a dataframe.
# + id="NbWHHnNcb1GO"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="fDsksszCcAvh"
# Numpy
## create x array
x = np.arange(0, 370, 10)
## compute sine and cosine from x array
ysin = np.sin(np.deg2rad(x))
ycos = np.cos(np.deg2rad(x))
# + id="lo4H8l6jdimi" outputId="30841425-8d4a-4a6f-b461-90a1d4c526cc" colab={"base_uri": "https://localhost:8080/", "height": 364}
# Matplotlib
## resize the plot
plt.figure(figsize=(10,5))
## plot, specify color, linetype, linewidth, and give labels
plt.plot(x, ysin, '.-', color='purple', label='y=sin(x)')
plt.plot(x, ycos, '*-', color='green', linewidth=1, label='y=cos(x)')
## give title, with size and pad
plt.title('Sine and Cosine Function', size=20, pad=10)
## give labels, with size
plt.xlabel('x', size=15)
plt.ylabel('y', size=15)
## limit the axes
plt.xlim(0, 360)
## show the legends and specify its location in the plot
plt.legend(loc='upper center')
## show the grids
plt.grid()
plt.show()
# + id="7XSNKsTIeG6g" outputId="39e1fb75-c44f-4e5d-867b-80954d62f73e" colab={"base_uri": "https://localhost:8080/", "height": 349}
# Pandas
## create dataframe
df = pd.DataFrame({'x': x, 'sin(x)': ysin, 'cos(x)': ycos})
## display the first 10 rows
df.head(10)
# + [markdown] id="DqQJTV2QckCQ"
# ## Demo 3: Access Exploration Open Dataset
#
# This session demonstrates how to stream EXPLORATION open dataset from a website directly to Google Colab (without downloading to our PC) and open the data quickly.
#
# In the example below, we access a well-log dataset from the KGS web data repository, use Lasio to read the data, and display it.
# + id="U7w5lKDDhYQG" outputId="cf8d3bb4-0d2d-4d24-efdf-c5caf4fa2cec" colab={"base_uri": "https://localhost:8080/", "height": 207}
# get dataset from the open repo website (e.g. KGS)
# !wget 'http://www.kgs.ku.edu/PRS/Scans/Log_Summary/2020.zip'
# + id="BQNv3d61h5UM"
# unzip file and store to directory "KGS"
# !unzip '/content/2020.zip' -d '/content/KGS'
# !unzip '/content/KGS/1051325649.zip' -d '/content/KGS/logs'
# + id="YvOaiMu7jg8O" outputId="a7a95ac0-4cda-485c-c08c-9b6a9b37614a" colab={"base_uri": "https://localhost:8080/", "height": 124}
# install Lasio library to read well log data
# !pip install lasio
# + id="R6N5pD09jri8"
# use Lasio
import lasio
# read well log data
well = lasio.read('/content/KGS/logs/1051325649.las')
# + id="Pt5QWniglBuz" outputId="da077424-10dc-41bd-d6c8-5f208a5e6207" colab={"base_uri": "https://localhost:8080/", "height": 519}
plt.figure(figsize=(3,8)) # chart size
plt.plot(well['NPHI'], well['DEPT'], color='blue') # plot the data
plt.title('Neutron Porosity Log', size=15, pad=10)
plt.xlabel("NPHI (v/v)"); plt.ylabel("Depth (m)") # labels
plt.xlim(0, 0.45)
plt.grid(True) # give chart grids
plt.gca().invert_yaxis() # invert y-axis
plt.show()
# + [markdown] id="z9K98xR1nWd1"
# ## Demo 4: Access Production Open Dataset
#
# This session demonstrates how to stream PRODUCTION open dataset from a website directly to Google Colab (without downloading to our PC) and open the data quickly.
#
# In the example below, we access a production history data of the Volve field in North Sea from an available database in Zenodo (Alfonso Reyes), and display the production plot.
# + id="_3e_26m9na8T" outputId="3e010ffa-59c0-4981-b018-01f88472c6fc" colab={"base_uri": "https://localhost:8080/", "height": 207}
# get the dataset from the open repo website (e.g. Volve Zenodo repo)
# !wget 'https://zenodo.org/record/2596620/files/f0nzie/volve_eclipse_reservoir-v0.1.zip'
# + id="Km9QZaFGn8QF"
# unzip the zip file
# !unzip '/content/volve_eclipse_reservoir-v0.1.zip' -d '/content/Volve'
# + id="UHecglrgoRLl" outputId="1bd5fd50-bf7d-413d-f3f4-8ca9fdfcfe02" colab={"base_uri": "https://localhost:8080/", "height": 349}
# define the filepath of production data in Excel file
filepath = '/content/Volve/f0nzie-volve_eclipse_reservoir-413a669/inst/rawdata/Volve production data.xlsx'
# read excel from the above filepath
df = pd.read_excel(filepath, sheet_name='Monthly Production Data')
# output the first 10 rows of the dataframe (head returns the first rows, not the last)
df.head(10)
# + id="QYl4zWcYp7XX" outputId="d520076c-cc26-45f4-abd8-8d040865ef9e" colab={"base_uri": "https://localhost:8080/", "height": 416}
# select only data of well 15/9-F-12
well_prod = df[df['Wellbore name']=='15/9-F-12']
# make array of months
t = np.arange(len(well_prod))
# specify row contains oil cum. prod.
Np = well_prod['Oil']
# plot oil cum. prod vs month
plt.figure(figsize=(12,6))
plt.step(t, Np, label='Well 15/9-F-12 (Volve Field)')
plt.title('Oil Cumulative Production over Months Since February 2008', size=20, pad=10)
plt.xlim(min(t), max(t)); plt.ylim(ymin=0)
plt.xlabel('Months Since February 2008', size=12)
plt.ylabel('Cumulative Oil Production (Sm3)', size=12)
plt.legend()
plt.show()
| demo_starting_python_E&P_1hour.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import os
import requests
from difflib import SequenceMatcher
from tqdm import tqdm_notebook
from time import sleep
import ast
def loadCoinData():
    """Read every per-token CSV in ./tokenData and combine them into one
    DataFrame with one row per token.

    Each file's first row is skipped and 'id' becomes the index; the frames
    are concatenated column-wise and the result transposed.
    """
    dataList = []
    tokenFileList = os.listdir('./tokenData')
    for token in tqdm_notebook(tokenFileList):
        tokenData = pd.read_csv('./tokenData/'+token, skiprows=1).set_index('id')
        dataList.append(tokenData)
    return pd.concat(dataList, axis=1).T
def hasCategory(x):
    """Return 1 when the stringified category list in x parses non-empty, else 0."""
    return 1 if ast.literal_eval(x) else 0
def hasSpecificCategory(x, category):
    """Return 1 when `category` occurs as a substring of the raw string x, else 0.

    NOTE(review): this tests the unparsed string (unlike hasCategory, which
    parses the list), so a category name that is a substring of another also
    matches — presumably intentional; confirm.
    """
    return 1 if category in x else 0
# +
def hasBaseCategory(x, catArr):
    """Return 1 when any category parsed from the string x appears in catArr, else 0."""
    parsed = ast.literal_eval(x)
    return 1 if set(catArr) & set(parsed) else 0
# -
def getTokensWithCategory(coinDF):
    """Filter coinDF down to rows whose 'categories' string parses non-empty.

    Side effect (as in the original): temporarily adds a 'has_category'
    column to coinDF itself.
    """
    coinDF['has_category'] = coinDF['categories'].map(hasCategory)
    flagged = coinDF[coinDF['has_category'] == 1]
    return flagged.drop(columns=['has_category'])
def getAllCategories(coinDF):
    """Return the set of every category appearing in the 'categories' column."""
    return {cat
            for row in coinDF['categories'].values
            for cat in ast.literal_eval(row)}
def getCategoryCounts(coinDF, all_categories):
    """Count, per category, how many coins mention it; return the counts
    as a Series sorted in descending order."""
    totals = [coinDF['categories'].map(lambda x: hasSpecificCategory(x, cat)).sum()
              for cat in all_categories]
    counts = pd.Series(totals, index=all_categories)
    return counts.sort_values(ascending=False)
def getCoinsWithCategory(coinDF, category):
    """Return the rows of coinDF whose raw 'categories' string mentions `category`.

    Side effect (as in the original): temporarily adds 'has_given_category' to coinDF.
    """
    flags = coinDF['categories'].map(lambda x: hasSpecificCategory(x, category))
    coinDF['has_given_category'] = flags
    matches = coinDF[coinDF['has_given_category'] > 0]
    return matches.drop(columns=['has_given_category'])
def getCoinsInBaseCategory(coinDF, catArr):
    """Return the rows of coinDF whose parsed category list overlaps catArr.

    Side effect (as in the original): temporarily adds 'has_given_category' to coinDF.
    """
    coinDF['has_given_category'] = coinDF['categories'].map(
        lambda x: hasBaseCategory(x, catArr))
    hits = coinDF[coinDF['has_given_category'] > 0]
    return hits.drop(columns=['has_given_category'])
def mapTrustScore(score):
    """Map coingecko trust colours to {-1, 0, 1}; unknown values yield None."""
    return {'green': 1, 'yellow': 0, 'red': -1}.get(score)
def getNumericalQualitative(CC, useColumns):
    """Copy the selected columns of CC and recode the 'trust_score' colours
    as numbers via mapTrustScore."""
    subset = CC[useColumns].copy()
    subset['trust_score'] = subset['trust_score'].map(mapTrustScore)
    return subset
def describeCategory(catCoinDF, category, useColumns):
    """Print the token count and return describe() statistics for one base category.

    NOTE(review): resolves `category` through the module-level `baseCategories`
    dict, which is defined in a LATER cell of this notebook — cells must run
    in order; consider passing the mapping as a parameter.
    """
    CC = getCoinsInBaseCategory(catCoinDF, baseCategories[category])
    NQ = getNumericalQualitative(CC, useColumns)
    print('Number of tokens:', CC.shape[0])
    return NQ.astype('float').fillna(0).describe()
# ### run scripts
coinDF = loadCoinData()
catCoinDF = getTokensWithCategory(coinDF)
coinDF.shape
catCoinDF.shape
all_categories = getAllCategories(catCoinDF)
categoryTotals = getCategoryCounts(coinDF, all_categories)
pd.set_option('display.max_rows', 100)
categoryTotals.shape
categoryTotals[categoryTotals > 10]
baseCategories = {
'entertainment': ['Non-Fungible Tokens (NFT)', 'Entertainment'],
'social': ['Gaming', 'Communication', 'Fan Token', 'Media', 'Sports', 'Tourism', 'Charity', 'Collectible', 'Social Money'],
'defi': ['Launchpad', 'Lending/Borrowing', 'Automated Market Maker (AMM)', 'Seigniorage'],
'business': ['Business Platform', 'Energy', 'Big Data', 'Software', 'Insurance', 'Retail', 'Real Estate'],
'blockchain': ['Smart Contract Platform','Protocol','Oracle'],
'meme': ['Meme Tokens']
}
useColumns = ['sentiment_votes_up_percentage', 'sentiment_votes_down_percentage','market_cap_rank',\
'coingecko_rank', 'coingecko_score', 'developer_score', 'community_score', 'liquidity_score', \
'public_interest_score', 'facebook_likes', 'twitter_followers', \
'reddit_subscribers','reddit_accounts_active_48h', 'telegram_channel_user_count', \
'mean', 'std', 'kurt', 'trust_score']
# #### describe category
for k, v in baseCategories.items():
CC = getCoinsInBaseCategory(catCoinDF, v)
CC.to_csv('./categoryLists/'+k+'_token_list.csv')
# ### entertainment
describeCategory(catCoinDF, 'entertainment', useColumns)
# ### social
describeCategory(catCoinDF, 'social', useColumns)
# ### defi
describeCategory(catCoinDF, 'defi', useColumns)
# ### blockchain
describeCategory(catCoinDF, 'blockchain', useColumns)
# ### business
describeCategory(catCoinDF, 'business', useColumns)
# ### meme
describeCategory(catCoinDF, 'meme', useColumns)
| coingecko_api_bot_analytics/Coingecko Analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importation
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mendeleev import element
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MaxAbsScaler
import warnings
warnings.filterwarnings('ignore')
# -
# ## Openning from text file - HALIDES
# Load the ICSD halides export.  pd.read_table already returns a DataFrame,
# so the outer pd.DataFrame(...) wrapper is redundant (harmless).
df = pd.DataFrame(pd.read_table("ICSD-halides.txt"))
# Repurpose the unnamed 5th column as the 0/1 target label, initialised to 0.
df.rename(columns = {"Unnamed: 4": "Perovskite_label"},
          inplace = True)
df['Perovskite_label']=0
df.fillna('-',inplace = True)  # '-' as the missing-value sentinel
# ### The one that are perovskite
# Flag rows whose structure type mentions perovskite (either capitalisation).
# Fixed: `df.Perovskite_label[l] = 1` is chained indexing — pandas raises
# SettingWithCopyWarning and, under copy-on-write (default in pandas 3.0),
# the write is silently dropped; .loc assigns through reliably.
for l in range(len(df)):
    if 'Perovskite' in df.StructureType[l] or 'perovskite' in df.StructureType[l]:
        df.loc[l, 'Perovskite_label'] = 1
# ### Get rid of COMPLEX
# +
# Collect indices of formulas containing '.' (fractional stoichiometries —
# treated as "complex" compositions; TODO confirm intent) and drop them.
a = []
for l in range(len(df)):
    if '.' in df.StructuredFormula[l]:
        a.append(l)
df_complex = df.drop(a, axis=0)
# Sort perovskite rows first so that deduplication (next cell) keeps the
# labelled variant of each formula.
df_complex.sort_values(by=['Perovskite_label'], ascending=False, inplace=True)
df_complex.reset_index(drop=True, inplace=True)
# -
# ### Get rid of DUPLICATE taking into account polymorphism
# +
# Deduplicate by formula, keeping each formula's first occurrence.
# `uniq` ends up holding the indices of the DUPLICATE rows to drop (despite
# its name).  A set for `seen` would avoid the O(n) list membership per row.
seen = []
uniq = []
for l in range(len(df_complex)):
    uniq.append(l)
    if df_complex.StructuredFormula[l] not in seen:
        del uniq[-1]
        seen.append(df_complex.StructuredFormula[l])
df_complex_multiplicate = df_complex.drop(uniq, axis=0)
df_complex_multiplicate.reset_index(drop=True, inplace=True)
# -
# ## Code - FINISHED and now extraction
df_complex_multiplicate.to_csv('Parsed-halides.csv', columns = ['StructuredFormula', 'Perovskite_label'], index=False)
# ## DataFrame for the Oxide Perovskite
# +
# Load and stack the three ICSD oxide exports into one frame.
dfo1 = pd.DataFrame(pd.read_table("ICSD-oxides-st.txt"))
dfo2 = pd.DataFrame(pd.read_table("ICSD-oxides-2nd.txt"))
dfo3 = pd.DataFrame(pd.read_table("ICSD-oxides-3nd.txt"))
dfoxide = pd.concat([dfo1, dfo2,dfo3])
# -
# Repurpose the unnamed 10th column as the 0/1 target label, initialised to 0.
dfoxide.rename(columns = {"Unnamed: 9": "Perovskite_label"},
               inplace = True)
dfoxide['Perovskite_label']=0
dfoxide.fillna('-',inplace = True)  # '-' as the missing-value sentinel
dfoxide.reset_index(drop=True, inplace=True)
# ## Labelling
# Flag rows whose structure type mentions perovskite (either capitalisation).
# Fixed: `dfoxide.Perovskite_label[l] = 1` is chained indexing — pandas raises
# SettingWithCopyWarning and, under copy-on-write (default in pandas 3.0),
# the write is silently dropped; .loc assigns through reliably.
for l in range(len(dfoxide)):
    if 'Perovskite' in dfoxide.StructureType[l] or 'perovskite' in dfoxide.StructureType[l]:
        dfoxide.loc[l, 'Perovskite_label'] = 1
# ### Get rid of COMPLEX
# +
# Same "complex" filter as for the halides: drop formulas containing '.'.
aoxide = []
for l in range(len(dfoxide)):
    if '.' in dfoxide.StructuredFormula[l]:
        aoxide.append(l)
dfoxide_complex = dfoxide.drop(aoxide, axis=0)
# Perovskite rows first so deduplication keeps the labelled variant.
dfoxide_complex.sort_values(by=['Perovskite_label'], ascending=False, inplace=True)
dfoxide_complex.reset_index(drop=True, inplace=True)
# -
# ### Get rid of DUPLICATE taking into account polymorphism
# +
# Deduplicate by formula (first occurrence wins; perovskite rows were sorted
# first above).  `uniqoxide` collects the indices of the duplicates to drop.
seenoxide = []
uniqoxide = []
for l in range(len(dfoxide_complex)):
    uniqoxide.append(l)
    if dfoxide_complex.StructuredFormula[l] not in seenoxide:
        del uniqoxide[-1]
        seenoxide.append(dfoxide_complex.StructuredFormula[l])
dfoxide_complex_multiplicate = dfoxide_complex.drop(uniqoxide, axis=0)
dfoxide_complex_multiplicate.reset_index(drop=True, inplace=True)
# Strip literal parentheses from the formulas.
# Fixed: regex=False is passed explicitly — '(' is an invalid regular
# expression, so under pandas < 2.0 (where regex=True was the default)
# this line raised an error; the intent is a plain character strip.
dfoxide_complex_multiplicate['StructuredFormula'] = dfoxide_complex_multiplicate['StructuredFormula'].str.replace('(', '', regex=False).str.replace(')', '', regex=False)
# -
# ## Code - FINISHED and now extraction
dfoxide_complex_multiplicate.to_csv('Parsed-oxides.csv', columns = ['StructuredFormula', 'Perovskite_label'], index=False)
# +
NN = dfoxide_complex_multiplicate[['StructuredFormula','Perovskite_label']]
# Split the formula into up to 10 whitespace-separated species columns;
# missing positions are filled with None.
NN[['Atom1','Atom2','Atom3','Atom4','Atom5','Atom6','Atom7','Atom8','Atom9','Atom10']] = NN.StructuredFormula.str.split(expand=True)
# Keep only compounds with exactly four species (ABX3-type candidates).
# Fixed: `== None` / `!= None` replaced by identity tests (`is None`), the
# correct comparison for None.  Dropping by label while looping is safe here
# because subsequent lookups use other (still-present) labels.
for l in range(len(NN)):
    if NN.Atom4[l] is None:
        NN.drop([l], axis=0, inplace=True)
        continue
    if NN.Atom5[l] is not None:
        NN.drop([l], axis=0, inplace=True)
NN_final = NN[['Atom1','Atom2','Atom3','Atom4','Perovskite_label']]
# +
# Strip stoichiometric digits, leaving bare element symbols.
# Fixed: pandas >= 2.0 treats str.replace patterns literally by default, so
# regex=True is now required for r'\d+' to act as a regular expression.
NN_final.Atom1 = NN_final.Atom1.str.replace(r'\d+', '', regex=True)
NN_final.Atom2 = NN_final.Atom2.str.replace(r'\d+', '', regex=True)
NN_final.Atom3 = NN_final.Atom3.str.replace(r'\d+', '', regex=True)
NN_final.Atom4 = NN_final.Atom4.str.replace(r'\d+', '', regex=True)
NN_final
# -
# ## From the dataframe to the numpy array of features. Each compound is defined by 4 features where the features correspond to the atomic number (Z) of the atom. The array should contain the label.
#
# Convert element symbols to atomic numbers in all four site columns
# (each compound becomes 4 numeric features).
# Fixed: the original assigned through `NN_final.Atom1.iloc[l]` (chained
# indexing), which pandas may apply to a temporary copy and warns about;
# mapping each column writes the result back unambiguously.
for atom_col in ('Atom1', 'Atom2', 'Atom3', 'Atom4'):
    NN_final[atom_col] = NN_final[atom_col].map(lambda symbol: element(symbol).atomic_number)
# +
NN_4atoms = NN_final.sample(frac = 1)
NN_4atoms
Features = NN_4atoms.to_numpy()
# -
# ## DOING THE NN
# +
# Features/label split: 4 atomic numbers in, perovskite label out.
X = Features[:,0:4]
y = Features[:,-1].astype('int')
# Fixed: the bare name `sklearn` is never imported in this notebook (only
# submodules are), so `sklearn.preprocessing.normalize` raised NameError.
from sklearn.preprocessing import normalize
X = normalize(X, norm='l2', axis=1, copy=True, return_norm=False)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                    random_state=1)
transformer = MaxAbsScaler().fit(X_train)
# NOTE(review): the transformed result is discarded — X_train is never
# reassigned, so the MaxAbs scaling has no effect on training; confirm intent.
transformer.transform(X_train)
clf = MLPClassifier(random_state=1, max_iter=1750, activation='relu', solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100,150,50),learning_rate='adaptive').fit(X_train, y_train)
#clf = MLPClassifier(random_state=1, max_iter=360, activation='relu', solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100,150,50,100),learning_rate='adaptive').fit(X_train, y_train)
clf.predict_proba(X_test[:1])
clf.predict(X_test[:, :])
print(clf.score(X_test, y_test))
clf.score(X, y)
# +
X = Features[:,0:4]
y = Features[:,-1].astype('int')
# Fixed: bare `sklearn` is never imported in this file; import the function directly.
from sklearn.preprocessing import normalize
X = normalize(X, norm='l2', axis=1, copy=True, return_norm=False)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                    random_state=1)
transformer = MaxAbsScaler().fit(X_train)
transformer.transform(X_train)  # NOTE(review): result discarded — confirm intent
score = []
# Sweep the training budget (max_iter) at a fixed 4-layer topology and record
# the held-out score at each step.
for l in range(20,2000,10):
    clf = MLPClassifier(random_state=1, max_iter=l, activation='relu', solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(20,20,20,20),learning_rate='adaptive').fit(X_train, y_train)
    score.append(clf.score(X_test, y_test))
# +
plt.plot(range(len(score)), score)
plt.ylabel('score')
plt.xlabel('epochs or max_iter')
plt.show()
print(max(score))
20 + score.index(max(score))*10
# +
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
Features = NN_4atoms.to_numpy()
# NOTE(review): score starts as [[],[],[]] and floats are appended after, so
# its first three entries are empty lists; the plotting cell below slices
# score[4:] — confirm the intended offset.
score = [[],[],[]]
# Fixed: `layer` was never initialised, so the first inner iteration raised
# NameError; it records the (l, i, g) topology tried at each step.
layer = [[], [], []]
X = Features[:,0:4]
y = Features[:,-1].astype('int')
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state = 1)
max_abs_scaler = preprocessing.MaxAbsScaler()
X_train_maxabs = max_abs_scaler.fit_transform(X_train)  # NOTE(review): unused below
# Grid over three hidden-layer widths; each combination trains a fresh MLP.
for l in range(20,1000,30):
    for i in range(20,1000,30):
        for g in range(20,1000,30):
            clf = MLPClassifier(random_state=1, max_iter=240, activation='relu', solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(l,i,g),learning_rate='adaptive').fit(X_train, y_train)
            score.append(clf.score(X_test, y_test))
            layer[0].append(l)
            layer[1].append(i)
            layer[2].append(g)
# +
plt.plot(range(len(score[4:])), score[4:])
plt.ylabel('score')
plt.xlabel('trials')
plt.show()
# +
X = Features[:,0:4]
y = Features[:,-1].astype('int')
n_classes = y.shape
#X = sklearn.preprocessing.normalize(X, norm='l2', axis=1, copy=True, return_norm=False)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                    random_state=1)
transformer = MaxAbsScaler().fit(X_train)
transformer.transform(X_train)
# NOTE(review): `y_score` is a fitted classifier that is never used below —
# the ROC curve is computed from `clf`, left over from an earlier cell;
# confirm which model was meant to be evaluated.
y_score = MLPClassifier(random_state=1, max_iter=240, activation='relu', solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(80,50,680),learning_rate='adaptive').fit(X_train, y_train)
# Fixed: roc_curve and auc were never imported anywhere in this notebook, so
# the next two lines raised NameError.
from sklearn.metrics import roc_curve, auc
fpr2, tpr2, threshold = roc_curve(y_test, clf.predict_proba(X_test)[:,1])
roc_auc2 = auc(fpr2, tpr2)
# image drawing
plt.figure()
plt.title('Receiver Operating Characteristic %d iter' %240)
plt.plot(fpr2, tpr2, label = 'MLP AUC = %0.2f' % 0.8331)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# +
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
X = Features[:,0:4]
y = Features[:,-1].astype('int')
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                    random_state=11)
mlp_gs = MLPClassifier(max_iter=240)
# Hyper-parameter grid around the best topology found above; note that the
# full Cartesian product times 5-fold CV is a large number of fits.
parameter_space = {
    'hidden_layer_sizes': [(80,50,680),],
    'activation': ['tanh', 'relu', 'logistic'],
    'solver': ['sgd', 'adam','lbfgs'],
    'alpha': 10.0 ** -np.arange(1, 7),
    'learning_rate': ["constant", "invscaling", "adaptive"],
    'learning_rate_init': 10.0 ** -np.arange(1, 6),
    'random_state':np.arange(1, 4),
    'tol' : 10.0 ** -np.arange(1, 6),
}
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(mlp_gs, parameter_space, n_jobs=-1, cv=5)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
print(clf.best_estimator_)
| Data-Processing_Sickit-Learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
from PIL import Image
import glob
from scipy.io import savemat, loadmat
from IPython.display import display, clear_output
import pandas as pd
import numpy as np
import random
# Find actual cluster types and ground truth labels. E.g. Area3 has 6 different rooms as follows;
#
# {'WC', 'conferenceRoom', 'hallway', 'lounge', 'office', 'storage'}
def get_room_label(path):
    """Map a file path to its ground-truth room-class index (0-5).

    The label is decided by the first room keyword found in *path*;
    returns None when no known keyword is present.
    """
    room_classes = (
        ('WC', 0),
        ('conferenceRoom', 1),
        ('hallway', 2),
        ('lounge', 3),
        ('office', 4),
        ('storage', 5),
    )
    for keyword, label in room_classes:
        if keyword in path:
            return label
# A function to save images and its labels as numpy array to reduce cost of reading data, by converting to grayscale and resizing.
def save_images_and_labels():
    """Convert every semantic PNG of area_3 into a flattened 90x90 grayscale
    array, pair it with its room label, and persist both as .mat files.

    The file list is shuffled first, so samples are stored in random order.
    """
    pattern = "C:\\Users\\ustundag\\GitHub\\2D-3D-Semantics\\area_3\\data\\semantic\\*.png"
    file_list = glob.glob(pattern)
    random.shuffle(file_list)
    flattened = []
    room_labels = []
    for png_path in file_list:
        clear_output(wait=True)
        grayscale = Image.open(png_path).convert('L')
        scaled = np.array(grayscale.resize((90, 90), Image.NEAREST))/255
        flattened.append(scaled.flatten())
        room_labels.append(get_room_label(png_path))
        # progress indicator (the area_3 semantic set holds 3704 images)
        display(str(len(flattened)) + " / 3704")
    savemat('noXYZ_area_3_no_xyz_data_semantic_90x90.mat', {'semantic': np.array(flattened)})
    savemat('noXYZ_area_3_no_xyz_data_semantic_90x90_labels.mat', {'labels': np.array(room_labels)})
# build the .mat caches, then reload data for a quick visual sanity check
save_images_and_labels()
# NOTE(review): save_images_and_labels() writes the *semantic* .mat files,
# but the lines below load pre-existing *rgb* .mat files -- confirm this
# mismatch is intentional.
images = loadmat("C:\\Users\\ustundag\\GitHub\\2D-3D-Semantics\\noXYZ_area_3_no_xyz_data_rgb_90x90.mat")
images = images["rgb"]
labels = loadmat("C:\\Users\\ustundag\\GitHub\\2D-3D-Semantics\\noXYZ_area_3_no_xyz_data_rgb_90x90_labels.mat")
labels = labels["labels"]
# show the first four images; they are stored flattened, so reshape to 90x90
pylab.figure(figsize=(8, 8))
for i in range(4):
    pylab.subplot(1, 4, i + 1)
    pylab.imshow(images[i].reshape([90, 90]), cmap='gray')
# distinct label values present in the dataset
set(labels[0])
| main_notebooks_old/main_dataset_preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Atlas Scientific pH
# pH Sensor for measuring acidity (hydrogen ion concentration) of liquids
#
# Manufacturer Link: https://www.atlas-scientific.com/ph.html
# +
import matplotlib.pyplot as plt
from meerkat.base import time
from meerkat import atlas, parser
# -
# instance device and set output format to .csv (which is default)
dev = atlas.pH(bus_n=1, output='csv')
# #### Configuration
# The short sleeps between commands give the sensor time to process the
# previous I2C request before the next one is issued.
# device information: device type, firmware version
time.sleep(0.5)
dev.info()
# status of device power: restart code, input voltage Vcc
time.sleep(0.5)
dev.status()
# set current temperature for compensation
time.sleep(0.5)
dev.temp_set(24.4)
time.sleep(0.5)
dev.temp_get()
# #### Calibration
# Three point calibration: mid point first, then low and high
dev.cal_set_mid(7.00)
time.sleep(1)
dev.cal_set_low(3.00)
time.sleep(1)
dev.cal_set_high(10.09)
time.sleep(1)
dev.cal_get()
# clear calibration
time.sleep(1)
dev.cal_clear()
time.sleep(1)
dev.cal_get()
# response breakdown of calibration
time.sleep(0.5)
dev.cal_slope()
# #### Measurement
# single pH measurement
time.sleep(0.5)
dev.measure()
# get one sample without a description
dev.get('test_1')
# get 5 samples with a description
dev.get('test_2', n=5)
# #### JSON Data Publishing
# set the metadata publishing interval to every third sample
dev.json_writer.metadata_interval = 3
dev.publish(description='test_3', n=5)
# #### CSV Writer Output
# write 5 samples to .csv file with description
dev.write(description='test_4', n=5)
# name of file written
dev.csv_writer.path
# load .csv data written with dev.write
m, df = parser.csv_resource(dev.csv_writer.path)
df
df.datetime64_ns = df.datetime64_ns.dt.tz_localize('UTC') # Pandas 0.24.1 hack
df[["datetime64_ns", "pH"]].plot(x="datetime64_ns", style='-x');
# #### JSON Writer Output
# switch the writer to JSON for subsequent write() calls
dev.writer_output = "json"
# get 7 samples with a description
dev.write(description='test_5', n=7)
# name of file written
dev.json_writer.path
# !head $dev.json_writer.path
| notebooks/atlas_pH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Lab: Week 3
#
# This week we are going to learn about **NumPy** and manipulating 1D NumPy arrays. There is also a *debug* challenge and a '*do something we haven't taught you how to do*' challenge at the end of the lab.
#
# The lab assumes you are using `Spyder 3` and Python 3.x
#
# You should have attended Lecture 3 in the Python series and read the lecture notes before attempting this lab.
#
# **Remember for any code where you wish to use numpy you need to import it:**
import numpy as np
# ## NumPy Basics
# ### Example 1: Create NumPy arrays and accessing data
#
# The fundamental building block in NumPy is the `numpy.ndarray`
#
# As we discovered in the last lecture arrays are quite different from a Python `List`. However, creation and accessing individual array items and slicing array is very similar to a list. A big difference is that a `numpy.ndarray` requires
# all data values to be of the **same type**.
#
# Suppose that we want a numpy array containing the integers 4, 3, 1, 5 and 6.
#
# A simple way to create such an array and access its data is to use the following syntax.
arr = np.array([4, 3, 1, 5, 6])
print('the array contains {0}'.format(arr))
print('a numpy array has has type {0}'.format(type(arr)))
print('the array has a shape of {0}'.format(arr.shape))
print('The item at index 0 in the array is {0}'.format(arr[0]))
print('The item at index 2 in the array is {0}'.format(arr[2]))
print('If we slice the array between item 0 and 2 we get {0}'.format(arr[:2]))
print('If we slice the array between item 3 and 5 we get {0}'.format(arr[3:5]))
# ### Example 2: Creating empty arrays
#
# * The size of a NumPy array needs to be defined upfront.
# * You cannot dynamically append to a `numpy.ndarray` like you can with a `List`
# * One option is to create an empty numpy array or a numpy array containing all zeros (if using numeric data).
# * When you create an 'empty' array you are allocated a space in the computer's memory.
# * There may be some strange data in inside (note your output might look different to the below depending on what is in your computer's memory)
empty_arr = np.empty(10)
print(empty_arr)
print(type(empty_arr))
print(empty_arr.shape)
# * Alternatively you could create an array that contains all zeros
zeros_arr = np.zeros(10)
print(zeros_arr)
print(type(zeros_arr))
print(zeros_arr.shape)
# ### Example 3: Manipulating array data
#
# * It is trivial to update individual elements in an array
# +
data = np.zeros(10)
print('original data {0}'.format(data))
data[5] = 111
data[9] = 222
print('updated data {0}'.format(data))
# -
# * When you slice an array you effectively create a **view** of the data.
# * This means that when you update the slice you update the original array as well.
# * Numpy keeps the data in the same place in memory for efficiency.
# +
data = np.zeros(10)
print('original data {0}'.format(data))
slice_of_data = data[3:6] # slice from index 3 to index 5
slice_of_data += 999 #incremement each value in the slice by 999
print('slice of data {0}'.format(slice_of_data))
print('the original data is also updated {0}'.format(data))
# -
# * NumPy makes basic matrix algebra simple and efficient
# * Here we are going to work with 2 arrays of the same size.
# * We are going to create a sequence of numbers in each using the `np.arange()` function
# +
data1 = np.arange(10) #create a sequence of integers 0-9
data2 = np.arange(10,20) #create a sequence of integers 10-19
print('data1: {0}'.format(data1))
print('data2: {0}'.format(data2))
print('The square of each value in data1 {0}'.format(data1**2))
print('The summation of the the two arrays {0}'.format(data1 + data2))
print('The difference between the the two arrays {0}'.format(data1 - data2))
data1 += 10
print('If we add 10 to each element in data1 we get {0}'.format(data1))
# -
# ### Exercise 1: Create and manipulate your own numpy array
#
# * Create two numpy arrays of size 10.
# * The first array should be called `array_1` have all zero values
# * The second array `array_2` should be a sequence from 90 to 99
# * Create a slice of `array_1` to access the last 5 elements of the array.
# * Add the value 10 to each of these slices
# * Now multiply the two arrays together and print out the result
#
# The expected result is:
#
# ```python
# [0, 0, 0, 0, 0, 950, 960, 970, 980, 990]
# ```
# ## NumPy for data analysis
# ### Example 1: Basic statistical analysis of the Standard Normal
#
# We are going to explore the basic analysis capabilities of numpy.
#
# The first thing we will do is generate some synthetic data. We are going to take 10000 random samples from the standard normal distribution. Numpy has a function to do this called `numpy.random.randn`
data = np.random.randn(10000)
print(data.shape)
print(type(data))
# We are then going to create a function to conduct and report a descriptive analysis of our data.
# +
def descriptives(data):
    """
    Summarise a 1D numpy array.

    Returns a tuple of (mean, standard deviation, 1st percentile,
    99th percentile) of *data*.

    Keyword arguments:
    data -- 1d numpy.ndarray containing data to analyse
    """
    lower_tail = np.percentile(data, 1)
    upper_tail = np.percentile(data, 99)
    return data.mean(), data.std(), lower_tail, upper_tail
results = descriptives(data)
print(results)
# -
# So we can see that the mean of the distribution is approx zero with a standard deviation of 1.
#
# We can also see that 99% of our data lie between -2.3 and +2.3 (which we would expect from the standard normal)
#
# It is very simple to work with NumPy arrays containing numeric data. For example if we wanted to find all of our samples that are greater than or equal to +2.3 we use:
# +
result = data >= 2.3
print(result.shape)
print(type(result))
print(result)
# -
# The code returns a new `numpy.ndarray` that contains boolean (True/False) values. The value at array index `i` is `True` if the corresponding value at index `i` in array `data` is >= 2.3 and `False` otherwise. If we had used a python `List` we would have needed to loop through all of list items and perform the check ourselves.
#
# Let's create some generalised functions to return the probabilities that a value is greater or less than a user specified value in our data set.
#
# To do that we need to know that
#
# ```python
# False == 0
# True == 1
# ```
#
# Therefore we can take the sum of our boolean array to find out how many array elements are greater or less than a user specified values e.g.
#
# ```python
# (data >= 2.3).sum()
# ```
# +
def prob_great_than_or_equal_to(data, x):
    '''
    Return the proportion of the dataset that
    is greater than or equal to x

    Keyword arguments
    data -- a numpy.ndarray containing numeric data
    x -- a numeric value. Function returns proportion where data >=x
    '''
    # count the True entries of the boolean mask, then normalise
    n_matching = np.count_nonzero(data >= x)
    return n_matching / data.shape[0]


def prob_less_than_or_equal_to(data, x):
    '''
    Return the proportion of the dataset that
    is less than or equal to x

    Keyword arguments
    data -- a numpy.ndarray containing numeric data
    x -- a numeric value. Function returns proportion where data <=x
    '''
    n_matching = np.count_nonzero(data <= x)
    return n_matching / data.shape[0]
x1 = prob_great_than_or_equal_to(data, 1.96)
x2 = prob_less_than_or_equal_to(data, -1.96)
print(x1, x2)
# -
# Our test of these functions shows use that around 95% of data lie between points -1.96 and +1.96 (which again we would expect with the standard normal).
# ### Example 2: Simple linear regression using data in numpy arrays
#
# NumPy arrays are the fundamental building block of the Python SciPy stack.
# Scientific computing in Python nearly always makes use of `numpy.ndarrays` at some level.
#
# In this example we will load two NumPy arrays from file and conduct a simple linear regression. The method of Ordinary Least Squares is used to fit a linear model (think $y = \beta_1 x + \beta_0 + \epsilon $ ) to some data stored in numpy arrays.
#
# We have two datasets.
#
# * `breach.csv`: monthly totals of people waiting 4 or more hours in English emergency departments
# * `dtocs.csv`: monthly total of the number of people waiting to be discharged (leave) hospital and go home or transfer to another form of healthcare.
#
# We are going to (naively) assess the relationship between these two variables. For the purposes of this example we are going to ignore that these two datasets are time-series data.
#
# The library we will use to conduct linear regression is `statsmodels.api`
# We will use the function `OLS` which accepts two keyword arguments: y and x
#
# Once we have conducted the analysis we will print the results summary.
# +
import statsmodels.api as sm #this contains the analysis function we will use
def load_dtoc_dataset():
    '''
    Loads the breach and dtoc data sets into memory

    Returns a tuple of numpy.ndarrays representing
    breach and dtoc dataset respectively.
    '''
    # skip_header=1 because each file begins with a column-descriptor row
    breach_data = np.genfromtxt('breach.csv', skip_header=1)
    dtoc_data = np.genfromtxt('dtocs.csv', skip_header=1)
    return breach_data, dtoc_data
breach, dtoc = load_dtoc_dataset()
#regression code
dtoc = sm.add_constant(dtoc) # an intercept term to the model
model = sm.OLS(breach, dtoc)
results = model.fit()
print(results.summary())
# -
# * The results of regression analysis gives an adjusted R-squared of 0.71
# * Both the intercept (cont) and dtocs (x1) have confidence intervals that do not include zero
# * Our analysis suggests that (on the face of it) there is an association between the two variables
# ### Exercise 1: Descriptive analysis of hourly banks customers
#
# The dataset `bank_arrivals.csv` contains 1000 hourly observations of customers arriving at a bank cashiers queue.
#
# * Load the bank arrivals from the file into a numpy array
# * Check that you have successfully loaded all 1000 observations
# * Create a function `descriptives` that accepts an numpy array as an argument.
# * The function should calculate and return the mean, standard deviation, median and inter-quartile range
# * **Tip**: The inter-quartile range is the difference between the 75th and 25th percentile; the median is the 50th percentile.
# * Calculate the mean, standard deviation, median and inter-quartile range for bank arrivals data
# * Print the summary statistics to the console.
#
# ### Exercise 2: Before and After Analysis of Stroke Treatment Rates
#
# The file `lysis.csv` contains a monthly proportion of a hospital's stroke patients that have been treated with a potentially life saving drug to remove a blood clot from their brain.
#
# There are a total of 54 months in the dataset.
#
# In month 42 the hospital introduced a new process for fast tracking patients to treatment. Your task is to quantify the difference in treatment rates before and after the fast track intervention and to determine if the result is statistically significant.
#
# To do this you need to:
#
# * import the OLS function from `statsmodels.api`
# * Read the `lysis.csv` file into a `numpy.ndarray` variable (hint: watch out for headers in the file)
# * Create a numpy array that is the same size as the lysis array. It should contain zero when it the month (index) is less than when the intervention was introduced (42) and 1 for all months after the intervention was introduced. Your array should look like
#
# ```python
# dummy == [0,0,0, ... ,1,1,1]
#
# # Hints:
# # dummy.shape[0] == 54; (i.e. its length is 54)
# # where index < 42 dummy[index] = 0; where index >=42 dummy[index] == 1
# ```
#
#
# * Conduct a regression analysis of the before and after period and display the results.
# * Tip: remember that the regression analysis should include a constant term.
# * The independent variables in your regression is the dummy variable
# * The dependent variables in your regression is the data read in from `lysis.csv`
#
#
# ## Looping over NumPy Arrays
#
# ### Example 1: Basic Iteration over an array
#
# * For a 1D `numpy.ndarray` you can iterate (loop) over each element much in the same way you would iterate over items in a `List`
# * Here we create an array with 10 items - the numbers 0 to 9.
# * We then iterate over each item using a `for` loop
# +
data = np.arange(10)
for x in data:
print(x, end= ' ')
# -
# * Alternatively a `for` loop and array index approach could be used
# +
data = np.arange(10)
for i in range(data.shape[0]):
print(data[i], end = ' ')
# -
# ### Example 2: Finding the maximum value in an array
#
# A simple example of a loop is finding the maximum value in an array. This works in a similar fashion to when
# using a python `List`.
#
# +
def max_value(data):
    '''
    Loop over each element in array
    and return the maximum value

    Keyword arguments:
    data -- non-empty numpy.array containing numeric data

    Raises:
    ValueError -- if data is empty
    '''
    if data.shape[0] == 0:
        raise ValueError('max_value() arg is an empty array')
    # Seed the running maximum with the first element instead of 0, so the
    # function is also correct when every value is negative (the previous
    # version wrongly returned 0 in that case).
    max_value = data[0]
    for x in data[1:]:
        max_value = max(x, max_value)
    return max_value
data = np.array([100, 10, 10, 1000, 999, 1])
print(max_value(data))
# -
# Note that in practice you would use numpy's build-it function for max as opposed to writing your own code.
#
# ```python
# data = np.array([100, 10, 10, 1000, 999, 1])
# print(data.max())
# ```
# ### Example 3: Vectorizing a function
#
# * NumPy allows you to vectorize a function.
# * This means that you can apply a function to every item in an array without requiring an explicit `for` loop in your code.
#
# As an example consider the following array:
# ```python
# data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
#
# ```
#
# If we wanted to limit the values in this array to a maximum value of 5 then one way to do this is to use an explicit `for` loop to iterate over all of the elements and impose a ceiling on the value. I.e.
# +
def ceiling(arr, upper_limit):
    '''
    Clip every element of a 1D np.ndarray in place so that
    no value exceeds upper_limit.

    Keyword arguments:
    arr - numeric np.ndarray, modified in place
    upper_limit - the numeric upper limit on values in arr
    '''
    for idx, value in enumerate(arr):
        if value > upper_limit:
            arr[idx] = upper_limit
data = np.arange(10)
print('original data: {0}'.format(data))
ceiling(data, 5)
print('data with ceiling {0}'.format(data))
# -
# * However, with NumPy we could instead make use of the `np.vectorize()` function
# * To make it work we need to update the `ceiling` function so that it works on individual array elements
# * This means that we no longer have a `for` loop in our code!
# +
def ceiling(to_test, upper_limit):
    '''
    Cap a single value at upper_limit and return the result.

    Keyword arguments:
    to_test - numeric value to test if breaches ceiling
    upper_limit - the numeric upper limit on to_test
    '''
    return to_test if to_test < upper_limit else upper_limit
# v_ceiling is a wrapper function that we call instead of ceiling
v_ceiling = np.vectorize(ceiling)
data = np.arange(10)
c_data = v_ceiling(data, 5)
print('original data: {0}'.format(data))
print('data with ceiling {0}'.format(c_data))
# -
# * In this simple example the function `ceiling` is just a wrapper for the the built in function `min`
# * Therefore we could vectorize `min` instead.
# * This means we need even less code again!
# * In reality you are more likely to vectorize your own custom functions.
# +
# here we vectorize min instead of our custom function
v_ceiling = np.vectorize(min)
data = np.arange(10)
c_data = v_ceiling(data, 5)
print('original data: {0}'.format(data))
print('data with ceiling {0}'.format(c_data))
# -
# ### Example 4: Using np.where() and fancy indexing
#
# Another efficient alternative to using a loop in NumPy is to use `np.where()`
#
# * `np.where()` is like asking "tell me where in this array, values satisfy a given condition".
# +
data = np.array([0, 1, 2, 500, 700])
results = np.where(data > 2)
print(results)
# -
# * The `results` above tell us that the array `data` contains values that are > 2 in indexes 3 and 4
# * This might be a useful result on its own.
# * To go one step further and access and manipulate the values in indexes 3 and 4 we need to use fancy indexing
# +
data = np.array([0, 1, 2, 500, 700])
indexes = [1, 3, 4]
sliced_data = data[indexes]
print(sliced_data)
# -
# * The code above demonstrates fancy indexing.
# * We defined a `List` of indexes i.e. `indexes = [1, 3, 4]` (this could also be another `np.ndarray`)
# * The code `data[indexes]` 'looks up' the values contained in indexes 1, 3, and 4 of the array `data`
# * If we combine using `np.where()` and 'fancy indexing' we have very powerful code
# +
data = np.array([0, 1, 2, 500, 700])
sliced_data = data[np.where(data > 2)]
print(sliced_data)
data[np.where(data > 2)] = 999
print(data)
# -
# * We can implement the `ceiling` function from the previous example using np.where()
# * It is neat solution that requires minimal code
# +
def ceiling(data, upper_limit):
    '''
    Clip, in place, every element of data that exceeds upper_limit.

    Keyword arguments:
    data - numeric np.ndarray, modified in place
    upper_limit - the numeric ceiling imposed on values in data
    '''
    # boolean mask selects the offending elements; fancy-index assignment
    # overwrites them without an explicit Python loop
    over_limit = data > upper_limit
    data[over_limit] = upper_limit
data = np.arange(10)
print('original data: {0}'.format(data))
ceiling(data, 5)
print('data with ceiling {0}'.format(data))
# -
# ### Exercise 4: Calculate the winsorized mean
#
# * Assume you have the data
#
# ```python
# data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# ```
# * The 20% Winsorized mean of `data` is calculated on a modified data set where the top and bottom 10% of values are replaced by the 10th and 90th percentiles.
# * In this case the 10th percentile = 2 and the 90th = 10. Therefore the winsorized dataset is:
#
# ```python
# win_data = [2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10]
#
# win_mean = win_data.mean()
# ```
# * Write a function `winsorise(data, cut_off = 0.1)`
# * The function must modify the numeric np.ndarray `data` so that is it is winsorised.
# * A cut_off = 0.1 specifies that the function uses the 10th and 90th percentiles as cut-offs
#
# Hints:
#
# * There are multiple ways to solve this problem
# * You could use a `for` loop
# * You could create a function that is vectorized using `np.vectorize()`
# * You could use `np.where()` and fancy indexing
#
#
# ### Week 3: Debug Challenge
#
# Each laboratory will have a debug challenge. You will be given a pre-existing script containing Python code. The catch is that the code doesn't run!
#
# Your challenge is to find and correct the errors so that the script correctly executes.
#
# The challenges are based around common problems students have when writing code. If you do the exercises it will help you debug your own code and maybe even avoid the mistakes in the first place!
# #### Challenge 1:
#
# This weeks debug challenge is fixing a monte-carlo simulation model.
#
# We return to our stroke care example from earlier in the lab. This time rather than conducting
# an empirical analysis we are going to model each stage of the emergency stroke care pathway
# and examine the distribution of performance that might be expected.
#
# The model is a simplification of a stroke treatment pathway at a hospital.
# Patients must be treated within 180 minutes of the onset of their symptoms.
#
# * They must be brought to hospital
# * be seen in the ED
# * undergo a CT scan
# * be clinically assessed by a stroke medic
# * not have any additional illnesses that prevent treatment
#
# Instructions:
# * open `wk3_debug_challenge.py` in `spyder 3`.
# * Attempt to run the code. The code will raise errors.
# * Fix the bugs!
#
# Hints:
# * Read the Python interpreter output.
# * The errors reported can look confusing at first, but read them carefully and they will point you to the lines of code with problems.
# * The `Spyder` IDE may give you some hints about formatting errors
# * It can be useful to use `print()` to display intermediate calculations and variable values.
# * Remember that `Spyder` has a variable viewer where you can look at the value of all variables created.
# * There might be multiple bugs! When you fix one and try to run the code you might find another!
# ### Week 3: Do something we haven't taught you how to do challenge!
# #### Challenge 1: Preprocessing data to detrend a timeseries
#
# In example 2, we conducted a simple linear regression to assess the relationship between two variables. Our initial analysis is problematic because both variables are time series and contain autocorrelation and trend. This means that we violate some of the assumptions of ordinary least squares regression and our estimates of the relationship are likely to be incorrect.
#
# In practice, we would pre-process the data in the numpy arrays before conducting the regression. A simple way to do this is to take the first difference of the time series. If $Y_t$ represents the value of the time series at time $t$ then the first difference is equal to:
#
# $$\Delta Y_t = Y_{t+1} - Y_t$$
#
# **Tip**: If an array $a$ has length $n$ then the array of first differences of $a$ has length $n-1$
#
# **Task**:
#
# * Copy the code from Example 2.
# * Modify the code to take the first difference of the breach and dtoc numpy arrays
# * Conduct the regression analysis using the first differences (detrended data)
# * Look at the new $R^2$. Is there still strong evidence of a relationship?
#
#
| Labs/wk3/lab_wk3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Demo of Astronomical Interactive Data Analysis Techniques using IPython/Jupyter Notebooks and Ginga
# This notebook shows examples of using an interactive Ginga viewer running in an HTML5 canvas with an IPython Notebook. You do not need a special widget set to run this, just an HTML5 compliant browser.
# See NOTES at end for more info installation/requirements.
#
# This is adapted for AAS 229 workshop based on the original demo [here](https://github.com/ejeschke/ginga/blob/master/ginga/examples/ipython-notebook/ginga_ipython_demo.ipynb).
# +
# Requirements:
from ginga.version import version
version
# Get ginga from github (https://github.com/ejeschke/ginga) or
# pypi (https://pypi.python.org/pypi/ginga)
# Ginga documentation at: http://ginga.readthedocs.io/en/latest/
# +
# setup
from ginga.web.pgw import ipg
# Set this to True if you have a non-buggy python OpenCv bindings--it greatly speeds up some operations
use_opencv = False
server = ipg.make_server(host='localhost', port=9914, use_opencv=use_opencv)
# -
# Start viewer server
# IMPORTANT: if running in an IPython/Jupyter notebook, use the no_ioloop=True option
server.start(no_ioloop=True)
# Get a viewer
# This will get a handle to the viewer v1 = server.get_viewer('v1')
v1 = server.get_viewer('v1')
# where is my viewer
v1.url
# open the viewer in a new window
v1.open()
# *NOTE*: if you don't have the `webbrowser` module, *open the link that was printed in the cell above in a new window* to get the viewer.
#
# You can open as many of these viewers as you want--just keep a handle to it and use a different name for each unique one.
#
# Keyboard/mouse bindings in the viewer window: http://ginga.readthedocs.io/en/latest/quickref.html
#
# You will want to check the box that says "I'm using a trackpad" if you are--it makes zooming much smoother
# Load an image into the viewer
# (change the path to where you downloaded the sample images, or use your own)
v1.load('../Lecture_Notebooks/Imexam/iacs01t4q_flt.fits')
# Example of embedding a viewer
v1.embed(height=650)
# capture the screen
v1.show()
# + active=""
# Now set a pan position by shift-clicking somewhere in the *viewer* window.
# -
# Let's get the pan position we just set
dx, dy = v1.get_pan()
dx, dy
# Getting values from the FITS header is also easy
img = v1.get_image()
hdr =img.get_header()
hdr['BUNIT']
# What are the coordinates of the pan position?
# This uses astropy.wcs under the hood if you have it installed
img.pixtoradec(dx, dy)
# Set cut level algorithm to use
v1.set_autocut_params('zscale', contrast=0.25)
# Auto cut levels on the image
v1.auto_levels()
# Let's do an example of the two-way interactivity
# First, let's add a drawing canvas
canvas = v1.add_canvas()
# delete all objects on the canvas
canvas.delete_all_objects()
# set the drawing parameters
canvas.set_drawtype('circle', color='blue')
# Now, in the Ginga window, draw a point using the right mouse button (if you only have one mouse button (e.g. Mac) press and release spacebar, then click and drag)
# get the pixel coordinates of the point we just drew
p = canvas.objects[0]
p.x, p.y
# Get the RA/DEC in degrees of the point
img.pixtoradec(p.x, p.y)
# Get RA/DEC in H M S sign D M S
img.pixtoradec(p.x, p.y, format='hms')
# Get RA/DEC in classical string notation
img.pixtoradec(p.x, p.y, format='str')
# Verify we have a valid coordinate system defined
img.wcs.coordsys
# Get viewer model holding data
image = v1.get_image()
image.get_minmax()
# get viewer data
data_np = image.get_data()
import numpy as np
np.mean(data_np)
# Set viewer cut levels
v1.cut_levels(0, 5)
# set a color map on the viewer
v1.set_color_map('smooth')
# Image will appear in this output
v1.show()
# Set color distribution algorithm
# choices: linear, log, power, sqrt, squared, asinh, sinh, histeq,
v1.set_color_algorithm('linear')
# Example of setting another draw type.
canvas.delete_all_objects()
canvas.set_drawtype('rectangle', color='green')
# Now right-drag to draw a *small* rectangle in the Ginga image.
# Remember: On a single button pointing device, press and release spacebar, then click and drag.
#
# Try to include some objects.
#
# +
# Find approximate bright peaks in a sub-area
from ginga.util import iqcalc
iq = iqcalc.IQCalc()
img = v1.get_image()
r = canvas.objects[0]
data = img.cutout_shape(r)
peaks = iq.find_bright_peaks(data)
peaks[:20]
# -
# evaluate peaks to get FWHM, center of each peak, etc.
objs = iq.evaluate_peaks(peaks, data)
# how many did we find with standard thresholding, etc.
# see params for find_bright_peaks() and evaluate_peaks() for details
len(objs)
# example of what is returned
o1 = objs[0]
o1
# pixel coords are for cutout, so add back in origin of cutout
# to get full data coords RA, DEC of first object
x1, y1, x2, y2 = r.get_llur()
img.pixtoradec(x1+o1.objx, y1+o1.objy)
# +
# Draw circles around all objects
Circle = canvas.get_draw_class('circle')
for obj in objs:
x, y = x1+obj.objx, y1+obj.objy
if r.contains(x, y):
canvas.add(Circle(x, y, radius=100, color='yellow'))
# set pan and zoom to center
v1.set_pan((x1+x2)/2, (y1+y2)/2)
v1.scale_to(0.75, 0.75)
# -
v1.show()
# How about some plots...?
# swap XY, flip Y, change colormap back to "ramp"
v1.set_color_map('gray')
v1.transform(False, True, True)
v1.auto_levels()
# Programmatically add a line along the figure at designated coordinates
canvas.delete_all_objects()
Line = canvas.get_draw_class('line')
l1 = Line(0, 585, 250, 585)
tag = canvas.add(l1)
# Set the pan position and zoom to 1:1. Show what we did.
v1.set_pan(125, 512)
v1.scale_to(1.0, 1.0)
v1.show()
# Get the pixel values along this line
img = v1.get_image()
values = img.get_pixels_on_line(l1.x1, l1.y1, l1.x2, l1.y2)
values[:10]
# +
# Plot the 'cuts'
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.cla()
plt.plot(values)
plt.ylabel('Pixel value')
plt.show()
# -
# Plot the cuts that we will draw interactively
canvas.delete_all_objects()
canvas.set_drawtype('line')
# Now draw a line through the image (remember to use right mouse btn or else press space bar first)
# show our line we drew
v1.show()
def getplot(v1):
    """Plot the pixel values along the first line drawn on the global canvas."""
    # the interactively drawn line is the first object on the canvas
    drawn_line = canvas.objects[0]
    image = v1.get_image()
    pixel_values = image.get_pixels_on_line(
        drawn_line.x1, drawn_line.y1, drawn_line.x2, drawn_line.y2
    )
    plt.cla()
    plt.plot(pixel_values)
    plt.ylabel('Pixel value')
    plt.show()
getplot(v1)
# ## Other ways of loading data
# make some random data in a numpy array
import numpy as np
import random
data_np = np.random.rand(512, 512)
# example of loading numpy data directly to the viewer
v1.load_data(data_np)
v1.show()
# Th-th-th-that's all folks!
# ## Requirements
# Needed packages for this notebook:
#
# - [`ginga`](https://github.com/ejeschke/ginga), jupyter/ipython w/notebook feature
# - typical scientific python reqs: `numpy`, `scipy`, `astropy`
# - for drawing, you will need either PIL/pillow, OpenCv or the [`aggdraw` module](https://github.com/ejeschke/aggdraw) (python 2 only). PIL is included in anaconda, so is usually all you need.
# - optional, but highly recommended: `webbrowser`, OpenCv
#
# Latest Ginga documentation, including detailed installation instructions, can be found [here](http://ginga.readthedocs.io/en/latest/).
| aas229_workshop/Tutorial_Notebooks/ginga_ipython_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transitivity
# ## Set up modules, variables, functions
# +
import sys
import collections
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
from tf.fabric import Fabric
from tf.app import use
import matplotlib.pyplot as plt
# import some project code from elsewhere
# Lord, please forgive me for my import sins
sys.path.append('/Users/cody/github/CambridgeSemiticsLab/time_collocations/tools/stats/')
from pca import apply_pca
from significance import apply_deltaP, apply_fishers
sys.path.append('/Users/cody/github/CambridgeSemiticsLab/Gesenius_data/analysis/')
from text_show import TextShower
# load Text-Fabric with datasets downloaded from Github
# NB that URLs of the original Github repos can be inferred
# from the pathnames. Just replace github/ with github.com/
# Local paths to the Text-Fabric datasets (clones of the Github repos).
locations = [
    '~/github/etcbc/bhsa/tf/c', # main ETCBC data
    '~/github/etcbc/heads/tf/c', # for phrase head data (i.e. for complements)
]
# load TF data, give main classes shortform names, instantiate the use app which gives convenient methods
TF = Fabric(locations=locations)
# features to load: verbal stem/tense, part of speech, lexeme forms,
# clause relations/codes, phrase functions, lexeme frequencies, etc.
API = TF.load('''
vs vt pdp lex lex_utf8
rela code function freq_lex
typ nhead
label gloss number
mother language
''')
A = use('bhsa', api=API)
# shortform handles: F=node features, E=edge features, T=text API, L=locality API
F, E, T, L = A.api.F, A.api.E, A.api.T, A.api.L
sys.path.append('../')
from clause_relas import in_dep_calc as clause_relator
# -
def get_spread(array, n):
    """Retrieve an even spread of up to ``n`` elements from an array/Series.

    Based on https://stackoverflow.com/a/50685454/8351428

    Args:
        array: either a numpy array or a pandas Series
            (multi-indexed Series are allowed)
        n: number of evenly spaced positions to sample
    Returns:
        the positionally indexed array or Series; may contain fewer than
        ``n`` elements because duplicate positions are dropped
    """
    end = len(array) - 1
    # n evenly spaced fractional positions over [0, end], rounded up to ints
    spread = np.ceil(np.linspace(0, end, n)).astype(int)
    indices = np.unique(spread)
    # Pandas objects must be indexed positionally via .iloc: plain
    # ``array[indices]`` performs *label* lookup, which silently selects
    # the wrong rows for a non-default integer index (the previous
    # KeyError fallback never fired in that case).
    if hasattr(array, 'iloc'):
        return array.iloc[indices]
    return array[indices]
ts = TextShower(
default=['ref','verb_id', 'text' , 'clause', 'has_obj'],
stylize=['text', 'clause']
)
# ## Build the dataset
# +
def clause_has_object(verb, clause_atom, clause):
    """Report whether a given clause contains any marked object."""
    phrases = L.d(clause, 'phrase')
    daughters = E.mother.t(clause_atom)
    daughter_relas = {F.rela.v(d) for d in daughters}
    daughter_codes = {F.code.v(d) for d in daughters}
    phrase_functions = {F.function.v(p) for p in phrases}
    # we count direct speech clauses following amar (>MR[) as direct objects
    if F.lex.v(verb) == '>MR[' and 999 in daughter_codes:
        return True
    # object marked directly on a phrase function within the clause
    if phrase_functions & {'Objc', 'PreO', 'PtcO'}:
        return True
    # object marked as a daughter clause relation
    return 'Objc' in daughter_relas
def cmpl_data(clause):
    """Return a dict tagging complement presence within a clause."""
    # does any phrase in the clause carry the complement function ('Cmpl')?
    has_cmpl = any(
        F.function.v(phrase) == 'Cmpl'
        for phrase in L.d(clause, 'phrase')
    )
    return {'has_cmpl': int(has_cmpl)}
# Build one row per qualifying verb occurrence: reference, lexeme,
# clause text, and binary object/complement tags.
dataset = []
for verb in F.pdp.s('verb'):
    # ignore low frequency clauses
    if F.freq_lex.v(verb) < 10:
        continue
    clause = L.u(verb, 'clause')[0]
    ca_rela = clause_relator(clause)
    # ignore non-main clauses to avoid
    # the problem of, e.g., אשֶׁר subjects / objects
    # which are not marked in the database, as well as other
    # potential complications
    if ca_rela != 'Main':
        continue
    # process the data
    book, chapter, verse = T.sectionFromNode(verb)
    clause_atom = L.u(verb, 'clause_atom')[0]
    ref = f'{book} {chapter}:{verse}'
    vs = F.vs.v(verb)  # verbal stem
    lex = F.lex_utf8.v(verb)
    lex_node = L.u(verb, 'lex')[0]
    verb_id = f'{lex}.{vs}'  # lexeme + stem identifier, e.g. 'ידע.qal'
    # multiplying by 1 coerces the boolean to an int flag (0/1)
    has_obj = 1 * clause_has_object(verb, clause_atom, clause)
    cl_data = {
        'node': verb,
        'ref': ref,
        'lex_node': lex_node,
        'lex': lex,
        'text': T.text(verb),
        'clause': T.text(clause),
        'clause_node': clause,
        'clause_atom': clause_atom,
        'verb_id': verb_id,
        'has_obj': has_obj,
    }
    cl_data.update(cmpl_data(clause))
    dataset.append(cl_data)
vdf = pd.DataFrame(dataset)
vdf = vdf.set_index('node')
print(vdf.shape)
vdf.head(5)
# -
ts.show(vdf, spread=10)
# ## Count verb lexeme object tendencies
# +
# +
# rows: verb_id, columns: has_obj (0/1), values: occurrence counts
vo_ct = pd.pivot_table(
    vdf,
    index='verb_id',
    columns='has_obj',
    aggfunc='size',
    fill_value=0,
)
# sort rows by total attestations, most frequent first
vo_ct = vo_ct.loc[vo_ct.sum(1).sort_values(ascending=False).index]
vo_ct.head(10)
# -
# ### Cull dataset down
print('present data shape:')
vo_ct.shape
vo_ct.sum(1).plot()
# +
vo_ct2 = vo_ct[vo_ct.sum(1) >= 30] # keep those with N observations
print('new shape of data:')
vo_ct2.shape
# +
# normalize counts to row-wise proportions (share of clauses with/without object)
vo_pr = vo_ct2.div(vo_ct2.sum(1), 0)
vo_pr.head(10)
# -
# # Cluster them
import seaborn as sns
# +
fig, ax = plt.subplots(figsize=(5, 8))
sns.stripplot(ax=ax, data=100*vo_pr.iloc[:, 1:2], jitter=0.2, edgecolor='black', linewidth=1, color='lightblue', size=10)
ax.set_ylabel('% of verb lexeme with an object', size=14)
plt.savefig('/Users/cody/Desktop/verb_objects.png', dpi=300, bbox_inches='tight', facecolor='white')
# -
# ## Select a subset for transitivity tagging
#
# We will select the prototypical, unambiguous cases.
#
# These seem to be those from 0-5% (intransitive)
# and those from 60-100%. We'll leave the rest of the cases.
# Select prototypical transitive (>= 60% object rate) and
# intransitive (<= 5% object rate) verb lexemes.
tran = vo_pr[vo_pr[1] >= 0.6]
itran = vo_pr[vo_pr[1] <= 0.05]
tran
itran
# how many verbs would this account for?
# NB: the original `np.array(tran.index, itran.index)` passed itran.index
# as numpy's `dtype` argument; the two indexes must be concatenated instead.
vo_ct.loc[np.concatenate([tran.index, itran.index])].sum(1).sum()
# ## Export for Inspection
# +
from pathlib import Path
from df2gspread import df2gspread as d2g
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'/Users/cody/.config/gspread/service_account.json', scope
)
drive_id = Path.home().joinpath('github/CambridgeSemiticsLab/Gesenius_data/data/_private_/keys/tran_folder.txt').read_text().strip()
# -
gc = gspread.authorize(credentials)
tran_sh = gc.create("transitive_verbs", drive_id)
itran_sh = gc.create('intransitive_verbs', drive_id)
d2g.upload(tran.round(2), tran_sh.id, 'Sheet1',
row_names=True, col_names=True, credentials=credentials)
d2g.upload(itran.round(2), itran_sh.id, 'Sheet1',
row_names=True, col_names=True, credentials=credentials)
# ## Cluster on multivariate tendencies
#
# A lot of dynamic verbs are in the intransitive list. It would be nice to have some separation for categories close to a stative / dynamic dichotomy. It may be possible to do this with a PCA analysis by using the `Cmpl` arguments.
# +
# +
# counts of object- and complement-bearing clauses per verb lexeme
va_ct = pd.pivot_table(
    vdf,
    index='verb_id',
    values=['has_obj', 'has_cmpl'],
    aggfunc='sum',
    fill_value=0,
)
vlex_ct = vdf.verb_id.value_counts()
vlex_ct = vlex_ct[vlex_ct >= 15] # restrict frequency to N and up
va_ct = va_ct.loc[vlex_ct.index]
# proportions: divide each count by the lexeme's total attestations
va_pr = va_ct.div(vlex_ct, 0)
va_pr.head(25)
# -
va_pr.shape
va_pr.loc['ידע.qal']
# lexemes with low object AND low complement rates
va_pr[
    (va_pr.has_obj < 0.3)
    & (va_pr.has_cmpl < 0.1)
]
| workflow/notebooks/old_pipeline/verb_dataset/transitivity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial 4: Dealing With Failure
# ================================
#
# In the last tutorial we showed how our `NonLinearSearch` can potentially fail, and infer a local maxima solution.
# In this tutorial, we're going to learn how to stop this and infer the globally maximal lens model.
#
# In the previous tutorial, when we inferred a local maximum we knew that we had done so. For modeling a real lens,
# we do not know the *true* lens model and it is often unclear if a solution is a global or local maximum. The tricks
# we learn in this tutorial are therefore equally important for verifying that a solution does indeed appear to be the
# global maximum.
# +
# %matplotlib inline
from pyprojroot import here
workspace_path = str(here())
# %cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
import numpy as np
from os import path
import autolens as al
import autolens.plot as aplt
import autofit as af
# -
# we'll use the same strong lensing data as the previous tutorial, where:
#
# - The lens `Galaxy`'s `LightProfile` is an `EllipticalSersic`.
# - The lens `Galaxy`'s total mass distribution is an `EllipticalIsothermal`.
# - The source `Galaxy`'s `LightProfile` is an `EllipticalExponential`.
# +
dataset_name = "light_sersic__mass_sie__source_exp"
dataset_path = path.join("dataset", "howtolens", "chapter_2", dataset_name)
# Load the simulated image, its noise map and the PSF from FITS files.
# pixel_scales sets the arc-second size of each image pixel.
imaging = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    pixel_scales=0.1,
)
# -
# we'll create and use a smaller 2.0" `Mask2D` again.
mask = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, radius=2.0
)
# When plotted, the lens light`s is clearly visible in the centre of the image.
imaging_plotter = aplt.ImagingPlotter(
imaging=imaging, visuals_2d=aplt.Visuals2D(mask=mask)
)
imaging_plotter.subplot_imaging()
# Like in the previous tutorials, we use a `SettingsPhaseImaging` object to specify our model-fitting procedure uses a
# regular `Grid`.
# +
settings_masked_imaging = al.SettingsMaskedImaging(grid_class=al.Grid, sub_size=2)
settings = al.SettingsPhaseImaging(settings_masked_imaging=settings_masked_imaging)
# -
# __Approach 1: Prior Tuning__
#
# First, we're going to try giving our `NonLinearSearch` a helping hand. Lets think about our priors, they`re telling
# the `NonLinearSearch` where to look in parameter space. If we tell it to look in the right place (that is,
# *tune* our priors), it might find the best-fit lens model.
#
# We've already seen that we can fully customize priors in **PyAutoLens**, so lets do it. I've set up a custom phase
# below and specified a new set of priors that'll give the `NonLinearSearch` a much better chance of inferring the
# correct model. I've also let you know what we're changing the priors from (as initially specified by the
# `config/priors/default` config files.)
#
# we'll call our lens and source galaxies `lens` and `source` this time, for shorter more readable code.
#
# In a later tutorial, we'll cover `NonLinearSearch` approaches that are different to Dynesty, where one provides the
# non-linear search with a *starting point* where it samples parameter space. In a similar fashion to prior tuning,
# giving these searches a good starting point will increase the chances of us finding the global maxima.
lens = al.GalaxyModel(
redshift=0.5, bulge=al.lp.EllipticalSersic, mass=al.mp.EllipticalIsothermal
)
# By default, the prior on the $(y,x)$ coordinates of a `LightProfile` / `MassProfile` is a GaussianPrior with mean 0.0" and
# sigma "1.0. However, visual inspection of our strong lens image tells us that its clearly around x = 0.0" and y = 0.0",
# so lets reduce where `NonLinearSearch` looks for these parameters.
lens.bulge.centre.centre_0 = af.UniformPrior(lower_limit=-0.05, upper_limit=0.05)
lens.bulge.centre.centre_1 = af.UniformPrior(lower_limit=-0.05, upper_limit=0.05)
lens.mass.centre.centre_0 = af.UniformPrior(lower_limit=-0.05, upper_limit=0.05)
lens.mass.centre.centre_1 = af.UniformPrior(lower_limit=-0.05, upper_limit=0.05)
# By default, the elliptical components of the of our lens `Galaxy`'s elliptical `LightProfile` are `UniformPriors`
# between -1.0 and 1.0, corresponding to the full range of possible ellipses with axis-ratio from 0.0 to 1.0 and
# position angles from 0.0 to 180.0 degrees.
#
# However, looking close to the image it is clear that the lens `Galaxy`'s light is elliptical and oriented around
# 45.0 degrees counter-clockwise from the x-axis. We can update the priors on our elliptical components to reflect this.
lens.bulge.elliptical_comps.elliptical_comps_0 = af.GaussianPrior(
mean=0.333333, sigma=0.1, lower_limit=-1.0, upper_limit=1.0
)
lens.bulge.elliptical_comps.elliptical_comps_1 = af.GaussianPrior(
mean=0.0, sigma=0.1, lower_limit=-1.0, upper_limit=1.0
)
# Lets additionally assume that the `LightProfile`'s ellipticity informs us of the `MassProfile`'s ellipticity. Because
# this may not strictly be true (e.g. because of dark matter) we'll use a wider prior.
lens.mass.elliptical_comps.elliptical_comps_0 = af.GaussianPrior(
mean=0.333333, sigma=0.3, lower_limit=-1.0, upper_limit=1.0
)
lens.mass.elliptical_comps.elliptical_comps_1 = af.GaussianPrior(
mean=0.0, sigma=0.3, lower_limit=-1.0, upper_limit=1.0
)
# The effective radius of a `LightProfile` is its `half-light` radius, the radius at which 50% of its total luminosity
# is internal to a circle defined within that radius. **PyAutoLens** assumes a UniformPrior on this quantity between 0.0" and
# 4.0", but inspection of the image (again, using a colormap scaling) shows the lens`s light doesn`t extend anywhere near
# 4.0", so lets reduce it.
lens.bulge.effective_radius = af.GaussianPrior(
mean=1.0, sigma=0.8, lower_limit=0.0, upper_limit=np.inf
)
# Typically, we have knowledge of our lens `Galaxy`'s morphology. Most strong lenses are massive elliptical galaxies which
# have Sersic indexes near 4. So lets change our Sersic index from a UniformPrior between 0.8 and 8.0 to reflect this.
lens.bulge.sersic_index = af.GaussianPrior(
mean=4.0, sigma=1.0, lower_limit=0.0, upper_limit=np.inf
)
# Finally, the `ring` that the lensed source forms clearly has a radius of about 1.2". This is its Einstein radius, so
# lets change the prior from a UniformPrior between 0.0" and 4.0".
lens.mass.einstein_radius = af.GaussianPrior(
mean=1.2, sigma=0.2, lower_limit=0.0, upper_limit=np.inf
)
# In this exercise, I'm not going to change any priors on the source galaxy. Whilst lens modeling experts can look at a
# strong lens and often tell you roughly where the source-galaxy is located (in the source-plane), it is something of art
# form. Furthermore, the source's morphology can be pretty complex, making it difficult to come up with a good source prior!
source = al.GalaxyModel(redshift=1.0, bulge=al.lp.EllipticalExponential)
# We can now create this custom phase and run it. Our `NonLinearSearch` will now start by sampling higher likelihood
# regions of parameter space, given our improved and more informed priors.
# +
phase = al.PhaseImaging(
search=af.DynestyStatic(
path_prefix="howtolens", name="phase_t4_custom_priors", n_live_points=50
),
settings=settings,
galaxies=af.CollectionPriorModel(lens=lens, source=source),
)
print(
"Dynesty has begun running - checkout the workspace/output/4_dealing_with_failure"
" folder for live output of the results, images and lens model."
" This Jupyter notebook cell with progress once Dynesty has completed - this could take some time!"
)
result_custom_priors = phase.run(dataset=imaging, mask=mask)
print("Dynesty has finished run - you may now continue the notebook.")
# -
# Bam! We get a good model, which indeed corresponds to the global maxima. By giving our `NonLinearSearch` a helping hand
# and informing it of where to sample parameter space, we can increase the odds that we find the global maxima solution.
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=result_custom_priors.max_log_likelihood_fit
)
fit_imaging_plotter.subplot_fit_imaging()
# By tuning our priors to the lens we're fitting we can increase our chance of inferring the global maxima lens model.
# Before moving onto the next approach, lets think about the advantages and disadvantages of prior tuning:
#
# Advantages:
#
# - We find the maximum log likelihood solution in parameter space.
# - The phase took less time to run because the `NonLinearSearch` explored less of parameter space.
#
# Disadvantages:
#
# - If we specified a prior incorrectly the `NonLinearSearch` would begin and therefore end at an incorrect solution.
# - Our phase was tailored to this specific strong lens. If we want to fit a large sample of lenses we`d
# have to write a custom phase for every single one - this would take up a lot of our time!
# __Approach 2: Reducing Complexity__
#
# Previously, Our non-linear searched failed because we made the lens model more complex. Can we can make it less complex,
# whilst still keeping it fairly realistic? Maybe there are some assumptions we can make to reduce the number of
# lens model parameters and therefore dimensionality of non-linear parameter space?
#
# Well, we can *always* make assumptions. Below, I'm going to create a phase that assumes that light-traces-mass. That
# is, that our `LightProfile`'s centre, and elliptical components are perfectly aligned with its mass. This may, or may
# not, be a reasonable assumption, but it`ll remove 4 parameters from the lens model (the `MassProfile`'s y, x, and
# elliptical components), so its worth trying!
# +
lens = al.GalaxyModel(
redshift=0.5, bulge=al.lp.EllipticalSersic, mass=al.mp.EllipticalIsothermal
)
source = al.GalaxyModel(redshift=1.0, bulge=al.lp.EllipticalExponential)
# -
# In the pass priors function we can `pair` any two parameters by setting them equal to one another. This removes the
# parameter on the left-hand side of the pairing from the lens model such that is always assumes the same value as the
# parameter on the right-hand side.
lens.mass.centre = lens.bulge.centre
# Lets do this with the elliptical components of the light and mass profiles.
lens.mass.elliptical_comps = lens.bulge.elliptical_comps
# Again, we create this phase and run it. The `NonLinearSearch` now has a less complex parameter space to search.
# +
phase_light_traces_mass = al.PhaseImaging(
search=af.DynestyStatic(
path_prefix="howtolens", name="phase_t4_light_traces_mass", n_live_points=40
),
settings=settings,
galaxies=af.CollectionPriorModel(lens=lens, source=source),
)
print(
"Dynesty has begun running - checkout the workspace/output/4_dealing_with_failure"
" folder for live output of the results, images and lens model."
" This Jupyter notebook cell with progress once Dynesty has completed - this could take some time!"
)
result_light_trace_mass = phase_light_traces_mass.run(dataset=imaging, mask=mask)
print("Dynesty has finished run - you may now continue the notebook.")
fit_imaging_plotter = aplt.FitImagingPlotter(
fit=result_light_trace_mass.max_log_likelihood_fit
)
fit_imaging_plotter.subplot_fit_imaging()
# -
# The results look pretty good. Our source galaxy fits the data pretty well and we've clearly inferred a model that
# looks similar to the one above. However, inspection of the residuals shows that the fit wasn`t quite as good as the
# custom-phase above.
#
# It turns out that when I simulated this image light didn`t perfectly trace mass. The `LightProfile`'s elliptical
# components were (0.333333, 0.0) whereas the `MassProfile`'s were (0.25, 0.0). The quality of the fit has suffered as a
# result and the log likelihood we inferred is lower.
#
# Herein lies the pitfalls of making assumptions - they may make your model less realistic and your fits worse!
#
# Again, lets consider the advantages and disadvantages of this approach:
#
# Advantages:
#
# - By reducing parameter space`s complexity we inferred a global maximum log likelihood.
# - The phase is not specific to one lens - we could run it on many strong lens images.
#
# Disadvantages:
#
# - Our model was less realistic and our fit suffered as a result.
# __Approach 3: Look Harder__
#
# In approaches 1 and 2 we extended our `NonLinearSearch` an olive branch and helped it find the highest log likelihood
# regions of parameter space. In approach 3 ,we're going to tell it to just `look harder`.
#
# Basically, every `NonLinearSearch` has a set of parameters that govern how thoroughly it searches parameter
# space. The more thoroughly it looks, the more likely it is that it`ll find the global maximum lens model. However,
# the search will also take longer - and we don't want it to take too long to get us a result!
#
# In tutorial 7, we'll discuss non-linear searches in more detail, so we'll defer a detailed discussion of setting up
# the non-linear searches until then.
#
# lets list the advantages and disadvantages of simply adjusting the non-linear search:
#
# Advantages:
#
# - Its easy to setup, we simply change parameter of the `NonLinearSearch` like n_live_points.
#
# - It generalizes to any strong lens.
#
# - We didn`t have to make our model less realistic.
#
# Disadvantage:
#
# - Its potentially expensive. Very expensive. For very complex models, the run times can begin to take hours, days,
# weeks or, dare I say it, months!
#
# So, we can now fit strong lenses with **PyAutoLens**. And when it fails, we know how to get it to work. I hope you're
# feeling pretty smug. You might even be thinking `why should I bother with the rest of these tutorials, if I can fit
# strong a lens already`.
#
# Well, my friend, I want you to think about the last disadvantage listed above. If modeling a single lens could really
# take as long as a month, are you really willing to spend your valuable time waiting for this? I'm not, which is why I
# developed **PyAutoLens**, and in the next tutorial we'll see how we can get the best of both worlds - realistic, complex
# lens model that take mere hours to infer!
#
# Before doing that though, I want you to go over the advantages and disadvantages listed above again and think whether
# we could combine these different approaches to get the best of all worlds.
| howtolens/chapter_2_lens_modeling/tutorial_4_dealing_with_failure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Here, you will bring together everything you have learned in this course while working with data recorded from the Summer Olympic games that goes as far back as 1896! This is a rich dataset that will allow you to fully apply the data manipulation techniques you have learned. You will pivot, unstack, group, slice, and reshape your data as you explore this dataset and uncover some truly fascinating insights. Enjoy!
# # Case Study - Summer Olympics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
olympics = pd.read_csv('all_medalists.csv')
olympics.head()
# ## Using .value_counts() for ranking
# For this exercise, you will use the pandas Series method .value_counts() to determine the top 15 countries ranked by total number of medals.
#
# Notice that .value_counts() sorts by values by default. The result is returned as a Series of counts indexed by unique entries from the original Series with values (counts) ranked in descending order
# Extract the 'NOC' column from the DataFrame olympics and assign the result to country_names. Notice that this Series has repeated entries for every medal (of any type) a country has won in any Edition of the Olympics
country_names = olympics['NOC']
# Create a Series medal_counts by applying .value_counts() to the Series country_names.
medal_counts = country_names.value_counts()
medal_counts
# ## Using .pivot_table() to count medals by type
# Rather than ranking countries by total medals won and showing that list, you may want to see a bit more detail. You can use a pivot table to compute how many separate bronze, silver and gold medals each country won. That pivot table can then be used to repeat the previous computation to rank by total medals won.
#
# In this exercise, you will use .pivot_table() first to aggregate the total medals by type. Then, you can use .sum() along the columns of the pivot table to produce a new column. When the modified pivot table is sorted by the total medals column, you can display the results from the last exercise with a bit more detail.
# +
# Construct the pivot table: counted
# rows: country (NOC), columns: medal type, values: medal counts
counted = olympics.pivot_table(index='NOC', values='Athlete', columns='Medal', aggfunc='count')
# Create the new column: counted['totals'] (sum across the medal-type columns)
counted['totals'] = counted.sum(axis='columns')
# Sort counted by the 'totals' column
counted = counted.sort_values('totals', ascending=False)
# Print the top 15 rows of counted
counted.head(15)
# -
# ## Applying .drop_duplicates()
# What could be the difference between the 'Event_gender' and 'Gender' columns? You should be able to evaluate your guess by looking at the unique values of the pairs (Event_gender, Gender) in the data. In particular, you should not see something like (Event_gender='M', Gender='Women'). However, you will see that, strangely enough, there is an observation with (Event_gender='W', Gender='Men').
#
# The duplicates can be dropped using the .drop_duplicates() method, leaving behind the unique observations.
# Select columns 'Event_gender' and 'Gender' and drop duplicate pairs
olympics[['Event_gender', 'Gender']].drop_duplicates()
# ## Finding possible errors with .groupby()
# You will now use .groupby() to continue your exploration. Your job is to group by 'Event_gender' and 'Gender' and count the rows.
#
# You will see that there is only one suspicious row: This is likely a data error.
olympics.groupby(['Event_gender', 'Gender']).count()
# ## Locating suspicious data
# You will now inspect the suspect record by locating the offending row.
#
# You will see that, according to the data, <NAME> was a man that won a medal in a women's event. That is a data error as you can confirm with a web search.
olympics[(olympics['Event_gender'] == 'W') & (olympics['Gender'] == 'Men')]
# ## Using .nunique() to rank by distinct sports
# You may want to know which countries won medals in the most distinct sports. The .nunique() method is the principal aggregation here. Given a categorical Series S, S.nunique() returns the number of distinct categories
# __Instructions__
# - Group olympics by 'NOC'.
# - Compute the number of distinct sports in which each country won medals. To do this, select the 'Sport' column from your groupby object and apply .nunique().
# - Sort it in descending order with .sort_values() and ascending=False.
olympics.groupby('NOC')['Sport'].nunique().sort_values(ascending = False).head(15)
# + active=""
# Well done! Interestingly, the USSR is not in the top 5 in this category, while the USA continues to remain on top. What could be the cause of this? You'll compare the medal counts of USA vs. USSR more closely in the next two exercises to find out!
# -
# ## Counting USA vs. USSR Cold War Olympic Sports
# The Olympic competitions between 1952 and 1988 took place during the height of the Cold War between the United States of America (USA) & the Union of Soviet Socialist Republics (USSR). Your goal in this exercise is to aggregate the number of distinct sports in which the USA and the USSR won medals during the Cold War years.
#
# The construction is mostly the same as in the preceding exercise. There is an additional filtering stage beforehand in which you reduce the original DataFrame medals by extracting data from the Cold War period that applies only to the US or to the USSR. The relevant country codes in the DataFrame, which has been pre-loaded as medals, are 'USA' & 'URS'.
# +
# Extract all rows for which the 'Edition' is between 1952 & 1988: during_cold_war
during_cold_war = (olympics.Edition>=1952) & (olympics.Edition<=1988)
# Extract rows for which 'NOC' is either 'USA' or 'URS': is_usa_urs
is_usa_urs = olympics.NOC.isin(['USA', 'URS'])
# Use during_cold_war and is_usa_urs to create the DataFrame: cold_war_olympics
cold_war_olympics = olympics.loc[during_cold_war & is_usa_urs]
# Group cold_war_olympics by 'NOC'
country_grouped = cold_war_olympics.groupby('NOC')
# Create Nsports: number of distinct sports each country medalled in,
# sorted in descending order
Nsports = country_grouped['Sport'].nunique().sort_values(ascending=False)
# Print Nsports
Nsports
# + active=""
# Great work! As you can see, the USSR is actually higher than the US when you look only at the Olympic competitions between 1952 and 1988!
# -
# ## Counting USA vs. USSR Cold War Olympic olympics
# For this exercise, you want to see which country, the USA or the USSR, won the most olympics consistently over the Cold War period.
#
# There are several steps involved in carrying out this computation.
#
# You'll need a pivot table with years ('Edition') on the index and countries ('NOC') on the columns. The entries will be the total number of olympics each country won that year. If the country won no olympics in a given edition, expect a NaN in that entry of the pivot table.
# You'll need to slice the Cold War period and subset the 'USA' and 'URS' columns.
# You'll need to make a Series from this slice of the pivot table that tells which country won the most olympics in that edition using .idxmax(axis='columns'). If .max() returns the maximum value of Series or 1D array, .idxmax() returns the index of the maximizing element. The argument axis=columns or axis=1 is required because, by default, this aggregation would be done along columns for a DataFrame.
# The final Series contains either 'USA' or 'URS' according to which country won the most olympics in each Olympic edition. You can use .value_counts() to count the number of occurrences of each.
# +
# Create the pivot table: olympics_won_by_country
# rows: Olympic edition (year), columns: country, values: medal counts
olympics_won_by_country = olympics.pivot_table(index='Edition', columns='NOC', values='Athlete', aggfunc='count')
# Slice olympics_won_by_country: cold_war_usa_urs_olympics
cold_war_usa_urs_olympics = olympics_won_by_country.loc[1952:1988, ['USA','URS']]
# Create most_olympics: for each edition, the country (column label)
# with the higher medal count
most_olympics = cold_war_usa_urs_olympics.idxmax(axis='columns')
# Print most_olympics.value_counts()
most_olympics.value_counts()
# + active=""
# Well done! Here, once again, the USSR comes out on top.
# -
# ## Visualizing USA Medal Counts by Edition: Line Plot
# Your job in this exercise is to visualize the medal counts by 'Edition' for the USA. The DataFrame has been pre-loaded for you as medals.
# %matplotlib notebook
# +
# Create the DataFrame: usa
usa = olympics[olympics.NOC == 'USA']
# Group usa by ['Edition', 'Medal'] and aggregate over 'Athlete'
usa_medals_by_year = usa.groupby(['Edition', 'Medal'])['Athlete'].count()
# Reshape usa_medals_by_year by unstacking: one column per medal type
usa_medals_by_year = usa_medals_by_year.unstack(level='Medal')
# Plot the DataFrame usa_medals_by_year
usa_medals_by_year.plot()
plt.show()
# + active=""
# Great work! It's difficult to gain too much insight from this visualization, however. An area plot, which you'll construct in the next exercise, may be more helpful.
# -
# ## Visualizing USA Medal Counts by Edition: Area Plot
# As in the previous exercise, your job in this exercise is to visualize the medal counts by 'Edition' for the USA. This time, you will use an area plot to see the breakdown better. The usa DataFrame has been created and all reshaping from the previous exercise has been done. You need to write the plotting command.
# Create an area plot of usa_medals_by_year. This can be done by using .plot.area().
usa_medals_by_year.plot.area()
# ## Visualizing USA Medal Counts by Edition: Area Plot with Ordered Medals
# You may have noticed that the medals are ordered according to a lexicographic (dictionary) ordering: Bronze < Gold < Silver. However, you would prefer an ordering consistent with the Olympic rules: Bronze < Silver < Gold.
#
# You can achieve this using Categorical types. In this final exercise, after redefining the 'Medal' column of the DataFrame medals, you will repeat the area plot from the previous exercise to see the new ordering.
# +
# Redefine 'Medal' as an ordered categorical so plots stack in Olympic
# order (Bronze < Silver < Gold) instead of alphabetical order
olympics.Medal = pd.Categorical(values=olympics.Medal, categories=['Bronze', 'Silver', 'Gold'], ordered=True)
# Create the DataFrame: usa
usa = olympics[olympics.NOC == 'USA']
# Group usa by 'Edition', 'Medal', and count over 'Athlete'
usa_medals_by_year = usa.groupby(['Edition', 'Medal'])['Athlete'].count()
# Reshape usa_medals_by_year by unstacking
usa_medals_by_year = usa_medals_by_year.unstack(level='Medal')
# Create an area plot of usa_medals_by_year
usa_medals_by_year.plot.area()
# -
| Skill Tracks/Data Manipulation with Python/Course 2 - Manipulating DataFrames with pandas/Chapter 5 - Case Study - Summer Olympics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
# -
# # Load the data
data_path = Path('/media/bnu/data/nlp-practice/text-generation/Winston_Churchil.txt')
# Read the whole corpus and lower-case it so the vocabulary stays small
raw_text = data_path.read_text().lower()
print('Raw Text Sample:', raw_text[:50])
print('Raw Text Length:', len(raw_text))
# +
# Build the character vocabulary and the two lookup tables
# (char -> integer id, and integer id -> char for decoding predictions).
char_list = sorted(set(raw_text))
char_to_idx = {ch: idx for idx, ch in enumerate(char_list)}
idx_to_char = dict(enumerate(char_list))
print('Number of Char:', len(char_list))
print('Char to Index:')
print(char_to_idx)
# -
# # Build the training set
# +
sequence_length = 100  # number of characters in each input window

# Encode the whole corpus once, then slide a window over it:
# each sample is `sequence_length` char ids, the label is the next char id.
encoded = [char_to_idx[ch] for ch in raw_text]
num_windows = len(encoded) - sequence_length
x_train = [encoded[i:i + sequence_length] for i in range(num_windows)]
y_train = [encoded[i + sequence_length] for i in range(num_windows)]
print('Train Data Sample:')
print(x_train[10])
print(y_train[10])
# -
# +
num_samples = len(x_train)
num_vocabs = len(char_list)
# LSTM expects (samples, timesteps, features); scale the char ids into [0, 1)
x_train = np.reshape(x_train, (num_samples, sequence_length, 1)) / float(num_vocabs)
# One-hot encode the target characters
y_train = np_utils.to_categorical(y_train)
print('X Train Shape:', x_train.shape)
print('Y Train Shape:', y_train.shape)
# -
# # Build and train the model
# Single-layer LSTM with dropout, softmax over the vocabulary.
model = Sequential([
    LSTM(256, input_shape=(x_train.shape[1], x_train.shape[2])),
    Dropout(0.2),
    Dense(y_train.shape[1], activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x_train, y_train, epochs=50, batch_size=4096, validation_split=0.1)
# # 模型预测
# +
def predict_next(input_array):
    """Return the model's next-character probability distribution for one encoded window."""
    # Reshape to (1 sample, sequence_length timesteps, 1 feature) and apply
    # the same normalization used for the training data.
    window = np.reshape(input_array, (1, sequence_length, 1))
    return model.predict(window / float(num_vocabs))
def string_to_index(input_string):
    """Encode the trailing `sequence_length` characters of a string as integer ids."""
    tail = input_string[-sequence_length:]
    return [char_to_idx[ch] for ch in tail]
def pred_to_char(pred):
    """Map a prediction vector to the character with the highest probability."""
    best_idx = np.argmax(pred)
    return idx_to_char[best_idx]
def generate_text(init_string, steps=200):
    """Extend `init_string` by `steps` characters, picking each greedily from the model.

    NOTE(review): assumes the lower-cased seed is at least `sequence_length`
    characters long, since string_to_index takes the last window — confirm callers.
    """
    text = init_string.lower()
    for _ in range(steps):
        window = string_to_index(text)
        text += pred_to_char(predict_next(window))
    return text
# -
# Seed the generator with a prompt (104 chars, i.e. at least `sequence_length`
# characters after lowercasing, as generate_text requires) and print the result.
init_string = 'His object in coming to New York was to engage officers for that service. He came at an opportune moment'
result = generate_text(init_string)
print(result)
| notebooks/exercises/Keras_CharRNN_TextGeneration.ipynb |