code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import xgboost
import pandas as pd
import sklearn
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from joblib import dump
# `pwd` was a bare IPython magic; outside IPython it is a NameError, so keep
# it as a commented shell escape.
# !pwd
df = pd.read_csv("/Users/yonipineda/Data-Science/airbnb_api/transformed_df.csv")
df.head()
df.isnull().sum()
# split into features matrix and target vector
X = df.drop('price', axis=1)
y = df.price
# train test split (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=14)
# Ordinal-encode the categorical columns, then fit a random forest regressor.
model = make_pipeline(
    ce.OrdinalEncoder(),
    RandomForestRegressor(n_estimators=100, random_state=42, n_jobs=-1)
)
model.fit(X_train, y_train)
fin = model.predict(X_test)
import pickle
# BUG FIX: the original pickled `fin` (the prediction array for this test
# split) instead of the fitted pipeline, so the saved artifact could never
# serve predictions for new inputs. Persist the model itself.
with open('regressor.pkl', 'wb') as f:
    pickle.dump(model, f)
| RandomForestRegressor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# +
from torchvision import transforms
from PIL import Image
import os,glob
from myfuc import *
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# !pwd
# +
# bmp to np array.
# Collect the resized sample images; only the first one is actually processed
# because the loop breaks on its first iteration.
plist=glob.glob("maetorch/imgs_resize/*")
# target_dir="/home/yy/kamome/PPWBv3.1/monoPGM/"
# os.makedirs(target_dir, exist_ok=True)
for p in plist[:3]:
    # LL,LH,LI=makeWT97(p)
    # makeWT97 comes from myfuc -- presumably loads the image and returns a
    # 2-D array; TODO confirm its return shape/dtype.
    LI=makeWT97(p)
    break
plt.imshow(LI,"gray")
# -
# Image.open(p)
LI.shape
# fwt97(LI,112,224)
# One forward 1-D wavelet pass with width=224, height=112 (the transposed
# call above is left commented out).
fwt97(LI,224,112)
# +
# CDF 9/7 lifting coefficients for the predict/update steps below:
a1 = -1.586134342
a2 = -0.05298011854
a3 = 0.8829110762
a4 = 0.4435068522
# Scale coeff:
k1 = 0.81289306611596146 # 1/1.230174104914
k2 = 0.61508705245700002 # 1.230174104914/2
# -
# One level of the 1-D forward CDF 9/7 lifting transform applied down every
# column of `s`: two predict steps (odd rows) and two update steps (even
# rows), each with a symmetric boundary extension at the edges. Statement
# order is significant -- each step reads the results of the previous one.
width,height=[224,112]
s=LI.copy()
for col in range(width): # Do the 1D transform on all cols:
    # Predict 1. y1 -- odd rows become detail coefficients
    for row in range(1, height-1, 2):
        s[row][col] += a1 * (s[row-1][col] + s[row+1][col])
    s[height-1][col] += 2 * a1 * s[height-2][col] # Symmetric extension
    # Update 1. y0 -- even rows become approximation coefficients
    for row in range(2, height, 2):
        s[row][col] += a2 * (s[row-1][col] + s[row+1][col])
    s[0][col] += 2 * a2 * s[1][col] # Symmetric extension
    # Predict 2.
    for row in range(1, height-1, 2):
        s[row][col] += a3 * (s[row-1][col] + s[row+1][col])
    s[height-1][col] += 2 * a3 * s[height-2][col]
    # Update 2.
    for row in range(2, height, 2):
        s[row][col] += a4 * (s[row-1][col] + s[row+1][col])
    s[0][col] += 2 * a4 * s[1][col]
def fwt97_2d(m, nlevels=1):
    """Apply `nlevels` rounds of the 2-D forward CDF 9/7 wavelet transform.

    Each level runs the 1-D transform over columns and then rows, after
    which the active region (the low-pass quadrant) is halved.
    """
    h, w = m.shape
    for _ in range(nlevels):
        m = fwt97(m, w, h)  # cols
        m = fwt97(m, w, h)  # rows
        # BUG FIX: `/=` yields floats (the original even printed type(w) to
        # debug this), which break range()/indexing inside fwt97 on the next
        # level. Floor-divide to keep integer sizes.
        w //= 2
        h //= 2
    return m
# Scratch comparison: list-of-lists vs. an uninitialised numpy array.
[[0]*2 for i in range(3)],np.empty((3,2))
p0=Image.open(p)
# BUG FIX: the `np.int` alias was deprecated in NumPy 1.20 and removed in
# 1.24 -- use the builtin `int` (same behavior: platform default int dtype).
p0=np.array(p0).astype(int)
plt.imshow(p0,"gray")
d=224
fwt97(p0,d,d)
| wt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prédiction de loyers
# On souhaite estimer le prix des loyers à partir des données surface et arrondissement. Le prix est une valeur continue donc on utilise uniquement des algorithmes de régression:
#
# * Régression linéaire avec 1 feature (surface)
# * Régression linéaire avec 2 features (surface + arrondissement)
# * Régression polynomiale avec 2 features
# * Régression avec k-nearest neighbors
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# ## Loading the data
house_data_source = pd.read_csv('house_data.csv')
print(house_data_source.shape)
house_data_source.head()
# ## Cleaning the data
# Rows with any missing value are dropped entirely
house_data = house_data_source.dropna(axis=0, how='any')
print(house_data.shape)
# ## First look at the data
# Pairwise scatter plots, coloured by district (arrondissement)
sns.pairplot(data=house_data, hue="arrondissement");
# ## Séparation training/testing set
#
# On utilise la fonction fournie par sklearn.
# +
from sklearn import linear_model
from sklearn.model_selection import train_test_split

# Features: everything except the target column `price`
data = house_data.drop('price', axis=1)
# Target: the rent price
target = house_data['price']
xtrain_source, xtest_source, ytrain_source, ytest_source = train_test_split(data, target, test_size=0.2)
# Report the split shapes. FIX: the original used a list comprehension purely
# for its print side effect (building a throwaway list of Nones, suppressed
# with a trailing `pass`); a plain loop is the idiomatic form.
for part in (xtrain_source, xtest_source, ytrain_source, ytest_source):
    print(part.shape)
# Error rates per model, filled in by the later cells
error = {}
# -
# # Régression linéaire avec une seule feature
#
# Il s'agit du même modèle qu'initialement proposé dans l'exercice. Il est repris à titre de comparaison uniquement.
# Restrict to a single feature: the district column is dropped
xtrain = xtrain_source.copy().drop('arrondissement', axis=1)
xtest = xtest_source.copy().drop('arrondissement', axis=1)
ytrain = ytrain_source.copy()
ytest = ytest_source.copy()
# ## Fitting the regression
regr = linear_model.LinearRegression()
regr.fit(xtrain, ytrain)
# ## Error rate
# Defined as 1 - R^2 on the held-out test split
error['Linear Regression w/ 1 feature'] = 1 - regr.score(xtest, ytest)
print('Erreur: %f' % error['Linear Regression w/ 1 feature'])
# +
fig = plt.figure(figsize=(8,5))
ax = plt.axes(facecolor='#f3f3f3')
plt.grid(color='w', linestyle='dashed')
plt.scatter(xtrain['surface'], ytrain, alpha = .8, marker = '+', label='Données entraînement')
plt.scatter(xtest['surface'], ytest, alpha = .8, marker = 'o', label='Données de test')
# Regression line drawn between surface 0 and 400
plt.plot([0,400], regr.predict([[0],[400]]), color='red', linestyle='dotted', label='Regression linéaire')
plt.title("Loyers vs. Surface")
plt.legend(loc='best')
ax = ax.set(xlabel='Surface', ylabel='Loyer')
# -
# # Amélioration 1 : Régression linéaire avec deux features
#
# On utilise cette fois les deux features surface et arrondissement
# Work on fresh copies of the splits (both features are kept this time).
xtrain, xtest, ytrain, ytest = (
    frame.copy()
    for frame in (xtrain_source, xtest_source, ytrain_source, ytest_source)
)
# ## Fitting the regression (fit returns the estimator itself)
regr_2_features = linear_model.LinearRegression().fit(xtrain, ytrain)
# # Error rate (1 - R^2 on the test split)
key = 'Linear Regression w/ 2 features'
error[key] = 1 - regr_2_features.score(xtest, ytest)
print('Erreur: %f' % error[key])
# Il est meilleur qu'avec une seule feature
# # Amélioration 2 : Régression polynomiale avec deux features
#
# On utilise toujours les deux features. On essaie de faire correspondre un polynôme de degré > 1 aux données. Le degré du polynôme utilisé pour la régression est un hyperparamètre
# Re-create fresh copies of the data frames each time
xtrain = xtrain_source.copy()
xtest = xtest_source.copy()
ytrain = ytrain_source.copy()
ytest = ytest_source.copy()
# ## Fitting the regression
# First run a degree-2 regression to validate the model
# +
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline

# Polynomial expansion followed by a linear fit; fit_intercept=False because
# PolynomialFeatures already emits the constant bias column
model = Pipeline([('poly', PolynomialFeatures(degree=2)),
                  ('linear', linear_model.LinearRegression(fit_intercept=False))])
model.fit(xtrain, ytrain)
# -
# # Error rate
error['Polynomial Regression degree 2 w/ 2 features'] = 1 - model.score(xtest, ytest)
print('Erreur: %f' % error['Polynomial Regression degree 2 w/ 2 features'])
# Il est meilleur qu'avec une seule feature ou qu'avec une régression linéaire
# ## Variation du degré
# +
# Sweep the polynomial degree (the hyper-parameter) from 1 to 6 and record
# the error rate in percent on the test split for each degree
xtrain = xtrain_source.copy()
xtest = xtest_source.copy()
ytrain = ytrain_source.copy()
ytest = ytest_source.copy()
errors_per_degree = []
degrees = range(1,7)
for degree in degrees:
    model = Pipeline([('poly', PolynomialFeatures(degree=degree)),
                      ('linear', linear_model.LinearRegression(fit_intercept=False))])
    errors_per_degree.append(100*(1 - model.fit(xtrain, ytrain).score(xtest, ytest)))
fig = plt.figure(figsize=(8,5))
ax = plt.axes(facecolor='#f3f3f3')
plt.grid(color='w', linestyle='dashed')
plt.plot(degrees, errors_per_degree, label='Erreur')
plt.title("Taux d'erreur vs degré de regression")
plt.legend(loc='best')
ax.set(xlabel='Degré du polynome', ylabel='Erreur')
# NOTE(review): set_yscale returns None, so `ax` is clobbered here; harmless
# because `ax` is re-created before its next use
ax = ax.set_yscale('log')
plt.show()
# -
# ## Optimisation du modèle
# On observe les résultats obtenus en fonction du degré du polynôme
# Error per degree, as a table
pd.DataFrame(data={'degree': degrees, 'error_rate': errors_per_degree})
# +
import operator
# Pick the degree with the smallest error (list index is degree-1)
min_index, min_value = min(enumerate(errors_per_degree), key=operator.itemgetter(1))
error['Polynomial Regression degree %d w/ 2 features' % (min_index+1)] = min_value / 100
print('Erreur: %f' % error['Polynomial Regression degree %d w/ 2 features' % (min_index+1)])
# -
# C'est le meilleur résultat obtenu pour l'instant
# # Amélioration 3 : Régression k-NN
# On utilise toujours les deux features. On essaie d'utiliser le modèle k-nearest-neighbors dans sa version "regression". Le nombre de voisins utilisé pour le k-NN est un hyperparamètre.
# ## Variation du modèle
# On fait varier k pour optimiser le modèle
# +
from sklearn.neighbors import KNeighborsRegressor

# Sweep k from 2 to 14 and record the error rate in percent for each value
xtrain = xtrain_source.copy()
xtest = xtest_source.copy()
ytrain = ytrain_source.copy()
ytest = ytest_source.copy()
errors_per_neighbor_number = []
hyper_params = range(2,15)
for k in hyper_params:
    knn = KNeighborsRegressor(k)
    errors_per_neighbor_number.append(100*(1 - knn.fit(xtrain, ytrain).score(xtest, ytest)))
fig = plt.figure(figsize=(8,5))
ax = plt.axes(facecolor='#f3f3f3')
plt.grid(color='w', linestyle='dashed')
plt.plot(hyper_params, errors_per_neighbor_number, label='Erreur')
plt.title("Taux d'erreur vs nombre de voisins")
plt.legend(loc='best')
ax.set(xlabel='Nombre de voisins', ylabel='Erreur')
# NOTE(review): set_yscale returns None, so `ax` ends up None; harmless here
ax = ax.set_yscale('linear')
plt.show()
# -
# ## Optimisation du modèle
# On observe les résultats obtenus en fonction du nombre de voisins
# Error per neighbour count, as a table
pd.DataFrame(data={'neighbors': hyper_params, 'error_rate': errors_per_neighbor_number})
# +
import operator
# Pick the k with the smallest error (hyper_params starts at 2, hence +2)
min_index, min_value = min(enumerate(errors_per_neighbor_number), key=operator.itemgetter(1))
error['k-NN regressor (k=%d) w/ 2 features' % (min_index+2)] = min_value / 100
print('Erreur: %f' % error['k-NN regressor (k=%d) w/ 2 features' % (min_index+2)])
# -
# Le résultat est assez proche de la régression polynomiale
# # Conclusion
#
# Voici les taux d'erreur obtenus pour chaque méthode
# Summary table of every model's error rate, indexed by model name.
s = pd.Series(error, name='Error')
s = s.rename_axis('Model')
s.reset_index()
# Compte tenu des résultats, je choisis la régression polynomiale avec 2 features.
# ## Améliorations possibles
# * Supprimer les données aberrantes (outliers)
# * Réaliser une régression linéaire par arrondissement
| Loyers++/Loyers++.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# +
# Create an IAM role called [SageMakerRole] with AmazonSageMakerFullAccess and AmazonEC2ContainerRegistryFullAccess manually
# Add the IAM role of the notebook with AmazonEC2ContainerRegistryFullAccess manually
# -
# # Create MXNet Container
# Create the binary
# !git clone https://github.com/aws/sagemaker-mxnet-container.git
# !cd sagemaker-mxnet-container
# !git checkout v3.1.2
# !python setup.py sdist
# MXNet 1.4.1, Python 3, CPU
# !cp dist/sagemaker_mxnet_container-3.1.2.tar.gz docker/1.4.1/py3/sagemaker_mxnet_container.tar.gz
# !cd docker/1.4.1/py3/
# !docker build -t preprod-mxnet:1.4.1-cpu-py3 -f Dockerfile.cpu .
# !cd ../../../
# !pip install -e .[test]
# May fail
# !tox test/unit
# May fail
# !tox -- test/integration/local --docker-base-name preprod-mxnet \
# --tag 1.4.1-cpu-py3 \
# --py-version 3 \
# --framework-version 1.4.1 \
# --processor cpu
# upload container to ECS
# create-repository in ECR manually
# !$(aws ecr get-login --region ap-northeast-1 --no-include-email)
# !docker tag preprod-mxnet:1.4.1-cpu-py3 579019700964.dkr.ecr.ap-northeast-1.amazonaws.com/preprod-mxnet:1.4.1-cpu-py3
# !docker push 579019700964.dkr.ecr.ap-northeast-1.amazonaws.com/preprod-mxnet:1.4.1-cpu-py3
# May fail
# !tox -- test/integration/sagemaker --aws-id 579019700964 \
# --docker-base-name preprod-mxnet \
# --instance-type ml.m4.xlarge \
# --tag 1.4.1-cpu-py3
# # Create TextClassification Train Docker
# +
# !cd gluon-nlp/model_zoo/text_classification/
# !aws s3 cp --recursive s3://app-mia/mytask/ .
# create train
# create hyperparameters.json
# create inputdataconfig.json
# create Dockerfile.train
# BUG FIX: the next two lines were bare shell commands -- a SyntaxError in
# Python. Keep them as notebook shell escapes like every other command here.
# !chmod +x build_and_push.sh
# !./build_and_push.sh text-classification-train train
# !docker run text_classification_train train
# -
# # Test TextClassification Train Docker
# +
# Work out the full ECR URI of the training image for this account/region.
import boto3

client = boto3.client('sts')
my_session = boto3.session.Session()

account = client.get_caller_identity()['Account']
region = my_session.region_name
algorithm_name = 'text-classification-train'

ecr_image = f'{account}.dkr.ecr.{region}.amazonaws.com/{algorithm_name}:latest'
print(ecr_image)
# +
import json
from sagemaker.estimator import Estimator
from sagemaker import get_execution_role, session

# IAM role the training job will assume
role = get_execution_role()
# Hyper-parameters prepared earlier for the text-classification container
hyperparameters = json.load(open('/home/ec2-user/SageMaker/gluon-nlp/model_zoo/text_classification/hyperparameters.json', 'r'))
bucket = 'app-mia'
s3_train_data = 's3://{}/{}'.format(bucket, 'mytask/train.tsv')
s3_validation_data = 's3://{}/{}'.format(bucket, 'mytask/dev.tsv')
# NOTE(review): session.s3_input and the train_* / image_name kwargs below
# are SageMaker SDK v1 API; v2 renamed them (TrainingInput, instance_count,
# instance_type, image_uri) -- confirm the pinned SDK version before upgrading.
train_data = session.s3_input(s3_train_data, distribution='FullyReplicated',
                              content_type='text/plain', s3_data_type='S3Prefix')
validation_data = session.s3_input(s3_validation_data, distribution='FullyReplicated',
                                   content_type='text/plain', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
instance_type = 'ml.m4.xlarge'
estimator = Estimator(role=role,
                      train_instance_count=1,
                      train_instance_type=instance_type,
                      image_name=ecr_image,
                      hyperparameters=hyperparameters)
# Launch the training job with the S3 train/validation channels
estimator.fit(data_channels)  # data_channels
# -
# # Create MXNet Serving Container
# !git clone https://github.com/aws/sagemaker-mxnet-serving-container.git
# !cd sagemaker-mxnet-serving-container
# !git checkout v1.1.3
# !python setup.py sdist
# !cp dist/sagemaker_mxnet_serving_container-1.1.3.tar.gz docker/1.4.1/py3/sagemaker_mxnet_serving_container.tar.gz
# !cd docker/1.4.1/py3/
# !docker build -t preprod-mxnet-serving:1.4.1-cpu-py3 -f Dockerfile.cpu .
# !cd ../../../
# !pip install -e .[test]
# !tox test/unit
# May fail
# !tox test/integration/local -- --docker-base-name preprod-mxnet-serving \
# --tag 1.4.1-cpu-py3 \
# --py-version 3 \
# --framework-version 1.4.1 \
# --processor cpu
# upload container to ECS
# create-repository in ECR manually
# !$(aws ecr get-login --region ap-northeast-1 --no-include-email)
# !docker tag preprod-mxnet-serving:1.4.1-cpu-py3 579019700964.dkr.ecr.ap-northeast-1.amazonaws.com/preprod-mxnet-serving:1.4.1-cpu-py3
# !docker push 579019700964.dkr.ecr.ap-northeast-1.amazonaws.com/preprod-mxnet-serving:1.4.1-cpu-py3
# May fail
# !tox test/integration/sagemaker -- --aws-id 579019700964 \
# --docker-base-name preprod-mxnet-serving \
# --instance-type ml.m4.xlarge \
# --tag 1.4.1-cpu-py3
# # Create TextClassification Serving Docker
# +
# !cd gluon-nlp/model_zoo/text_classification/
# create serve
# create Dockerfile.serve
# BUG FIX: shell commands must stay behind `!` escapes -- left bare they are
# Python SyntaxErrors.
# !chmod +x build_and_push.sh
# !./build_and_push.sh text-classification-serve serve
# !docker run text_classification_serve serve
| advanced_functionality/gluonnlp_byom/gluonnlp_byom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Author: <NAME> (<EMAIL>) - September 9, 2021
# # Data Analysis of COVID-19 in the World and ASEAN
# ### Data Loading
# Import libraries
import numpy as np
import pandas as pd

# Show every column when printing data frames
pd.set_option("display.max_columns", None)
# +
# Import dataset
covid19_url = "https://dqlab.id/data/covid19_worldwide_2020.json"
covid19 = pd.read_json(covid19_url)
# BUG FIX: DataFrame.shape is (rows, columns); the original message labelled
# the row count as "columns" and vice versa.
print("Dataset size: %d rows and %d columns.\n" % covid19.shape)
print("Top 5 data:\n",covid19.head(5))
# -
# ### Reformat Data Frame
print("Information of initial data frame :")
covid19.info()
# `date` column already of type numpy.datetime64[ns], then this column can be set as index for **covid19** data frame.
# +
# Use the (sorted) record date as the index so date-based slicing works later.
covid19 = covid19.set_index("date").sort_index()
print("\nInformation of data frame after reformatting:")
covid19.info()
# -
# ### Handling Missing Values
# Next, we will eliminate rows from data that are detected have missing values.
# +
print("The number of Missing Values for each columns:")
print(covid19.isna().sum())
# Drop (not impute) every row containing a missing value.
covid19.dropna(inplace=True)
# BUG FIX: the original message said "after imputation", but the code drops
# rows rather than imputing values.
print("\nThe number of Missing Values for each columns after dropping rows:")
print(covid19.isna().sum())
# -
# ### Countries Data Loading
countries_url = "https://dqlab.id/data/country_details.json"
countries = pd.read_json(countries_url)
print(countries.head())
# ### Merge Covid19 Data and Countries Data
# Join on geo_id; the date index is reset before the merge and restored after
covid_merge = pd.merge(covid19.reset_index(), countries, on="geo_id").set_index("date")
print(covid_merge.head())
# Note: **covid19** data frame has an index on the `date` column, so it needs .reset_index()). After the merge, the index can be set back to the `date` column.
# ### Calculating Fatality Ratio
# Fatality ratio can be calculated by dividing between the `deaths` and `confirmed_cases` columns.
covid_merge["fatality_ratio"] = covid_merge["deaths"]/covid_merge["confirmed_cases"]
print(covid_merge.head())
# ### Countries with the Highest Fatality Ratio
top_20_fatality_rate = covid_merge.sort_values(by='fatality_ratio', ascending=False).head(20)
print(top_20_fatality_rate[["geo_id","country_name","fatality_ratio"]])
# ### The Highest Fatality Ratio in August 2020
# Aggregate case counts per country over August 2020
covid_merge_august = covid_merge.loc["2020-08"].groupby("country_name").sum()
# Recompute the ratio from the monthly totals (not by averaging daily ratios)
covid_merge_august["fatality_ratio"] = covid_merge_august["deaths"]/covid_merge_august["confirmed_cases"]
# Countries with the highest fatality ratio in august
top_20_fatality_rate_on_august = covid_merge_august.sort_values(by="fatality_ratio", ascending=False).head(20)
print("Countries with the Highest Fatality Ratio in August 2020:\n",top_20_fatality_rate_on_august["fatality_ratio"])
# ### Visualization of the Country with the Highest Fatality Ratio in August 2020
# Import libraries
import matplotlib.pyplot as plt
import seaborn as sns

# Horizontal bar chart; sorting ascending puts the largest ratio on top
plt.figure(figsize=(8,8))
top_20_fatality_rate_on_august["fatality_ratio"].sort_values().plot(kind="barh", color="steelblue")
plt.title("Top 20 Highest Fatality Rate Countries", fontsize=18, color="k")
plt.xlabel("Fatality Rate", fontsize=14)
plt.ylabel("Country Name", fontsize=14)
plt.grid(axis="x")
plt.tight_layout()
plt.show()
# It can be seen that Yemen has the largest fatality ratio compared to other countries in August 2020.
# ### COVID-19 Case in ASEAN
# +
asean_country_id = ["ID", "MY", "SG", "TH", "VN"]
# One boolean mask per country, OR-ed together via a row-wise sum cast to bool
filter_list = [(covid_merge["geo_id"]==country_id).to_numpy() for country_id in asean_country_id]
filter_array = np.column_stack(filter_list).sum(axis=1, dtype="bool")
covid_merge_asean = covid_merge[filter_array].sort_index()
print("Check unique value in column 'country_name':", covid_merge_asean["country_name"].unique())
print(covid_merge_asean.head())
# -
# ### When was the First Case of COVID-19 Popped Up in ASEAN?
print("The first case popped up in ASEAN countries:")
for country_id in asean_country_id:
    asean_country = covid_merge_asean[covid_merge_asean["geo_id"]==country_id]
    # Earliest row with at least one confirmed case (the index is date-sorted)
    first_case = asean_country[asean_country["confirmed_cases"]>0][["confirmed_cases","geo_id","country_name"]]
    print(first_case.head(1))
# ### Covid-19 cases in March 2020 onward
covid_merge_asean_march_onward = covid_merge_asean[covid_merge_asean.index>="2020-03-01"]
print(covid_merge_asean_march_onward.head())
# ### Visualization of COVID-19 Cases in ASEAN
# Daily confirmed cases per country, from March 2020 onward
plt.figure(figsize=(16,8))
sns.lineplot(data=covid_merge_asean_march_onward,
             x=covid_merge_asean_march_onward.index,
             y="confirmed_cases",
             hue="country_name",
             linewidth=2)
plt.xlabel('Record Date', fontsize=14)
plt.ylabel('Total Cases', fontsize=14)
plt.title('Comparison of COVID19 Cases in 5 ASEAN Countries', color="k", fontsize=18)
plt.grid()
plt.tight_layout()
plt.show()
# It can be seen that Indonesia has experienced a large increase in Covid-19 cases compared to other ASEAN countries. It is also seen that the country that has the lowest increase in the number of Covid-19 cases are Vietnam and Thailand.
| Data Analysis of COVID-19 in the World and ASEAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import all required libraries
import pandas as pd
from pandas import *
import numpy
from datetime import datetime

# Initialize values
env = None
sample_size = None
# Set env: "test" runs locally and scores against a held-out split;
# "prod" writes Kaggle submission files.
env = "prod"
# NOTE(review): this line immediately overrides the one above, so the
# notebook is currently pinned to "test" -- remove it for a prod run.
env = "test"
# Number of differently-seeded LGBM models to majority-vote
nb_models = 1
# Number of value on which to train, if null, train on all value
sample_size = None
# Size of the locally held-out test split (test env only)
test_size = 1000
# +
# Read training data + test data.
# BUG FIX: `pandas` itself was never bound (this file imports `pandas as pd`
# plus `from pandas import *`, neither of which exports the name `pandas`),
# so the original `pandas.read_json(...)` raised NameError. Also fixed the
# "contais" typo in the message.
df_data_input = pd.read_json("../input/train.json")
df_test_input = pd.read_json("../input/test.json")
df_sample_input = pd.read_csv("../input/sample_submission.csv")
print("df_data contains {0} elements".format(df_data_input.count()))
# Display basic information
display(df_data_input.head(3))
display(df_test_input.head(3))
display(df_sample_input.head(2))
# +
# Build the working frames. In the local "test" env a held-out test set is
# carved out of the training data; in "prod" we predict on the real test file.
if env == "test":
    if sample_size is not None and sample_size > 0:
        df_data = df_data_input.sample(sample_size)
    else:
        df_data = df_data_input
    df_test = df_data_input.sample(test_size)
    # Removing all df_test from df_data to ensure not train with test data
    df_common = df_data.merge(df_test,on=['id'])
    #display(df_common)
    df_data = df_data[(~df_data.id.isin(df_common.id))]
else:
    # The real test file has no labels; add a placeholder cuisine column so
    # both envs share the same schema downstream.
    df_test = df_test_input
    df_test['cuisine'] = "todo"
    if sample_size is not None and sample_size > 0:
        df_data = df_data_input.sample(sample_size)
    else:
        df_data = df_data_input
# FIX: "contais" -> "contains" typo in the progress message.
print("df_data contains {0} elements".format(df_data.count()))
# Display basic information
display(df_data.head(3))
display(df_test.head(3))
# +
# Extract columns values of ingredients to multiple columns with boolean
import re
def preprocess_dataframe(df1, df2):
    """Count-encode the `ingredients` lists of both frames.

    Both frames are pooled so train and test end up with the same ingredient
    columns; each frame is returned with one numeric column per cleaned
    ingredient and without the original `ingredients` column.
    """
    count1 = len(df1)
    count2 = len(df2)
    print("count1: {0}".format(count1))
    print("count2: {0}".format(count2))
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0 -- use pd.concat instead (same result).
    total_df = pd.concat([df1, df2], ignore_index=True)
    # One {id, cleaned ingredient} record per ingredient occurrence.
    d_list = []
    for index, row in total_df.iterrows():
        for value in row['ingredients']:
            # Strip digits, then every 's'. NOTE(review): .replace('s', '')
            # removes *all* 's' characters, not just trailing plurals --
            # confirm this is intended before changing it.
            new_value = re.sub(r'[0-9]+', '', value)
            new_value = new_value.replace('s', '')
            d_list.append({'id': row['id'],
                           'value': new_value})
    total_df = pd.concat([total_df, pd.DataFrame(d_list)], ignore_index=True)
    # Count occurrences per id and pivot ingredients into columns (0 if
    # absent). The original rows carry NaN in `value` and are dropped by
    # value_counts.
    total_df = total_df.groupby('id')['value'].value_counts()
    total_df = total_df.unstack(level=-1).fillna(0)
    # Merge the ingredient-count columns back onto each frame by id.
    df1 = df1.merge(total_df, left_on='id', right_on='id', how='inner')
    df2 = df2.merge(total_df, left_on='id', right_on='id', how='inner')
    # We do not need the ingredients column now, so, we can remove it
    df1 = df1.drop(columns=['ingredients'])
    df2 = df2.drop(columns=['ingredients'])
    return df1, df2
# Build the encoded train/test frames (they share the same ingredient columns).
df_data_1, df_test_1 = preprocess_dataframe(df_data, df_test)
def category_to_number(df):
    """Add a numeric `code_cuisine` column and return (df, unique mapping)."""
    codes = df.cuisine.astype("category").cat.codes
    df["code_cuisine"] = codes
    mapping = df[["cuisine", "code_cuisine"]].drop_duplicates()
    return df, mapping
#df_data_1, code_mapping = category_to_number(df_data_1)
#df_test_1, empty = category_to_number(df_test_1)
# Sanity-check the frames before and after ingredient encoding.
display(df_data.head(5))
display(df_data_1.head(5))
# +
# Generate our training/validation datasets
from sklearn import model_selection

# Name of the result column
result_cols = ['cuisine']
result_excl_cols = ['cuisine_']
# '' matches every column name, so selection is driven purely by the
# exclusion list below
input_cols = [
    ''
]
input_excl_cols = ['ingredients', 'cuisine', 'code_cuisine']
# Removing input_cols = ['store', 'item',
#     dom, cw,
# Train on everything
# Get the final values
def get_values(df, cols=(), excl_cols=()):
    """Return `df.values` restricted to the selected columns, sorted by name.

    A column is kept when its name starts with one of the prefixes in *cols*
    and does not start with any prefix in *excl_cols*. Sorting the surviving
    columns guarantees train and test matrices share the same column order.
    """
    # FIX: tuple defaults instead of the original mutable `[]` defaults.
    # str.startswith accepts a tuple of prefixes, replacing the original
    # nested loops; callers may still pass lists.
    keep = tuple(cols)
    excl = tuple(excl_cols)
    cols_to_drop = []
    for column in df.columns.values:
        # startswith(()) is False, so an empty keep/excl means "match none" --
        # the same behavior as the original nested loops.
        excluded = bool(excl) and column.startswith(excl)
        wanted = not excluded and column.startswith(keep) if keep else False
        if not wanted:
            cols_to_drop.append(column)
    df = df.drop(columns=cols_to_drop)
    # Same order for both training and testing set
    df = df[sorted(df.columns.values)]
    return df.values
# Build the numeric matrices; the sorting inside get_values keeps the train
# and test column order identical.
X_train = get_values(df_data_1, input_cols, input_excl_cols)
X_test = get_values(df_test_1, input_cols, input_excl_cols)
Y_train = get_values(df_data_1, result_cols, result_excl_cols).ravel()
# In test env, we calculate it for the test only
if env == "test":
    Y_test = get_values(df_test_1, result_cols, result_excl_cols).ravel()
# +
# Normalize the features. The scaler is fit on the training data only so no
# information from the test set leaks into the transform.
from sklearn.preprocessing import StandardScaler

# NOTE: the original also built `X_all = [x + y for x, y in zip(X_train,
# X_test)]` here; it was never used anywhere (and zip would silently
# truncate to the shorter matrix), so that dead code has been removed.
scaler = StandardScaler()
# Don't cheat - fit only on training data
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# -
# Custom function to calculate the score
def get_score(Y_validation, Y_validation_predict):
    """Return the accuracy of the predictions as a percentage (0-100).

    Raises ZeroDivisionError on empty input, like the original loop did.
    (zip truncates to the shorter sequence where the original index loop
    would have raised IndexError on a short prediction list.)
    """
    nb_success = sum(
        truth == pred
        for truth, pred in zip(Y_validation, Y_validation_predict)
    )
    return nb_success / len(Y_validation) * 100
# +
# Import algorithm
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import *
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
import lightgbm as lgbm

# Candidate models: currently only LGBM classifiers, seeded differently so
# their predictions can be majority-voted in the last cell.
models = []
#models.append(('LogisticRegression', LogisticRegression()))
#models.append(('KNeighborsClassifier', KNeighborsClassifier()))
#models.append(('LinearDiscriminantAnalysis', LinearDiscriminantAnalysis()))
#models.append(('GaussianNB', GaussianNB()))
#models.append(('SVC', SVC()))
for i in range(5, 5 + nb_models):
    # training with early stop
    models.append(('lgbm_{0}'.format(i),
                   lgbm.sklearn.LGBMClassifier(boosting_type= 'gbdt',
                                               random_state=i*10000,
                                               #objective = 'binary',
                                               #silent = True,
                                               #max_depth = -1,
                                               #min_data_in_leaf=1,
                                               #max_bin = 512,
                                               #subsample_for_bin = 200,
                                               #subsample = 1,
                                               #subsample_freq = 1,
                                               #min_split_gain = 0.5,
                                               #min_child_weight = 1,
                                               #min_child_samples = 5,
                                               #scale_pos_weight = 1,
                                               # Updated from 'nthread'
                                               n_jobs = 7 )
                   ))
# High value until first model get solved
best_model = "UNKNOWN"
res = []  # one prediction array per model, consumed by the voting cell below
# Testing all models, one by one
for name, model in models:
    print("Executing for model {0}".format(name))
    time_start = datetime.now()
    # Training the model
    model.fit(X_train, Y_train)
    print("Finish fit for {0}".format(name))
    Y_test_result = model.predict(X_test)
    res.append(Y_test_result)
    if env == "test":
        # We can calculate the avg error
        score = get_score(Y_test, Y_test_result)
        print("Model {0} got score of {1}, time: {2}".format(name, score, datetime.now() - time_start))
    else:
        # Let's write an output file, with the name of the model
        print("Writing output file {0}.csv for model {0}".format(name))
        df_test['cuisine'] = Y_test_result
        result_df = df_test[['id', 'cuisine']]
        result_df['cuisine'] = Y_test_result
        result_df.to_csv("{0}.csv".format(name), index=False)
# +
# Combine the per-model predictions in `res` by majority vote, then either
# score the ensemble (test env) or write the merged submission file.
from collections import Counter

final_res = []
nb_variable = len(res[0])
for variable in range(nb_variable):
    # Majority vote across models. Counter.most_common preserves insertion
    # order on ties, matching the original "first strictly-greater" loop.
    votes = Counter(res[i][variable] for i in range(len(res)))
    final_res.append(votes.most_common(1)[0][0])
if env == "test":
    # We can calculate the avg error
    score = get_score(Y_test, final_res)
    print("avg model got score of {0}".format(score))
else:
    # FIX: the original called .format(name) on a string with no placeholder
    # (a leftover from the per-model loop) -- a confusing no-op.
    print("Writing output file merged.csv")
    df_test['cuisine'] = final_res
    result_df = df_test[['id', 'cuisine']]
    result_df['cuisine'] = final_res
    result_df.to_csv("merged.csv", index=False)
| whatscooking/notebook/lgbm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab: Transfer Learning
#
# Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers.
#
# ### GPU usage
# In our previous examples in this lesson, we've avoided using GPU, but this time around you'll have the option to enable it. You do not need it on to begin with, but make sure anytime you switch from non-GPU to GPU, or vice versa, that you save your notebook! If not, you'll likely be reverted to the previous checkpoint.
#
# We also suggest only using the GPU when performing the (mostly minor) training below - you'll want to conserve GPU hours for your Behavioral Cloning project coming up next!
# +
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights. We are setting up our model here.
# include_top=False drops the final Dense + GlobalAveragePooling head, which is
# also what allows the non-default input_shape below.
inception = InceptionV3(weights=weights_flag, include_top=False,
                        input_shape=(input_size,input_size,3))
"""incepton is equivalent to our model variable that we had been using till now"""
# -
# We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).
#
# In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes for each ImageNet class is dropped, as well as a Global Average Pooling layer.
#
# ### Pre-trained with frozen weights
# To start, we'll see how an ImageNet pre-trained model with all weights frozen in the InceptionV3 model performs. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).
#
# You can freeze layers by setting `layer.trainable` to False for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
if freeze_flag == True:
    ## TODO: Iterate through the layers of the Inception model
    ## loaded above and set all of them to have trainable = False
    # Freezing keeps the ImageNet weights fixed; only the new head will train.
    for layer in inception.layers: #our model is inception here
        layer.trainable = False
# ### Dropping layers
# You can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
# Prints every layer with its output shape and parameter count.
inception.summary()
# In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer, and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:
#
# ```
# inception.layers.pop()
# ```
#
# Note that `pop()` works from the end of the model backwards.
# It's important to note two things here:
# 1. How many layers you drop is up to you, typically. We dropped the final two already by setting `include_top` to False in the original loading of the model, but you could instead just run `pop()` twice to achieve similar results. (*Note:* Keras requires us to set `include_top` to False in order to change the `input_shape`.) Additional layers could be dropped by additional calls to `pop()`.
# 2. If you make a mistake with `pop()`, you'll want to reload the model. If you use it multiple times, the model will continue to drop more and more layers, so you may need to check `model.summary()` again to check your work.
# ### Adding new layers
#
# Now, you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently, in that instead of using `model.add()`, you explicitly tell the model which previous layer to attach to the current layer. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet).
#
# For example, if you had a previous layer named `inp`:
# ```
# x = Dropout(0.2)(inp)
# ```
# is how you would attach a new dropout layer `x`, with it's input coming from a layer with the variable name `inp`.
#
# We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images of 10 classes. We need to use Keras's `Input` function to do so, and then we want to re-size the images up to the `input_size` we specified earlier (139x139).
# +
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Kera's Lambda layer & attach to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
"""Last step mein ham mostly input layer ke aage model ko append kar rahe hain"""
# +
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
# Pools each feature map down to one value per channel: (H, W, C) -> (C,).
out1 = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use `out`
## as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
# 512-unit hidden layer, then a 10-way softmax head for the CIFAR-10 classes.
out2 = Dense(512, activation='relu')(out1)
predictions = Dense(10, activation='softmax')(out2)
# -
# We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
# +
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
# The functional Model ties the CIFAR input placeholder to the softmax head.
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
# categorical_crossentropy matches the one-hot labels built later in the notebook.
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
# -
# -
# Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before.
#
# Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here - Keras callbacks.
#
# ### Keras Callbacks
# Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model, or even stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues.
#
# There's two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names may suggest, model checkpoint saves down the best model so far based on a given metric, while early stopping will end training before the specified number of epochs if the chosen metric no longer improves after a given amount of time.
#
# To set these callbacks, you could do the following:
# ```
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# ```
# This would save a model to a specified `save_path`, based on validation loss, and only save down the best models. If you set `save_best_only` to `False`, every single epoch will save down another version of the model.
# ```
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
# ```
# This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early.
#
#
# You still need to actually feed these callbacks into `fit()` when you train the model (along with all other relevant data to feed into `fit`):
# ```
# model.fit(callbacks=[checkpoint, stopper])
# ```
# ## GPU time
#
# The rest of the notebook will give you the code for training, so you can turn on the GPU at this point - but first, **make sure to save your jupyter notebook**. Once the GPU is turned on, it will load whatever your last notebook checkpoint is.
#
# While we suggest reading through the code below to make sure you understand it, you can otherwise go ahead and select *Cell > Run All* (or *Kernel > Restart & Run All* if already using GPU) to run through all cells in the notebook.
# +
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels.
# Fit the binarizer on the training labels only, then re-use the fitted
# encoder on the validation labels so both splits share one class mapping
# (calling fit_transform on the validation set re-fits the encoder).
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_val = label_binarizer.transform(y_val)
# Shuffle the training & validation data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 10,000 images for speed reasons
# And only the first 2,000 images from the validation set
X_train = X_train[:10000]
y_one_hot_train = y_one_hot_train[:10000]
X_val = X_val[:2000]
y_one_hot_val = y_one_hot_val[:2000]
# -
# You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the below - you can also add additional image augmentation through this function, although we are skipping that step here so you can potentially explore it in the upcoming project.
# +
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
# preprocess_input applies the same pixel scaling used during ImageNet pre-training.
if preprocess_flag == True:
    datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
    datagen = ImageDataGenerator()
    val_datagen = ImageDataGenerator()
# -
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we only are using 5 epochs to conserve GPU time
# NOTE(review): fit_generator is the Keras 2.0.x API; modern Keras uses model.fit.
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
                    steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
                    validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
                    validation_steps=len(X_val)/batch_size)
# As you may have noticed, CIFAR-10 is a fairly tough dataset. However, given that we are only training on a small subset of the data, only training for five epochs, and not using any image augmentation, the results are still fairly impressive!
#
# We achieved ~70% validation accuracy here, although your results may vary.
# ## [Optional] Test without frozen weights, or by training from scratch.
#
# Since the majority of the model was frozen above, training speed is pretty quick. You may also want to check out the training speed, as well as final accuracy, if you don't freeze the weights. Note that this can be fairly slow, so we're marking this as optional in order to conserve GPU time.
#
# If you do want to see the results from doing so, go back to the first code cell and set `freeze_flag` to `False`. If you want to completely train from scratch without ImageNet pre-trained weights, follow the previous step as well as setting `weights_flag` to `None`. Then, go to *Kernel > Restart & Run All*.
# ## Comparison
#
# So that you don't use up your GPU time, we've tried out these results ourselves as well.
#
# Training Mode | Val Acc @ 1 epoch | Val Acc @ 5 epoch | Time per epoch
# ---- | :----: | :----: | ----:
# Frozen weights | 65.5% | 70.3% | 50 seconds
# Unfrozen weights | 50.6% | 71.6% | 142 seconds
# No pre-trained weights | 19.2% | 39.2% | 142 seconds
#
# From the above, we can see that the pre-trained model with frozen weights actually began converging the fastest (already at 65.5% after 1 epoch), while the model re-training from the pre-trained weights slightly edged it out after 5 epochs.
#
# However, this does not tell the whole story - the training accuracy was substantially higher, nearing 87% for the unfrozen weights model. It actually began to overfit the data much more under this method. We would likely be able to counteract some of this issue by using data augmentation. On the flip side, the model using frozen weights could also have been improved by actually only freezing a portion of the weights; some of these are likely more specific to ImageNet classes as it gets later in the network, as opposed to the simpler features extracted early in the network.
#
# _(My notes: Selective freezing could be done in the loop that we wrote to disable all layers so that weights are not trained)._
#
# ### The Power of Transfer Learning
# Comparing the last line to the other two really shows the power of transfer learning. After five epochs, a model without ImageNet pre-training had only achieved 39.2% accuracy, compared to over 70% for the other two. As such, pre-training the network has saved substantial time, especially given the additional training time needed when the weights are not frozen.
#
# There is also evidence found in various research that pre-training on ImageNet weights will result in a higher overall accuracy than completely training from scratch, even when using a substantially different dataset.
| Transfer_Learning_Lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # PSF normalization
#
# Let us assume that we have reduced an observation, for which we have determined the PSF by stacking the flux of point-like sources. The PSF we obtain will not be as high S/N as the instrumental PSF that has been determined by the instrument team. Moreover, it is likely to be fattened due to some small pointing errors. We need to find out what fraction of a point-like flux the PSF we have determined represents. In order to do this, we use the growth curve of the theoretical PSF that has been determined by the instrument team, and compare it to the growth curve we measure from our own PSF.
#
#
#
# import what we will need.
# %matplotlib inline
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.io import ascii as asciiread
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy import special
from scipy import signal
from scipy import fftpack
# ## 2) Real data: MIPS observations
#
# We will look at a real stack of point sources in the MIPS ELAIS-S1 observations, and try to find its normalization factor.
#
# Let's load the stacked PSF:
# Load the stacked (empirical) PSF built from point sources in the field.
stackhd = fits.open('../data/ELAIS-S1/output_data/psf_hires.fits')
psf = stackhd[1].data
hd = stackhd[1].header
# Central pixel index of the PSF image.
# Builtin int() replaces np.int, which was deprecated in NumPy 1.20 and removed in 1.24.
cpix = int((hd['NAXIS1'] + 1) / 2.0)
rad = 40
plt.imshow(psf[cpix - rad - 1:cpix + rad, cpix - rad - 1:cpix + rad])
plt.colorbar()
resol = 0.5
# ## Read in MIPS 24 $\mathrm{\mu m}$ Instrumental PSF
# We take the instrumental PSF from [<NAME>'s webpage](http://www.astro.princeton.edu/~ganiano/Kernels/Ker_2017/PSF_FITS_Files/)
# Load the instrumental (calibration) MIPS 24um PSF.
insthd = fits.open('../../dmu26/data/ELAIS_N1/MIPS/PSF_Original_MIPS_24.fits.gz')
psf_inst_full = insthd[0].data
hdinst = insthd[0].header
# Display the header to inspect pixel scale and image size.
hdinst
# +
rad = 1000
# Centre pixel of the instrumental PSF image.
# Builtin int() replaces np.int (deprecated in NumPy 1.20, removed in 1.24).
cpix = int((hdinst['NAXIS1'] + 1) / 2.0)
plt.imshow(psf_inst_full[cpix - rad - 1:cpix + rad, cpix - rad - 1:cpix + rad])
#psf_inst=psf_inst_full[hdinst['CRPIX1']-rad-1:hdinst['CRPIX1']+rad,hdinst['CRPIX2']-rad-1:hdinst['CRPIX2']+rad]
psf_inst = psf_inst_full[cpix - rad - 1:cpix + rad, cpix - rad - 1:cpix + rad]
# -
# ### Normalise instrumental PSF such that integral=1
psf_inst=psf_inst/(np.sum(psf_inst))
# Now let's build the growthcurve for our PSFs.
# find the brightest pixel, it will be our center.
jmax, imax = np.unravel_index(np.argmax(psf), psf.shape)
jmax_inst, imax_inst = np.unravel_index(np.argmax(psf_inst), psf_inst.shape)
# +
# build the array of coordinates
x = np.arange(hd['NAXIS1'])
y = np.arange(hd['NAXIS2'])
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
# Offsets from the brightest pixel, converted to arcsec via the pixel scale (CDELT in deg).
xp = (xv-imax)*np.abs(hd['CDELT1'])*3600.
yp = (yv-jmax)*np.abs(hd['CDELT2'])*3600.
r = np.sqrt(xp**2 + yp**2)
x_inst = np.arange(1+rad*2)
y_inst = np.arange(1+rad*2)
xv_inst, yv_inst = np.meshgrid(x_inst, y_inst, sparse=False, indexing='xy')
# Instrumental pixel scale comes from the CD matrix (CD1_1, deg/pixel).
xp_inst = (xv_inst-imax_inst)*np.abs(hdinst['CD1_1']*3600.0)
yp_inst = (yv_inst-jmax_inst)*np.abs(hdinst['CD1_1']*3600.0)
r_inst = np.sqrt(xp_inst**2 + yp_inst**2)
# -
# build the growth curve
radii = np.unique(r)
encircled_flux = np.zeros(radii.shape)
nbpix = np.zeros(radii.shape)
# NOTE(review): this loop is O(n_radii * n_pixels); fine at this image size,
# but a sort + cumulative sum over r would be far faster if the PSFs grow.
for i, radius in enumerate(radii):
    idj, idi = np.where(r <= radius)
    nbpix[i] =len(idi)
    #multiply by ((np.abs(hd['CDELT1'])*3600.)**2)/4.25E10 as map is in units of MJy/sr
    encircled_flux[i] = np.sum(psf[idj, idi])*((np.abs(hd['CDELT1'])*3600.)**2)/4.25E10
# +
# Same growth curve for the instrumental PSF (already normalised to integral 1).
radii_inst = np.unique(r_inst)
encircled_flux_inst = np.zeros(radii_inst.shape)
nbpix_inst = np.zeros(radii_inst.shape)
for i, radius in enumerate(radii_inst):
    idj, idi = np.where(r_inst <= radius)
    nbpix_inst[i] =len(idi)
    encircled_flux_inst[i] = np.sum(psf_inst[idj, idi])
# -
# Cache the instrumental growth curve so it can be reused without recomputing.
np.savez('../data/MIPS_encircled_flux_inst',encircled_flux_inst=encircled_flux_inst ,radii_inst=radii_inst)
# +
plt.plot(radii, encircled_flux)
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
# -
# Looking at the shape of the encircled flux, it looks like the background level of our PSF is not zero. Let's check
# The median of the image border rows below is clearly non-zero, confirming a background offset.
print(np.median(psf[0:5,:]))
plt.plot(nbpix[50:], encircled_flux[50:])
plt.xlabel('Number of pixels')
plt.ylabel('Encircled flux')
# Lets do a linear fit to the outer part of the curve to determine the backgound
# Fit a line to encircled flux vs. pixel count in the outer region:
# the slope p[0] is the residual background per pixel.
p = np.polyfit(nbpix[5000:], encircled_flux[5000:], 1)
# Convert the slope back to map units (MJy/sr) by undoing the Jy conversion factor.
bkg=p[0]/(((np.abs(hd['CDELT1'])*3600.)**2)/4.25E10)
#bkg = p[0]/resol**2
print(bkg)
print(nbpix[5000:])
# Lets correct the psf and encircled flux
psf = psf - bkg
encircled_flux = encircled_flux - p[0]* nbpix
plt.plot(radii, encircled_flux)
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
# Our PSF does now behaves correctly.
#
# Now let us compare our growth curve with the encircled energy curve from the instrumental PSF.
# Compare the (peak-normalised) stacked growth curve against the calibration one.
plt.plot(radii_inst, encircled_flux_inst, label='Calibration')
plt.plot(radii, encircled_flux/np.max(encircled_flux), label='Our PSF')
plt.xlim([0, 100])
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
plt.legend()
# We will work below 30" where our PSF is well behaved
plt.plot(radii_inst, encircled_flux_inst, label='Calibration')
plt.plot(radii, encircled_flux/np.max(encircled_flux), label='Our PSF')
plt.xlim([0, 20])
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
plt.legend()
# We see that while the calibration curve still rises beyond 30", our PSF has reached a plateau. Let's note the calibration $C(r)$. Our PSF encirled energy is of the form:
#
# $E(r) = \alpha C(r \times \beta)$
#
# Where $\beta$ is the fattening of the PSF.
#
# We could take the derivative, but this is too noisy. Instead we use a brute-force approach.
# Seb's suggestion.. look at derivative!! Also see how correction parameters change as a function of where I do correction
# compute the derivatives
# Central differences of the two growth curves (stacked PSF vs. instrumental).
deriv_growth_psf = (encircled_flux[2:]/np.max(encircled_flux)-encircled_flux[0:-2]/np.max(encircled_flux))/(radii[2:]-radii[0:-2])
deriv_growth_psfcor = (encircled_flux_inst[2:]-encircled_flux_inst[0:-2])/(radii_inst[2:]-radii_inst[0:-2])
plt.plot(radii[1:-1], deriv_growth_psf)
plt.plot(radii_inst[1:-1], deriv_growth_psfcor)
plt.xlim([0,10])
plt.ylim(0,1)
# Re-plot the growth curves themselves for reference.
plt.plot(radii_inst, encircled_flux_inst, label='Calibration')
plt.plot(radii, encircled_flux/np.max(encircled_flux), label='Our PSF')
plt.xlim([0, 20])
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
plt.legend()
# Brute-force grid search for the radial stretch (rfactor) and flux scale
# (ffactor) that map the calibration curve onto our measured one.
rfactor = np.arange(1.,3., 1e-3)
ffactor = np.arange(0.5,2., 1e-3)
# work with the data points between 3 and 25"
idx, = np.where((radii > 2) & (radii < 10))
xv = radii[idx]
yv = encircled_flux[idx]/np.max(encircled_flux)
resid = np.zeros((len(rfactor), len(ffactor)))
for i, rf in enumerate(rfactor):
    tck = interpolate.splrep(radii_inst*rf,encircled_flux_inst , s=1)#changed s=0 to 1 as I was getting NaNs
    yfit = interpolate.splev(xv, tck, der=0)
    # (Removed a leftover per-iteration debug print: it emitted one line per
    # rfactor value, ~2000 lines of output.)
    for j, ff in enumerate(ffactor):
        resid[i, j] = np.sum((yv-yfit*ff)**2)
# Repeat the fit on a coarser grid while growing the outer radius, to check
# how stable the best-fit (rfactor, ffactor) pair is.
rfactor = np.arange(1., 3., 1e-2)
# Fixed a SyntaxError here: the original read np.arange(1..,3., 1e-2).
ffactor = np.arange(1., 3., 1e-2)
# work with the data points between 3 and 25"
for r in np.arange(3, 15):
    idx, = np.where((radii > 2) & (radii < r))
    xv = radii[idx]
    yv = encircled_flux[idx]/np.max(encircled_flux)
    resid = np.zeros((len(rfactor), len(ffactor)))
    for i, rf in enumerate(rfactor):
        tck = interpolate.splrep(radii_inst*rf,encircled_flux_inst , s=1)#changed s=0 to 1 as I was getting NaNs
        yfit = interpolate.splev(xv, tck, der=0)
        for j, ff in enumerate(ffactor):
            resid[i, j] = np.sum((yv-yfit*ff)**2)
    imin = np.argmin(resid)
    rmin, fmin = np.unravel_index(imin, resid.shape)
    print("rf = {:.3f}, ff = {:.3f}, residual = {:.3f}".format(rfactor[rmin], ffactor[fmin], resid[rmin, fmin]))
    print(np.max((psf/np.max(encircled_flux)/ffactor[fmin])))
# This shows a minimum, with some degeneracy.
# Map of the (log) residuals over the (rfactor, ffactor) grid.
plt.imshow(np.log(resid))
imin = np.argmin(resid)
rmin, fmin = np.unravel_index(imin, resid.shape)
print("rf = {:.3f}, ff = {:.3f}, residual = {:.3f}".format(rfactor[rmin], ffactor[fmin], resid[rmin, fmin]))
# Overlay the best-fit rescaled calibration curve on our normalised curve.
plt.plot(radii_inst*rfactor[rmin],encircled_flux_inst, label='Calibration')
plt.plot(radii, encircled_flux/np.max(encircled_flux)/ffactor[fmin], label='Our PSF')
plt.xlim([0, 30])
plt.xlabel('Radius [arcsec]')
plt.ylabel('Encircled flux')
plt.legend()
# The two curve overlap
rad=20
# Apply the best-fit flux normalisation to the background-corrected PSF.
psfok = (psf/np.max(encircled_flux)/ffactor[fmin])
# Builtin int() replaces np.int (deprecated in NumPy 1.20, removed in 1.24).
cpix = int((hd['NAXIS1']+1)/2.0)
# Encircled flux of the normalised PSF within 20" (converted from MJy/sr to Jy).
np.sum(psfok[cpix-rad-1:cpix+rad,cpix-rad-1:cpix+rad])*((np.abs(hd['CDELT1'])*3600.)**2)/4.25E10
# psfok is the PSF that a source of flux 1 Jy has in our data, and is to be used for source extraction.
# ## As units of map in MJy/sr, divide by 1E6
psfok=psfok/1.0E6
# ## Validation
# To check PSF is reasonable, lets look at a 24 micron source
# Sanity check: scale the PSF peak by a known source flux (8.79e-4 Jy) and
# compare with the observed 24um source displayed below.
print("Max PSF = {:.3f} MJy/sr, off pixel Max PSF = {:.3f} MJy/sr".format(psfok[cpix-1,cpix-1]*8.79E-04,psfok[cpix-2,cpix-2]*8.79E-04))
import aplpy
import seaborn as sns
sns.set_style("white")
cmap=sns.cubehelix_palette(8, start=.5, rot=-.75,as_cmap=True)
fig=aplpy.FITSFigure('../../dmu26/data/ELAIS-S1/MIPS/wp4_elais-s1_mips24_map_v1.0.fits.gz')
fig.recenter(8.65151,-43.71714, radius=0.002)
fig.show_colorscale(vmin=0.0,vmax=1.0,cmap=cmap)
fig.add_colorbar()
fig.colorbar.set_location('top')
# In summary, the PSF is within 10% of this source, and given noise and shape of source will add additional uncertainty this seems reasonable.
# # Create PSF fits file
# Write the normalised PSF back into the original HDU structure.
stackhd[1].data=psfok
stackhd.writeto('./data/dmu17_MIPS_ELAIS-S1_20180116.fits',overwrite=True)
| dmu17/dmu17_ELAIS-S1/normalize_MIPS_psf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.svm import SVR
import pandas_datareader.data as web
import pandas as pd
import datetime
import numpy as np
# +
# Date window for the historical price download.
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2013, 1, 27)
# +
tickers = ['AAPL', 'MSFT','NVDA',"INTC"]
# NOTE(review): the 'yahoo' backend of pandas-datareader has been unreliable /
# deprecated in recent releases - confirm this data source still works.
f = web.DataReader(tickers, 'yahoo', start, end)
# -
precios = f.loc["Adj Close"]
precios.head()
precios.sort_index(inplace=True)
# Use the first 30 business days of AAPL prices as the training series.
num =30
dates = pd.date_range('2010-01-04', periods=num,freq='B')
dates
precios.AAPL[dates].values
np.shape(dates),np.shape(precios.AAPL[dates].values)
# Reshape dates into the (n_samples, 1) feature matrix sklearn expects.
# NOTE(review): these are datetime64 values; newer sklearn versions require
# numeric features, so a conversion (e.g. to ordinals) may be needed.
Npdates = np.reshape(np.array(dates),(len(dates),1 ))
NPprecio = precios.AAPL[dates].values
# +
# svr_lin.fit?
# +
# Three SVR kernels fitted to the same series for comparison.
svr_lin = SVR(kernel='linear',C=1e3)
svr_poly = SVR(kernel='poly',C=1e3,degree = 2)
svr_rbf = SVR(kernel='rbf',C=1e3,gamma=0.1)
svr_lin.fit(Npdates,NPprecio)
svr_poly.fit(Npdates,NPprecio)
svr_rbf.fit(Npdates,NPprecio)
# +
# Plot the raw prices and the RBF / polynomial SVR fits (linear left commented out).
plt.scatter(Npdates,NPprecio,color='black',label="Data")
plt.plot(Npdates,svr_rbf.predict(Npdates),color='red',label='RBF model' )
#plt.plot(Npdates,svr_lin.predict(Npdates),color='green',label='Linear model' )
plt.plot(Npdates,svr_poly.predict(Npdates),color='blue',label='Polynomial model' )
plt.xlabel('Date')
plt.ylabel('Price')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
# +
# Re-create the three models with a weaker penalty (C=10) for a synthetic test.
svr_lin = SVR(kernel='linear',C=1e1)
svr_poly = SVR(kernel='poly',C=1e1,degree = 2)
svr_rbf = SVR(kernel='rbf',C=1e1,gamma=0.1)
# -
# Synthetic signal: sine plus decaying exponential, sampled at 50 points.
t = np.arange(0,50,1)
t.shape=(50,1)
datos = np.sin(t) + np.exp(-t)
datos.shape=(50,)
svr_lin.fit(t,datos)
svr_poly.fit(t,datos)
svr_rbf.fit(t,datos)
# +
plt.scatter(t,datos,color='black',label="Data")
plt.plot(t,svr_lin.predict(t),color='green',label="Lineal model" )
plt.plot(t,svr_poly.predict(t),color='blue',label="Polynomial model" )
plt.plot(t,svr_rbf.predict(t),color='red',label="RBF model" )
# +
# Extend to 55 points - past the 50-point training range - to show how each
# kernel extrapolates beyond the data it was fitted on.
num=55
t = np.arange(0,num,1)
t.shape=(num,1)
datos = np.sin(t) + np.exp(-t)
plt.plot(t,datos,color='black',label="Data")
plt.plot(t,svr_lin.predict(t),color='green',label="Lineal model" )
plt.plot(t,svr_poly.predict(t),color='blue',label="Polynomial model" )
plt.plot(t,svr_rbf.predict(t),color='red',label="RBF model" )
| SirajRaval_Ejemplos/Prediction/.ipynb_checkpoints/SVR-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 🛠 Integrating transformers with fastai for multiclass classification
# Before beginning the implementation, note that integrating ``transformers`` within ``fastai`` can be done in multiple different ways. For that reason, I decided to bring simple solutions, that are the most generic and flexible. More precisely, I try to make the minimum of modification in both libraries while making them compatible with the maximum amount of transformer architectures.
#
# Note that in addition to this NoteBook and the [Medium article](https://medium.com/p/fastai-with-transformers-bert-roberta-xlnet-xlm-distilbert-4f41ee18ecb2?source=email-29c8f5cf1dc4--writer.postDistributed&sk=119c3e5d748b2827af3ea863faae6376), I made another version available on my GitHub(TODO add link).
# ## Libraries Installation
# Before starting the implementation, you will need to install the ``fastai`` and ``transformers`` libraries. To do so, just follow the instructions [here](https://github.com/fastai/fastai/blob/master/README.md#installation) and [here](https://github.com/huggingface/transformers#installation).
#
# In Kaggle, the ``fastai`` library is already installed. So you just have to instal ``transformers`` with :
# !pip install transformers==2.5.1
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path
import os
import torch
import torch.optim as optim
import random
# fastai
from fastai import *
from fastai.text import *
from fastai.callbacks import *
# transformers
from transformers import PreTrainedModel, PreTrainedTokenizer, PretrainedConfig
from transformers import BertForSequenceClassification, BertTokenizer, BertConfig
from transformers import RobertaForSequenceClassification, RobertaTokenizer, RobertaConfig
from transformers import XLNetForSequenceClassification, XLNetTokenizer, XLNetConfig
from transformers import XLMForSequenceClassification, XLMTokenizer, XLMConfig
from transformers import DistilBertForSequenceClassification, DistilBertTokenizer, DistilBertConfig
# -
# The current versions of the fastai and transformers libraries are respectively 1.0.58 and 2.5.1.
import fastai
import transformers
print('fastai version :', fastai.__version__)
print('transformers version :', transformers.__version__)
# ## 🎬 The example task
# The chosen task is a multi-class text classification on [Movie Reviews](https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/overview).
#
# For each text movie review, the model has to predict a label for the sentiment. We evaluate the outputs of the model on classification accuracy. The sentiment labels are:
# * 0 → Negative
# * 1 → Somewhat negative
# * 2 → Neutral
# * 3 → Somewhat positive
# * 4 → Positive
#
# The data is loaded into a ``DataFrame`` using ``pandas``.
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
train = pd.read_csv("../input/comve-subtask-a/train_B.csv")
test = pd.read_csv("../input/comve-subtask-a/test_B.csv")
print(train.shape,test.shape)
train.head()
# -
# It is worth noting that in the dataset there are no individual movie reviews but rather phrases taken out of context and split into smaller parts, each with an assigned sentiment label.
# ## Main transformers classes
# In ``transformers``, each model architecture is associated with 3 main types of classes:
# * A **model class** to load/store a particular pre-train model.
# * A **tokenizer class** to pre-process the data and make it compatible with a particular model.
# * A **configuration class** to load/store the configuration of a particular model.
#
# For example, if you want to use the Bert architecture for text classification, you would use [``BertForSequenceClassification``](https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification) for the **model class**, [``BertTokenizer``](https://huggingface.co/transformers/model_doc/bert.html#berttokenizer) for the **tokenizer class** and [``BertConfig``](https://huggingface.co/transformers/model_doc/bert.html#bertconfig) for the **configuration class**.
#
# In order to switch easily between classes - each related to a specific model type - I created a dictionary that allows loading the correct classes by just specifying the correct model type name.
# Map each supported architecture name to its (model, tokenizer, config)
# classes, so the rest of the notebook stays architecture-agnostic.
MODEL_CLASSES = dict(
    bert=(BertForSequenceClassification, BertTokenizer, BertConfig),
    xlnet=(XLNetForSequenceClassification, XLNetTokenizer, XLNetConfig),
    xlm=(XLMForSequenceClassification, XLMTokenizer, XLMConfig),
    roberta=(RobertaForSequenceClassification, RobertaTokenizer, RobertaConfig),
    distilbert=(DistilBertForSequenceClassification, DistilBertTokenizer, DistilBertConfig),
)
# You will see later, that those classes share a common class method ``from_pretrained(pretrained_model_name, ...)``. In our case, the parameter ``pretrained_model_name`` is a string with the shortcut name of a pre-trained model/tokenizer/configuration to load, e.g ``'bert-base-uncased'``. We can find all the shortcut names in the transformers documentation [here](https://huggingface.co/transformers/pretrained_models.html#pretrained-models).
# +
# Parameters
seed = 42
use_fp16 = False
bs = 32
# Pick the architecture by leaving exactly one (model_type, pretrained_model_name)
# pair uncommented; the pair must match an entry in MODEL_CLASSES.
# model_type = 'roberta'
# pretrained_model_name = 'roberta-base'
model_type = 'bert'
pretrained_model_name='bert-base-uncased'
# model_type = 'distilbert'
# pretrained_model_name = 'distilbert-base-uncased'
#model_type = 'xlm'
#pretrained_model_name = 'xlm-clm-enfr-1024'
# model_type = 'xlnet'
# pretrained_model_name = 'xlnet-base-cased'
# -
# Resolve the three classes for the chosen architecture.
model_class, tokenizer_class, config_class = MODEL_CLASSES[model_type]
# Print the available values for ``pretrained_model_name`` (shortcut names) corresponding to the ``model_type`` used.
model_class.pretrained_model_archive_map.keys()
# It is worth noting that in this case, we use the ``transformers`` library only for a multi-class text classification task. For that reason, this tutorial integrates only the transformer architectures that have a model for sequence classification implemented. These model types are :
# * BERT (from Google)
# * XLNet (from Google/CMU)
# * XLM (from Facebook)
# * RoBERTa (from Facebook)
# * DistilBERT (from HuggingFace)
#
# However, if you want to go further - by implementing another type of model or NLP task - this tutorial is still an excellent starter.
# ## Util function
# Function to set the seed for generating random numbers.
def seed_all(seed_value):
    """Seed every RNG used in the notebook (Python, NumPy, PyTorch) so runs
    are reproducible; also forces deterministic cuDNN kernels on GPU."""
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if not torch.cuda.is_available():
        return
    # GPU-side generators plus the cuDNN determinism knobs.
    torch.cuda.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_all(seed)
# ## Data pre-processing
#
# To match pre-training, we have to format the model input sequence in a specific format.
# To do so, you have to first **tokenize** and then **numericalize** the texts correctly.
# The difficulty here is that each pre-trained model, that we will fine-tune, requires exactly the same specific pre-process - **tokenization** & **numericalization** - than the pre-process used during the pre-train part.
# Fortunately, the **tokenizer class** from ``transformers`` provides the correct pre-process tools that correspond to each pre-trained model.
#
# In the ``fastai`` library, data pre-processing is done automatically during the creation of the ``DataBunch``.
# As you will see in the ``DataBunch`` implementation, the **tokenizer** and **numericalizer** are passed in the processor argument under the following format :
#
# ``processor = [TokenizeProcessor(tokenizer=tokenizer,...), NumericalizeProcessor(vocab=vocab,...)]``
#
# Let's first analyse how we can integrate the ``transformers`` **tokenizer** within the ``TokenizeProcessor`` function.
#
# ### Custom Tokenizer
# This part can be a little bit confusing because a lot of classes are wrapped in each other and with similar names.
# To resume, if we look attentively at the ``fastai`` implementation, we notice that :
# 1. The [``TokenizeProcessor`` object](https://docs.fast.ai/text.data.html#TokenizeProcessor) takes as ``tokenizer`` argument a ``Tokenizer`` object.
# 2. The [``Tokenizer`` object](https://docs.fast.ai/text.transform.html#Tokenizer) takes as ``tok_func`` argument a ``BaseTokenizer`` object.
# 3. The [``BaseTokenizer`` object](https://docs.fast.ai/text.transform.html#BaseTokenizer) implement the function ``tokenizer(t:str) → List[str]`` that take a text ``t`` and returns the list of its tokens.
#
# Therefore, we can simply create a new class ``TransformersBaseTokenizer`` that inherits from ``BaseTokenizer`` and overwrite a new ``tokenizer`` function.
#
class TransformersBaseTokenizer(BaseTokenizer):
    """Adapter exposing a HuggingFace ``PreTrainedTokenizer`` through the
    fast.ai ``BaseTokenizer`` interface."""

    def __init__(self, pretrained_tokenizer: PreTrainedTokenizer, model_type = 'bert', **kwargs):
        self._pretrained_tokenizer = pretrained_tokenizer
        self.max_seq_len = pretrained_tokenizer.max_len
        self.model_type = model_type

    def __call__(self, *args, **kwargs):
        # fast.ai treats the tokenizer as a factory; the instance is stateless
        # enough to serve as its own factory.
        return self

    def tokenizer(self, t: str) -> List[str]:
        """Tokenize ``t``, truncate to the model input size and add the
        special tokens in the order each architecture expects."""
        cls_tok = self._pretrained_tokenizer.cls_token
        sep_tok = self._pretrained_tokenizer.sep_token
        budget = self.max_seq_len - 2  # reserve room for the two special tokens
        if self.model_type in ['roberta']:
            # RoBERTa's BPE expects a leading space on the raw input string.
            body = self._pretrained_tokenizer.tokenize(t, add_prefix_space=True)[:budget]
            return [cls_tok] + body + [sep_tok]
        body = self._pretrained_tokenizer.tokenize(t)[:budget]
        if self.model_type in ['xlnet']:
            # XLNet places its special tokens at the end of the sequence.
            return body + [sep_tok] + [cls_tok]
        return [cls_tok] + body + [sep_tok]
# Build the HuggingFace tokenizer for the chosen checkpoint and wrap it for
# fast.ai. pre_rules/post_rules are emptied: HuggingFace handles all
# pre-processing, so fastai's default text rules must not run on top.
transformer_tokenizer = tokenizer_class.from_pretrained(pretrained_model_name)
transformer_base_tokenizer = TransformersBaseTokenizer(pretrained_tokenizer = transformer_tokenizer, model_type = model_type)
fastai_tokenizer = Tokenizer(tok_func = transformer_base_tokenizer, pre_rules=[], post_rules=[])
# In this implementation, be careful about 3 things:
# 1. As we are not using RNN, we have to limit the sequence length to the model input size.
# 2. Most of the models require special tokens placed at the beginning and end of the sequences.
# 3. Some models like RoBERTa require a space to start the input string. For those models, the encoding methods should be called with ``add_prefix_space`` set to ``True``.
#
# Below, you can find the resume of each pre-process requirement for the 5 model types used in this tutorial. You can also find this information on the [HuggingFace documentation](https://huggingface.co/transformers/) in each model section.
#
# bert: [CLS] + tokens + [SEP] + padding
#
# roberta: [CLS] + prefix_space + tokens + [SEP] + padding
#
# distilbert: [CLS] + tokens + [SEP] + padding
#
# xlm: [CLS] + tokens + [SEP] + padding
#
# xlnet: padding + tokens + [SEP] + [CLS]
#
# It is worth noting that we don't add padding in this part of the implementation.
# As we will see later, ``fastai`` manage it automatically during the creation of the ``DataBunch``.
# ### Custom Numericalizer
#
# In ``fastai``, [``NumericalizeProcessor`` object](https://docs.fast.ai/text.data.html#NumericalizeProcessor) takes as ``vocab`` argument a [``Vocab`` object](https://docs.fast.ai/text.transform.html#Vocab).
# From this analyse, we suggest two ways to adapt the fastai numericalizer:
# 1. You can, as described in [Dev Sharma's article](https://medium.com/analytics-vidhya/using-roberta-with-fastai-for-nlp-7ed3fed21f6c) (Section *1. Setting Up the Tokenizer*), retrieve the list of tokens and create a ``Vocab`` object.
# 2. Create a new class ``TransformersVocab`` that inherits from ``Vocab`` and overwrite ``numericalize`` and ``textify`` functions.
#
# Even if the first solution seems to be simpler, ``Transformers`` does not provide, for all models, a straightforward way to retrieve its list of tokens.
# Therefore, I implemented the second solution, which runs for each model type.
# It consists of using the functions ``convert_tokens_to_ids`` and ``convert_ids_to_tokens`` in respectively ``numericalize`` and ``textify``.
class TransformersVocab(Vocab):
    """fast.ai ``Vocab`` backed by a HuggingFace tokenizer instead of an
    explicit ``itos`` list."""

    def __init__(self, tokenizer: PreTrainedTokenizer):
        super(TransformersVocab, self).__init__(itos=[])
        self.tokenizer = tokenizer

    def numericalize(self, t: Collection[str]) -> List[int]:
        "Convert a list of tokens `t` to their ids."
        return self.tokenizer.convert_tokens_to_ids(t)

    def textify(self, nums: Collection[int], sep=' ') -> List[str]:
        "Convert a list of `nums` to their tokens."
        ids = np.array(nums).tolist()
        tokens = self.tokenizer.convert_ids_to_tokens(ids)
        return tokens if sep is None else sep.join(tokens)

    # Pickling support so Learner.export / load_learner round-trip correctly.
    def __getstate__(self):
        return {'itos': self.itos, 'tokenizer': self.tokenizer}

    def __setstate__(self, state: dict):
        self.itos = state['itos']
        self.tokenizer = state['tokenizer']
        self.stoi = collections.defaultdict(int, {v: k for k, v in enumerate(self.itos)})
# NB: The functions ``__gestate__`` and ``__setstate__`` allow the functions [export](https://docs.fast.ai/basic_train.html#Learner.export) and [load_learner](https://docs.fast.ai/basic_train.html#load_learner) to work correctly with ``TransformersVocab``.
# ### Custom processor
# Now that we have our custom **tokenizer** and **numericalizer**, we can create the custom **processor**. Notice we are passing the ``include_bos = False`` and ``include_eos = False`` options. This is because ``fastai`` adds its own special tokens by default which interferes with the ``[CLS]`` and ``[SEP]`` tokens added by our custom tokenizer.
# +
# Assemble the fastai processing pipeline: tokenize first, then numericalize,
# both backed by the HuggingFace tokenizer. include_bos/include_eos are False
# so fastai does not add its own special tokens on top of [CLS]/[SEP].
transformer_vocab = TransformersVocab(tokenizer = transformer_tokenizer)
numericalize_processor = NumericalizeProcessor(vocab=transformer_vocab)
tokenize_processor = TokenizeProcessor(tokenizer=fastai_tokenizer, include_bos=False, include_eos=False)
transformer_processor = [tokenize_processor, numericalize_processor]
# -
# ## Setting up the Databunch
# For the DataBunch creation, you have to pay attention to set the processor argument to our new custom processor ``transformer_processor`` and manage correctly the padding.
#
# As mentioned in the HuggingFace documentation, BERT, RoBERTa, XLM and DistilBERT are models with absolute position embeddings, so it's usually advised to pad the inputs on the right rather than the left. Regarding XLNET, it is a model with relative position embeddings, therefore, you can either pad the inputs on the right or on the left.
# XLNet is the only architecture here that pads on the left.
pad_first = bool(model_type in ['xlnet'])
pad_idx = transformer_tokenizer.pad_token_id
# Quick sanity check of the tokenize -> ids -> tokens round-trip.
tokens = transformer_tokenizer.tokenize('Salut c est moi, Hello it s me')
print(tokens)
ids = transformer_tokenizer.convert_tokens_to_ids(tokens)
print(ids)
transformer_tokenizer.convert_ids_to_tokens(ids)
# There are multiple ways to create a DataBunch; in our implementation, we use [the data block API](https://docs.fast.ai/data_block.html#The-data-block-API), which gives more flexibility.
databunch = (TextList.from_df(train, cols='sent', processor=transformer_processor)
             .split_by_rand_pct(0.1,seed=seed)
             .label_from_df(cols= 'label')
             .add_test(test)
             .databunch(bs=bs, pad_first=pad_first, pad_idx=pad_idx))
# Check batch and tokenizer :
print('[CLS] token :', transformer_tokenizer.cls_token)
print('[SEP] token :', transformer_tokenizer.sep_token)
print('[PAD] token :', transformer_tokenizer.pad_token)
databunch.show_batch()
# Check batch and numericalizer :
print('[CLS] id :', transformer_tokenizer.cls_token_id)
print('[SEP] id :', transformer_tokenizer.sep_token_id)
print('[PAD] id :', pad_idx)
test_one_batch = databunch.one_batch()[0]
print('Batch shape : ',test_one_batch.shape)
print(test_one_batch)
# ### Custom model
# As mentioned [here](https://github.com/huggingface/transformers#models-always-output-tuples), every model's forward method always outputs a ``tuple`` with various elements depending on the model and the configuration parameters. In our case, we are interested to access only to the logits.
# One way to access them is to create a custom model.
# defining our model architecture
class CustomTransformerModel(nn.Module):
    """Thin wrapper that returns only the logits from a HuggingFace model's
    output tuple, so fastai's loss functions can consume the output directly."""

    def __init__(self, transformer_model: PreTrainedModel):
        super(CustomTransformerModel, self).__init__()
        self.transformer = transformer_model

    def forward(self, input_ids, attention_mask=None):
        # Build the padding mask on the fly (1 = real token, 0 = padding).
        # ``pad_idx`` is the module-level padding id defined earlier in the
        # notebook; any ``attention_mask`` passed in is intentionally ignored.
        mask = (input_ids != pad_idx).type(input_ids.type())
        outputs = self.transformer(input_ids, attention_mask=mask)
        return outputs[0]  # logits only
# To make our transformers adapted to multiclass classification, before loading the pre-trained model, we need to precise the number of labels. To do so, you can modify the config instance or either modify like in [Keita Kurita's article](https://mlexplained.com/2019/05/13/a-tutorial-to-fine-tuning-bert-with-fast-ai/) (Section: *Initializing the Learner*) the ``num_labels`` argument.
# Configure the head for binary classification (2 labels) before loading the
# pre-trained weights, then wrap the model so forward() returns logits only.
config = config_class.from_pretrained(pretrained_model_name)
config.num_labels = 2
config.use_bfloat16 = use_fp16
print(config)
# +
transformer_model = model_class.from_pretrained(pretrained_model_name, config = config)
# transformer_model = model_class.from_pretrained(pretrained_model_name, num_labels = 5)
custom_transformer_model = CustomTransformerModel(transformer_model = transformer_model)
# -
# ## Learner : Custom Optimizer / Custom Metric
# In ``pytorch-transformers``, HuggingFace had implemented two specific optimizers - BertAdam and OpenAIAdam - that have been replaced by a single AdamW optimizer.
# This optimizer matches Pytorch Adam optimizer Api, therefore, it becomes straightforward to integrate it within ``fastai``.
# It is worth noting that for reproducing BertAdam specific behavior, you have to set ``correct_bias = False``.
#
# +
from fastai.callbacks import *
from transformers import AdamW
from functools import partial

# correct_bias=False reproduces the behaviour of the historical BertAdam.
CustomAdamW = partial(AdamW, correct_bias=False)

learner = Learner(databunch,
                  custom_transformer_model,
                  opt_func = CustomAdamW,
                  metrics=[accuracy, error_rate])

# Show graph of learner stats and metrics after each epoch.
learner.callbacks.append(ShowGraph(learner))

# Put learner in FP16 precision mode. --> seems not to be working
if use_fp16: learner = learner.to_fp16()
# -
# -
# ## Discriminative Fine-tuning and Gradual unfreezing (Optional)
# To use **discriminative layer training** and **gradual unfreezing**, ``fastai`` provides one tool that allows to "split" the structure model into groups. An instruction to perform that "split" is described in the fastai documentation [here](https://docs.fast.ai/basic_train.html#Discriminative-layer-training).
#
# Unfortunately, the model architectures are too different to create a unique generic function that can "split" all the model types in a convenient way. Thereby, you will have to implement a custom "split" for each different model architecture.
#
# For example, if we use the RobBERTa model and that we observe his architecture by making ``print(learner.model)``.
print(learner.model)
# We can decide to divide the model in 14 blocks :
# * 1 Embedding
# * 12 transformer
# * 1 classifier
#
# In this case, we can split our model in this way :
# +
# ``list_layers`` must correspond to the architecture that was actually loaded.
# The notebook is configured with model_type = 'bert' (see the Parameters
# cell), so the BERT split below is active. Previously every definition was
# commented out, so ``learner.split(list_layers)`` raised a NameError.
# For bert-base-uncased : 1 embedding block + 12 encoder layers + 1 pooler.
list_layers = [learner.model.transformer.bert.embeddings,
               *learner.model.transformer.bert.encoder.layer,
               learner.model.transformer.bert.pooler]

# For DistilBERT
# list_layers = [learner.model.transformer.distilbert.embeddings,
#                learner.model.transformer.distilbert.transformer.layer[0],
#                learner.model.transformer.distilbert.transformer.layer[1],
#                learner.model.transformer.distilbert.transformer.layer[2],
#                learner.model.transformer.distilbert.transformer.layer[3],
#                learner.model.transformer.distilbert.transformer.layer[4],
#                learner.model.transformer.distilbert.transformer.layer[5],
#                learner.model.transformer.pre_classifier]

# For xlnet-base-cased
# list_layers = [learner.model.transformer.transformer.word_embedding,
#                learner.model.transformer.transformer.layer[0],
#                learner.model.transformer.transformer.layer[1],
#                learner.model.transformer.transformer.layer[2],
#                learner.model.transformer.transformer.layer[3],
#                learner.model.transformer.transformer.layer[4],
#                learner.model.transformer.transformer.layer[5],
#                learner.model.transformer.transformer.layer[6],
#                learner.model.transformer.transformer.layer[7],
#                learner.model.transformer.transformer.layer[8],
#                learner.model.transformer.transformer.layer[9],
#                learner.model.transformer.transformer.layer[10],
#                learner.model.transformer.transformer.layer[11],
#                learner.model.transformer.sequence_summary]

# For roberta-base
# list_layers = [learner.model.transformer.roberta.embeddings,
#                learner.model.transformer.roberta.encoder.layer[0],
#                learner.model.transformer.roberta.encoder.layer[1],
#                learner.model.transformer.roberta.encoder.layer[2],
#                learner.model.transformer.roberta.encoder.layer[3],
#                learner.model.transformer.roberta.encoder.layer[4],
#                learner.model.transformer.roberta.encoder.layer[5],
#                learner.model.transformer.roberta.encoder.layer[6],
#                learner.model.transformer.roberta.encoder.layer[7],
#                learner.model.transformer.roberta.encoder.layer[8],
#                learner.model.transformer.roberta.encoder.layer[9],
#                learner.model.transformer.roberta.encoder.layer[10],
#                learner.model.transformer.roberta.encoder.layer[11],
#                learner.model.transformer.roberta.pooler]
# -
# Check groups :
learner.split(list_layers)
num_groups = len(learner.layer_groups)
print('Learner split in',num_groups,'groups')
print(learner.layer_groups)
# Note that I didn't find any document that has studied the influence of **Discriminative Fine-tuning** and **Gradual unfreezing** or even **Slanted Triangular Learning Rates** with transformers. Therefore, using these tools does not guarantee better results. If you found any interesting documents, please let us know in the comment.
# ## Train
# Now we can finally use all the fastai build-in features to train our model. Like the ULMFiT method, we will use **Slanted Triangular Learning Rates**, **Discriminate Learning Rate** and **gradually unfreeze the model**.
# Save the untrained state so every stage below restarts from identical
# weights (combined with seed_all for reproducibility).
learner.save('untrain')
seed_all(seed)
learner.load('untrain');
# Therefore, we first freeze all the groups but the classifier with :
learner.freeze_to(-1)
# We check which layers are trainable.
learner.summary()
# For **Slanted Triangular Learning Rates** you have to use the function ``one_cycle``. For more information please check the fastai documentation [here](https://docs.fast.ai/callbacks.one_cycle.html).
#
# To use our ``one_cycle`` we will need an optimum learning rate. We can find this learning rate by using a learning rate finder which can be called by using ``lr_find``.
learner.lr_find()
learner.recorder.plot(skip_end=10,suggestion=True)
# We will pick a value a bit before the minimum, where the loss still improves. Here 2x10^-3 seems to be a good value.
#
# Next we will use ``fit_one_cycle`` with the chosen learning rate as the maximum learning rate.
learner.fit_one_cycle(1,max_lr=2e-03,moms=(0.8,0.7))
learner.save('first_cycle')
seed_all(seed)
learner.load('first_cycle');
# We then unfreeze the second group of layers and repeat the operations.
learner.freeze_to(-2)
lr = 1e-5
# Note here that we use slice to create a separate (discriminative) learning rate for each group.
learner.fit_one_cycle(1, max_lr=slice(lr*0.95**num_groups, lr), moms=(0.8, 0.9))
learner.save('second_cycle')
seed_all(seed)
learner.load('second_cycle');
learner.freeze_to(-3)
learner.fit_one_cycle(1, max_lr=slice(lr*0.95**num_groups, lr), moms=(0.8, 0.9))
learner.save('third_cycle')
seed_all(seed)
learner.load('third_cycle');
# Here, we unfreeze all the groups.
learner.unfreeze()
learner.fit_one_cycle(2, max_lr=slice(lr*0.95**num_groups, lr), moms=(0.8, 0.9))
# +
# learner.fit_one_cycle(1, max_lr=slice(lr*0.95**num_groups, lr), moms=(0.8, 0.9))
# -
# # Now, you can predict examples with:
# +
# Learner.validate
# Learner.get_preds
# Learner.predict
# Learner.show_results
# Learner.no_logging
# Learner.loss_not_reduced
# -
# Evaluate on the validation set, then fetch raw predictions.
learner.validate()
learner.get_preds()
# ## Export Learner
# In order to export and load the learner you can do these operations:
learner.export(file = 'transformer.pkl');
path = '/kaggle/working'
export_learner = load_learner(path, file = 'transformer.pkl')
# As mentioned [here](https://docs.fast.ai/basic_train.html#load_learner), you have to be careful that each custom class - like ``TransformersVocab`` - is first defined before executing ``load_learner``.
export_learner.predict('This is the worst movie of 2020')
# ## Creating prediction
# Now that the model is trained, we want to generate predictions from the test dataset.
#
# As specified in Keita Kurita's [article](https://mlexplained.com/2019/05/13/a-tutorial-to-fine-tuning-bert-with-fast-ai/), as the function ``get_preds`` does not return elements in order by default, you will have to resort the elements into their correct order.
# +
def get_preds_as_nparray(ds_type) -> np.ndarray:
    """
    Return the learner's predictions for ``ds_type`` as a NumPy array restored
    to dataset order. ``get_preds`` yields elements in sampler order, not the
    original order, so we invert the sampler permutation (same trick as
    fastai's RNNLearner).
    """
    unordered = learner.get_preds(ds_type)[0].detach().cpu().numpy()
    sampler_order = list(databunch.dl(ds_type).sampler)
    original_positions = np.argsort(sampler_order)
    return unordered[original_positions, :]

test_preds = get_preds_as_nparray(DatasetType.Test)
# -
test_preds
# Fill the Kaggle submission template with the argmax class per test row.
sample_submission = pd.read_csv(DATA_ROOT / 'sampleSubmission.csv')
sample_submission['Sentiment'] = np.argmax(test_preds,axis=1)
sample_submission.to_csv("predictions.csv", index=False)
# We check the order.
test.head()
sample_submission.head()
# +
from IPython.display import HTML
def create_download_link(title = "Download CSV file", filename = "data.csv"):
    """Return an IPython HTML anchor letting the notebook user download *filename*.

    Fix: the template previously had a literal broken href (no ``{filename}``
    placeholder), so the rendered link was dead. It now points at the file and
    sets the ``download`` attribute so the browser saves it under that name.
    """
    html = '<a download="{filename}" href="{filename}" target="_blank">{title}</a>'
    html = html.format(title=title, filename=filename)
    return HTML(html)

# create a link to download the dataframe which was saved with .to_csv method
create_download_link(filename='predictions.csv')
# -
# We can now submit our predictions to Kaggle ! In our example, without playing too much with the parameters, we get a score of 0.70059, which leads us to the 5th position on the leaderboard!
# # Conclusion
#
# In this NoteBook, I explain how to combine the ``transformers`` library with the beloved ``fastai`` library. It aims to make you understand where to look and modify both libraries to make them work together. Likely, it allows you to use **Slanted Triangular Learning Rates**, **Discriminate Learning Rate** and even **Gradual Unfreezing**. As a result, without even tunning the parameters, you can obtain rapidly state-of-the-art results.
#
# This year, the transformers became an essential tool to NLP. Because of that, I think that pre-trained transformers architectures will be integrated soon to future versions of fastai. Meanwhile, this tutorial is a good starter.
#
# I hope you enjoyed this first article and found it useful.
# Thanks for reading and don't hesitate in leaving questions or suggestions.
#
# # References
# * Hugging Face, Transformers GitHub (Nov 2019), [https://github.com/huggingface/transformers](https://github.com/huggingface/transformers)
# * Fast.ai, Fastai documentation (Nov 2019), [https://docs.fast.ai/text.html](https://docs.fast.ai/text.html)
# * <NAME> & <NAME>, Universal Language Model Fine-tuning for Text Classification (May 2018), [https://arxiv.org/abs/1801.06146](https://arxiv.org/abs/1801.06146)
# * <NAME>'s article : [A Tutorial to Fine-Tuning BERT with Fast AI](https://mlexplained.com/2019/05/13/a-tutorial-to-fine-tuning-bert-with-fast-ai/) (May 2019)
# * <NAME>'s article : [Using RoBERTa with Fastai for NLP](https://medium.com/analytics-vidhya/using-roberta-with-fastai-for-nlp-7ed3fed21f6c) (Sep 2019)
| FastAI Task B.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import sys
# -
# # Load params.yaml
# +
from src.utils.config import load_config

# Load the pipeline parameters (base seed, data split, featurize settings)
# from the repository's params.yaml.
config = load_config('../params.yaml')
config.to_dict()
# -
# # Load dataset
# ## Import functions
# +
from src.data.dataset import get_dataset, get_target_names

dataset = get_dataset()
print(dataset.shape)
dataset.head()
# +
# Feature names
feature_names = dataset.columns.tolist()[:4]
feature_names
# +
# Iris species
species = get_target_names()
species
# -
# # Features engineering
# + [markdown] jupyter={"source_hidden": true}
# ## Import functions
# +
from src.features.features import extract_features

# Derive the engineered feature columns in-place on the loaded dataset.
dataset = extract_features(dataset)
dataset.head()
# -
# # Split dataset
# +
from sklearn.model_selection import train_test_split

train_dataset, test_dataset = train_test_split(
    dataset,
    test_size=config.data_split.test_size,    # from params.yaml: data_split.test_size
    random_state=config.base.random_state     # from params.yaml: base.random_state
)
train_dataset.shape, test_dataset.shape
# -
# # Train
# ## Import functions
# +
from src.train.train import train_lr

# Fit the logistic-regression model on the training split.
model = train_lr(
    df = train_dataset,
    target_column = config.featurize.target_column
)
# -
model
#
# # Evaluate
#
# ## Import functions
# +
from src.evaluate.evaluate import evaluate
from src.report.visualize import plot_confusion_matrix

# Score the held-out split; the report carries the F1 score and the
# confusion matrix.
report = evaluate(
    df = test_dataset,
    target_column = config.featurize.target_column,
    clf = model
)
f1 = report['f1']
cm = report['cm']
print(f1)
print(cm)
# -
report
plot_confusion_matrix(cm, species, normalize=False)
| notebooks/step-2-move-code-to-python-modules-clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/antonpolishko/task-ties/blob/master/EDA_NER_sections.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Vakzy9bgYF5x" colab_type="code" colab={}
import pandas as pd
import numpy as np
import os
import glob
import re
# + id="NuAQQp57YbQ6" colab_type="code" outputId="2d28d94e-0237-400d-a71d-cb1c8dbdf123" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/drive')
# Root of the shared COVID-19 dataset folder on Google Drive.
covid_main = '/content/drive/My Drive/COVID-19'
# + id="QRFEPSpnXdcG" colab_type="code" colab={}
# Pickled, gzip-compressed sentence shards produced by the NER pipeline.
section_text_dir = os.path.join(covid_main,'v6_text')
files = glob.glob(os.path.join(covid_main,'v6_text','*.pkl'))
# + id="Gme2L1eJYzDx" colab_type="code" colab={}
# frames = []
# for i in range(len(files)):
# df = pd.read_pickle(files[i], compression="gzip")
# df = df[['paper_id','section','sentence','DISEASE']]
# frames.append(df)
# del df
# section_text = pd.concat(frames)
# section_text.head(10)
# section_text.reset_index(drop=True, inplace=True)
# section_text.to_json(os.path.join(covid_main,'AllSectionText.json'))
# + id="5f96usMhz-Dl" colab_type="code" outputId="f97f1ce6-8dd0-4b80-8024-bab6d42ebef0" colab={"base_uri": "https://localhost:8080/", "height": 359}
# Collect the 'methods'-section sentences from every pickled text shard.
frames = []
for path in files:
    shard = pd.read_pickle(path, compression="gzip")
    shard = shard[['paper_id','section','sentence']]
    frames.append(shard[shard['section']=='methods'])
methods_sentences = pd.concat(frames).reset_index(drop=True)
methods_sentences.head(10)
# + id="yfwWnAua1f5A" colab_type="code" colab={}
# methods_sentences.to_json(os.path.join(covid_main,'methods_sentences.json'))
# + id="dgA6p35wJrRl" colab_type="code" colab={}
# Reload the cached extraction instead of re-parsing every shard.
methods_sentences = pd.read_json(os.path.join(covid_main,'methods_sentences.json'))
# + id="xJpB-o4TXoPU" colab_type="code" outputId="50d91d7a-29c5-4a20-896c-2debae0ea5db" colab={"base_uri": "https://localhost:8080/", "height": 34}
#get number of papers that have a methods section
len(methods_sentences['paper_id'].unique())
# + id="QDMCx3zWB7x7" colab_type="code" outputId="cb1443bb-74ae-4afa-d6ac-7f34213a6b2f" colab={"base_uri": "https://localhost:8080/", "height": 221}
def get_age(text):
    """Extract an age phrase (e.g. "median age ... 45 years") from *text*.

    Tries three patterns from most to least specific and returns the first
    matching span, or None when no age phrase is present.
    """
    # keyword followed by a number with a time unit
    match1 = re.search(r'(median age|mean age)(.*?)([0-9]+?\s*?years|[0-9]+?\s*?yrs|[0-9]+?\s*?y|months|weeks|days)', text)
    # number with unit preceding the keyword
    match2 = re.search(r'([0-9]+?\s*?years|[0-9]+?\s*?yrs|[0-9]+?\s*?y|months|weeks|days)(\s)(median age|mean age)', text)
    # fallback: keyword followed by a bare number. The number is matched
    # greedily ([0-9]+); the previous lazy '[0-9]+?\s*?' captured only the
    # first digit, truncating e.g. "mean age of 45" to "mean age of 4".
    match3 = re.search(r'(median age|mean age)(.*?)([0-9]+)', text)
    for match in (match1, match2, match3):
        if match:
            return match.group()
    return None
def get_sex(text):
    """Return the first phrase describing the sex breakdown of a cohort
    (counts, paired percentages, or a single percentage), or None."""
    patterns = (
        # counts: "<n> males ... <m> females" (or men/women)
        r'([1-9]+ males)(.*?)([1-9]*? females)|([1-9]+ men)(.*?)([1-9]*? women)',
        # paired percentages: "<p>% males ... <q>% females" (or men/women)
        r'(\d+(\.\d+)?% males)(.*?)(\d+(\.\d+)?% females)|(\d+(\.\d+)?% men)(.*?)(\d+(\.\d+)?% women)',
        # single percentage: "<p>% males" / "<p>% women" etc.
        r'(\d+(\.\d+)?%)(\s)(males|females)|(\d+(\.\d+)?%)(\s)(men|women)',
    )
    for pattern in patterns:
        match = re.search(pattern, text)
        if match:
            return match.group()
    return None
# + id="DXir_FwJsjKs" colab_type="code" outputId="3fd83f83-9cb1-492c-f3b3-4fa07b264314" colab={"base_uri": "https://localhost:8080/", "height": 255}
# Flag sentences containing age keywords, then extract the age phrase.
# NOTE(review): 'age'/'sex' are computed from the raw 'sentence' column, so the
# 'age_exists'/'sex_exists' filters built just above are never consulted by the
# extraction (pd.notnull is always True for a sentence string) — presumably
# the intent was .apply on the *_exists columns; verify before relying on it.
methods_sentences['age_exists']=methods_sentences['sentence'].apply(lambda x: x if any(s in x for s in ['mean age', 'median age']) else np.nan)
methods_sentences['age'] = methods_sentences['sentence'].apply(lambda x: get_age(x) if pd.notnull(x) else x)
methods_sentences['sex_exists']=methods_sentences['sentence'].apply(lambda x: x if any(s in x.split() for s in ['males', 'females', 'male', 'female', 'men','women']) else np.nan)
methods_sentences['sex'] = methods_sentences['sentence'].apply(lambda x: get_sex(x) if pd.notnull(x) else x)
methods_sentences.info()
# + id="DXir_FwJsjKs" colab_type="code" outputId="3fd83f83-9cb1-492c-f3b3-4fa07b264314" colab={"base_uri": "https://localhost:8080/", "height": 1000}
methods_sentences[pd.notnull(methods_sentences['age_exists'])]['age_exists'].tolist()
# + id="eZ5xcygoFlOw" colab_type="code" outputId="f21ee50a-5325-495c-ea7b-da9cf29dbfd4" colab={"base_uri": "https://localhost:8080/", "height": 1000}
methods_sentences[pd.notnull(methods_sentences['age'])]['age'].tolist()
# + id="N0SzJ2gdrWGC" colab_type="code" outputId="f6d73348-d0ec-4440-8e8d-2b42486345dc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
methods_sentences[pd.notnull(methods_sentences['sex_exists'])]['sex_exists'].tolist()
# + id="K3phTBv4s42w" colab_type="code" outputId="5a5498c9-9d5c-4562-a250-bb6014b297de" colab={"base_uri": "https://localhost:8080/", "height": 1000}
methods_sentences[pd.notnull(methods_sentences['sex'])]['sex'].tolist()
# + id="aoKQhjQi0wi-" colab_type="code" colab={}
# Cache the annotated methods sentences back to Drive.
methods_sentences.to_json(os.path.join(covid_main,'methods_sentences.json'))
# + id="X_W27ay91v46" colab_type="code" outputId="72e7f48a-906e-4730-8e94-ca88df6cfe56" colab={"base_uri": "https://localhost:8080/", "height": 359}
# Same shard-loading pass as for 'methods', this time keeping the
# 'results'-section sentences.
frames = []
for i in range(len(files)):
    df = pd.read_pickle(files[i], compression="gzip")
    df = df[['paper_id','section','sentence']]
    df = df[df['section']=='results']
    frames.append(df)
del df
results_sentences = pd.concat(frames).reset_index(drop=True)
results_sentences.head(10)
# + id="e5qBBSH217dp" colab_type="code" outputId="49cb6926-32fe-4e2d-cfb0-137113fac7b8" colab={"base_uri": "https://localhost:8080/", "height": 255}
#get sentences that mention age-related keywords
# (same caveat as the methods cell: extraction reads 'sentence', not *_exists)
results_sentences['age_exists']=results_sentences['sentence'].apply(lambda x: x if any(s in x for s in ['mean age', 'median age']) else np.nan)
results_sentences['age'] = results_sentences['sentence'].apply(lambda x: get_age(x) if pd.notnull(x) else x)
results_sentences['sex_exists']=results_sentences['sentence'].apply(lambda x: x if any(s in x.split() for s in ['males', 'females', 'male', 'female', 'men','women']) else np.nan)
results_sentences['sex'] = results_sentences['sentence'].apply(lambda x: get_sex(x) if pd.notnull(x) else x)
results_sentences.info()
# + id="8M0ZxDqm4dcE" colab_type="code" outputId="414c79ce-58e7-4b42-831d-e2ac2d4c772a" colab={"base_uri": "https://localhost:8080/", "height": 221}
methods_results_demo = pd.concat([methods_sentences,results_sentences]).reset_index(drop=True)
methods_results_demo = methods_results_demo.drop(['age_exists','sex_exists'], axis = 1)
methods_results_demo = methods_results_demo.dropna(subset=['age','sex'], how='all').reset_index(drop=True)
methods_results_demo.info()
# + id="C0OaJ9Dh8WSW" colab_type="code" outputId="e9bb303a-e3a8-4a7c-cbcc-2d2339611cf4" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(methods_results_demo['paper_id'].unique())
# + id="8pkUGDWX86XJ" colab_type="code" outputId="9de4c724-c421-4430-b1c8-f11f618c6674" colab={"base_uri": "https://localhost:8080/", "height": 204}
methods_results_demo.head()
# + id="a2jGQ6iH8fNX" colab_type="code" outputId="7cf38730-a76e-45f4-fee3-5c278caba5e0" colab={"base_uri": "https://localhost:8080/", "height": 221}
methods_results_demo[['age','sex']] = methods_results_demo[['age','sex']].applymap(lambda x: x if pd.notnull(x) else "")
methods_results_demo.info()
# + id="VKE7SK0L-QCs" colab_type="code" outputId="a24a5a15-cf13-470f-c083-43dee2c7d76d" colab={"base_uri": "https://localhost:8080/", "height": 221}
methods_results_demo_combined = methods_results_demo.groupby(['paper_id']).agg(lambda x: ' '.join(x)).reset_index()
methods_results_demo_combined.info()
# + id="j63uJsTh-hs1" colab_type="code" colab={}
# methods_results_demo_combined.to_json(os.path.join(covid_main,'methods_results_demographics.json'))
methods_results_demo_combined.to_csv(os.path.join(covid_main,'methods_results_demographics.csv'), index=False)
# + id="kJlXrtBs-spk" colab_type="code" outputId="a0ac2236-8650-4da9-dc6c-4573d6f3c2a4" colab={"base_uri": "https://localhost:8080/", "height": 289}
methods_results_demo_combined.head()
# + id="pcXHUtOmuwrK" colab_type="code" colab={}
# section_text = pd.read_json(os.path.join(covid_main,'AllSectionText.json'))
# + id="IYyghQ_7mvbV" colab_type="code" outputId="c607bbbf-f129-4175-b11c-5ca983ab0583" colab={"base_uri": "https://localhost:8080/", "height": 221}
section_text.info()
# + id="jF_bBUzUoN_s" colab_type="code" outputId="c38d8861-4fda-49df-bda7-edf766ab6453" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(section_text['paper_id'].unique())
# + id="yZAxT6aFikqJ" colab_type="code" outputId="a6cb1a4e-6b9b-4fdc-f207-2a862933426a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
section_text['section'].unique().tolist()
# + id="W7T-nN-kht50" colab_type="code" outputId="4f64082d-1b28-4385-f5d3-58d2c17eca3e" colab={"base_uri": "https://localhost:8080/", "height": 34}
'results' in section_text['section'].unique().tolist()
# + id="G3Iie2yIu8vm" colab_type="code" outputId="257821cf-93f0-43ae-d467-f20699f3e2c3" colab={"base_uri": "https://localhost:8080/", "height": 34}
'table 1' in section_text['section'].unique().tolist()
# + id="Vqkesty2sXAo" colab_type="code" outputId="876a1747-1837-4b13-d8bf-3aa3dc72cf7b" colab={"base_uri": "https://localhost:8080/", "height": 255}
# Most frequent disease entity mentioned in each paper's title.
# NOTE(review): `titles` is a filtered view of section_text, so the
# assignment below can raise SettingWithCopyWarning -- consider .copy().
# eval() assumes the DISEASE column holds trusted, list-shaped strings.
titles = section_text[section_text['section']=='title']
titles['DISEASE'] = titles['DISEASE'].apply(lambda x: eval(str(x)))
titles_joined = titles.groupby('paper_id').agg({'DISEASE':sum}).reset_index()
titles_joined['title_most_freq_dz'] = titles_joined['DISEASE'].apply(lambda lst: max(set(lst), key=lst.count) if len(lst)>0 else np.nan)
titles_joined.drop(['DISEASE'], axis = 1, inplace=True)
print(titles_joined.head())
print()
print(len(titles_joined['paper_id'].unique()))
# + id="u5KnoPmLgCov" colab_type="code" outputId="2d8bb3c9-7ffa-4408-adf8-f38457ca6914" colab={"base_uri": "https://localhost:8080/", "height": 255}
# same aggregation for abstracts (same SettingWithCopy caveat as above)
abstracts = section_text[section_text['section']=='abstract']
abstracts['DISEASE'] = abstracts['DISEASE'].apply(lambda x: eval(str(x)))
abstracts_joined = abstracts.groupby('paper_id').agg({'DISEASE':sum}).reset_index()
abstracts_joined['abs_most_freq_dz'] = abstracts_joined['DISEASE'].apply(lambda lst: max(set(lst), key=lst.count) if len(lst)>0 else np.nan)
abstracts_joined.drop(['DISEASE'], axis = 1, inplace=True)
print(abstracts_joined.head())
print()
print(len(abstracts_joined['paper_id'].unique()))
# + id="KBhKuxJBbLGL" colab_type="code" outputId="43763033-c7ae-460d-b0d6-36294928b208" colab={"base_uri": "https://localhost:8080/", "height": 153}
# join every paper's methods sentences into one string
methods = section_text[section_text['section']=='methods']
# print(methods.info())
methods_joined = methods.groupby('paper_id').agg({'sentence': lambda x: ' '.join(x)}).reset_index().reset_index(drop=True)
methods_joined.columns = ['paper_id','methods_sentence']
print(methods_joined.head())
print()
print(len(methods_joined['paper_id'].unique()))
# + id="VGCsIG3JfNh4" colab_type="code" outputId="51fee65e-d4be-406c-d8ea-1fcde1dfc5a7" colab={"base_uri": "https://localhost:8080/", "height": 153}
subjects = section_text[section_text['section']=='subjects'].reset_index(drop=True)
subjects_joined = subjects.groupby('paper_id').agg({'sentence': lambda x: ' '.join(x)}).reset_index().reset_index(drop=True)
print(subjects_joined.head())
print()
print(len(subjects_joined['paper_id'].unique()))
# + id="ZewfX0mMviub" colab_type="code" outputId="0e86d413-2e5a-4f15-841e-eddd297b02ef" colab={"base_uri": "https://localhost:8080/", "height": 153}
results = section_text[section_text['section']=='results']
# print(methods.info())
results_joined = results.groupby('paper_id').agg({'sentence': lambda x: ' '.join(x)}).reset_index().reset_index(drop=True)
results_joined.columns = ['paper_id','results_sentence']
print(results_joined.head())
print()
print(len(results_joined['paper_id'].unique()))
# + id="ipF9D7-5vv0q" colab_type="code" outputId="e4c33387-7a5d-4712-95b9-db7e62fd6235" colab={"base_uri": "https://localhost:8080/", "height": 153}
table1 = section_text[section_text['section']=='table 1']
# print(methods.info())
table1_joined = table1.groupby('paper_id').agg({'sentence': lambda x: ' '.join(x)}).reset_index().reset_index(drop=True)
table1_joined.columns = ['paper_id','table1_sentence']
print(table1_joined.head())
print()
print(len(table1_joined['paper_id'].unique()))
# + id="eQpLpZPDqPPd" colab_type="code" outputId="38db5f9f-29fd-4252-f809-4aac71185986" colab={"base_uri": "https://localhost:8080/", "height": 238}
# outer-join all per-section frames on paper_id into one wide table
all_section_data = titles_joined.merge(abstracts_joined, on='paper_id', how='outer')\
.merge(methods_joined, on='paper_id', how='outer').reset_index(drop=True)\
.merge(results_joined, on='paper_id', how='outer').reset_index(drop=True)\
.merge(table1_joined, on='paper_id', how='outer').reset_index(drop=True)
all_section_data.info()
# + id="7WFFOmC3sGkF" colab_type="code" outputId="36fa91f2-e365-4cab-cb65-f64883f72c90" colab={"base_uri": "https://localhost:8080/", "height": 444}
all_section_data.head(10)
# + id="pEBGk9QQwjhW" colab_type="code" colab={}
all_section_data.to_json(os.path.join(covid_main,'AllSectionText_Processed.json'))
# + id="z3JmgZ0Rw3LO" colab_type="code" outputId="c941b3f6-8423-411e-c4bd-7d64d7f3852a" colab={"base_uri": "https://localhost:8080/", "height": 572}
all_section_data[pd.notnull(all_section_data['table1_sentence'])]
# + id="z1Z1UnEYtgnV" colab_type="code" colab={}
## define rule to determine the main disease discussed in paper
def assign_disease(row):
    """Return *row* with a 'disease' field set to the title's most
    frequent disease mention when present, else the abstract's."""
    if pd.notnull(row['title_most_freq_dz']):
        row['disease'] = row['title_most_freq_dz']
    else:
        row['disease'] = row['abs_most_freq_dz']
    return row  # apply(axis=1) keeps the mutated row as the result
# Fix: use row-wise .apply -- DataFrame has no 'applmap' attribute, and
# applymap is element-wise with no axis argument; the function also had
# to return the row for the assignment to produce a DataFrame.
all_section_data = all_section_data.apply(assign_disease, axis=1)
# + id="dNTJ-iFqwV2V" colab_type="code" colab={}
## core claim extraction
# -*- coding: utf-8 -*-
# Import the required libraries
import nltk, re, csv, os
from nltk.corpus import stopwords
from collections import Counter
import json
import numpy as np
from difflib import SequenceMatcher
# + id="U0Us7YDXI5zh" colab_type="code" colab={}
def check_similarity(a, b):
    """Return the difflib similarity ratio (0.0-1.0) between *a* and *b*."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# Punkt sentence splitter used to break abstracts into candidate claim sentences
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
# Define core and non-core words
# core words signal a concluding / claim-style sentence and raise a
# sentence's score; non-core words signal methodology or boilerplate
# and lower it (see assign_sentences_score below)
core_words = ["highlight", "constitute", "suggest", "indicate", "demonstrate", "show", "reveal", "provide", "illustrate", "describe", "conclude", "support", "establish", "propose", "advocate", "determine", "confirm", "argue", "imply", "display", "offer", "highlights", "constitutes", "suggests", "indicates", "demonstrates", "shows", "reveals", "provides", "illustrates", "describes", "concludes", "supports", "establishes", "proposes", "advocates", "determines", "confirms", "argues", "implies", "displays", "offers", "underlines", "underline", "underlined", "overall", "sum", "therefore", "thus", "together", "conclusion", "collectively", "altogether", "conclude", "conclusively", "consequently", "study", "results", "findings", "research", "report", "data", "paper", "observations", "experiment", "publication", "analysis"]
non_core_words = ["sought", "addition", "well-replicated", "replicated", "sample", "aimed", "aims", "questionnaire", "survey", "based", "interviews", "cross-sectional", "participants", "descriptive", "CI ", "%", "interview", "participant", "cc by 3.0", "previously"]
# Function that creates a list of the most frequent words in the provided text
def determine_frequent_words(abstract, path):
    """Return the 10 most frequent non-stopword tokens of *abstract*.

    *path* is kept for interface compatibility (an earlier version
    re-read the article body from disk) but is unused here.
    """
    # Fix: the original tokenized the undefined name `abstracte`
    # (NameError); tokenize the `abstract` argument instead.
    base_words = nltk.tokenize.casual.casual_tokenize(abstract.lower())
    # drop stopwords before counting
    words = [word for word in base_words if word not in stopwords.words()]
    counts = Counter(words)
    frequent_words = [word for word, count in counts.most_common(10)]
    return frequent_words
# This function assigns every sentence a score based on some parameters
def assign_sentences_score(lines, frequent_words):
    """Score each sentence in *lines*; return a {sentence: score} dict.

    Heuristics: +25 for matching the core-claim regex, +10 per core word
    appearing exactly once, +5 per frequent word appearing exactly once,
    -5 per non-core word, -50 for very long (>=400 char) sentences.
    Reads `core_words` / `non_core_words` from module scope.
    """
    # Fix: the parameter was named `sentence` while the body iterated the
    # undefined global `lines`; the caller passes the sentence list.
    sentences = {}
    for line in lines:
        score = 0
        words = nltk.tokenize.casual.casual_tokenize(line)
        # Fix: `word != '[' or ']'` was always true; filter both brackets.
        words = [word for word in words if word not in ('[', ']')]
        searchObj = re.search( r'(overall|in sum|therefore|thus|together|in conclusion|concluding|taken together|collectively|altogether|taken collectively|to conclude|conclusively|all together|all things considered|everything considered|as a result|consequently|conclusion|thus|as expressed)*.*(the|these|this|the present|our)*(study|results|findings|research|report|data|observation|experiment|publication|analysis|data set|we)+.*(highlight|constitute|suggest|indicate|demonstrate|show|reveal|provide|illustrate|describe|conclude|support|establish|propose|advocate|determine|confirm|argue|impl|display|offer|underline|allow|found|find)+', line, re.I)
        if searchObj is not None:
            score += 25
        for word in words:
            if word.lower() in core_words and line.count(word) == 1:
                score += 10
            if word.lower() in frequent_words and line.count(word) == 1:
                score += 5
            if word.lower() in non_core_words:
                score -= 5
        if len(line) >= 400:
            score -= 50
        sentences[line] = score
    return sentences
# In some cases, two sentences end up with the same score, in such a case an extra check is performed to make sure only one sentence is chosen in the end
def perform_extra_check(candidate_sentences, frequent_words):
    """Re-score tied candidates: +1 per token among the top-5 frequent words."""
    less_frequent_words = frequent_words[:5]
    rescored = {}
    for candidate in candidate_sentences:
        tokens = nltk.tokenize.casual.casual_tokenize(candidate)
        rescored[candidate] = sum(1 for tok in tokens if tok in less_frequent_words)
    return rescored
# When all the sentences have an assigned score, this function goes over all the sentences and picks the sentence(s) with the highest score
def go_over_sentences(sentences):
    """Return the highest-scoring sentence from a {sentence: score} dict.

    Ties are broken via perform_extra_check; on a persistent tie the
    last tied sentence in iteration order wins.
    """
    # Fix: dict.iteritems() is Python 2 only -- use .items() throughout.
    highest_score = max(sentences.values())
    candidates = [sentence for sentence, score in sentences.items()
                  if score == highest_score]
    if len(candidates) == 1:
        return candidates[0]
    # NOTE(review): the tie-break path reads the module-global
    # `frequent_words` (assigned in the processing loop below) -- confirm
    # before reusing this function elsewhere.
    rescored = perform_extra_check(candidates, frequent_words[:5])
    best = max(rescored.values())
    core_sentence = None
    for sentence, score in rescored.items():
        if score == best:
            core_sentence = sentence
    return core_sentence
# Provide the directory here with all the articles that should be processed
directory='C:/Users/..'
# Py3 fix: csv writers need a text-mode handle opened with newline=''
# (the original 'wb' + str rows raises TypeError under Python 3).
csvfile = open('Results/results_abstract.csv', 'w', newline='', encoding='utf-8')
fieldnames = ['File', 'Sentence']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter='|')
writer.writeheader()
for file in os.listdir(directory):
    path = directory + file
    # pull the abstract: text between the "==== Front" and "==== Body"
    # (or "Graphical Abstract") markers of the PMC dump format
    text = open(path, encoding='utf-8')
    front = "==== Front"
    body = "==== Body"
    graphical_abstract = "Graphical Abstract"
    started = False
    abstract = ''
    for line in text:
        if body in line or graphical_abstract in line:
            started = False
        if started:
            abstract += line
        if front in line:
            started = True
    text.close()
    # Py3: `abstract` is already str; the original bytes-style
    # .decode()/.encode() round-trips are dropped.
    # Fix: the helper is named determine_frequent_words and returns only
    # the frequent-word list; sentence splitting happens here instead.
    lines = sent_detector.tokenize(abstract)
    frequent_words = determine_frequent_words(abstract, path)
    sentences = assign_sentences_score(lines, frequent_words)
    core_sentence = go_over_sentences(sentences)
    core_sentence = core_sentence.replace('\n', ' ').replace(';',',')
    writer.writerow({'File': file, 'Sentence': core_sentence})
csvfile.close()
# DO NOT FORGET TO DECODE EXTRACTED RESULTS BEFORE NEXT STEP!
| task_ties/EDA_NER_sections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the UCI chronic-kidney-disease dataset and clean it for modeling.
data = pd.read_csv('kidney_disease.csv')
data.head()
# ckd=chronic kidney disease
data.info()
data.classification.unique()
# the raw file contains a stray tab-suffixed variant of the 'ckd' label
data.classification=data.classification.replace("ckd\t","ckd")
data.classification.unique()
# the row id carries no signal
data.drop('id', axis = 1, inplace = True)
data.head()
# binary target: 1 = chronic kidney disease, 0 = healthy
data['classification'] = data['classification'].replace(['ckd','notckd'], [1,0])
data.head()
data.isnull().sum()
# keep only complete rows
df = data.dropna(axis = 0)
print(f"Before dropping all NaN values: {data.shape}")
print(f"After dropping all NaN values: {df.shape}")
df.head()
# renumber rows after the drop (equivalent to reset_index(drop=True))
df.index = range(0,len(df),1)
df.head()
for i in df['wc']:
    print(i)
# white-cell counts include tab-prefixed strings; map them to numbers
df['wc']=df['wc'].replace(["\t6200","\t8400"],[6200,8400])
for i in df['wc']:
    print(i)
df.info()
# NOTE(review): these casts assume no other non-numeric strings remain
# in pcv/wc/rc after the replacements above -- confirm on fresh data.
df['pcv']=df['pcv'].astype(int)
df['wc']=df['wc'].astype(int)
df['rc']=df['rc'].astype(float)
df.info()
# remaining object (categorical) columns to be label-encoded below
object_dtypes = df.select_dtypes(include = 'object')
object_dtypes.head()
# manual label-encoding map for each categorical column (sic: "dictonary")
dictonary = {
    "rbc": {
        "abnormal":1,
        "normal": 0,
    },
    "pc":{
        "abnormal":1,
        "normal": 0,
    },
    "pcc":{
        "present":1,
        "notpresent":0,
    },
    "ba":{
        "notpresent":0,
        "present": 1,
    },
    "htn":{
        "yes":1,
        "no": 0,
    },
    "dm":{
        "yes":1,
        "no":0,
    },
    "cad":{
        "yes":1,
        "no": 0,
    },
    "appet":{
        "good":1,
        "poor": 0,
    },
    "pe":{
        "yes":1,
        "no":0,
    },
    "ane":{
        "yes":1,
        "no":0,
    }
}
df=df.replace(dictonary)
df.head()
import seaborn as sns
# correlation heatmap used to choose which columns to drop below
plt.figure(figsize = (20,20))
sns.heatmap(df.corr(), annot = True, fmt=".2f",linewidths=0.5)
df.corr()
# drop the target plus columns judged redundant from the correlation matrix
X = df.drop(['classification', 'sg', 'appet', 'rc', 'pcv', 'hemo', 'sod'], axis = 1)
y = df['classification']
X.columns
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators = 20)
model.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, accuracy_score
confusion_matrix(y_test, model.predict(X_test))
print(f"Accuracy is {round(accuracy_score(y_test, model.predict(X_test))*100, 2)}%")
import pickle
# persist the trained classifier for later serving
pickle.dump(model, open('kidney.pkl', 'wb'))
| Notebooks/Kidney_Disease_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import gscholar
# published Google Sheet of external references, exported as CSV
exrefs = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQYWiNWT5yI-Wgl1po1oWf9CIkU_9oCLSP7mKRU5h8Lftrl1FBntcsW19nYXauwSbArApbQ_8WqFqqu/pub?output=csv'
refs = pd.read_csv(exrefs)
# look up one reference's title on Google Scholar (BibTeX result list)
res = gscholar.query("title:"+refs['Title'][10])
refs['Title'][10]
# +
# smoke test of the gscholar API with a simple query
import gscholar
gscholar.query("einstein")
| general_code/citation_extraction/scrape_google_scholar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Imports
from collections import Counter
import numpy as np
import numpy.linalg as LA
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
# %matplotlib inline
# +
#Load the data
iris = datasets.load_iris()
#convert to dataframe
df = pd.DataFrame(iris.data, columns = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'])
df.insert(4,'Target',iris.target)
# target = [0,1,2] corresponds to Setosa, Versicolour, and Virginica, resp.
#get data for classification
# only the two sepal features are used so the decision boundary is 2-D
X = df[['Sepal Length','Sepal Width']].values
y = df['Target']
# +
#sklearn KNN -- reference classifier to compare the scratch version against
n_neighbors = 15
norm_degree = 2
clf = KNeighborsClassifier(n_neighbors, weights='uniform', p=norm_degree)
clf.fit(X, y)
# -
#kNN from scratch
def knn_predict(X,y,x_new,p,n_neighbors):
    """Predict the class of *x_new* by majority vote among its
    n_neighbors nearest rows of X under the L^p norm.

    X : (n, d) array of training points; y : length-n array of labels.
    """
    n = len(X)
    # distance from x_new to every training point
    _dist = [LA.norm(x_new - X[ii, :], p) for ii in range(0, n)]
    # indices of the n_neighbors smallest distances
    # NOTE(review): .index() returns the first match, so exactly tied
    # distances can select the same index twice -- acceptable here.
    _idxs = [_dist.index(d) for d in sorted(_dist)[:n_neighbors]]
    # labels of the nearest neighbours
    _y = y[_idxs]
    # Fix: return the majority class label itself --
    # max(Counter(_y).values()) returned how many times the winning
    # class occurred, never the class.
    return Counter(_y).most_common(1)[0][0]
# +
#plot results
#step size of mesh
h = 0.02
#colormaps
cmap_light = ListedColormap(['orange', 'cyan', 'cornflowerblue'])
cmap_bold = ['darkorange', 'c', 'darkblue']
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# predict every grid point with the sklearn classifier
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
#contour plot
Z = Z.reshape(xx.shape)
plt.figure(figsize=(8, 6))
plt.contourf(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=iris.target_names[y],
palette=cmap_bold, alpha=1.0, edgecolor="black")
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("sklearn classification (k = %i)"
% (n_neighbors))
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1])
# +
#predict classes from scratch for plotting
#this takes a while: one knn_predict call per grid point (O(grid * n))
Z_scratch = np.zeros(shape=xx.shape)
x_dim,y_dim = Z_scratch.shape
for ii in range(0,x_dim):
    for jj in range(0,y_dim):
        x_new = np.array([xx[ii,jj],yy[ii,jj]])
        Z_scratch[ii,jj] = knn_predict(X,y,x_new,norm_degree,n_neighbors)
# +
#scratch - plot results
plt.figure(figsize=(8, 6))
# Fix: plot the from-scratch predictions (Z_scratch) -- the original
# reused sklearn's Z, so this figure duplicated the previous plot.
plt.contourf(xx, yy, Z_scratch, cmap=cmap_light)
# Plot also the training points
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=iris.target_names[y],
palette=cmap_bold, alpha=1.0, edgecolor="black")
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("Scratch classification (k = %i)"
% (n_neighbors))
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1])
| KNN_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ds_env
# language: python
# name: ds_env
# ---
# <h1>Dataset</h1>
# <p>We begin by augmenting the 10 hours of train data (on average 6 secs each) yielding 6k training samples total. We set the sampling rate for our audio files to industry standard 16kHz (8kHz is minimum for voice). In order to input into our network, we derive important features by taking short fourier transforms in a window, and convert it into speech relevant Mel Frequency Cepstral Coefficients and derive its delta and delta delta, which in research has shown to improve speech recognition tasks. To achieve this, we set transformation parameters corresponding to industry standards for 16kHz sampling rate 23ms for n_fft (~512 frames), window size of 25ms (400 frames), and hop length of 10ms (160 frames). From this, we derive 13 mffc coefficients, and 26 for the delta and delta delta.</p>
#
# <p>Much like in image recognition, it is good practice to augment the data. Here, there are two augmentation functions used: a random duration dilation (+/-25%), and random shift amount (within the maximum allowed sequence length of 1001, 1 + 10 secs * 16kHz / 160 frames per hop). Then we can perform cepstral mean normalization across the channels. Additionally, if there were more time, we can also implement random dropping of frequency information (zero it out), which has also been shown to improve robustness in training.</p>
#
# <p>The most important aspect of training with a custom CTC loss function is to reorganize how inputs are passed into our network, and what inputs are passed. CTC loss requires additional information, like how long the actual labels are (before padding), so it can mask and compare the predictions to them. In addition to the training data, and labels, which must be padded, we have to provide a list of label lengths in the shape of (batch_size, 1), whose values are of each corresponding label's actual length (e.g if labels = [['dog'], ['monkey']] before padding, then label_length = [[3], [6]]. Secondly, we must provide the input_length which is also in the shape of (batch_size, 1), and whose values are the number of timesteps which will be outputted for the ctc loss. This is an important distinction because if there are maxpool layers which reduce the sequence size, the timestep value will change.<p>
# +
# Build train/val pipelines. Per the notes above, features are 13 MFCCs
# plus delta and delta-delta (39 dims) at 16 kHz sampling.
import compose_dataset
train_fp = './en/train_clean.csv'
val_fp = './en/val_clean.csv'
test_fp = './en/test_clean.csv'
train_datagen = compose_dataset.Datagen(train_fp)
val_datagen = compose_dataset.Datagen(val_fp)
# Use augmentations for training data
augments = [train_datagen.speed_aug, train_datagen.shift_aug]
# .repeat() cycles the small training set indefinitely for model.fit
train_data = train_datagen.tensor_batch(augments=augments).repeat()
val_data = val_datagen.tensor_batch()
# -
# <h1>Model</h1>
# <p>Model is straight forward, a couple of CNN layers for feature extraction and the typical batchnorm across the channels in addition to dropout regularization layers. We have to be careful how many maxpool layers we add in because the final number of timesteps must exceed 2 * max_label_length + 1. Here our max_label_length is 100, and our starting timesteps is 1001, so we can't exceed more than two maxpool layers (reducing us to 250 timesteps after). We then chain this with stacked rnns before the final softmax activation layer. Finally, this is sent into a custom loss calculation.</p>
#
# <p>With more time, we can customize better loss functions with better scheduled learning rates in addition to a host of other hyperparameter tuning techniques.</p>
# +
import ctc_model
# hyperparameters; timesteps must satisfy the CTC constraint discussed
# above (output timesteps after pooling > 2 * max_seq_length + 1)
args = {'max_chars': 28,
        'num_features': 39,
        'lr': 2e-4,
        'max_seq_length': 100,
        'timesteps': 1001,
        'drop_rate': .2}
base_model, model = ctc_model.crnn_model(args)
# -
# <h1>Training</h1>
# <p>Training is perhaps the most challenging aspect because CTC loss can stagnate for various reasons (not least that training a language model requires many examples), but even if correctly implemented, tends to predict increasingly more blanks at the start of training, until it only predicts one character at a time. This makes it difficult to diagnose if the problem is related to the data inputs, the ctc cost function itself, or various other minor details that can occur within the network architecture.</p>
#
# <p>In this example, we can see that training proceeds as expected (after lots of debugging). We regularized our network well, so for the most part, we expect a well behaved loss function (not very jittery, and mostly monotonically decreasing). Our training settings are set at 30 steps per epoch with 32 batches per step. Therefore, we will exhaust the entirety of our training samples after 6 steps. Even with augmentation, it's expected that training on this is going to be greatly underfit. We can see both loss and val_loss decreasing nicely for this small sample, and validation loss is even under training loss. Definitely a sign of underfitting. We are on the right track!</p>
# +
from custom_plot import PlotLosses
# 40 epochs x 30 steps x batch 32; train_data repeats, so the 6k-sample
# training set is exhausted after ~6 steps and then cycled
history = model.fit(train_data,
                    validation_data=val_data,
                    epochs=40,
                    steps_per_epoch=30,
                    validation_steps=1,
                    callbacks=[PlotLosses()])
# -
# <h1>Inference</h1>
# <p>Checking some predictions in inference, we can see that the network is starting to predict the first words as "the". Our training set happens to have a few that begin with 't'. Even though early stages of training yielded lots of blanks (-1 labels), the network is progressively expanding its sequence guesses.</p>
# +
import tensorflow as tf
import numpy as np
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# NOTE(review): this vocab string looks truncated/redacted ("<KEY>"
# placeholder) -- restore the full character set before running.
vocab = " '<KEY>"
decoder = dict(enumerate(vocab))
test_datagen = compose_dataset.Datagen(test_fp)
test_data = test_datagen.tensor_batch()
# beam-search decode each batch and print prediction vs. reference
for feats, _ in test_data:
    y_pred = base_model.predict(feats)
    labels = feats[1]
    ctc_decodes = tf.keras.backend.ctc_decode(y_pred, greedy=False, beam_width=3, top_paths=3,
                                 input_length=np.ones(y_pred.shape[0]) * y_pred.shape[1])[0]
    for i, example in enumerate(ctc_decodes[0].numpy()):
        pred = example[:100]
        actual = labels.numpy()[i]
        # -1 entries are CTC blanks; drop them before mapping to characters
        print(''.join([decoder[i] for i in pred if i != -1]))
        print(pred)
        print(actual)
        print('')
| ASR with CRNN and CTC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.trainer.trainer import Trainer
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# create Maze dataset, each row is a maze instance
class MazeDataset(Dataset):
    """Synthetic plus-maze dataset.

    Each sample is a float32 vector of `num_nodes` node rewards: one
    center node plus four branches of equal length.  Nodes are labelled
    'center', 'inner' (first node of a branch), 'outer' (last node) or
    'other', and `var_condition` decides where the high-variance reward
    values are placed:

      'constant'   -- same value set on every non-center node
      'increasing' -- high-variance values on the outer nodes
      'decreasing' -- high-variance values on the inner nodes
    """
    def __init__(self, num_mazes=1000, num_nodes=21, mode='value_only', var_condition='constant', config=None):
        self.num_mazes = num_mazes
        self.num_nodes = num_nodes
        self.mode = mode
        # Fix: keep the caller's argument -- the original hard-coded
        # 'constant' here, silently discarding `var_condition` (the
        # generation below used the local variable, so only the stored
        # attribute was wrong).
        self.var_condition = var_condition
        self.config = config
        self.rng = np.random.default_rng(config["ENV_RANDOM_SEED"])
        # candidate reward values drawn for each node type
        self.variance = {
            'constant': (5.0, -5.0, 10.0, -10.0),
            'outer': (20.0, 20.0, -40.0),
            'inner': (1.0, 1.0, 1.0, 20.0, -20.0),
            'other': (1.0, -1.0)
        }
        # plus-maze layout: 1 center node + 4 equal-length branches
        assert num_nodes % 4 == 1
        len_branch = (num_nodes-1) // 4
        self.node_type = np.array(['center'] + ['other'] * (num_nodes-1))
        self.node_type[1::len_branch] = ['inner'] * 4
        # NOTE(review): the start index 5 assumes len_branch == 5
        # (i.e. num_nodes == 21) -- confirm before using other sizes.
        self.node_type[5::len_branch] = ['outer'] * 4
        if self.mode == 'value_only':
            self.mazes = np.zeros((num_mazes, num_nodes), dtype=np.float32)
            if var_condition == 'constant':
                self.mazes[:, 1:] = self.rng.choice(self.variance['constant'], size=(num_mazes, num_nodes-1))
            elif var_condition == 'increasing':
                other_nodes = ((self.node_type == 'inner') | (self.node_type == 'other'))
                self.mazes[:, other_nodes] = \
                    self.rng.choice(self.variance['other'], size=(num_mazes, sum(other_nodes)))
                self.mazes[:, self.node_type == 'outer'] = \
                    self.rng.choice(self.variance['outer'], size=(num_mazes, sum(self.node_type == 'outer')))
            elif var_condition == 'decreasing':
                other_nodes = ((self.node_type == 'outer') | (self.node_type == 'other'))
                self.mazes[:, other_nodes] = \
                    self.rng.choice(self.variance['other'], size=(num_mazes, sum(other_nodes)))
                self.mazes[:, self.node_type == 'inner'] = \
                    self.rng.choice(self.variance['inner'], size=(num_mazes, sum(self.node_type == 'inner')))
            else:
                print('variance condition not recognized.')
        else:
            print('mode not recognized or supported.')
        # something not in the original experiment: small Gaussian jitter
        # on every node reward
        self.mazes += self.rng.normal(0, 0.5, size=self.mazes.shape)

    def __len__(self):
        """Number of mazes in the dataset."""
        return self.mazes.shape[0]

    def __getitem__(self, index):
        """Reward vector for maze `index` (float32, shape (num_nodes,))."""
        return self.mazes[index]
class VAE(pl.LightningModule):
"""VAE model that reconstructs the input using MSELoss."""
def __init__(self, num_hidden=[10], num_gaussian_dim=5, input_len=21, dropout_rate=0.1, reg_lambda=0.001):
assert len(num_hidden) > 0
super().__init__()
self.num_hidden = num_hidden
self.dropout_rate = dropout_rate
self.num_gaussian_dim = num_gaussian_dim
self.reg_lambda=reg_lambda
encoder_layers = [input_len] + num_hidden + [2*num_gaussian_dim]
enc = []
for i in range(1, len(encoder_layers)):
enc.append(nn.Linear(encoder_layers[i-1], encoder_layers[i]))
enc.append(nn.ReLU())
enc.append(nn.Dropout(p=self.dropout_rate))
self.encoder = nn.Sequential(*enc[:-2])
decoder_layers = [num_gaussian_dim] + num_hidden[::-1] + [input_len]
dec = []
for i in range(1, len(decoder_layers)):
dec.append(nn.Linear(decoder_layers[i-1], decoder_layers[i]))
dec.append(nn.ReLU())
self.decoder = nn.Sequential(*dec[:-1])
self.mu, self.log_var = 0, 0
def forward(self, x, verbose=False):
self.mu, self.log_var = torch.split(self.encoder(x), self.num_gaussian_dim, dim=1)
std = torch.exp(self.log_var / 2)
q = torch.distributions.Normal(self.mu, std)
z = q.sample().squeeze(1)
kl = torch.mean(-0.5 * torch.sum(1 + self.log_var - self.mu ** 2 - self.log_var.exp(), dim=1), dim=0)
if verbose:
print('mu: ', self.mu)
print('std: ', std)
print('z: ', z)
return self.decoder(z), kl
def step(self, batch):
input_reconstructed, kl = self.forward(batch)
mse = F.mse_loss(input_reconstructed, batch)
loss = mse + (self.reg_lambda*kl if kl > 0.001 else 0)
return loss, mse, input_reconstructed, kl
def training_step(self, batch, batch_idx):
loss, mse, _, _ = self.step(batch)
self.log('train_loss', loss, on_step=False, on_epoch=True, logger=True)
self.log('train_rmse', torch.sqrt(mse), on_step=False, on_epoch=True, logger=True, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
loss, mse, input_reconstructed, kl = self.step(batch)
self.log('val_loss', loss, on_step=False, on_epoch=True, logger=True)
self.log('val_rmse', torch.sqrt(mse), on_step=False, on_epoch=True, logger=True, prog_bar=True)
self.log('val_kl', kl, on_step=False, on_epoch=True, logger=True, prog_bar=True)
def test_step(self, batch, batch_idx):
loss, mse, _, _ = self.step(batch)
self.log('test_loss', loss, on_step=False, on_epoch=True, logger=True)
self.log('test_rmse', torch.sqrt(mse), on_step=False, on_epoch=True, logger=True, prog_bar=True)
def configure_optimizers(self):
    """Lightning hook: a single Adam optimizer over all model parameters."""
    return torch.optim.Adam(self.parameters(), lr=0.002)
# +
# Fixed seed so maze generation and the train/val split are reproducible.
config = {'ENV_RANDOM_SEED': 2}
dataset = MazeDataset(num_mazes=1000, var_condition='increasing', config=config)
# 90/10 train/validation split with a seeded generator.
train_set, val_set = random_split(dataset, [900, 100], generator=torch.Generator().manual_seed(2))
train_loader = DataLoader(train_set, num_workers=4, batch_size=50)
val_loader = DataLoader(val_set, num_workers=4, batch_size=50)
# Single 1024-unit hidden layer, 10-dimensional latent space.
model = VAE(num_hidden=[1024], num_gaussian_dim=10, dropout_rate=0.5, reg_lambda=1e-2)
tb_logger = pl_loggers.TensorBoardLogger('logs/')
# NOTE(review): the logger is passed positionally; newer pytorch-lightning
# versions require Trainer(logger=tb_logger, ...) — confirm the PL version used.
trainer = Trainer(tb_logger, max_epochs=50)
trainer.fit(model, train_loader, val_loader)
# -
# Sanity check: reconstruct the first two mazes with verbose latent printout.
model.forward(torch.tensor(dataset[0:2]), verbose=True)
| maze_reconstruction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] jupyter={"outputs_hidden": true}
# # Shapes input data analysis
# -
# ### What do we know about the dataset
# * Dataset contains 10000 small `28px * 28px * 1bit` images.
# * There are 6 classes of objects - square, circle and triangle which can be pointed up, down, left or right.
# * Every image consists of two classes of geometric shapes.
# * There are always ten shapes on every image.
# * All of the shapes are xor'ed.
# +
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datasets.shapes_dataset import ShapesClassificationDataset
from datasets.transformers import RandomVerticalFlip, RandomHorizontalFlip, RandomRightRotation
# -
# Per-image shape labels: one row per image file.
df = pd.read_csv('data/labels.csv')
df
df.describe()
# +
def plot_distribution_of_results(df):
    """Draw one pie chart per label column showing its value distribution.

    The 'name' (file-name) column is excluded; every other column gets a
    pie in a 2x3 grid annotated with percentages.
    """
    fig, axes = plt.subplots(2, 3, figsize=(20,10))
    axe = axes.ravel()
    for i, c in enumerate(df.drop(columns=['name']).columns):
        df.groupby([c]).size().plot(kind='pie', autopct='%.2f', ax=axe[i], title=c, fontsize=10)
        axe[i].legend(loc=3)
        # Clear the axis labels pandas sets automatically for pie plots.
        plt.ylabel("")
        plt.xlabel("")
    plt.show()

plot_distribution_of_results(df)
# -
# Looking at the plots we can assume that there's nothing wrong with the data, but to be sure let's look at the distribution after splitting the dataset.
# ### Training data
plot_distribution_of_results(df[:9000])
# ### Validation data
plot_distribution_of_results(df[9000:10000])
# ### Let's try our custom dataset for classification
# Raw dataset: 28x28 1-bit images with multi-label shape annotations, no transforms.
dataset_raw = ShapesClassificationDataset(
    "data/labels.csv",
    "data/images",
)
def draw_example(dataset):
    """Print the targets and display the first six samples of *dataset* in a row."""
    fig = plt.figure(figsize=(15, 15))
    for i in range(len(dataset)):
        image, target = dataset[i]
        print(f"Target ({i}): ", target)
        ax = plt.subplot(1, 6, i + 1)
        plt.tight_layout()
        ax.set_title(f'Sample #{i}')
        ax.axis('off')
        plt.imshow(image)
        # Stop after six samples; show() renders the assembled figure.
        if i == 5:
            plt.show()
            break

draw_example(dataset_raw)
# ### Let's see the rotation
# Variant with a forced (p=1) right-angle rotation applied to every sample.
dataset_rotation = ShapesClassificationDataset(
    "data/labels.csv",
    "data/images",
    transform_all=transforms.Compose([RandomRightRotation(1)])
)
draw_example(dataset_rotation)
# ### Horizontal flip
# Variant with a forced (p=1) horizontal flip.
dataset_horizontal = ShapesClassificationDataset(
    "data/labels.csv",
    "data/images",
    transform_all=transforms.Compose([RandomHorizontalFlip(1)])
)
draw_example(dataset_horizontal)
# ### And the vertical flip
# Variant with a forced (p=1) vertical flip.
dataset_vertical = ShapesClassificationDataset(
    "data/labels.csv",
    "data/images",
    transform_all=transforms.Compose([RandomVerticalFlip(1)])
)
draw_example(dataset_vertical)
# Everything looks good, with these 3 operations we can achieve all possible arragments
| data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# -
# load in the dataset into a pandas dataframe
diamonds = pd.read_csv('./data/diamonds.csv')
# +
# convert cut, color, and clarity into ordered categorical types
# (levels listed from worst to best so comparisons/sorting follow quality)
ordinal_var_dict = {'cut': ['Fair','Good','Very Good','Premium','Ideal'],
                    'color': ['J', 'I', 'H', 'G', 'F', 'E', 'D'],
                    'clarity': ['I1', 'SI2', 'SI1', 'VS2', 'VS1', 'VVS2', 'VVS1', 'IF']}
for var in ordinal_var_dict:
    # The CategoricalDtype API only exists from pandas 0.21 on, hence the
    # version check with a legacy fallback.
    pd_ver = pd.__version__.split(".")
    if (int(pd_ver[0]) > 0) or (int(pd_ver[1]) >= 21): # v0.21 or later
        ordered_var = pd.api.types.CategoricalDtype(ordered = True,
                                                    categories = ordinal_var_dict[var])
        diamonds[var] = diamonds[var].astype(ordered_var)
    else: # pre-v0.21
        diamonds[var] = diamonds[var].astype('category', ordered = True,
                                             categories = ordinal_var_dict[var])
# -
# ## Multivariate Exploration
#
# In the previous workspace, you looked at various bivariate relationships. You saw that the log of price was approximately linearly related to the cube root of carat weight, as analogy to its length, width, and depth. You also saw that there was an unintuitive relationship between price and the categorical quality measures of cut, color, and clarity, that the median price decreased with increasing quality. Investigating the distributions more clearly and looking at the relationship between carat weight with the three categorical variables showed that this was due to carat size tending to be smaller for the diamonds with higher categorical grades.
#
# The goal of this workspace will be to depict these interaction effects through the use of multivariate plots.
#
# To start off with, create a plot of the relationship between price, carat, and clarity. In the previous workspace, you saw that clarity had the clearest interactions with price and carat. How clearly does this show up in a multivariate visualization?
# +
def cube_trans(x, inverse=False):
    """Cube-root transform (and its inverse) used to linearise carat vs. price.

    With inverse=False, return the cube root of *x*;
    with inverse=True, undo the transform by cubing *x*.
    """
    if inverse:
        return x**3
    return np.cbrt(x)
# New feature: cube root of carat, roughly linear against log-price.
diamonds['carat_cube'] = diamonds['carat'].apply(cube_trans)
# -
# multivariate plot of price by carat weight, and clarity
# One overlaid scatter layer per clarity grade, log-scaled price axis.
g = sb.FacetGrid(data=diamonds, hue='clarity', height=5)
g = g.map(sb.regplot, 'carat_cube', 'price', fit_reg=False)
plt.yscale('log')
y_ticks = [300, 800, 2000, 4000, 10000, 20000]
plt.yticks(y_ticks, y_ticks)
g.add_legend();
# Price by Carat and Clarity Comment 1: <span style="color:black">With two numeric variables and one categorical variable, there are two main plot types that make sense. A scatterplot with points colored by clarity level makes sense on paper, but the sheer number of points causes overplotting that suggests a different plot type. A faceted scatterplot or heat map is a better choice in this case.</span>
# Same relationship, faceted: one scatter panel per clarity grade
# (alpha blending mitigates overplotting).
g = sb.FacetGrid(data=diamonds, col='clarity')
g.map(plt.scatter, 'carat_cube', 'price', alpha=1/5)
plt.yscale('log');
# Price by Carat and Clarity Comment 2: <span style="color:black">You should see across facets the general movement of the points upwards and to the left, corresponding with smaller diamond sizes, but higher value for their sizes. As a final comment, did you remember to apply transformation functions to the price and carat values?</span>
# Let's try a different plot, for diamond price against cut and color quality features. To avoid the trap of higher quality grades being associated with smaller diamonds, and thus lower prices, we should focus our visualization on only a small range of diamond weights. For this plot, select diamonds in a small range around 1 carat weight. Try to make it so that your plot shows the effect of each of these categorical variables on the price of diamonds.
# Restrict to ~1-carat diamonds so size no longer confounds price.
diamonds_flag = (diamonds['carat'] >= 0.99) & (diamonds['carat'] <= 1.03)
diamonds_sub = diamonds.loc[diamonds_flag,:]
diamonds_sub['cut'].unique()
diamonds_sub['color'].unique()
# Point plot: price by color, one line per cut grade, log price axis.
sb.pointplot(data=diamonds_sub, x='color', y='price', hue='cut', palette='mako')
plt.yscale('log')
plt.yticks([3000, 4000, 6000], ['3K', '4K', '6K']);
# multivariate plot of price by cut and color, for approx. 1 carat diamonds
plt.figure(figsize=(10,6))
sb.boxplot(data=diamonds_sub, x='color', y='price', hue='cut', palette='mako')
plt.yscale('log')
plt.yticks([3000, 4000, 6000, 10000], ['3K', '4K', '6K', '10K']);
# Price by Cut and Color Comment 1: <span style="color:black">There's a lot of ways that you could plot one numeric variable against two categorical variables. I think that the clustered box plot or the clustered point plot are the best choices in this case. With the number of category combinations to be plotted (7x5 = 35), it's hard to make full sense of a violin plot's narrow areas; simplicity is better. A clustered bar chart could work, but considering that price should be on a log scale, there isn't really a nice baseline that would work well.</span>
# Price by Cut and Color Comment 2: <span style="color:black">Assuming you went with a clustered plot approach, you should see a gradual increase in price across the main x-value clusters, as well as generally upwards trends within each cluster for the third variable. Aesthetically, did you remember to choose a sequential color scheme for whichever variable you chose for your third variable, to override the default qualitative scheme? If you chose a point plot, did you set a dodge parameter to spread the clusters out? </span>
| diamonds_multivariate_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Creazione di Particelle e Risonanze
# In questo notebook saranno calcolati i valori di massa invariante per alcune coppie di muoni rilevati in CMS. Sarà fatto un istogramma con i valori di massa invariante calcolati. Infine l'istogramma verrà fittato con una __Breit-Wigner__. Con il fit della Breit-Wigner sarà possibile determinare la massa e il tempo di vita del __bosone Z__.
# ## Creare l'istogramma delle masse invarianti
# Facciamo un istogramma dai valori di massa invariante calcolati. L'istogramma descrive come i valori sono distribuiti, cioè quanti valori ci sono stati in ogni bin dell'istogramma.
# #### Creare l'istogramma
# Gli istogrammi possono essere creati in Python con il modulo _matplotlib.pyplot_ che è stato importato prima e chiamato _plt_. Con la funzione `plt.hist()` è possibile creare un istogramma dando diversi parametri all'interno delle parentesi. Questi parametri possono essere esaminati da https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html.
#
# Ora sono necessari solo i primi tre parametri: una variabile dai cui valori viene creato l'istogramma (_x)_, il numero di bins (_bins_) e l'intervallo inferiore e superiore dei bins (_range_).
#
# Scrivete un codice che crei un istogramma a partire dai valori di massa invariante che sono stati calcolati. Poiché questo esercizio si concentra sul __bosone Z__, impostate saggiamente l'intervallo per ottenere i valori vicini alla massa del __bosone Z__.
#
# Prova qual è il numero di bins ottimale per fare un istogramma chiaro. Puoi provare diversi valori e vedere come influiscono sull'istogramma.
#
# Nel codice ci sono già delle linee per nominare gli assi e il titolo dell'istogramma.
#
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the CMS dimuon event data.
ds = pd.read_csv('DoubleMuRun2011A.csv')
# Extract the invariant mass of each muon pair.
invariant_mass = ds.M
# Write down there a code that will create the histogram.
# Mass window chosen around the Z boson (~91 GeV).
lowerlimit = 70
upperlimit = 120
bins = 100
# Keep only invariant-mass values inside the chosen window.
limitedmasses = invariant_mass[(invariant_mass > lowerlimit) & (invariant_mass < upperlimit)]
# Build the histogram of the selected values.
istogramma = plt.hist(limitedmasses, bins=bins)
# Label the axes and set the title (user-facing labels kept in Italian).
plt.xlabel('Massa invariante [GeV]')
plt.ylabel('Numero di eventi')
plt.title('Istogramma dei valori di massa invariante di due muoni. \n')
plt.show()
# -
# ### Domanda 1
# Descrivi l'istogramma. Quali informazioni se ne possono ottenere?
# ## Fit dell'istogramma
# Per ottenere informazioni sulla massa e sul tempo di vita della risonanza rilevata, una funzione che descrive la distribuzione delle masse invarianti deve essere adattata ai valori dell'istogramma. Nel nostro caso i valori seguono una distribuzione di Breit-Wigner:
#
# $$
# N(E) = \frac{K}{(E-M)^2 + \frac{\Gamma^2}{4}},
# $$
#
# dove $E$ è l'energia, $M$ il massimo della distribuzione (uguale alla massa della particella che viene rilevata nella risonanza), $\Gamma$ l'ampiezza completa a metà massimo (FWHM) o la larghezza di decadimento della distribuzione e $K$ una costante.
#
# L'ampiezza del decadimento $\Gamma$ e il tempo di vita $\tau$ della particella rilevata nella risonanza sono correlati nel modo seguente:
#
# $$
# \Gamma \equiv \frac{\hbar}{\tau},
# $$
#
# dove $\hbar$ è la costante di Planck ridotta.
#
# Con il codice seguente è possibile ottimizzare una funzione che rappresenta la distribuzione di Breit-Wigner ai valori dell'istogramma. La funzione è già scritta nel codice. Il vostro compito è ora quello di capire quali potrebbero essere approssimativamente i valori del massimo della distribuzione $M$ e la larghezza completa a metà della distribuzione $\Gamma$. L'istogramma che è stato creato prima vi aiuterà in questo compito.
#
# Scrivete queste ipotesi iniziali nel codice nella linea `initials = [#IL VALORE INIZIALE PER GAMMA, #IL VALORE INIZIALE PER M, -2, 200, 13000]`. In altre parole sostituite i due commenti in quella linea con i valori che avete ricavato.
#
# Notate che le ipotesi iniziali per i parametri _a, b_ e _A_ sono già state date. Altri commenti nel codice possono essere lasciati intatti. Da essi è possibile ottenere informazioni su ciò che sta accadendo nel codice.
#
# Dopo aver eseguito il codice Jupyter stamperà i valori dei diversi parametri come risultato dell'ottimizzazione. Anche le incertezze dei valori e un grafico della funzione adattata sono stampati. Le incertezze saranno ricevute dalla matrice di covarianza che la funzione di adattamento `curve_fit` restituirà.
# +
# %matplotlib inline
import numpy as np
# Restrict the fit to a window around the histogram peak.
lowerlimit = 80
upperlimit = 100
bins = 100
# Keep only invariant-mass values inside the fit window.
limitedmasses = invariant_mass[(invariant_mass > lowerlimit) & (invariant_mass < upperlimit)]
# Histogram of the selected values: returns (counts, bin_edges, patches).
istogramma = plt.hist(limitedmasses, bins=bins, range=(lowerlimit,upperlimit))
# y: event count per bin (from the histogram object).
# x: bin centres, i.e. midpoints of consecutive bin edges.
y = istogramma[0]
x = 0.5*( istogramma[1][0:-1] + istogramma[1][1:] )
# Define the function describing the Breit-Wigner distribution for the fit.
# E is the energy, gamma the decay width, M the maximum of the distribution,
# and a, b and A extra parameters that absorb the effect of background
# events on the fit.
def breitwigner(E, gamma, M, a, b, A):
    """Relativistic Breit-Wigner resonance shape on a linear background.

    E     : energy value(s) where the curve is evaluated
    gamma : decay width (FWHM) of the resonance
    M     : position of the peak (mass of the resonance)
    a, b  : slope and intercept of the linear background term a*E + b
    A     : overall height of the Breit-Wigner component
    """
    # Normalisation factor of the relativistic Breit-Wigner distribution.
    mass_term = np.sqrt(M**2*(M**2+gamma**2))
    k = (2*np.sqrt(2)*M*gamma*mass_term)/(np.pi*np.sqrt(M**2+mass_term))
    # Linear background plus the resonance peak.
    return a*E + b + A*k/((E**2-M**2)**2+M**2*gamma**2)
# Initial values for the optimisation, in the following order:
# gamma (the full width at half maximum, FWHM, of the distribution)
# M (the maximum of the distribution)
# a (the slope used to absorb the background)
# b (the y-intercept used to absorb the background)
# A (the "height" of the Breit-Wigner distribution)
iniziali = [100, 80, -2, 200, 13000]
# Import the optimisation routine, run the fit, and derive the parameter
# uncertainties from the diagonal of the covariance matrix.
from scipy.optimize import curve_fit
params, covariance = curve_fit(breitwigner, x, y, p0=iniziali)
errore = np.sqrt(np.diag(covariance))
gamma = params[0]
M = params[1]
a = params[2]
b = params[3]
A = params[4]
# NOTE(review): the value printed as tau is 1/gamma, i.e. tau = hbar/Gamma
# in natural units (hbar = 1) — confirm that is the intent.
print("\n\u03C4 = ", 1./gamma)
print("\nM = ", M)
print("\na = ", a)
print("\nb = ", b)
print("\nA = ", A)
# Overlay the fitted curve on the bin centres.
plt.plot(x, breitwigner(x, gamma, M, a, b, A))
plt.xlabel('Massa invariante [GeV]')
plt.ylabel('Numero di eventi')
plt.title('Fit di Breit-Wigner')
plt.show()
# -
# #### Ocho 1:
# Se la funzione adattata non segue bene l'istogramma, torna indietro e controlla i valori di partenza.
# #### Ocho 2:
# Nel fit viene preso in considerazione il cosiddetto background della distribuzione di massa. Il background consiste fondamentalmente coppie di muoni che provengono da altri processi di decadimento che dal decadimento del bosone Z. Il background è preso in considerazione nel codice nella linea che segue il comando `def breitwigner`. Il fit è adattato al background con il termine `a*E+b`, corrispondente ad un background lineare.
# ## Analisi dell'istogramma
# ### Domanda 2
# Cosa puoi dire sull'aspetto del bosone Z sulla base dell'istogramma e della funzione adattata?
#
# Puoi definire la massa dello Z con l'incertezza? Come?
#
# Eventualmente, spiega le tue risposte con l'aiuto di fonti esterne.
# ### Domanda 3
# Calcola il tempo di vita $\tau$ del bosone Z usando il fit.
#
# Confronta il valore calcolato con il tempo di vita noto del bosone Z. Cosa noti? Cosa potrebbe spiegare le tue osservazioni?
#
# **************************************************
#
#
#
# # E QUI LE NOSTRE STRADE SI DIVIDONO... AD MAIORA!
# <img src="https://i.imgur.com/Be6cpLh.gif"
# alt="Coding">
| Exercises-with-open-data/Risonanze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !rm -r ./data/
# !mkdir data
# +
import time
import numpy as np
import pandas as pd
# -
# Create an example dataframe
# ===
# Ten years of daily data for 3000 synthetic securities; dates as the index.
days = 365*10
n_securities = 3000
df_wide = pd.DataFrame(data=np.random.rand(days, n_securities), index=pd.date_range('2000', periods=days))
df_wide.columns = ['security_{}'.format(i) for i in range(1, n_securities+1)]
df_wide.head()
# Using HDF5 (fixed) to read/write data
# ===
# %time df_wide.to_hdf('data/fixed_wide.hdf', key='wide1')
# %time rb_wide = pd.read_hdf('data/fixed_wide.hdf', key='wide1')
# !du -h data/fixed_wide.hdf
# +
#wide, most compression
# -
# %time df_wide.to_hdf('data/fixed_wide_cmp.hdf', key='wide', complevel=9, complib='blosc')
# %time rb_wide = pd.read_hdf('data/fixed_wide_cmp.hdf', key='wide')
# !du -h data/fixed_wide_cmp.hdf
# reshape wide to tall
# %time df_tall = df_wide.stack().reset_index().rename(columns={'level_0': 'date', 'level_1': 'security_id', 0: 'vals'})
# +
# tall, no compression
# -
# %time df_tall.to_hdf('data/fixed_tall.hdf', key='tall')
# %time rb_tall = pd.read_hdf('data/fixed_tall.hdf', key='tall')
# !du -h data/fixed_tall.hdf
# +
# tall, most compression
# -
# %time df_tall.to_hdf('data/fixed_tall_cmp.hdf', key='tall', complevel=9, complib='blosc')
# %time rb_tall = pd.read_hdf('data/fixed_tall_cmp.hdf', key='tall')
# !du -h data/fixed_tall_cmp.hdf
# Using HDF5 (tables) to read/write data
# ===
# %time df_wide.to_hdf('data/tables_wide.hdf', key='wide', format='table')
# %time rb_wide = pd.read_hdf('data/tables_wide.hdf', key='wide')
# !du -h data/tables_wide.hdf
# %time df_tall.to_hdf('data/tables_tall.hdf', key='tall', format='table')
# %time rb_tall = pd.read_hdf('data/tables_tall.hdf', key='tall')
# !du -h data/tables_tall.hdf
| ten_years_of_daily_data_for_3k_instruments-hdf5.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.0
# language: julia
# name: julia-0.6
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ### Numerical Integration
#
# **ScPo Computational Economics 2018**
# + [markdown] slideshow={"slide_type": "slide"}
# ## Numerical Approximation of Integrals
#
# * We will focus on methods that represent integrals as weighted sums.
# * The typical representation will look like:
# $$ E[G(\epsilon)] = \int_{\mathbb{R}^N} G(\epsilon) w(\epsilon) d\epsilon \approx \sum_{j=1}^J \omega_j G(\epsilon_j) $$
# + [markdown] slideshow={"slide_type": "slide"}
# $$ E[G(\epsilon)] = \int_{\mathbb{R}^N} G(\epsilon) w(\epsilon) d\epsilon \approx \sum_{j=1}^J \omega_j G(\epsilon_j) $$
#
# * $N$ is the dimensionality of the integration problem.
# * $G:\mathbb{R}^N \mapsto \mathbb{R}$ is the function we want to integrate wrt $\epsilon \in \mathbb{R}^N$.
# * $w$ is a density function s.t. $\int_{\mathbb{R}^n} w(\epsilon) d\epsilon = 1$.
# * $\omega$ are weights such that (most of the time) $\sum_{j=1}^J \omega_j = 1$.
# * We will look at normal shocks $\epsilon \sim N(0_N,I_N)$
# * in that case, $w(\epsilon) = (2\pi)^{-N/2} \exp \left(-\frac{1}{2}\epsilon^T \epsilon \right)$
# * $I_N$ is the n by n identity matrix, i.e. there is no correlation among the shocks for now.
# * Other random processes will require different weighting functions, but the principle is identical.
# * For now, let's say that $N=1$
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Quadrature Rules
#
# * We focus exclusively on those and leave Simpson and Newton Cowtes formulas out.
# * This is because Quadrature is the method that in many situations gives highes accuracy with lowest computational cost.
# * Quadrature provides a rule to compute weights $w_j$ and nodes $\epsilon_j$.
# * There are many different quadrature rules.
# * They differ in their domain and weighting function.
# * [https://en.wikipedia.org/wiki/Gaussian_quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature)
# * In general, we can convert our function domain to a rule-specific domain with change of variables.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Gauss-Hermite: Expectation of a Normally Distributed Variable
#
# * There are many different rules, all specific to a certain random process.
# * Gauss-Hermite is designed for an integral of the form
# $$ \int_{-\infty}^{+\infty} e^{-x^2} G(x) dx $$
# and where we would approximate
# $$ \int_{-\infty}^{+\infty} e^{-x^2} f(x) dx \approx \sum_{i=1}^n \omega_i G(x_i) $$
# * Now, let's say we want to approximate the expected value of function $f$ when it's argument $z\sim N(\mu,\sigma^2)$:
# $$ E[f(z)] = \int_{-\infty}^{+\infty} \frac{1}{\sigma \sqrt{2\pi}} \exp \left( -\frac{(z-\mu)^2}{2\sigma^2} \right) f(z) dz $$
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
#
# ## Gauss-Hermite: Expectation of a Normally Distributed Variable
#
# * The rule is defined for $x$ however. We need to transform $z$:
# $$ x = \frac{z-\mu}{\sqrt{2}\sigma} \Rightarrow z = \sqrt{2} \sigma x + \mu $$
# * This gives us now (just plug in for $z$)
# $$ E[f(z)] = \int_{-\infty}^{+\infty} \frac{1}{ \sqrt{\pi}} \exp \left( -x^2 \right) f(\sqrt{2} \sigma x + \mu) dx $$
# * And thus, our approximation to this, using weights $\omega_i$ and nodes $x_i$ is
# $$ E[f(z)] \approx \sum_{j=1}^J \frac{1}{\sqrt{\pi}} \omega_j f(\sqrt{2} \sigma x_j + \mu)$$
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Using Quadrature in Julia
#
# * [https://github.com/ajt60gaibb/FastGaussQuadrature.jl](https://github.com/ajt60gaibb/FastGaussQuadrature.jl)
# + attributes={"classes": ["julia"], "id": ""} slideshow={"slide_type": "subslide"}
#Pkg.add("FastGaussQuadrature")
using FastGaussQuadrature
# Number of quadrature nodes per rule.
np = 3
# (nodes, weights) pairs for four classic Gaussian quadrature rules.
rules = Dict("hermite" => gausshermite(np),
"chebyshev" => gausschebyshev(np),
"legendre" => gausslegendre(np),
"lobatto" => gausslobatto(np))
using DataFrames
# Summary table: one row per rule with its node and weight vectors.
integ = DataFrame(Rule=Symbol[Symbol(x) for x in keys(rules)],nodes=[x[1] for x in values(rules)],weights=[x[2] for x in values(rules)])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Quadrature in more dimensions: Product Rule
#
# * If we have $N>1$, we can use the product rule: this just takes the *kronecker product* of all univariate rules.
# * The what?
#
# -
# Kronecker product demo: kron(A,B) and kron(B,A) differ in block layout.
A = [1 2;3 4]
B = [1;10]
kron(A,B)
kron(B,A)
# + [markdown] slideshow={"slide_type": "subslide"}
# * This works well as long as $N$ is not too large. The number of required function evaluations grows exponentially.
# $$ E[G(\epsilon)] = \int_{\mathbb{R}^N} G(\epsilon) w(\epsilon) d\epsilon \approx \sum_{j_1=1}^{J_1} \cdots \sum_{j_N=1}^{J_N} \omega_{j_1}^1 \cdots \omega_{j_N}^N G(\epsilon_{j_1}^1,\dots,\epsilon_{j_N}^N) $$
# where $\omega_{j_1}^1$ stands for weight index $j_1$ in dimension 1, same for $\epsilon$.
# * Total number of nodes: $J=J_1 J_2 \cdots J_N$, and $J_i$ can differ from $J_k$.
#
#
#
# ### Example for $N=3$
#
# * Suppose we have $\epsilon^i \sim N(0,1),i=1,2,3$ as three uncorrelated random variables.
# * Let's take $J=3$ points in all dimensions, so that in total we have $J^N=27$ points.
# * We have the nodes and weights from before in `rules["hermite"]`.
# -
# The three Gauss-Hermite nodes.
rules["hermite"][1]
repeat(rules["hermite"][1],inner=[1],outer=[9])
# + attributes={"classes": ["julia"], "id": ""} slideshow={"slide_type": "subslide"}
# Tensor-product grid: dim1 varies fastest, dim3 slowest (3^3 = 27 points).
nodes = Any[]
push!(nodes,repeat(rules["hermite"][1],inner=[1],outer=[9])) # dim1
push!(nodes,repeat(rules["hermite"][1],inner=[3],outer=[3])) # dim2
push!(nodes,repeat(rules["hermite"][1],inner=[9],outer=[1])) # dim3
# Product-rule weights from a double Kronecker product of the 1-D weights.
weights = kron(rules["hermite"][2],kron(rules["hermite"][2],rules["hermite"][2]))
df = hcat(DataFrame(weights=weights),DataFrame(nodes,[:dim1,:dim2,:dim3]))
# + [markdown] slideshow={"slide_type": "subslide"}
# * Imagine you had a function $g$ defined on those 3 dims: in order to approximate the integral, you would have to evaluate $g$ at all combinations of `dimx`, multiply with the corresponding weight, and sum.
#
#
# ### Alternatives to the Product Rule
#
# * Monomial Rules: They grow only linearly.
# * Please refer to [juddbook] <cite data-cite=juddbook></cite> for more details.
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Monte Carlo Integration
#
# * A widely used method is to just draw $N$ points randomly from the space of the shock $\epsilon$, and to assign equal weights $\omega_j=\frac{1}{N}$ to all of them.
# * The expectation is then
# $$ E[G(\epsilon)] \approx \frac{1}{N} \sum_{j=1}^N G(\epsilon_j) $$
# * This in general a very inefficient method.
# * Particularly in more than 1 dimensions, the number of points needed for good accuracy is very large.
# * Monte Carlo has a rate of convergence of $\mathcal{O}(n^{-0.5})$
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Quasi Monte Carlo Integration
#
# * Uses non-product techniques to construct a grid of uniformly spaced points.
# * The researcher controlls the number of points.
# * We need to construct equidistributed points.
# * Typically one uses a low-discrepancy sequence of points, e.g. the Weyl sequence:
# * $x_n = \{n v\}$ where $v$ is an irrational number and $\{\cdot\}$ stands for the fractional part of a number. For $v=\sqrt{2}$,
# $$ x_1 = \{1 \sqrt{2}\} = \{1.4142\} = 0.4142, x_2 = \{2 \sqrt{2}\} = \{2.8284\} = 0.8284,... $$
# * Other low-discrepancy sequences are Niederreiter, Haber, Baker or Sobol.
# * Quasi Monte Carlo has a rate of convergence of close to $\mathcal{O}(n^{-1})$
# * [The wikipedia entry is good](https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method).
# + attributes={"classes": ["julia"], "id": ""} slideshow={"slide_type": "subslide"}
# Pkg.add("Sobol")
using Sobol
using Plots
# First 1024 points of a 2-D Sobol low-discrepancy sequence,
# hcat'ed into a 1024x2 matrix and scattered.
s = SobolSeq(2)
p = hcat([next(s) for i = 1:1024]...)'
scatter(p[:,1], p[:,2], m=(:red,:dot,1.0),legend=false)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Correlated Shocks
#
# * We often face situations where the shocks are in fact correlated.
# * One very typical case is an AR1 process:
# $$ z_{t+1} = \rho z_t + \varepsilon_t, \varepsilon \sim N(0,\sigma^2) $$
# * The general case is again:
# $$ E[G(\epsilon)] = \int_{\mathbb{R}^N} G(\epsilon) w(\epsilon) d\epsilon \approx \sum_{j_1=1}^{J_1} \cdots \sum_{j_N=1}^{J_N} \omega_{j_1}^1 \cdots \omega_{j_N}^N G(\epsilon_{j_1}^1,\dots,\epsilon_{j_N}^N) $$
# + [markdown] slideshow={"slide_type": "subslide"}
# * Now $\epsilon \sim N(\mu,\Sigma)$ where $\Sigma$ is an N by N variance-covariance matrix.
# * The multivariate density is
# $$w(\epsilon) = (2\pi)^{-N/2} det(\Sigma)^{-1/2} \exp \left(-\frac{1}{2}(\epsilon - \mu)^T (\epsilon - \mu) \right)$$
# * We need to perform a change of variables before we can integrate this.
# * Given $\Sigma$ is symmetric and positive semi-definite, it has a Cholesky decomposition,
# $$ \Sigma = \Omega \Omega^T $$
# where $\Omega$ is a lower-triangular with strictly positive entries.
# * The linear change of variables is then
# $$ v = \Omega^{-1} (\epsilon - \mu) $$
# + [markdown] slideshow={"slide_type": "subslide"}
#
#
# * Plugging this in gives
# $$ \sum_{j=1}^J \omega_j G(\Omega v_j + \mu) \equiv \sum_{j=1}^J \omega_j G(\epsilon_j) $$
# where $v\sim N(0,I_N)$.
# * So, we can follow the exact same steps as with the uncorrelated shocks, but need to adapt the nodes.
#
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
#
#
#
# ## References
#
# * The Integration part of these slides are based on [@maliar-maliar] <cite data-cite=maliarmaliar></cite> chapter 5
| Notebooks/integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Constraining Cosmological Parameters by Using Type Ia Supernova Data
# ## 1. Data Preparation
# In this notebook, we will use type Ia supernova data from https://sne.space/.
# +
# import necessary modules
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import integrate, optimize
# %matplotlib inline
# set basic figure parameters
import matplotlib as mpl
mpl_param = {'figure.figsize' : [8.0, 6.0],
             'savefig.dpi' : 100,
             'axes.titlesize' : 'xx-large',
             'axes.labelsize' : 'xx-large',
             'text.usetex' : False,
             'font.family' : 'serif'}
mpl.rcParams.update(mpl_param)
# =============================================================================
# Data cleaning
# =============================================================================
# read json data
data = pd.read_json('supernova.json')
# annotation substrings to strip from the numeric columns (regex alternation)
bad_string = '(host|spectroscopic|heliocentric|cmb|photometric|cluster|,)'
# columns whose string values must be cleaned before numeric conversion
bad_column = ['z', 'dL (Mpc)', 'mmax', 'Mmax']
# replace z and dL value with valid float numbers
# nan means Not A Number
for i in bad_column:
    # NOTE(review): str.replace relies on regex matching being the default,
    # which changed to regex=False in pandas 2.0 — confirm pinned version.
    data[i] = data[i].str.replace(bad_string, '')
    # Chained .loc assignment: works on older pandas but triggers
    # SettingWithCopyWarning on newer versions.
    data[i].loc[data[i] == ''] = np.nan
    data[i] = data[i].astype(float)
# sort data by redshift value
data = data.sort_values(by=['z'])
# redshift cut until z~2
data = data.loc[data['z'] <= 2]
data.head()
# -
# plot redshift vs luminosity distance
# NOTE(review): the y axis is luminosity distance in Mpc, not distance modulus.
plt.plot(data['z'], data['dL (Mpc)'], 'b.')
plt.xlabel('redshift $(z)$')
plt.ylabel('Luminosity Distance (MPC)')
# ## 2. Luminosity Distance
# Luminosity distance ($d_L$) for various cosmological models are usually formulated as
#
# \begin{equation}
# d_L(z) = \frac{c (1+z_2)}{H_0}
# \times \int_{z_1}^{z_2}{
# \frac{dz'}{\sqrt{
# \Omega_{\rm m}(1+z')^3 + \Omega_{\rm r}(1+z')^4 + \Omega_{\rm de}(1+z')^{3(1+w_{\rm de})
# }}}}
# \end{equation}
#
# Here, we consider a spatially flat universe ($\Omega_{\rm m} + \Omega_\Lambda = 1$, $w_{\rm de} = -1$). Therefore, that we can write it as following codes.
# +
# define some basic constants
c = 3 * 1e5 # speed of light, km/s
H0 = 67.7 # Hubble constant, km / (Mpc s)
Omega_m = 0.307 # present-day matter density parameter
Omega_r = 0 * 1e-5 # radiation density, negligible at these redshifts
Omega_lambda = 1 - Omega_m # flat universe: density parameters sum to 1
# make luminosity distance function
def lum_dist(z, Omega_m, Omega_lambda, H0, c=3e5):
    """Luminosity distance in Mpc for a flat matter + dark-energy universe.

    Parameters
    ----------
    z : iterable of float
        Redshifts at which to evaluate the distance.
    Omega_m : float
        Present-day matter density parameter.
    Omega_lambda : float
        Present-day dark-energy density parameter.
    H0 : float
        Hubble constant, km / (Mpc s).
    c : float, optional
        Speed of light in km/s. Previously read from a module-level global;
        now an explicit keyword parameter with the same default (3e5), so the
        function is self-contained (important when passed to curve_fit).

    Returns
    -------
    numpy.ndarray
        Luminosity distances in Mpc, one per input redshift.
    """
    Omega_r = 0 * 1e-5  # radiation density; negligible at these redshifts
    # Dimensionless inverse Hubble parameter 1/E(z').
    fn = lambda zp: (Omega_r*(1+zp)**4. + Omega_m*(1+zp)**3 + Omega_lambda)**-0.5
    # Comoving integral from redshift 0 up to each requested redshift.
    comoving = [integrate.quad(fn, 0, _z)[0] for _z in z]
    return c*(1+z)/H0 * np.asarray(comoving)
# -
# For a quick insight, we can do the least-square fitting to obtain cosmological parameters.
# +
# remove NaN values
data_good = data[['z', 'dL (Mpc)']].dropna()
# NOTE(review): sample(n=500) is an unseeded random subsample, so results
# differ between runs -- pass random_state for reproducibility
data_good = data_good.sample(n=500)
# guess initial parameters
# Omega_m, Omega_lambda, H0, respectively
initial_param = np.array([0.3, 0.7, 70])
# least-square fitting of lum_dist to the (z, d_L) data
opt_param, cov = optimize.curve_fit(lum_dist,
                                    data_good['z'].values,
                                    data_good['dL (Mpc)'].values,
                                    p0=initial_param)
# 1-sigma uncertainties from the diagonal of the covariance matrix
err_param = np.sqrt(np.diag(cov))
# +
# =============================================================================
# Plot the result: data with the best-fit model overlaid
# =============================================================================
plt.figure()
plt.plot(data['z'], data['dL (Mpc)'], 'b.', label='Data')
plt.plot(data['z'], lum_dist(data['z'], *opt_param),
         'g-', label='Fitted')
plt.ylabel('Distance (Mpc)')
plt.xlabel('Redshift')
plt.legend()
plt.show()
# print the fitted parameters with their 1-sigma uncertainties
print ('======================================')
print ('Least-Square Fitting Final parameter:')
print ('Omega_m = %.2f (%f)' %(opt_param[0], err_param[0]))
print ('Omega_lambda = %.2f (%f)' %(opt_param[1], err_param[1]))
print ('H0 = %.2f (%f)' %(opt_param[2], err_param[2]))
print ('======================================')
# ## 3. Maximum Likelihood Fitting
# We will use maximization of the likelihood function to constrain and compare the models. First, we calculate $d_{L}^{\rm obs}$ and $d_{L}^{\rm th}$. Then, the joint likelihood function for all parameters, based on a flat Bayesian prior, is
#
# \begin{equation}
# \mathcal{L} = \prod_{i} \exp{\bigg[-\frac{\chi^2}{2} \bigg]}
# \end{equation}
#
# where for each measurement
#
# \begin{equation}
# \chi^2_i = \frac{(d_{L_i}^{\rm obs} - d_{L_i}^{\rm th})^2}{\sigma^2_{d_{L_i}}}
# \end{equation}
#
# In python, those equations can be written as below.
# +
# =============================================================================
# Maximum likelihood fitting
# =============================================================================
# define log-likelihood function (cf. Equation 11 in Leaf et al. 2018)
def lnlike(theta, X, y, yerr):
    """Gaussian log-likelihood (up to an additive constant).

    theta : (Omega_m, Omega_lambda, H0)
    X     : redshifts
    y     : observed luminosity distances (Mpc)
    yerr  : 1-sigma uncertainty on y
    """
    Omega_m, Omega_lambda, H0 = theta
    model = lum_dist(X, Omega_m, Omega_lambda, H0)
    # chi-square per data point
    chi2 = ((y-model)**2)/yerr**2
    # BUG FIX: the original returned np.sum(np.exp(-chi2/2)), i.e. a *sum*
    # of per-point likelihoods.  lnprob() adds this value to a log-prior and
    # emcee treats it as a log-probability, so the correct quantity is the
    # log of the product of Gaussians (up to a constant):
    return -0.5 * np.sum(chi2)
X = data_good['z'].values
y = data_good['dL (Mpc)'].values
yerr = 0.05 # NOTE(review): used as an *absolute* sigma in chi2; a uniform
            # 5% *relative* error would be 0.05 * y -- confirm intent
# +
from scipy import optimize
# optimize module minimizes functions whereas we would like to maximize the likelihood
# that's why I put the minus(-) sign
nll = lambda *args: -lnlike(*args)
# NOTE(review): the disabled code below unpacks four values (m, b, h0, wde)
# from a three-parameter fit; it would raise ValueError if re-enabled as is.
#result = optimize.minimize(nll, initial_param, args=(X, y, yerr))
#m_ml, b_ml, h0_ml, wde_ml = result["x"]
print ('======================================')
#print ('Maximum Likelihood Result')
#print ('Omega_m = %.2f (%.2f)' %(m_ml, 0))
#print ('Omega_lambda = %.2f (%.2f)' %(b_ml, 0))
#print ('H0 = %.2f (%.2f)' %(h0_ml, 0))
#print ('wde = %.2f (%.2f)' %(wde_ml, 0))
print ('======================================\n')
# -
# ## 4. Markov Chain Monte Carlo Fitting
# In order to improve our fitting accuracy, we will use emcee, which is pure-Python implementation of Goodman & Weare's Affine Invariant Markov chain Monte Carlo (MCMC) Ensemble sampler. These pages (http://emcee.readthedocs.io/en/stable/) will show you how to use it. We would like to marginalize over some "nuisance parameters" and find an estimate of the posterior probability function (the distribution of parameters that is consistent with dataset) for others.
#
# We will define uniform (so-called "uninformative") priors on initial parameters ($\Omega_{\rm m}$, $\Omega_{\Lambda}$, and $H_0$). Then, combining this with the definition of lnlike from above, the full log-probability function can be calculated.
# +
# =============================================================================
# MCMC fitting
# see http://dfm.io/emcee/current/user/line/ for the detail
# =============================================================================
# define prior
def lnprior(theta):
    """Log of a flat (uninformative) prior: 0 inside the box, -inf outside.

    The support requires near-flatness (Omega_m + Omega_lambda within 5%
    of unity) plus loose physical bounds on every parameter.
    """
    omega_m, omega_lambda, hubble = theta
    nearly_flat = 0.95 <= omega_m + omega_lambda <= 1.05
    densities_ok = (0 < omega_m < 1.5) and (0 < omega_lambda < 1.5)
    hubble_ok = 60 < hubble < 80
    if nearly_flat and densities_ok and hubble_ok:
        return 0
    return -np.inf
# define the full probability
def lnprob(theta, X, y, yerr):
    """Log-posterior: lnprior(theta) plus lnlike(theta, X, y, yerr).

    Short-circuits to -inf when theta is outside the prior support, so the
    (more expensive) likelihood is only evaluated for admissible parameters.
    """
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, X, y, yerr)
# +
# number of fitted parameters and of MCMC walkers
ndim, nwalkers = 3, 1000
# start every walker in a tight Gaussian ball around the initial guess
pos = [initial_param + 1e-2*np.random.randn(ndim) for i in range(nwalkers)]
import emcee
import sys
# NOTE(review): the `threads` keyword was removed in emcee 3 (replaced by
# `pool`); confirm the installed emcee version supports it
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(X, y, yerr), threads=3)
nsteps = 500
width = 30  # character width of the text progress bar
print ('running MCMC.....')
for i, result in enumerate(sampler.sample(pos, iterations=nsteps)):
    n = int((width+1) * float(i) / nsteps)
    sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
sys.stdout.write("\n")
# flatten the chain, discarding the first 50 steps of each walker as burn-in
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
# +
import corner
# corner plot of the posterior samples; `truths` marks the fiducial values
fig = corner.corner(samples, labels=["$\Omega_m$", "$\Omega_\Lambda$", "$H_0$"],
                    truths=[Omega_m, Omega_lambda, H0])
plt.savefig('result/supernova.png')
plt.show()
# +
# median and +/- 1-sigma (84th/16th percentile) credible intervals
m_mcmc, b_mcmc, h0_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                             zip(*np.percentile(samples, [16, 50, 84],
                                                axis=0)))
print ('============================================================================')
print ('MCMC Result')
print ('Omega_m = ', m_mcmc)
print ('Omega_lambda = ', b_mcmc)
print ('H0 = ', h0_mcmc)
print ('============================================================================')
# persist the flattened chain for later analysis
output_data = pd.DataFrame({'omega_m': samples[:, 0],
                            'omega_l': samples[:, 1],
                            'h0' : samples[:, 2]})
output_data.to_csv('result/output_supernova.csv', index=False)
# -
| coursework/Cosmological_Parameters/Cosmo_Supernova.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
#from glmnet import glmnet; from glmnetPlot import glmnetPlot
#from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
#from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
#from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
import glob
import csv
import numpy as np
from sksurv.nonparametric import kaplan_meier_estimator
from sksurv.linear_model import CoxPHSurvivalAnalysis
from sksurv.preprocessing import OneHotEncoder
# %matplotlib inline
import matplotlib.pyplot as plt
from random import shuffle
files = (glob.glob("../../ovarian_cancer_results/collagen_final/features/*"))
shuffle(files)
print(len(files))
# NOTE(review): the globbed + shuffled list above is discarded -- `files` is
# rebuilt from the saved filename list below (presumably to reuse a fixed,
# previously shuffled ordering); confirm the glob is still needed.
with open('../notebooks/results_good/main_filenames.txt') as f:
    lines = f.readlines()
files = []
for line in lines:
    # drop the trailing newline character
    files.append(line[:len(line)-1])
print(files)
# +
#print(files)
#textfile = open("filenames.txt", "w")
#for file in files:
# textfile.write(file + "\n")
#textfile.close()
# -
# read one collagen feature vector per file (all values on all rows)
collagen_features = []
for file in files:
    flag = -1
    file_features = []
    with open(file, newline='') as csvfile:
        spamreader = csv.reader(csvfile)
        for row in spamreader:
            # NOTE(review): flag is never set to 1 here, so *every* row is
            # parsed (unlike the header-skipping loops further down) --
            # confirm the feature files really have no header row.
            if flag == -1:
                array = row
                for index in range(0, len(array)):
                    file_features.append(float(array[index]))
    collagen_features.append(file_features)
print(len(collagen_features))
# +
# read the TIL feature matrix, skipping the header row; column 0 is dropped
f = []
flag = -1
with open("../../til_biomarkers_ovarian_cancer/data/features.csv", newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        if flag == -1:
            # skip the header row
            flag = 1
        else:
            array = row
            ff = []
            for index in range(1, len(array)):
                ff.append(float(array[index]))
            f.append(ff)
# align TIL feature rows with `files` by matching basenames in filenames.csv
til_features = []
for file in files:
    flag = -1
    count = 0
    target_file = file.split("/")[-1]
    with open("../../til_biomarkers_ovarian_cancer/data/filenames.csv", newline='') as csvfile:
        spamreader = csv.reader(csvfile)
        for row in spamreader:
            if flag == -1:
                flag = 1
            else:
                array = row
                file = array[1]
                file = file.split("/")[-1]
                if target_file == file:
                    til_features.append(f[count])
                count += 1
print(len(til_features))
# -
# keep only the collagen features for the first 90 cases
# (the TIL + collagen combination is left disabled below)
features = []
for index in range(0, 90):
    #features.append(til_features[index]+collagen_features[index])
    features.append(collagen_features[index])
print(len(features))
print(len(features[0]))
print(len(features[0]))  # NOTE(review): duplicate of the previous line
# +
# build train (cases 30-89) and validation (cases 0-29) feature tables;
# each row starts with a 1-based sample index
train_features = []
val_features = []
count = 0
for index in range(30, 90):
    count += 1
    current_features = []
    current_features.append(count)
    for index1 in range(0, len(features[index])):
        current_features.append(features[index][index1])
    train_features.append(current_features)
'''
for index in range(60, 90):
    count += 1
    current_features = []
    current_features.append(count)
    for index1 in range(0, len(features[index])):
        current_features.append(features[index][index1])
    train_features.append(current_features)
'''
"""
for index in range(0, 27, 2):
    count += 1
    current_features = []
    current_features.append(count)
    for index1 in range(0, len(features[index])):
        current_features.append(features[index][index1])
    train_features.append(current_features)
"""
# validation split: the first 30 cases
count = 0
for index in range(0, 30):
    count += 1
    current_features = []
    current_features.append(count)
    for index1 in range(0, len(features[index])):
        current_features.append(features[index][index1])
    val_features.append(current_features)
# -
print(len(train_features))
print(len(val_features))
print(len(train_features[0]))
print(len(val_features[0]))
# CSV header row: blank id column followed by feature names F_1 .. F_72
rows = []
rows.append("")
for index in range(1, 73):
    rows.append("F_" + str(index))
print(len(rows))
# prepend the header to each split
final_train_features = []
final_train_features.append(rows)
for index in range(0, len(train_features)):
    final_train_features.append(train_features[index])
print(len(final_train_features))
final_val_features = []
final_val_features.append(rows)
for index in range(0, len(val_features)):
    final_val_features.append(val_features[index])
print(len(final_val_features))
# write the train/validation feature tables to CSV (header row included)
with open("train_features.csv", 'w', newline='') as out_file:
    csv.writer(out_file).writerows(final_train_features)
# same format for the validation split
with open("val_features.csv", 'w', newline='') as out_file:
    csv.writer(out_file).writerows(final_val_features)
# parse the clinical table: column 10 = days to death (event observed),
# last column = follow-up time (censored case), column 7 = age field
flag = -1
new_filenames = []
survival_info = []
age_info = []
with open("../../Ov_TCGA_data.csv", newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        file_survival_info = []
        if flag == -1:
            # skip the header row
            flag = 1
        else:
            array = row
            new_filenames.append(array[1])
            # NOTE(review): 20000 and 50 are placeholder values substituted
            # for missing ("--") entries -- confirm these sentinels.
            if array[7] == "--":
                array[7] = 20000
            if array[10] == "--":
                if array[len(array)-1] == "--":
                    array[len(array)-1] = 50
                # censored observation: status 0
                file_survival_info.append(float(array[len(array)-1]))
                file_survival_info.append(0)
                age_info.append(float(array[7]))
            else:
                # death observed: status 1
                file_survival_info.append(float(array[10]))
                file_survival_info.append(1)
                age_info.append(float(array[7]))
            survival_info.append(file_survival_info)
import pandas as pd
df = pd.read_csv("../../clinical.tsv", sep='\t')
df.head()
df = df[['figo_stage', 'case_submitter_id']]
# NOTE(review): at this point `f` holds the TIL feature rows (lists of
# floats), not case ids, so isin(f) appears unable to ever match --
# confirm which variable was intended here.
df['Status'] = df['case_submitter_id'].isin(f)
df = df[df['Status'] == True]
df
for index, row in df.iterrows():
    print(row['case_submitter_id'] + " " + row["figo_stage"])
# look up (survival time, status) for each file by matching its basename
# (extension stripped) against the clinical filename list
y = []
for file in files:
    file = file.split("/")[-1][:-4]
    count = 0
    flag = -1
    for filename in new_filenames:
        if file == filename:
            y.append(survival_info[count])
            flag = 1
        count += 1
    # report any file with no clinical match
    if flag == -1:
        print(file)
print(len(y))
# +
# split the label rows to mirror the train (30-89) / val (0-29) feature split
train_labels = []
val_labels = []
count = 0
val_files = []
for index in range(30, 90):
    count += 1
    current_labels = []
    current_labels.append(count)
    for index1 in range(0, len(y[index])):
        current_labels.append(y[index][index1])
    train_labels.append(current_labels)
'''
for index in range(60, 90):
    count += 1
    current_labels = []
    current_labels.append(count)
    for index1 in range(0, len(y[index])):
        current_labels.append(y[index][index1])
    train_labels.append(current_labels)
'''
"""
for index in range(0, 27, 2):
    count += 1
    current_labels = []
    current_labels.append(count)
    for index1 in range(0, len(y[index])):
        current_labels.append(y[index][index1])
    train_labels.append(current_labels)
"""
# validation labels; also remember which files landed in the val split
count = 0
for index in range(0, 30):
    val_files.append(files[index])
    count += 1
    current_labels = []
    current_labels.append(count)
    for index1 in range(0, len(y[index])):
        current_labels.append(y[index][index1])
    val_labels.append(current_labels)
print(len(train_labels))
print(len(val_labels))
# -
# header row for the label CSVs
rows = []
rows.append("")
rows.append("Survival (in days)")
rows.append("Status")
final_train_labels = []
final_train_labels.append(rows)
for index in range(0, len(train_labels)):
    final_train_labels.append(train_labels[index])
print(len(final_train_labels))
final_val_labels = []
final_val_labels.append(rows)
for index in range(0, len(val_labels)):
    final_val_labels.append(val_labels[index])
print(len(final_val_labels))
# write the train/validation label tables to CSV (header row included)
with open("train_labels.csv", 'w', newline='') as out_file:
    csv.writer(out_file).writerows(final_train_labels)
# same format for the validation split
with open("val_labels.csv", 'w', newline='') as out_file:
    csv.writer(out_file).writerows(final_val_labels)
| notebooks/.ipynb_checkpoints/create_train_val-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import anndata
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from matplotlib.patches import Patch
from itertools import product
# +
def extract_pathway_cols(df, pathway):
    """Return the columns of *df* whose names start with '<pathway>-'."""
    prefix = pathway + '-'
    selected = df.columns.str.startswith(prefix)
    return df.loc[:, selected]
def tab20(arg):
    """Hex colour string for entry *arg* of matplotlib's 'tab20' palette."""
    palette = plt.get_cmap('tab20')
    return rgb2hex(palette(arg))


# fixed colour per cell type, drawn from the tab20 palette
cmap = {name: tab20(idx) for name, idx in [
    ('CD4 T', 0),
    ('CD8 T', 1),
    ('CD14 Mono', 2),
    ('CD16 Mono', 3),
    ('B', 4),
    ('DC', 6),
    ('NK', 8),
    ('T', 10),
]}
# +
# load the reconstructions and choose which pathway embeddings to plot
recons = anndata.read('../data/released/kang_recons.h5ad')
pathways = [
    'INTERFERON_ALPHA_BETA_SIGNALIN',
    'CYTOKINE_SIGNALING_IN_IMMUNE_S',
    'TCR_SIGNALING',
    'CELL_CYCLE']
# +
# grid of scatter plots: one row per condition, one column per pathway
fig, axes = plt.subplots(
    2, len(pathways),
    figsize=(6*len(pathways), 4*2))
pairs = product(['stimulated', 'control'], pathways)
for ax, (active, key) in zip(axes.ravel(), pairs):
    mask = recons.obs['condition'] == active
    codes = extract_pathway_cols(recons.obsm['pathway_tsnes'], key)
    # plot non-active condition as a faint grey background layer
    ax.scatter(
        *codes.loc[~mask].T.values,
        s=1, c='lightgrey', alpha=0.1
    )
    # plot active condition coloured by cell type
    ax.scatter(
        *codes.loc[mask].T.values,
        c=list(map(cmap.get, recons.obs.loc[mask, 'cell_type'])),
        s=1,
        alpha=0.5,
    )
    ax.set_title(f'{key} {active}')
# shared cell-type legend beneath the grid
fig.legend(
    handles=[Patch(color=c, label=l) for l,c in cmap.items()],
    ncol=4,
    loc=('upper center'),
    bbox_to_anchor=(0.5, 0),
    fontsize='xx-large'
)
plt.tight_layout()
plt.show()
| notebooks/scatter_pathway_tsnes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 누락값이란?
from numpy import NaN, nan, NAN
# NaN is not equal to anything -- not even itself (IEEE 754), so every
# comparison below evaluates to False
NaN == 0
NaN == False
NaN == ''
NaN == NaN
# 비교할 값이 없는 것이니 상기의 사례에서 모두 False 출력
# ## 누락값 확인하는 방법: pd.isnull()
# +
import pandas as pd
# pd.isnull detects missing values where == cannot
pd.isnull(NaN)
# -
pd.notnull(NaN)
pd.notnull(42)
# ## 누락값이 생기는 이유
# ### 누락값이 있는 데이터 집합을 연결 -> 더 많은 누락값들이 생김
# merging datasets can introduce additional missing values
visited = pd.read_csv('DoitPandas_Resource/data/survey_visited.csv')
survey = pd.read_csv('DoitPandas_Resource/data/survey_survey.csv')
visited
survey
visited.merge(survey, left_on = 'ident', right_on = 'taken')
# ### 데이터 입력을 잘못하는 경우
# missing values can also be entered directly when building objects
nan_included_series = pd.Series({'name': 'Yuna', 'sex': 'Female', 'job': 'Dancer', 'else': nan})
nan_included_series
nan_included_df = pd.DataFrame({
    'name': ['Yuna', 'Yujeong', 'Minyoung', 'Eunji'],
    'nickname': ['Danbaljwa', 'GGOBUGJWA', 'Mebojwa','Wangnunjwa'],
    'position': ['Subrapper', 'Center', 'Leadvocal', nan]
})
nan_included_df
# ### 범위를 지정하여 데이터를 추출할 때 누락값이 생기는 경우
gapminder = pd.read_csv('DoitPandas_Resource/data/gapminder.tsv', sep = '\t')
gapminder
# mean life expectancy per year
life_exp = gapminder.groupby(['year',])['lifeExp'].mean()
life_exp
# + tags=[]
# NOTE(review): .loc with labels missing from the index raises a KeyError
# in recent pandas instead of returning NaN rows
life_exp.loc[range(2000, 2010),]
# -
# This approach can no longer be used to produce the missing values.
# As an aside, boolean indexing extracts the desired rows without missing values
life_exp[life_exp.index>=2000]
# ## 누락값의 개수를 알아보는 방법
ebola = pd.read_csv('DoitPandas_Resource/data/country_timeseries.csv')
# ### Counting non-missing values with the count() method
ebola.count()
# ### Subtracting the non-missing count from the total number of rows
ebola.shape[0] - ebola.count()
# ### Combining np.count_nonzero with the isnull method
import numpy as np
np.count_nonzero(ebola.isnull())
# #### Counting missing values in a specific column with the same approach
np.count_nonzero(ebola['Cases_Guinea'].isnull())
# ### The series.value_counts() method
ebola.Cases_Guinea.value_counts(dropna = False)
# 위에서 보듯이 꼭 '데이터프레임[열이름]'으로 써야 하는 것은 아니고 '데이터 프레임.열이름'의 형태로 쓰이기도 한다!
# value_counts()매서드의 dropna 인자값으로 False를 주는 것은 기본 인자값이 True인 경우 NaN의 빈도에 해당하는 값은 drop 해버리기 때문이다. 한 마디로 NaN의 빈도를 버리지 말고 표시해 주라는 의미인 것
# ## 누락값 처리하기
# ### 변경
# #### .fillna(0)
ebola.fillna(0).iloc[0:5, 0:5]
# Replaces every missing value with 0
# #### .fillna(method = 'ffill')
ebola.fillna(method = 'ffill').iloc[0:10, 0:5]
# Forward-fills each missing value with the previous row's value in the same column.
# #### .fillna(method = 'bfill')
ebola.fillna(method = 'bfill').iloc[0:10, 0:5]
# Drawback: a missing value in the last row cannot be back-filled
# #### .interpolate()
ebola.interpolate().iloc[0:10, 0:5]
# Fills each missing value with the midpoint of the values before and after it.
# ### Dropping missing values
ebola.shape[0]
ebola.dropna()
ebola.dropna().shape[0]
# A huge number of rows get dropped -- wasteful. Last resort!!!
# ## Calculating with data that contains missing values
ebola['Three_Cases_Sum'] = (ebola.Cases_Guinea + ebola.Cases_Liberia + ebola.Cases_SierraLeone)
ebola.loc[:, ['Cases_Guinea', 'Cases_Liberia', 'Cases_SierraLeone', 'Three_Cases_Sum']]
# Summing columns directly like this leaves NaN whenever any single column is NaN.
# ### Computing the grand total of the 'sum' column
ebola.Three_Cases_Sum.sum(skipna = True)
ebola.Three_Cases_Sum.sum(skipna = False)
# Missing values must be skipped for the sum not to come out NaN; skipna
# already defaults to True, so setting it explicitly is optional
| DIP(6)_NaN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id='topcell'></a>
# <h1> An Introduction to Python for Physics Students </h1><hr>
# This notebook is for students in the physical sciences who are seeking to learn about scripting for scientific purposes. The language of choice will be Python as it is the easiest language to work with and requires the least amount of knowledge of computer science fundamentals in order to use effectively.
#
# Questions can be sent to the below contacts: <br>
# <EMAIL> (may be inactive past Summer 2023, if that is the case, refer to the github link which has an updated address).<br>
# https://github.com/JoeyTurn
# <br>
#
# Some parts of this notebook will not be all that applicable to physics-based projects, I'll mark these by having a <b>NP</b> (non-physics) tag in front of them. They'll still be included in the case they are necessary to a project or to those who are more curious about the more programming-heavy aspects.
# <a id="specialized"></a>
# <h2>Specialized Python Fundamentals</h2>
# The name of this chapter is somewhat extraneous; this chapter mostly is a more detailed look at topics covered in Essentials. This chapter aims to follow up Essentials with python functionality and coding practice that all good programmers should know, although aren't used very frequently in physics as compared to traditional computer science.
# <h3> Specialized Python Fundamentals Topics </h3>
#
# 1: <b>NP</b> [User Input](#user)<br>
# 2: [Naming Convention](#varname)<br>
# 3: <b>NP</b> [Match Case](#match)<br>
# 4: [Proper Function Writing](#fnwriting)<br>
#  4.1: [Default Values](#default)<br>
# 5: [Proper Conditional Writing](#condwriting)<br>
# 6: [Enumerate](#ennumerate)<br>
# 7: [Appending and List Comprehensions](#append)<br>
# 8: [Files](#files)<br>
# 9: <b>NP</b> [Try Except Finally and Raise](#try)<br>
# 10: [Specialized Python Fundamentals Example Solutions](#specializedsol)
# <a id="user"></a>
# <h3>User Input (NP)</h3>
# The ability of users to in some way manipulate the program without knowing the inner workings of code sounds like it would be extremely important, though standard physics computations don't require nor need the interactibility that comes with user input. It is, however, still important to know about in the case that user input is required, and is simple enough to implement that learning the implementation will be quick.
#
# To get a user's input, simply use the command <code>input()</code>. We can write store these inputs into variables as if they were any normal variable value, using <code>variable = input()</code>. If we want the user to input a specific type or a choice of values, we can use a do while loop, and if we want to include a message to accompany the <code>input</code>, we can use the statement <code>input("statement here ")</code> (make sure to include the space for readability) and it will still ask the user for an input. We can look at an example of an <code>code</code> inpput statement below.
#note to include the int() if you are using a statement inside the input(), otherwise you will get a string
# classify a user-supplied integer by sign
x = int(input("input a number: "))
if x<0:
    print(str(x) + " this is a negative value")
elif x>0:
    print(str(x) + " this is a positive value")
else:
    print(str(x) + " wow, zero!")
# Try out inputs below, and see if you can get the user to select their favorite from a series of particles (that you let them select from) and return to the user the rest-mass of that particle.
# +
# Example 1:
# -
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="varname"></a>
# <h3>Naming Convention</h3>
# As stated in the Essentials section, variable names should be somewhat obvious as to what they measure. For example, we would want a variable name for <code>distance</code> or potentially <code>dist</code> for a variable that stores distance(s), but wouldn't <i>generally</i> want a variable name like <code>d</code> for the same variable, even if it performs the same function in the code. The main exception to this is in <code>for</code> loops, where <code>i</code>, <code>j</code>, <code>k</code>, and <code>n</code> are common enough variable names for the iterator that in short- to medium-sized <code>for</code> loops the specificity doesn't apply.<br>
#
# Aside from these situations, there are a few more that we should look at which aren't immediately obvious. For instance, function and variable names with multiple contained words, as per the Python guidelines should have the contained words seperated by underscores, as seen below. I will write out variables each named some variation of <code>correct_name</code> and will set them to <code>True</code> if the name is used/recommended.
# +
#standard variable name
correct_name = True
#camel case, used for variables in OTHER languages, try to avoid in Python
correctName = False
#used for constants, will explain in following cell
CORRECT_NAME = True
#don't use this naming convention, I don't believe this is used in any language
correctname = False
correct_Name = False
#BUG FIX: hyphens are not legal in Python identifiers -- "correct-name = False"
#is a SyntaxError (it parses as the expression `correct - name`), which made
#this whole cell fail to run; the two examples are therefore shown commented out
#correct-name = False
#correct-Name = False
#as per the above cell, don't use strange/abbreviated variable names unless they're common and/or obvious
corrname = False
corr_name = False
c = False
# -
# As for the one specific case which isn't the standard variable naming convention, the variable in all caps, that variable naming convention should only be used for when the value of the variable will <b>never</b> be changed anywhere else in the code, such as fundamental constants. The value of this variable (called a final variable, but that's only necessary knowledge in other language) can be modified in the line specifying it's value, but no where else!
# Also, a really quick thing I want to point out: occasionally you may want to include quotation marks inside a string that is delimited by the same kind of quotation mark; in that case you will need to use the <code>\</code> symbol before each such quotation mark (parentheses, by contrast, never need escaping inside strings).
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="match"></a>
# <h3>Match Case (NP)</h3>
# Match Case statements were only recently added to Python after users wanted the functionality that existed with other languages. The statements are used specifically when we want to improve the performance of a specific setup of conditionals. This specific case is for when we want to <i>match</i> a variable to one of a specific set of values, and does not (as far as I am aware of) allow the variable to take any value outside of the specific set, which should be contrasted with <code>in</code> statements where <code>else</code> statements can still catch unspecified values. Enough with talking, let's see how match case works.
# Note: this was only recently added to Python, so make sure the version is up-to-date, otherwise there will be a syntax error (as I got when I realized my Anaconda's Python hadn't been updated).
#returns "force" law (match/case requires Python >= 3.10)
#note: there is no `case _` fallback, so unrecognised input prints nothing
force = (input("Enter a force: ")).lower()
match force:
    case "gravitational":
        print("GMm/R^2")
    case "electromagnetism":
        print("1/4πƐ0*Qq/R^2")
    case "weak":
        print("W Z bosons")
    case "strong":
        print("Gluon")
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="fnwriting"></a>
# <h3>Proper Function Writing</h3>
# This section will go over the standards of writing functions, standards which weren't necessary for functions to, well, function, but are nonetheless important to know to write better code.<br>
# The first part of function writing will look into some syntax that is used when writing functions; typically, value types of the inputs and outputs of functions aren't specified in the variable/function names, so instead indicators are used to specify what types of variables are used. To see the syntax of this, let's look at an example:
# +
standard_value = 10
#multiplies a number by 10
def multiply_value(num: int):
    # the annotation is not enforced: passing the string "5" repeats it,
    # printing "5555555555"
    return num*10
print(multiply_value(5))
print(multiply_value("5"))
# -
# It should be easy to see that this function has the identical output to the same function without the <code>: int</code> specification in the input, even in the case where the inputed type was not the same as the one we specified in the function input. This isn't an endorsement of randomly specifying the variables in the inputs to be a specific type and then ignoring that, instead, it was meant to show how fundamentally the <code>: </code> after the variable acts somewhat like a comment, and is mainly used for readibility. Since the colon doesn't directly impact the function, we can actually have the specified type as part of a class that has been made. For instance, if there was a class of forces from which you wanted to take and objects made from this class of type "force," we could specify the input variable being of type "force" through <code>: force</code> after the variable in question.
# We can also specify the output type of a function in a similar way:
# +
special_word = "fugacity"
#returns if a word is a "thermo word," specified by a small list
def thermo_word(word: str) -> bool:
    # no `else` needed: returning inside the `if` ends the function;
    # otherwise execution falls through to `return False`
    if word in ("heat", "Carnot", "engine", "Boltzmann", "U=Q-W", "3/2kT", "fugacity"):
        return True
    return False
print(thermo_word(special_word))
# -
# Again, this <code> -> </code> syntax past the function definition is just for syntax and has no direct impact on the function. Additionally, we can see the lack of an <code>else</code> statement in the cell above, this is by design. Since a function can only ever <code>return</code> one object, anything caught in the <code>if</code> statement's <code>return</code> will stop the function from reaching the next line's <code>return False</code>, and if something isn't caught in the <code>if</code> statement, it will simply move onto and execute the <code>return False</code> line.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="default"></a>
# <h4>Default Values</h4>
# Default values in functions are the function equivalent of default values in classes: the function assumes the value unless the value is specifically overwritten. These are NOT constants as in the previous all-caps case, though are values that are common enough that typing them in once is more efficient than repeatedly typing them. To set a default value in a function, all we need to do is have <code>variable = value</code> as if we were creating a variable outside of the variable. This function default value won't be available for use outside of the function, though. An default value implementation is provided below:
# +
#finds the gravitational force on a mass
def grav_force(m, M: float = 5.97*10**24, R: float = 6376000) -> float:
    """Newtonian gravitational force (N) on mass ``m`` (kg).

    The defaults give the force at Earth's surface: ``M`` is Earth's mass
    in kg and ``R`` its radius in metres.  The annotations on the defaulted
    parameters are optional and shown only for illustration; note that
    5.97*10**24 is a float, so the original ``M: int`` annotation was
    corrected to ``float``.
    """
    return 6.67*10**(-11)*m*M/(R**2)
print(grav_force(1))
# -
# The <code>: </code> is not necessary here, as the variable type should be self-evident from the value, in fact it's not even stylistically recommended, but was included here to show that the <code>: </code> is still allowed with a default value. It should also be noted that the values which are defaulted must come AFTER any non-defaulted values; this isn't just stylistically recommended, but Python will not compile if a defaulted input comes before an unknown value.<br><br>
# Default values can also be used in class methods with the exact same syntax:
# +
class My_class:
    def __init__(self, my_int: int):
        self.my_int = my_int
    def times_num(self, num = 10) -> float:
        # `num` defaults to 10 unless overridden, e.g. times_num(num=5)
        return num*self.my_int
my_object = My_class(5)
print(my_object.times_num())
print(my_object.times_num(num = 5))
# -
# I also included a little something extra in the previous cell: if we want to specify certain values as inputs we can use <code>input_name = value</code>, a concept that's simple, yet required for specialized Python tools such as those in machine learning.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="condwriting"></a>
# <h3>Proper Conditional Writing</h3>
# This section should be quick, and perhaps some of you may have picked up on this already. There isn't much to conditionals in terms of style, with the exception of one brief element that introductory programmers often miss. Let's take a look at the conditional below and see what it is that can be improved.
variable = True
#example conditional (deliberately redundant -- see the discussion below)
if (variable == True):
    print(variable)
if (variable == False):
    print(variable)
# The "problem" with this code is that there's a redundancy, try to see if you can spot it. The redundancy comes from having the expression inside the <code>()</code> parantheses be evaluated; since we already have the variable as a boolean, it will either be <code>True</code> or <code>False</code>, but either way, we only need this for the conditional statement:
# the boolean itself (or its negation) is the whole condition
variable = False
if (variable):
    print(variable)
if (not variable):
    print(variable)
# This may seem trivial when we input something defined one line earlier, though often this discrepancy appears when dealing with more complex code, something like,
# a variable whose name already reads as a boolean needs no comparison
gravitational_force = True
if gravitational_force:
    print("Gmm/r^2")
else:
    print("Unknown")
# as the variable <code>gravitational_force</code> is easy to is defined as boolean in this case.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="ennumerate"></a>
# <h3>Enumerate</h3>
# Enumeration is a useful concept in <code>for</code> loops, but can be worked around in a way that enumeration isn't needed in something more fundamental to coding practices. Enumerating a loop simply gives both the index and value of that loop, and it should be somewhat obvious as to why this would be useful in <code>for</code> loops. The work-around for this used to be using <code>list[i]</code> inside the loop to get the element, but with enumerates we can bypass this <i>clunky</i> code:
# +
starter_list = [1, 2, 4, 8, 9, 15]
# enumerate yields (index, value) pairs; the index always comes first
for i, n in enumerate(starter_list):
    print(i, n)
#compare this to the old way:
# starter_list = [1, 2, 4, 8, 9, 15]
# for i in range(len(starter_list)):
#     print(i, starter_list[i])
# -
# Where it is important to note the non-standard <code>i, n</code> part before the enumeration, as well as the fact that the index always comes before the values! The difference between enumeration and the old way may seem minute, but the runtime should solidify enumeration as the better method.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="append"></a>
# <h3>Appending and List Comprehensions</h3>
# Appending is instrumental in lists whereas list comprehensions are simply used to neatify code (like, <i>really</i> neatify code); since appending is more important, let's go over that first.
# <h4>Appending</h4>
# Appending to lists allows us to stick extra elements onto a list, always at the end of where we defined the list, there isn't much more to appending than that honestly. For example, we can append a list like:
# +
basic_list = [1, "5", "g", "fugacity", (10, 3, -42)]
print(basic_list)
#appending adds a single element onto the end of the list
basic_list.append(5)
print(basic_list)
#concatenation joins two lists into a new one -- here the list is joined to
#itself, doubling it (note this is not the same as .append)
basic_list = basic_list + basic_list
print(basic_list)
# -
# Please note that to append multiple elements, the appending will need to be done manually or by using loops, trying to append a list to a list by using <code>.append([list_elements])</code> will put the "appended" list inside the other list.<br><br>
# There are also more complicated list commands, like <code>.pop()</code>, <code>.join()</code>, <code>.insert()</code>, and <code>.sort()</code> along with others, but these come up on a case-by-case basis.<br><br>
# <h4>List Comprehension</h4>
# List comprehensions are a much nicer and easier way to produce lists from loops. Say, for instance, you were looking at an oscillator and wanted to know the position only when it was greater than the equilibrium position. That code would be much nicer to write using list comprehension than by other means. As an example, I'll supply a few position coordinates and set up a list comprehension to look at.
# +
position = [40, -20, 10, -5, 2.5, -1.25, .625, -.3875, .19375]
#list comprehension: keep only the strictly positive coordinates
positive_position = [value for value in position if value>0]
print(positive_position)
# -
# Where we can see here that a list comprehension is defined by putting <code>[]</code> braces around a one-line <code>for</code> loop. The conditional <code>if value>0</code> here isn't necessary for making a list comprehension, but I wanted to show it off as typically conditionals are used in list comprehensions in similar situations as the one I posed.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="files"></a>
# <h3>Files</h3>
# Writing and reading to files is something I dislike as I can never remember the exact keywords. In general, you won't remember either as file functionality will typically be implemented sparsely and not need to be modified. That being said, let's go over file reading/writing so that you have the reference for when it may come up in the future.<br><br>
# To write to files, we want to make it into an object by using <code>f = open(file_name, "w")</code> where <code>f</code> is a shorthand for file and "w" is for write. The file doesn't need to exist in order to write to it, Python will handle that (theres also "a" for append instead of "w"). Once we have the file open, we can write to is using <code>f.write(text_here)</code> and close the file by <code>f.close()</code>. As an example,
#open in "w" (write) mode; close() flushes the buffer and releases the handle
#NOTE(review): in real code prefer `with open(...) as f:` which closes
#automatically -- kept explicit here to match the text above
f = open("example.txt", "w")
f.write("Physicists created the internet!")
f.close()
# After writing to files, we can read from them by using essentially the same syntax except for the "w" being replaced with "r". After we have the file open to read, we can read from the file by using <code>f.read()</code>, or go line by line with <code>f.readline()</code>, like this:
#open in "r" (read) mode
f = open("example.txt", "r")
print(f.read())
#after read() the file position is at the end, so readline() returns ''
print(f.readline())
f.close()
# To note, only one line was read off (unless you modified the previous <code>f.write()</code> command to include more) since the reading always goes off the position of the last element we wrote/read.
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="try"></a>
# <h3>Try Except Finally and Raise (NP)</h3>
# Try, except, and finally statements are a bit precarious, they let us ignore any potential errors (such as a divide by zero error) and keep chugging on. Depending on your view and project, this seems either great or terrible: the bad values can just be passed by, or something's gone wrong with the code. I won't say which case is more likely, but I will say that it's better to be cautious than callous in these situations.<br><br>
# Assuming you had a system that gave an expected error that wasn't physically meaningful to any output and were extremely sure it wasn't due to a bug in the code, you can use a <code>try...except..finally/else</code> statement. As the names somewhat imply, we first try a chunk of code, if that fails we go to the except chunk, and then we reach the final state: if we want code to execute regardless of if the except block was reached, we use finally; otherwise, we use else. A sample program of this could be for inverse-square law forces:
# +
def gravity_force(r: float) -> float:
    #inverse-square law: 1/r**2, so r=0 raises ZeroDivisionError
    return r**(-2)
try:
    gravity_force(1)
    gravity_force(.001)
    gravity_force(0)
#a bare except catches everything -- here it is reached via ZeroDivisionError
except:
    print("Divided by zero")
#finally runs whether or not the except branch fired
finally:
    print("All done!")
# -
# Based on this code, we can see that if anything in the <code>try</code> block fails, then we don't execute any of it and move to the <code>except</code> block. If we want to be better about a <code>try...except</code> statement, we can specify which types of errors to catch inside the except, by using <code>except ErrorName</code>. In this case, the error we receive is a <code>ZeroDivisionError</code>, try to modify the code to have the same output but with a better <code>except</code> statement.<br><br>
# A better implementation of <code>try...except</code> when we don't know if/why an eror happens can be done with using conditionals and the keyword <code>raise</code>. As an example, let's redo the <code>gravity_force</code> code,
# +
def gravity_force(r: float) -> float:
    #same inverse-square force as above
    return r**(-2)
x = 1
gravity_force(x)
x = .001
gravity_force(x)
x = 0
#guard the bad input explicitly and raise a descriptive exception instead
#of letting ZeroDivisionError surface on its own
if x==0:
    raise Exception("Can't divide by zero")
# -
# Where here we can see that we actually get an exception we defined, and can review the code to see what went wrong. (And no, generally we don't define something and then check if it's what we defined, I just use <code>x=0</code> followed by <code>if x==0:</code> to get across the notation).
# [Return to Specialized Python Fundamentals](#specialized)
# <a id="specializedsol"></a>
# <h3> Specialized Python Fundamentals Example Solutions </h3>
# No examples for now! Come back when the other sections of the notebook are complete!
| Python for Physics Specialized Python Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #<div class="alert alert-success">Fracciones y divisibilidad.</div>
# Para realizar la mayor parte de los cálculos matemáticos de modo exacto utilizaremos la biblioteca **SYMPY**, que no viene de serie con Python, aunque si viene incluida en las distribuciones de Python para cálculo científico. Para ello se debe importar la biblioteca, y en general lo haremos con la sentencia **from sympy import ***.
#
# Además presentaremos los resultados en formato $\LaTeX$. Para ello utilizamos la función **init_printing** (escribimos "init" y pulsamos el tabulador). Estas dos expresiones deben aparecer siempre al comienzo del notebook.
# Para escribir fracciones utilizaremos la función **Rational** (es imprescindible escribir la primera letra con mayúsculas). Con las fracciones se pueden realizar todas las operaciones habituales y los resultados se presentan también en forma de fracción y no en formato decimal. Pero si en la operación introducimos un número decimal, la operación la realiza ya en decimales.
# ###<div class="alert alert-warning">Realiza las siguientes operaciones con fracciones.</div>
#
#
# * Simplifica la fracción $\displaystyle \frac{46}{50}$
#
#
# * $\displaystyle \frac{2}{3} +\frac{6}{7} + 1$
#
#
# * $\displaystyle \frac{2}{5} \times \frac{7}{6}$
#
#
# * $\displaystyle \frac{3}{4} + 1.0$
#
#
# * Calcula $\displaystyle \left(\frac{2}{3}\right)^{100}$ y da el resultado en decimales.
# Sympy puede factorizar números enteros de tamaño razonable con la función **factorint**. El resultado es lo que en Python se denomina un diccionario, donde aparecen el factor primo y el exponente separador por dos puntos. Si queremos que aparezca en el formato estandar, debemos añadir **visual = True**.
# ###<div class="alert alert-warning">Factoriza los números 612 y 5292 y comprueba el resultado.</div>
# Las funciones para calcular el máximo común divisor y el mínimo común múltiplo son **gcd** y **lcm**, que son las iniciales inglesas que corresponden a nuestros **mcd** y **mcm**.
# ###<div class="alert alert-warning">Calcula el mcd y el mcm de los números anteriores.</div>
# Para comprobar si un número es primo se utiliza la función **isprime**. Para números "pequeños" utiliza algoritmos exactos para su comprobación, pero para números grandes utiliza métodos probabilísticos. Parece que esta función no es necesaria, pues con la factorización se puede saber si un número es primo o no lo es. Sin embargo los métodos de factorización son muy costosos computacionalmente y los test de primalidad no lo son tanto.
# ###<div class="alert alert-warning">Comprueba si $5673$ es primo. Lo mismo con $2^{32}+ 1$. Factorízalos.</div>
# Para trabajar con divisores Sympy tiene dos funciones: **divisors**, que devuelve una lista con todos los divisores, y **divisor_count**, que únicamente cuenta el número de divisores.
# ###<div class="alert alert-warning">Calcula todos los divisores de 180.</div>
# La función **sum** suma todos los elementos de una lista.
# ###<div class="alert alert-warning">Demuestra que 496 es un número perfecto.</div>
# Para calcular el primo anterior y el primo siguiente se utilizan las funciones **nextprime** y **prevprime**. Para calcular el número de primos menores que una cantidad dada se utiliza **primepi**.
# ###<div class="alert alert-warning">Calcula el primo anterior y posterior a 1000.</div>
# ###<div class="alert alert-warning">Calcula $\pi(1000)$ (la cantidad de números primos menores que 1000).</div>
# Para calcular con decimales cualquier expresión se utiliza el método **evalf**. Para utilizar un método, primero debemos escribir la expresión, después un punto y finalmente el nombre del método. Muchas veces lo más cómodo es guardar la expresión en una variable y emplear el método sobre dicha variable. En este caso el método **evalf** tiene un argumento opcional, que es el número de cifras significativas.
#
# Una de las "funciones mágicas" de Ipython es **%%time**, que nos permite calcular el tiempo de cómputo de la celda en cuestión. Esto es una función propia de IPython, no disponible en Python.
# ###<div class="alert alert-warning">Calcula el número $\pi$ y $\sqrt{2}$ con 50 cifras significativas. Calcula algunos tiempos de cómputo.</div>
| notebooks/03.- Fracciones y divisibilidad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#158 숫자 1~100 랜덤숫자중에서 스무고개를 만드시오. 입력숫자보다 작으면 작다, 크면 크다 쓰시오
# Guessing game: pick a secret in 1..100 (inclusive) and tell the user
# whether to guess lower or higher until they hit it.
from random import randrange

# BUG FIX: randrange(1, 100) excludes 100, but the problem asks for 1~100;
# randrange's stop is exclusive, so the upper bound must be 101.
n = randrange(1, 101)

while True:
    ans1 = int(input("Guess Number"))
    if ans1 == n:
        print("Congrants!")
        break
    elif ans1 > n:
        print("Choose lower number")
    else:
        print("Choose higher number")
# +
#160 소수점 첫 째 자리가진 0과 10사이의 수(0.0 ~ 9.9)를 랜덤으로 발생하는 프로그램
#random() is uniform on [0.0, 1.0); scaling by 10 and rounding to one
#decimal gives 0.0 ~ 9.9 (NOTE(review): values >= 9.95 round to 10.0, a
#small edge case the exercise ignores)
from random import random
round(random()*10,1)
# -
# Continuously print random one-decimal values (interrupt the kernel to stop).
# BUG FIX: the previous cell did `from random import random`, so `random`
# is the *function*, not the module -- `random.random()` raised
# AttributeError.  Call the already-imported function directly.
from random import random
while True:
    print(round(random()*10,1))
# +
#161 두 수를 입력하고, 입력받은 두 수 사이의 정수를 랜덤으로 출력하는 프로그램을 작성하시오. 두 수 사이의 정수가 존재하지 않을때에는 존재x표기
# Read two integers and print a random integer strictly between them.
# BUG FIX: the original assumed num1 < num2; entering the larger number
# first made randrange raise ValueError.  Use min/max so input order is
# irrelevant.
from random import randrange

num1 = int(input("insert positive integer"))
num2 = int(input("insert positive integer"))
lo, hi = min(num1, num2), max(num1, num2)
if lo + 1 < hi:
    print(randrange(lo + 1, hi))
else:
    print("no integer b/w two numbers")
# +
#161
# Same exercise using randint (endpoints inclusive, so shrink each by one).
# BUG FIX: this cell used `random.randint` but the `random` module was never
# imported under that name (earlier cells only did `from random import ...`).
import random

a = int(input("insert number"))
b = int(input("insert number"))
if min(a,b)+1 < max(a,b):
    print(random.randint(min(a,b)+1, max(a,b)-1))
# +
#161(2)
def integer_find(n=None):
    """Read two integers and print a random integer strictly between them.

    `n` is unused; it is kept (with a default) for backward compatibility
    with the original signature.
    """
    import random
    num1, num2 = int(input("insert number")), int(input("insert number"))
    if num1 != num2 and num1 + 1 != num2:
        print(random.randrange(num1 + 1, num2))
    else:
        print("no integer between two numbers")

# BUG FIX: the original called integer_find(n) with `n` undefined (NameError).
integer_find()
# +
#(162) 10이상 20이하 정수형 난수4개발생, 평균이 15이상이면 Big 아니면 아니면 small을 출력
# Draw four random integers in [10, 20]; print "Big" when their mean is
# >= 15, otherwise "small".
from random import randrange
from statistics import mean

num = []
for i in range(4):
    a = randrange(10, 21)
    num.append(a)
# BUG FIX: the original only evaluated mean(num) and never printed the
# Big/small verdict the problem statement requires.
if mean(num) >= 15:
    print("Big", mean(num))
else:
    print("small", mean(num))
# +
#162(2)
#(162) 10이상 20이하 정수형 난수4개발생, 평균이 15이상이면 Big 아니면 small을 출력
def ran_gen(n=4):
    """Draw `n` random integers in [10, 20]; print Big/Small by mean >= 15.

    The count defaults to 4 as the exercise requires; the original took an
    unused parameter, so existing calls passing a value still work.
    """
    import random
    import statistics
    a = []
    for _ in range(n):
        a.append(random.randrange(10, 21))
    if statistics.mean(a) >= 15:
        print("Big", statistics.mean(a))
    else:
        print("Small", statistics.mean(a))

# BUG FIX: the original called ran_gen(n) with `n` undefined (NameError).
ran_gen()
# -
# 162
def average_randint():
    """Draw four random integers in [10, 20]; print Big/small by mean >= 15."""
    import random
    import statistics
    a = []
    # BUG FIX: the exercise asks for four numbers; the original drew only 3.
    for _ in range(4):
        a.append(random.randint(10, 20))
    if statistics.mean(a) >= 15:
        print("Big", statistics.mean(a))
    else:
        print("small", statistics.mean(a))

average_randint()
# +
## 163 총 3단계의 걸쳐 문제를 맞출경우 다음 단계로 넘어가는 퀴즈를 만드세요. 1단계a: 1~2사이 정수, 2단계,1~4사이, 3단계 1~8사이
#중간에 틀리면 Failed. try next time. 3단계 모두 맞추면 Congrats!를 출력
# Three-stage guessing quiz driven by stdin: each stage draws a secret from
# a widening range; any wrong guess ends the game, three correct guesses win.
# randrange's stop is exclusive, so these give 1-2, 1-4 and 1-8 as required.
from random import randrange
num1, num2, num3 = randrange(1,3), randrange(1,5), randrange(1,9)
# NOTE(review): this while loop never actually repeats -- every path below
# ends in a break -- so it only serves as a single pass.
while True:
    n1 = int(input("insert 1 or 2 : "))
    if n1 != num1:
        print("Failure", "Answer is ", num1)
        break
    else:
        print("Succeed!")
        n2 = int(input("insert number b/w 1 to 4 : "))
        if n2 != num2:
            print("Failure", "Answer is ", num2)
            break
        else:
            print("Succeed!")
            n3 = int(input("insert number b/w 1 to 8 : "))
            if n3 != num3:
                print("Failure", "Answer is ", num3)
                break
            else:
                print("Congrats!")
                break
# +
# Alternate solution to #163 using nested ifs.
import random

num1, num2, num3 = random.randint(1,2), random.randint(1,4), random.randint(1,8)

n = int(input("insert number b/w 1 and 2: "))
if n == num1:
    n2 = int(input("insert number b/w 1 and 4: "))
    if n2 == num2:
        n3 = int(input("insert number b/w 1 and 8: "))
        if n3 == num3:
            print("Congrats!")
        else:
            print("Failed!", num1, num2, num3)
    # BUG FIX: the original printed nothing at all when the first or second
    # guess was wrong; the problem requires a "Failed" message on any miss.
    else:
        print("Failed!", num1, num2, num3)
else:
    print("Failed!", num1, num2, num3)
# +
#164 cars의 원소를 섞고, 맨앞의 문자열이 Hyundai일경우 True를 출력하시오
# Shuffle the car list in place; when "Hyundai" lands in front, prefix the
# printed list with True.
from random import shuffle

cars = ["Hyundai", "Kia", "BMW", "Benz"]
shuffle(cars)
if cars[0] != "Hyundai":
    print(cars)
else:
    print("True: ", cars)
# -
# +
#170 while활용, 작은수에서 큰수를 입력받아 5의배수를 나타내시오. ex) 7, 34 -> 10,15,20,25,30
# +
# Read two integers and print every multiple of 5 strictly between them.
a = int(input("insert 1st number"))
b = int(input("insert 2nd number"))
# Ensure a <= b (tuple swap instead of a temp variable).
if a > b:
    a, b = b, a
i = a + 1
while i < b:
    if i % 5 == 0:
        print(i)
    i += 1
# +
#170(2)
# Print every multiple of 5 strictly between the two inputs.
num1, num2 = int(input("insert number")), int(input("insert number"))
# BUG FIX: start one past the smaller input -- the original began at
# min(num1, num2) itself, which wrongly printed that endpoint when it was
# a multiple of 5 (problem 170 asks for numbers *between* the inputs).
i = min(num1, num2) + 1
while i < max(num1, num2):
    if i % 5 == 0:
        print(i)
    i += 1
# +
a, b = int(input("insert number")), int(input("insert number"))
# BUG FIX: range starts one past the smaller input so the endpoint itself is
# excluded, matching "between" in problem 170 (range already excludes max).
for i in range(min(a, b) + 1, max(a, b)):
    if i % 5 == 0:
        print(i)
# -
| problems-review/158~170.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: padl
# language: python
# name: padl
# ---
# +
import os, sys
sys.path.append('../matplotlib_surface_plotting/')
from matplotlib_surface_plotting import plot_surf
import nibabel as nb
import numpy as np
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from matplotlib import cm
def normalize_v3(arr):
    ''' Normalize a numpy array of 3 component vectors shape=(n,3) in place. '''
    # Row-wise Euclidean norms; dividing column by column keeps the update
    # in place, and the mutated array is also returned for convenience.
    norms = np.sqrt((arr ** 2).sum(axis=1))
    for col in range(3):
        arr[:, col] /= norms
    return arr
def normal_vectors(vertices, faces):
    """Return the unit normal vector of each triangular face of the mesh."""
    corners = vertices[faces]
    # Cross product of the two edge vectors leaving vertex 0 gives a vector
    # perpendicular to the face; normalize to unit length.
    edge1 = corners[:, 1] - corners[:, 0]
    edge2 = corners[:, 2] - corners[:, 0]
    return normalize_v3(np.cross(edge1, edge2))
def frustum(left, right, bottom, top, znear, zfar):
    """Return a 4x4 perspective frustum projection matrix (glFrustum layout).

    Maps the viewing frustum bounded by the given clip planes onto
    normalized device coordinates.
    """
    M = np.zeros((4, 4), dtype=np.float32)
    M[0, 0] = +2.0 * znear / (right - left)
    M[1, 1] = +2.0 * znear / (top - bottom)
    M[2, 2] = -(zfar + znear) / (zfar - znear)
    M[0, 2] = (right + left) / (right - left)
    # BUG FIX: glFrustum places (top+bottom)/(top-bottom) at row 1, col 2;
    # the original wrote it to M[2, 1].  This is harmless for the symmetric
    # frustums built by perspective() (where the term is 0) but wrong for
    # any off-axis frustum.
    M[1, 2] = (top + bottom) / (top - bottom)
    M[2, 3] = -2.0 * znear * zfar / (zfar - znear)
    M[3, 2] = -1.0
    return M
def perspective(fovy, aspect, znear, zfar):
    """Symmetric perspective projection (like gluPerspective) via frustum().

    fovy is the vertical field of view in degrees; aspect = width / height.
    """
    half_h = np.tan(0.5 * np.radians(fovy)) * znear
    half_w = half_h * aspect
    return frustum(-half_w, half_w, -half_h, half_h, znear, zfar)
def translate(x, y, z):
    """Homogeneous 4x4 translation matrix for the offset (x, y, z)."""
    T = np.eye(4, dtype=float)
    T[:3, 3] = (x, y, z)
    return T
def xrotate(theta):
    """Homogeneous 4x4 rotation matrix about the x axis; theta in degrees."""
    rad = np.pi * theta / 180
    c, s = np.cos(rad), np.sin(rad)
    R = np.eye(4, dtype=float)
    R[1, 1], R[1, 2] = c, -s
    R[2, 1], R[2, 2] = s, c
    return R
def yrotate(theta):
    """Homogeneous 4x4 rotation matrix about the y axis; theta in degrees."""
    rad = np.pi * theta / 180
    c, s = np.cos(rad), np.sin(rad)
    R = np.eye(4, dtype=float)
    R[0, 0], R[0, 2] = c, s
    R[2, 0], R[2, 2] = -s, c
    return R
def zrotate(theta):
    """Homogeneous 4x4 rotation matrix about the z axis; theta in degrees."""
    rad = np.pi * theta / 180
    c, s = np.cos(rad), np.sin(rad)
    R = np.eye(4, dtype=float)
    R[0, 0], R[0, 1] = c, -s
    R[1, 0], R[1, 1] = s, c
    return R
def shading_intensity(vertices, faces, light=np.array([0,0,1]), shading=0.7):
    """Per-face shading intensity from a directional light source.

    Default is a vertical light.  `shading` controls the amount of shading
    (0 = flat, 1 = full range).  Saturates so the top ~20% of faces all get
    maximum intensity.  Returns one value per face in (0, 1].
    """
    face_normals = normal_vectors(vertices, faces)
    # Lambertian term: cosine between face normal and light direction.
    intensity = np.dot(face_normals, light)
    intensity[np.isnan(intensity)] = 1
    # BUG FIX: the original re-assigned `shading = 0.7` here, silently
    # ignoring the caller-supplied argument; the default (0.7) preserves
    # the previous behaviour.
    #top 20% all become fully coloured
    intensity = (1 - shading) + shading * (intensity - np.min(intensity)) / (
        np.percentile(intensity, 80) - np.min(intensity))
    #saturate
    intensity[intensity > 1] = 1
    #flat maps have lots of nans which need to become 1
    intensity[np.isnan(intensity)] = 1
    return intensity
def f7(seq):
    """Return the unique elements of seq, preserving first-seen order.

    Order preservation matters so neighbouring-triangle relationships are
    retained by callers.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def get_ring_of_neighbours(island, neighbours, vertex_indices=None, ordered=False):
    """Calculate the ring of neighbouring vertices for an island of cortex.

    island : boolean mask over vertices marking the island.
    neighbours : per-vertex neighbour lists (e.g. from get_neighbours_from_tris).
    vertex_indices : optional explicit vertex ids; defaults to arange(len(island)).
    ordered : if True, vertices should come back in connected order.
        NOTE(review): the ordered branch was never implemented -- the
        function returns None when ordered=True, as in the original.
    """
    # BUG FIX: `if not vertex_indices` raises ValueError for numpy arrays
    # (ambiguous truth value) and wrongly treats an empty sequence as
    # "missing"; test explicitly against None instead.
    if vertex_indices is None:
        vertex_indices = np.arange(len(island))
    if not ordered:
        neighbours_island = neighbours[island]
        unfiltered_neighbours = []
        for n in neighbours_island:
            unfiltered_neighbours.extend(n)
        # Drop island members themselves, keeping only the surrounding ring.
        unique_neighbours = np.setdiff1d(np.unique(unfiltered_neighbours),
                                         vertex_indices[island])
        return unique_neighbours
def get_neighbours_from_tris(tris, label=None):
    """Get surface neighbours from tris.

    Input: tris -- (n, 3) array of triangle vertex indices.
    Returns a 1-D object array; each entry holds the ordered neighbours for
    the given vertex (or their intersection with `label` when supplied).
    """
    n_vert = np.max(tris + 1)
    neighbours = [[] for i in range(n_vert)]
    # Each triangle contributes both of its other corners to every vertex.
    for tri in tris:
        neighbours[tri[0]].extend([tri[1], tri[2]])
        neighbours[tri[2]].extend([tri[0], tri[1]])
        neighbours[tri[1]].extend([tri[2], tri[0]])
    #Get unique neighbours
    for k in range(len(neighbours)):
        if label is not None:
            neighbours[k] = set(neighbours[k]).intersection(label)
        else:
            neighbours[k] = f7(neighbours[k])
    # BUG FIX: np.array() on a ragged list of lists raises ValueError on
    # NumPy >= 1.24 unless an object dtype is used; build the object array
    # explicitly so each entry stays a per-vertex neighbour list.
    out = np.empty(len(neighbours), dtype=object)
    out[:] = neighbours
    return out
def adjust_colours_pvals(colours, pvals,triangles,mask=None):
    """red ring around clusters and greying out non-significant vertices

    colours : (n_faces, 4) RGBA array -- modified in place and returned.
    pvals : per-vertex p-values; `pvals < 0.05` defines the cluster island.
    triangles : (n_faces, 3) vertex indices.
    mask : optional per-vertex boolean; masked faces are painted light grey.

    NOTE(review): the final block greys faces where pvals < 0.05, which
    looks inverted relative to the docstring ("greying out non-significant
    vertices") -- confirm the expected polarity of `pvals` with callers.
    """
    if mask is not None:
        # Light grey for any face touching a masked vertex.
        verts_masked = mask[triangles].any(axis=1)
        colours[verts_masked,:] = np.array([0.86,0.86,0.86,1])
    neighbours=get_neighbours_from_tris(triangles)
    # Ring of vertices bordering the cluster island (pvals < 0.05).
    ring=get_ring_of_neighbours(pvals<0.05,neighbours)
    if len(ring)>0:
        # Grow the ring by one extra layer of neighbours, then paint any
        # face touching it red.
        ring_label = np.zeros(len(neighbours)).astype(bool)
        ring_label[ring]=1
        ring=get_ring_of_neighbours(ring_label,neighbours)
        ring_label[ring]=1
        colours[ring_label[triangles].any(axis=1),:] = np.array([1.0,0,0,1])
    grey_out=pvals<0.05
    verts_grey_out= grey_out[triangles].any(axis=1)
    # Blend the selected faces toward light grey (weighted average).
    colours[verts_grey_out,:] = (1.5*colours[verts_grey_out] + np.array([0.86,0.86,0.86,1]))/2.5
    return colours
def frontback(T):
    """
    Sort front and back facing triangles.

    Parameters
    ----------
    T : (n, 3, 2) array
        Screen-space triangle corner coordinates.

    Returns
    -------
    Two boolean masks (front, back) partitioning the n triangles.
    """
    # Twice the signed area of each 2D triangle (shoelace formula);
    # negative means counter-clockwise on screen, i.e. front-facing.
    x, y = T[..., 0], T[..., 1]
    Z = ((x[:, 1] - x[:, 0]) * (y[:, 1] + y[:, 0])
         + (x[:, 2] - x[:, 1]) * (y[:, 2] + y[:, 1])
         + (x[:, 0] - x[:, 2]) * (y[:, 0] + y[:, 2]))
    return Z < 0, Z >= 0
def plot_surf(vertices, faces, overlay, rotate=[270,90], cmap='viridis', filename=None, label=False,
             vmax=None, vmin=None, x_rotate=270,z_rotate=0, pvals=None, colorbar=True, title=None, mask=None, base_size=6,
             flat_map=False,
             ):
    """Plot a mesh surface with one or more vertex-wise overlays.

    Parameters
    ----------
    vertices : (n, 3) array of vertex locations
    faces : (m, 3) array of vertex indices defining triangles
    overlay : per-vertex array (or list of arrays, one row of views each)
    rotate : view rotation(s) in degrees; 270 = lateral on lh, 90 = medial
    cmap : matplotlib colormap name
    filename : if given, the figure is saved there
    label : if True, overlay is categorical -> median per face, else mean
    vmax, vmin : colour limits; derived from the data when None
    x_rotate, z_rotate : extra rotations applied to the view
    pvals : optional per-vertex p-values passed to adjust_colours_pvals
    colorbar : add a colourbar axis
    title : optional figure title
    mask : optional per-vertex boolean mask to grey out
    base_size : figure size multiplier
    flat_map : if True, force the flat-map viewing angles
    """
    # BUG FIX: np.float was removed in NumPy 1.24 -- use the builtin float.
    vertices = vertices.astype(float)
    F = faces.astype(int)
    # Centre the mesh and scale it into roughly [-0.5, 0.5].
    vertices = (vertices-(vertices.max(0)+vertices.min(0))/2)/max(vertices.max(0)-vertices.min(0))
    if not isinstance(rotate,list):
        rotate=[rotate]
    if not isinstance(overlay,list):
        overlays=[overlay]
    else:
        overlays=overlay
    if flat_map:
        z_rotate=90
        rotate=[90]
    # Rotate the light source together with any z rotation of the view.
    light = np.array([0,0,1,1]) @ yrotate(z_rotate)
    intensity = shading_intensity(vertices, F, light=light[:3], shading=0.7)
    # Figure size depends on the number of views and overlays requested.
    fig = plt.figure(figsize=(base_size*len(rotate)+colorbar*(base_size-2),(base_size-1)*len(overlays)))
    if title is not None:
        plt.title(title, fontsize=25)
    plt.axis('off')
    for k,overlay in enumerate(overlays):
        #colours smoothed (mean) or median if label
        if label:
            colours = np.median(overlay[F],axis=1)
        else:
            colours = np.mean(overlay[F],axis=1)
        if vmax is not None:
            colours = (colours - vmin)/(vmax-vmin)
            colours = np.clip(colours,0,1)
        else:
            colours = (colours - colours.min())/(colours.max()-colours.min())
            # NOTE(review): taken *after* normalisation these are always
            # 1.0/0.0, so the colorbar is labelled 0..1 rather than with the
            # data range -- probably intended to be set before normalising.
            vmax = colours.max()
            vmin = colours.min()
        C = plt.get_cmap(cmap)(colours)
        if pvals is not None:
            C = adjust_colours_pvals(C,pvals,F,mask)
        #adjust intensity based on light source here
        C[:,0] *= intensity
        C[:,1] *= intensity
        C[:,2] *= intensity
        for i,view in enumerate(rotate):
            # Model-view-projection matrix for this viewing angle.
            MVP = perspective(25,1,1,100) @ translate(0,0,-3) @ yrotate(view) @ zrotate(z_rotate) @ xrotate(x_rotate) @ zrotate(270*flat_map)
            # Project vertices to clip space and divide by w.
            V = np.c_[vertices, np.ones(len(vertices))] @ MVP.T
            V /= V[:,3].reshape(-1,1)
            V = V[F]
            #triangle coordinates
            T = V[:,:,:2]
            #get Z values for ordering triangle plotting
            Z = -V[:,:,2].mean(axis=1)
            # Cull back-facing triangles, then paint the rest back-to-front.
            front, back = frontback(T)
            T=T[front]
            s_C = C[front]
            Z = Z[front]
            I = np.argsort(Z)
            T, s_C = T[I,:], s_C[I,:]
            # NOTE(review): subplot index 2*k+i+1 assumes len(rotate) <= 2.
            ax = fig.add_subplot(len(overlays),len(rotate)+1,2*k+i+1, xlim=[-.9,+.9], ylim=[-.9,+.9],aspect=1, frameon=False,
                                 xticks=[], yticks=[])
            collection = PolyCollection(T, closed=True, linewidth=0,antialiased=False, facecolor=s_C)
            collection.set_alpha(1)
            ax.add_collection(collection)
    plt.subplots_adjust(left =0 , right =1, top=1, bottom=0,wspace=0, hspace=0)
    if colorbar:
        cbar = fig.colorbar(cm.ScalarMappable( cmap=cmap), ticks=[0,0.5, 1],cax = fig.add_axes([0.7, 0.3, 0.03, 0.38]))
        cbar.ax.set_yticklabels([np.round(vmin,decimals=2), np.round(np.mean([vmin,vmax]),decimals=2),
                                 np.round(vmax,decimals=2)])
        cbar.ax.tick_params(labelsize=25)
    if filename is not None:
        fig.savefig(filename,bbox_inches = 'tight',pad_inches=0,transparent=True)
    return
# -
# +
#vertices = (vertices-(vertices.max(0)+vertices.min(0))/2)/max(vertices.max(0)-vertices.min(0))
#vertices = np.roll(vertices,1,axis=1)
# +
# vector_file=nb.load('../data/MyelinMap_inflated_vectors.L.func.gii')
# vectors=np.array([vector_file.darrays[0].data,
# vector_file.darrays[1].data,
# vector_file.darrays[2].data])
# -
# Load the inflated left-hemisphere surface and its myelin-map overlay.
surf=nb.load('../data/fs_LR.32k.L.inflated.surf.gii')
vertices,faces = surf.darrays[0].data,surf.darrays[1].data
overlay = nb.load('../data/S1200.MyelinMap.L.func.gii').darrays[0].data

# Swap in the flat surface (same triangulation, flattened coordinates).
surf=nb.load('../data/fs_LR.32k.L.flat.surf.gii')
vertices,faces = surf.darrays[0].data,surf.darrays[1].data

# +
# vertices[:,2]+=vertices[:,0]
# vertices[:,0]=0
# -

# Render the flat map with a fixed colour range of 1-2.
plot_surf(vertices,faces,overlay,rotate=90,flat_map=True,
          vmin=1,vmax=2)

# Light direction after rotating the default overhead light by 270 degrees.
light = np.array([0,0,1,1]) @ yrotate(270)
light

# +
# Quick scatter of the flat-map coordinates for sanity checking.
plt.scatter(vertices[:,1],vertices[:,2])
# -

# NOTE(review): `intensity` is a local variable inside plot_surf and is not
# defined at notebook scope -- evaluating this cell raises NameError.
intensity
| examples/hcp_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Vw_-jLJKVgGa"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 3*
#
# ---
# + [markdown] id="O67uhlT4MExK"
# # Cross-Validation
#
# - Do **cross-validation** with independent test set
# - Use scikit-learn for **hyperparameter optimization**
# + [markdown] id="59EsvYLdDHAB"
# ### Setup
#
# Run the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.
#
# Libraries
#
# - **category_encoders**
# - matplotlib
# - numpy
# - pandas
# - **pandas-profiling**
# - scikit-learn
# - scipy.stats
# + id="hwbmmgaWDNid"
# %%capture
import sys

# Pick the data location: Colab pulls from the course GitHub repo and
# installs the extra packages; a local checkout uses ../data instead.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
    # !pip install pandas-profiling==2.*

# If you're working locally:
else:
    DATA_PATH = '../data/'
# + [markdown] id="o2voS8UIVgGh"
# # Do cross-validation with independent test set
# + [markdown] id="-ca6Mnr1VgGh"
# ## Overview
# + [markdown] id="YDzyTVpAVgGi"
# ### Predict rent in NYC 🏠
#
# We're going back to one of our New York City real estate datasets.
# + id="exXnWT8OVgGi"
import numpy as np
import pandas as pd

# Read New York City apartment rental listing data
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)

# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
# NOTE(review): the latitude upper bound uses a strict `<` while the other
# bounds use `<=` -- likely unintentional, though the effect is negligible.
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
        (df['price'] <= np.percentile(df['price'], 99.5)) &
        (df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
        (df['latitude'] < np.percentile(df['latitude'], 99.95)) &
        (df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
        (df['longitude'] <= np.percentile(df['longitude'], 99.95))]

# Do train/test split
# Use data from April & May 2016 to train
# Use data from June 2016 to test
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True)
cutoff = pd.to_datetime('2016-06-01')
train = df[df.created < cutoff]
test = df[df.created >= cutoff]
# Wrangle train & test sets in the same way
def engineer_features(df):
    """Add engineered features to an apartments frame and drop `created`.

    Returns a new DataFrame (the input is copied, which also avoids
    SettingWithCopyWarning); adds description flags/length, perk counts,
    pet-policy flags, a room total, and days elapsed in 2016.
    """
    out = df.copy()

    # Description features: presence flag and raw character length.
    out['description'] = out['description'].str.strip().fillna('')
    out['has_description'] = out['description'] != ''
    out['description_length'] = out['description'].str.len()

    # Total number of amenity ("perk") columns set for each apartment.
    perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed',
                 'doorman', 'dishwasher', 'no_fee', 'laundry_in_building',
                 'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck',
                 'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony',
                 'swimming_pool', 'new_construction', 'exclusive', 'terrace',
                 'loft', 'garden_patio', 'common_outdoor_space',
                 'wheelchair_access']
    out['perk_count'] = out[perk_cols].sum(axis=1)

    # Pet-policy flags.
    cats = out['cats_allowed'] == 1
    dogs = out['dogs_allowed'] == 1
    out['cats_or_dogs'] = cats | dogs
    out['cats_and_dogs'] = cats & dogs

    # Total rooms, and days elapsed since Jan 1 2016 (then drop the raw date).
    out['rooms'] = out['bedrooms'] + out['bathrooms']
    out['days'] = (out['created'] - pd.to_datetime('2016-01-01')).dt.days
    return out.drop(columns='created')
# Apply the same feature engineering to both splits.
train = engineer_features(train)
test = engineer_features(test)

# + id="io-jFp4BVgGl"
from pandas_profiling import ProfileReport
# NOTE(review): to_notebook_iframe() renders the report as a side effect and
# returns None, so `profile` is None and the bare `profile` below displays
# nothing -- probably meant to call to_notebook_iframe() on the last line.
profile = ProfileReport(train, minimal=True).to_notebook_iframe()
profile
# + [markdown] id="ZF7x8ybBVgGo"
# ### Validation options
# + [markdown] id="Hr-Dt67Gv2cB"
# Let's take another look at [Sebastian Raschka's diagram of model evaluation methods.](https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html) So far we've been using "**train/validation/test split**", but we have more options.
#
# Today we'll learn about "k-fold **cross-validation** with independent test set", for "model selection (**hyperparameter optimization**) and performance estimation."
#
# <img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600">
#
# <sup>Source: https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html</sup>
#
#
# + [markdown] id="0clcyZivVgGp"
# ### Cross-validation: What & Why?
# + [markdown] id="ozuzFo_Pv2cB"
# The Scikit-Learn docs show a diagram of how k-fold cross-validation works, and explain the pros & cons of cross-validation versus train/validate/test split.
#
# #### [Scikit-Learn User Guide, 3.1 Cross-validation](https://scikit-learn.org/stable/modules/cross_validation.html)
#
# > When evaluating different settings (“hyperparameters”) for estimators, there is still a risk of overfitting on the test set because the parameters can be tweaked until the estimator performs optimally. This way, knowledge about the test set can “leak” into the model and evaluation metrics no longer report on generalization performance. To solve this problem, yet another part of the dataset can be held out as a so-called “validation set”: training proceeds on the training set, after which evaluation is done on the validation set, and when the experiment seems to be successful, final evaluation can be done on the test set.
# >
# > However, **by partitioning the available data into three sets, we drastically reduce the number of samples which can be used for learning the model, and the results can depend on a particular random choice for the pair of (train, validation) sets.**
# >
# > **A solution to this problem is a procedure called cross-validation (CV for short). A test set should still be held out for final evaluation, but the validation set is no longer needed when doing CV.**
#
# <img src="https://scikit-learn.org/stable/_images/grid_search_cross_validation.png" width="600">
#
# > In the basic approach, called k-fold CV, the training set is split into k smaller sets. The following procedure is followed for each of the k “folds”:
# >
# > - A model is trained using $k-1$ of the folds as training data;
# > - the resulting model is validated on the remaining part of the data (i.e., it is used as a test set to compute a performance measure such as accuracy).
# >
# > The performance measure reported by k-fold cross-validation is then the average of the values computed in the loop. **This approach can be computationally expensive, but does not waste too much data (as is the case when fixing an arbitrary validation set).**
# + [markdown] id="q9xdNinOVgGq"
# ## Follow Along
# + [markdown] id="o10EvckQv2cC"
# ### cross_val_score
#
# How do we get started? According to the [Scikit-Learn User Guide](https://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics),
#
# > The simplest way to use cross-validation is to call the [**`cross_val_score`**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) helper function
#
# But, there's a quirk: For scikit-learn's cross-validation [**scoring**](https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter), higher is better. But for regression error metrics, lower is better. So scikit-learn multiplies regression error metrics by -1 to make them negative. That's why the value of the `scoring` parameter is `'neg_mean_absolute_error'`.
#
# So, k-fold cross-validation with this dataset looks like this:
# + [markdown] id="XmO-xR5_VgGr"
# ### Linear Model
# + id="vEtInoSjVgGs"
import category_encoders as ce
import numpy as np
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Target to predict, plus free-text columns too high-cardinality to one-hot encode.
target = 'price'
high_cardinality = ['display_address', 'street_address', 'description']
# NOTE(review): `train` is a DataFrame defined in an earlier notebook cell.
features = train.columns.drop([target] + high_cardinality)
X_train = train[features]
y_train = train[target]
# Pipeline: one-hot encode -> mean-impute -> standardize -> keep the 20 best
# features by univariate F-test -> ridge regression.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='mean'),
    StandardScaler(),
    SelectKBest(f_regression, k=20),
    Ridge(alpha=1.0)
)
# k-fold CV. sklearn reports *negative* MAE (higher-is-better scoring
# convention), so negate for display.
k = 3
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
                         scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
# + id="-yu4okA1VgGu"
# Mean MAE across the k folds (sign flipped back to positive).
-scores.mean()
# + [markdown] id="MSJ65n-NVgGx"
# ### Random Forest
# + id="Ri4hKY6MVgGy"
from sklearn.ensemble import RandomForestRegressor
# For the tree model keep all columns except the target; target-encoding
# handles the high-cardinality text features.
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
    ce.TargetEncoder(min_samples_leaf=1, smoothing=1),
    SimpleImputer(strategy='median'),
    RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=42)
)
# Same 3-fold cross-validation as the linear model, for comparison.
k = 3
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
                         scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
# + id="WH8bXvofVgG0"
# Mean MAE across the k folds (sign flipped back to positive).
-scores.mean()
# + [markdown] id="7dq-PfpGZSHJ"
# But the Random Forest has many hyperparameters. We mostly used the defaults, and arbitrarily chose `n_estimators`. Is it too high? Too low? Just right? How do we know?
# + id="YCubg7EbjZyT"
# Show the estimator's current hyperparameter settings.
print('Model Hyperparameters:')
print(pipeline.named_steps['randomforestregressor'])
# + [markdown] id="ZsOkh4BBVgG7"
# ## Challenge
#
# You will continue to participate in our Kaggle challenge. Use cross-validation and submit new predictions.
# + [markdown] id="sGZ0YPrUVgG7"
# # Use scikit-learn for hyperparameter optimization
# + [markdown] id="400NkSsBVgG8"
# ## Overview
# + [markdown] id="bk6o8W7Cv2cE"
# "The universal tension in machine learning is between optimization and generalization; the ideal model is one that stands right at the border between underfitting and overfitting; between undercapacity and overcapacity. To figure out where this border lies, first you must cross it." —[<NAME>](https://books.google.com/books?id=dadfDwAAQBAJ&pg=PA114)
# + [markdown] id="p8uKvR_pv2cG"
# ### Validation Curve
#
# Let's try different parameter values, and visualize "the border between underfitting and overfitting."
#
# Using scikit-learn, we can make [validation curves](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html), "to determine training and test scores for varying parameter values. This is similar to grid search with one parameter."
# + [markdown] id="hEIxeNXdv2cF"
# <img src="https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png">
#
# <sup>Source: https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn</sup>
# + [markdown] id="r3bbgaP2c3Pr"
# Validation curves are awesome for learning about overfitting and underfitting. (But less useful in real-world projects, because we usually want to vary more than one parameter.)
#
# For this example, let's see what happens when we vary the depth of a decision tree. (This will be faster than varying the number of estimators in a random forest.)
# + id="znIz2FPQv2cG"
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve
from sklearn.tree import DecisionTreeRegressor
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(),
    DecisionTreeRegressor()
)
# Sweep max_depth from 1 to 28 in steps of 3; deeper trees = more complexity.
depth = range(1, 30, 3)
train_scores, val_scores = validation_curve(
    pipeline, X_train, y_train,
    param_name='decisiontreeregressor__max_depth',
    param_range=depth, scoring='neg_mean_absolute_error',
    cv=3,
    n_jobs=-1
)
# Scores come back negated (neg MAE) with one column per CV fold;
# negate and average over folds before plotting.
plt.figure(dpi=150)
plt.plot(depth, np.mean(-train_scores, axis=1), color='blue', label='training error')
plt.plot(depth, np.mean(-val_scores, axis=1), color='red', label='validation error')
plt.title('Validation Curve')
plt.xlabel('model complexity: DecisionTreeRegressor max_depth')
plt.ylabel('model score: Mean Absolute Error')
plt.legend();
# + id="RPNs0mAoVgHB"
# Same data, zoomed in on the y-axis to show where validation error bottoms out.
plt.figure(dpi=150)
plt.plot(depth, np.mean(-train_scores, axis=1), color='blue', label='training error')
plt.plot(depth, np.mean(-val_scores, axis=1), color='red', label='validation error')
plt.title('Validation Curve, Zoomed In')
plt.xlabel('model complexity: DecisionTreeRegressor max_depth')
plt.ylabel('model score: Mean Absolute Error')
plt.ylim((500, 700)) # Zoom in
plt.legend();
# + [markdown] id="vqTe6aAJVgHD"
# ## Follow Along
# + [markdown] id="JUaLgk8Pv2cJ"
# To vary multiple hyperparameters and find their optimal values, let's try **Randomized Search CV.**
# + [markdown] id="AexbC7fjv2cL"
# #### [Scikit-Learn User Guide, 3.2 Tuning the hyper-parameters of an estimator](https://scikit-learn.org/stable/modules/grid_search.html)
#
# > Hyper-parameters are parameters that are not directly learnt within estimators. In scikit-learn they are passed as arguments to the constructor of the estimator classes.
# >
# > It is possible and recommended to search the hyper-parameter space for the best cross validation score.
# >
# > [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV) exhaustively considers all parameter combinations, while [`RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) can sample a given number of candidates from a parameter space with a specified distribution.
# >
# > While using a grid of parameter settings is currently the most widely used method for parameter optimization, other search methods have more favourable properties. [`RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) implements a randomized search over parameters, where each setting is sampled from a distribution over possible parameter values.
# >
# > Specifying how parameters should be sampled is done using a dictionary. Additionally, a computation budget, being the number of sampled candidates or sampling iterations, is specified using the `n_iter` parameter.
# >
# > For each parameter, either a distribution over possible values or a list of discrete choices (which will be sampled uniformly) can be specified.
# + [markdown] id="oWxYsaPhVgHF"
# Here's a good blog post to explain more: [**A Comparison of Grid Search and Randomized Search Using Scikit Learn**](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85).
#
# <img src="https://miro.medium.com/max/2500/1*9W1MrRkHi0YFmBoHi9Y2Ow.png" width="50%">
# + [markdown] id="DBQJE9nAVgHG"
# ### Linear Model
# + id="ZtZQbJQ5v2cM"
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
features = train.columns.drop([target] + high_cardinality)
X_train = train[features]
y_train = train[target]
# Same linear pipeline as before, but the tunable settings are left at their
# defaults so the search can choose them.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(),
    StandardScaler(),
    SelectKBest(f_regression),
    Ridge()
)
# Keys follow sklearn's '<stepname>__<param>' convention for pipeline steps.
param_distributions = {
    'simpleimputer__strategy': ['mean', 'median'],
    'selectkbest__k': range(1, len(X_train.columns)+1),
    'ridge__alpha': [0.1, 1, 10],
}
# If you're on Colab, decrease n_iter & cv parameters
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=100,
    cv=5,
    scoring='neg_mean_absolute_error',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
# Trailing ';' suppresses the large repr output in the notebook.
search.fit(X_train, y_train);
# + id="P9M-OOJltM_I"
# best_score_ is negated MAE, so flip the sign for display.
print('Best hyperparameters', search.best_params_)
print('Cross-validation MAE', -search.best_score_)
# + id="qFrOr60-VgHL"
# If we used GridSearchCV instead of RandomizedSearchCV,
# how many candidates would there be?
# 2 imputation strategies * n columns * 3 Ridge alphas
2 * len(X_train.columns) * 3
# + [markdown] id="Q79ipvpgqYwF"
# ### "Fitting X folds for each of Y candidates, totalling Z fits" ?
#
# What did that mean? What do you think?
#
#
# + [markdown] id="3kpQQKWMVgHO"
# ### Random Forest
# + [markdown] id="bdO_ySRpVgHP"
# #### [Scikit-Learn User Guide, 3.2 Tuning the hyper-parameters of an estimator](https://scikit-learn.org/stable/modules/grid_search.html)
#
# > [`RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) implements a randomized search over parameters, where each setting is sampled from a distribution over possible parameter values.
# >
# > For each parameter, either a distribution over possible values or a list of discrete choices (which will be sampled uniformly) can be specified.
# >
# > This example uses the `scipy.stats` module, which contains many useful distributions for sampling parameters.
# + id="hfkelzeoVgHP"
from scipy.stats import randint, uniform
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
    ce.TargetEncoder(),
    SimpleImputer(),
    RandomForestRegressor(random_state=42)
)
# Mix of discrete choices and scipy.stats distributions: randint samples
# integers uniformly, uniform(0, 1) samples the max_features fraction.
param_distributions = {
    'targetencoder__min_samples_leaf': randint(1, 1000),
    'simpleimputer__strategy': ['mean', 'median'],
    'randomforestregressor__n_estimators': randint(50, 500),
    'randomforestregressor__max_depth': [5, 10, 15, 20, None],
    'randomforestregressor__max_features': uniform(0, 1),
}
# If you're on Colab, decrease n_iter & cv parameters
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=10,
    cv=3,
    scoring='neg_mean_absolute_error',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
# Trailing ';' suppresses the large repr output in the notebook.
search.fit(X_train, y_train);
# + id="tKaqopbzVgHR"
# best_score_ is negated MAE, so flip the sign for display.
print('Best hyperparameters', search.best_params_)
print('Cross-validation MAE', -search.best_score_)
# + [markdown] id="2tJr3YZ8xLt-"
# ### See detailed results
# + id="IGHRUlY3xF1O"
# Full CV results, one candidate per column (transposed), best-ranked first.
pd.DataFrame(search.cv_results_).sort_values(by='rank_test_score').T
# + [markdown] id="GDZyu6FNyY2l"
# ### Make predictions for test set
# + id="dlc-_tThVgHW"
# best_estimator_ is the winning pipeline, already refit on all training data
# (RandomizedSearchCV's refit=True default).
pipeline = search.best_estimator_
# + id="OuWqQUk_yIw4"
from sklearn.metrics import mean_absolute_error
# NOTE(review): `test` is a DataFrame defined in an earlier notebook cell.
X_test = test[features]
y_test = test[target]
y_pred = pipeline.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test MAE: ${mae:,.0f}')
# + [markdown] id="ANHoHmt5VgHb"
#
# Here's what the [`RandomizedSearchCV` documentation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) says about the `best_estimator_` attribute:
#
# > **best_estimator_** : ***estimator***
# > Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if `refit=False`. ... See `refit` parameter for more information ...
#
# So let's look at the `refit` parameter too:
#
# > **refit** : ***boolean, string, or callable, default=True***
# > Refit an estimator using the best found parameters on the whole dataset.
#
# By default, scikit-learn cross-validation will _"refit an estimator using the best found parameters on the whole dataset",_ which means, use **all** the training data:
#
# <img src="https://scikit-learn.org/stable/_images/grid_search_workflow.png" width="50%">
#
# ***Tip: If you're doing 3-way train/validation/test split, you should do this too!*** After you've optimized your hyperparameters and selected your final model, then manually refit on both the training and validation data.
# + [markdown] id="aXxYyLjoVgHc"
# ## Challenge
#
# For your assignment, use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
# + [markdown] id="jTj0wRy_VgHc"
# # Review
#
# Continue to participate in our Kaggle Challenge, and practice these objectives:
#
# - Do **cross-validation** with independent test set
# - Use scikit-learn for **hyperparameter optimization**
#
# You can refer to these suggestions when you do hyperparameter optimization, now and in future projects:
# + [markdown] id="4sQiv9s2kOjn"
# ### Tree Ensemble hyperparameter suggestions
#
# #### Random Forest
# - class_weight (for imbalanced classes)
# - max_depth (usually high, can try decreasing)
# - n_estimators (too low underfits, too high wastes time)
# - min_samples_leaf (increase if overfitting)
# - max_features (decrease for more diverse trees)
#
# #### XGBoost
# - scale_pos_weight (for imbalanced classes)
# - max_depth (usually low, can try increasing)
# - n_estimators (too low underfits, too high wastes time/overfits) — _I recommend using early stopping instead of cross-validation_
# - learning_rate (too low underfits, too high overfits)
# - See [Notes on Parameter Tuning](https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html) and [DART booster](https://xgboost.readthedocs.io/en/latest/tutorials/dart.html) for more ideas
#
# ### Linear Model hyperparameter suggestions
#
# #### Logistic Regression
# - C
# - class_weight (for imbalanced classes)
# - penalty
#
# #### Ridge / Lasso Regression
# - alpha
#
# #### ElasticNet Regression
# - alpha
# - l1_ratio
#
# For more explanation, see [**<NAME>'s 9 minute video on Ridge Regression**](https://www.youtube.com/watch?v=XK5jkedy17w)!
# + [markdown] id="tMFqqYWxVgHd"
# # Sources
# - <NAME>, [Python Data Science Handbook, Chapter 5.3,](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html) Hyperparameters and Model Validation
# - <NAME>, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)
# - <NAME>, [A Programmer’s Guide to Data Mining, Chapter 5,](http://guidetodatamining.com/chapter5/) first 10 pages, for a great explanation of cross-validation with examples and pictures
# - <NAME>, [Model Evaluation](https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html)
# - [Scikit-Learn User Guide, 3.1 Cross-validation](https://scikit-learn.org/stable/modules/cross_validation.html)
# - [Scikit-Learn User Guide, 3.2 Tuning the hyper-parameters of an estimator](https://scikit-learn.org/stable/modules/grid_search.html)
# - [sklearn.model_selection.cross_val_score](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html)
# - [sklearn.model_selection.RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
# - [xgboost, Notes on Parameter Tuning](https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html)
| module3-cross-validation/LS_DS_223.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import cv2
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import net as neural_net
# Instantiate the network and restore previously trained weights from disk.
net = neural_net.Net()
net.load_weights('trained_data.npy')
# +
TEXT = 'Text'#os.path.join('Training Data', 'Text')
NOTEXT = 'No Text'#os.path.join('Training Data', 'No Text')

# Map each class-folder name to the integer label the network was trained on.
labels = {TEXT: 0, NOTEXT: 1}

# Scan every training image, run it through the net, and copy the
# misclassified ones into 'Incorrect Data' for later inspection.
incorrect = 0
for label in labels:
    for file in tqdm(os.listdir(os.path.join('Training Data', label))):
        try:
            path = os.path.join('Training Data', label, file)
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            img = cv2.resize(img, (128, 64))
            # Scale pixels to [0, 1]; reshape to (batch, channel, H, W) for the net.
            array = torch.Tensor(img).to(neural_net.device).view(-1, 128, 64)/255.0
            net_out = net(array.view(-1, 1, 128, 64))[0]
            # .item() converts the 0-dim argmax tensor to a plain int so the
            # comparison below is an ordinary Python equality test.
            predicted_label = torch.argmax(net_out).item()
            real_label = labels[label]
            if predicted_label != real_label:
                incorrect += 1
                cv2.imwrite(os.path.join('Incorrect Data', label, file), img)
        except Exception as e:
            # Best-effort: skip unreadable/non-image files but report the error.
            print(e)
# -
# Total number of misclassified training images (notebook cell output).
incorrect
| .ipynb_checkpoints/improve-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import datetime
import pylab as pl
import pandas as pd
import numpy as np
# -
# Download the mybinder.org analytics index: one JSON record per archived day.
index = pd.read_json("https://archive.analytics.mybinder.org/index.jsonl",
                     lines=True)
# +
# Get all days since the start of July 2020 (n = days elapsed since 2020-07-01).
now = datetime.datetime.now()
n = (now - datetime.datetime(2020, 7, 1)).days
frames = []
count = 0
# Iterate newest-first and fetch each day's event archive until we have n days.
for idx, day in index.sort_index(ascending=False).iterrows():
    df = pd.read_json("https://archive.analytics.mybinder.org/{}".format(day['name']), lines=True)
    frames.append(df)
    if len(frames) > n:
        break
    # Progress indicator: days fetched so far out of n.
    print(count, n)
    count += 1
print(f"Fetched data for {n} days.")
# -
df = pd.concat(frames)
# +
# Helper to pull the repository identifier out of a launch spec string.
def get_repo(spec):
    """Return the repo part of a spec like 'org/repo/ref', dropping any '.git'."""
    head, sep, _ref = spec.rpartition("/")
    repo = head if sep else spec
    suffix = ".git"
    if repo.endswith(suffix):
        repo = repo[:-len(suffix)]
    return repo
# Derive 'repo' ('org/name') and 'org' columns from each launch spec string.
df['repo'] = df['spec'].apply(get_repo)
df['org'] = df['spec'].apply(lambda s: s.split("/", 1)[0])
# -
# take a look at the data, does it look sensible?
df.sample(10)
# Drop metadata columns we no longer need after deriving repo/org.
df = df.drop(columns=['schema', 'version', 'spec', 'ref'])
df.sample(10)
# Quick check: did this repo appear in the archive at all?
np.any(df['repo'] == 'michaelJwilson/DESI-HighSchool')
ds = df[df['repo'] == 'michaelJwilson/DESI-HighSchool']
# NOTE(review): assigning onto a filtered slice can raise pandas'
# SettingWithCopyWarning; consider .copy() on the line above.
ds['pd_timestamp'] = pd.to_datetime(ds.timestamp)
# Sorted view for inspection only — result is not assigned back.
ds.sort_values(by='pd_timestamp')
# Count launches per calendar day.
daily = ds.set_index("timestamp").resample('D').count()
daily.head()
daily.tail()
# +
# Scatter of daily launch counts (dots only, no connecting line).
daily['repo'].plot(marker='.', lw=0.)
ax = pl.gca()
ax.set_xlabel('')
# -
# Total launches over the whole period.
np.sum(daily['repo'])
| desihigh/Binder/Stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="c8Cx-rUMVX25"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="I9sUhVL_VZNO"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="6Y8E0lw5eYWm"
# # Post-training integer quantization with int16 activations
# + [markdown] id="CGuqeuPSVNo-"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_integer_quant_16x8"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/performance/post_training_integer_quant_16x8.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="BTC1rDAuei_1"
# ## Overview
#
# [TensorFlow Lite](https://www.tensorflow.org/lite/) now supports
# converting activations to 16-bit integer values and weights to 8-bit integer values during model conversion from TensorFlow to TensorFlow Lite's flat buffer format. We refer to this mode as the "16x8 quantization mode". This mode can improve accuracy of the quantized model significantly, when activations are sensitive to the quantization, while still achieving almost 3-4x reduction in model size. Moreover, this fully quantized model can be consumed by integer-only hardware accelerators.
#
# Some examples of models that benefit from this mode of the post-training quantization include:
# * super-resolution,
# * audio signal processing such
# as noise cancelling and beamforming,
# * image de-noising,
# * HDR reconstruction
# from a single image
#
# In this tutorial, you train an MNIST model from scratch, check its accuracy in TensorFlow, and then convert the model into a Tensorflow Lite flatbuffer using this mode. At the end you check the accuracy of the converted model and compare it to the original float32 model. Note that this example demonstrates the usage of this mode and doesn't show benefits over other available quantization techniques in TensorFlow Lite.
# + [markdown] id="2XsEP17Zelz9"
# ## Build an MNIST model
# + [markdown] id="dDqqUIZjZjac"
# ### Setup
# + id="gyqAw1M9lyab"
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
# + [markdown] id="srTSFKjn1tMp"
# Check that the 16x8 quantization mode is available
# + id="c6nb7OPlXs_3"
tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
# + [markdown] id="eQ6Q0qqKZogR"
# ### Train and export the model
# + id="hWSAjQWagIHl"
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0

# Define the model architecture: small conv net ending in 10 raw logits
# (no softmax — handled by from_logits=True in the loss below).
model = keras.Sequential([
  keras.layers.InputLayer(input_shape=(28, 28)),
  keras.layers.Reshape(target_shape=(28, 28, 1)),
  keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
  keras.layers.MaxPooling2D(pool_size=(2, 2)),
  keras.layers.Flatten(),
  keras.layers.Dense(10)
])

# Train the digit classification model
model.compile(optimizer='adam',
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(
  train_images,
  train_labels,
  epochs=1,
  validation_data=(test_images, test_labels)
)
# + [markdown] id="5NMaNZQCkW9X"
# For the example, you trained the model for just a single epoch, so it only trains to ~96% accuracy.
# + [markdown] id="xl8_fzVAZwOh"
# ### Convert to a TensorFlow Lite model
#
# Using the Python [TFLiteConverter](https://www.tensorflow.org/lite/convert/python_api), you can now convert the trained model into a TensorFlow Lite model.
#
# Now, convert the model using `TFliteConverter` into default float32 format:
# + id="_i8B2nDZmAgQ"
# Convert the trained Keras model to a (float32) TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# + [markdown] id="F2o2ZfF0aiCx"
# Write it out to a `.tflite` file:
# + id="vptWZq2xnclo"
# Directory for the exported .tflite files; create it if missing.
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
# + id="Ie9pQaQrn5ue"
# Save the float32 model; write_bytes returns the byte count (cell output).
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# + [markdown] id="7BONhYtYocQY"
# To instead quantize the model to 16x8 quantization mode, first set the `optimizations` flag to use default optimizations. Then specify that 16x8 quantization mode is the required supported operation in the target specification:
# + id="HEZ6ET1AHAS3"
# Enable default optimizations and restrict ops to the experimental
# 16-bit-activation / 8-bit-weight quantization mode.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
# + [markdown] id="zLxQwZq9CpN7"
# As in the case of int8 post-training quantization, it is possible to produce a fully integer quantized model by setting converter options `inference_input(output)_type` to tf.int16.
# + [markdown] id="yZekFJC5-fOG"
# Set the calibration data:
# + id="Y3a6XFqvHbYM"
# Calibration data: 100 normalized training images, batched one at a time.
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
    """Yield calibration samples for post-training quantization."""
    for input_value in mnist_ds.take(100):
        # Model has only one input so each data point has one element.
        yield [input_value]

converter.representative_dataset = representative_data_gen
# + [markdown] id="xW84iMYjHd9t"
# Finally, convert the model as usual. Note, by default the converted model will still use float input and outputs for invocation convenience.
# + id="yuNfl3CoHNK3"
# Convert with the 16x8 settings configured above and save the result.
tflite_16x8_model = converter.convert()
tflite_model_16x8_file = tflite_models_dir/"mnist_model_quant_16x8.tflite"
tflite_model_16x8_file.write_bytes(tflite_16x8_model)
# + [markdown] id="PhMmUTl4sbkz"
# Note how the resulting file is approximately `1/3` the size.
# + id="JExfcfLDscu4"
# !ls -lh {tflite_models_dir}
# + [markdown] id="L8lQHMp_asCq"
# ## Run the TensorFlow Lite models
# + [markdown] id="-5l6-ciItvX6"
# Run the TensorFlow Lite model using the Python TensorFlow Lite Interpreter.
# + [markdown] id="Ap_jE7QRvhPf"
# ### Load the model into the interpreters
# + id="Jn16Rc23zTss"
# Load the float32 model into a TFLite interpreter and allocate its tensors.
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
# + id="J8Pztk1mvNVL"
# Same for the 16x8-quantized model.
interpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))
interpreter_16x8.allocate_tensors()
# + [markdown] id="2opUt_JTdyEu"
# ### Test the models on one image
# + id="AKslvo2kwWac"
# Run one test image through the float32 interpreter:
# add a batch dimension and cast to float32 to match the model input.
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)

input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
# + id="XZClM2vo3_bm"
import matplotlib.pylab as plt

# Show the image with its true label and the float model's prediction.
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
                              predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# + id="3gwhv4lKbYZ4"
# Repeat the single-image inference with the 16x8-quantized interpreter.
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)

input_index = interpreter_16x8.get_input_details()[0]["index"]
output_index = interpreter_16x8.get_output_details()[0]["index"]
interpreter_16x8.set_tensor(input_index, test_image)
interpreter_16x8.invoke()
predictions = interpreter_16x8.get_tensor(output_index)
# + id="CIH7G_MwbY2x"
# Show the image with its true label and the quantized model's prediction.
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
                              predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# + [markdown] id="LwN7uIdCd8Gw"
# ### Evaluate the models
# + id="05aeAuWjvjPx"
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in test_images:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == test_labels[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
return accuracy
# + id="T5mWkSbMcU5z"
print(evaluate_model(interpreter))
# + [markdown] id="Km3cY9ry8ZlG"
# Repeat the evaluation on the 16x8 quantized model:
# + id="-9cnwiPp6EGm"
# NOTE: This quantization mode is an experimental post-training mode,
# it does not have any optimized kernels implementations or
# specialized machine learning hardware accelerators. Therefore,
# it could be slower than the float interpreter.
print(evaluate_model(interpreter_16x8))
# + [markdown] id="L7lfxkor8pgv"
# In this example, you have quantized a model to 16x8 with no difference in the accuracy, but with the 3x reduced size.
#
| tensorflow/lite/g3doc/performance/post_training_integer_quant_16x8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="pOrSBEmKBmxF"
# # SIT742: Modern Data Science
# **(Week 01: Programming Python)**
#
# ---
# - Materials in this module include resources collected from various open-source online repositories.
# - You are free to use, change and distribute this package.
#
# Prepared by **SIT742 Teaching Team**
#
#
# ---
#
#
# # Session 1B - Control Flow, File usage, and Advanced data
# + [markdown] colab_type="text" id="mBYW9FlMBmxG"
# ## Introduction
#
# Normally,
# *Python* executes a series of statement in exact top-down order.
# What if you want to change the flow how it works?
# As you might have guessed, in this prac, we will first look at **control flow**
# statements. We are going to practice on three control flow statements, i.e.,
# **if**, **for** and **while**, to see how they can
# determine what statement is to be executed next in a program.
#
# Second, we will look at how to create, read, and write files in *Python*.
# We have obtained data via interaction with users in previous prac. Now let us
# explore how to deal with files to get input of a program and write
# output that can be used later.
#
#
#
# Finally, we will learn about advanced data types in addition to the **strings**
# and **number** learnt before. We will examine **lists**, **tuples**
# and **dictionaries** used for a collection of related data.
#
# + [markdown] colab_type="text" id="BTjIVzM4BmxH"
# <a id = "cell_tuple"></a>
# + [markdown] colab_type="text" id="8rxG_XNwBmxI"
# ## Table of Content
#
# ### Part 1 Control Flow
#
# 1.1 [**If** statements](#cell_if)
#
# 1.2 [**For** statements](#cell_for)
#
# 1.3 [**While** statements](#cell_while)
#
# 1.4 [**Break** statements](#cell_break)
#
# 1.5 [Notes on Python 2](#cell_note)
#
# ### Part2 Using files
#
# 2.1 [Reading files](#cell_read)
#
# 2.2 [Writing files](#cell_write)
#
# ### Part3 Advanced data
#
# 3.1 [List](#cell_list)
#
# 3.2 [Tuple](#cell_tuple)
#
# 3.3 [Dictionary](#cell_dict)
#
#
# + [markdown] colab_type="text" id="48mvMOLyBmxJ"
# ## Part 1 Control flow
# + [markdown] colab_type="text" id="APgJv4VdBmxK"
# <a id = "cell_if"></a>
# + [markdown] colab_type="text" id="Z8I-fSFrBmxL"
# ### 1.1 **If** statements
#
# The **if** statement is used to check a condition: **if** the condition is true, we run a block of statements(**if-block**), **else** we process another block of statement(**else-block**). The **else** clause is optional. The condition is usually a boolean expression, which can have value of **True** or **False**.
#
# Often we have a block of statement inside either **if-block** or **else-block**. In this case, you need to especially pay attention to the indentation of the statements in the block. Indenting starts a block and unindenting ends it. As a general practice, Python style guide recommend using 4 white spaces for indenting a block, and not using tabs. If you get indentation error message from Python interpretor, you will need to check your code carefully.
#
# + colab={} colab_type="code" id="pSM5441WBmxM"
x = 15
if x % 2 == 0:
print('%d is even' % x)
else:
print('%d is odd' % x)
print('This is always printed')
# + [markdown] colab_type="text" id="AgajrFJ_BmxP"
#
# Try change **x** to even number and run the program again.
#
# The else-block in **if** statement is optional. If the **else** block is omitted, the statements in **if**-block are executed when the condition equal to **True**. Otherwise, the flow of execution continues to the statement after the **if** structure.
#
# Try the following code:
# + colab={} colab_type="code" id="IgyyRoiLBmxQ"
x = -2
if x < 0:
print("The negative number %d is not valid here." % x)
print("This is always printed")
# + [markdown] colab_type="text" id="xTIXGLKlBmxT"
# What will be printed if the value of **x** is negative?
#
#
# **If** statement can also be nested within another. Further **if** structure can either be nested in **if-block** or **else-block**. Here is an example with another **if** structure nested in **else-block**.
#
# This example assume we have two integer variables, **x** and **y**. The code shows how we might decide how they are related to each other.
# + colab={} colab_type="code" id="1QTkRQKNBmxU"
x = 10
y = 10
if x < y:
print("x is less than y")
else:
if x > y:
print("x is greater than y")
else:
print("x and y must be equal")
# + [markdown] colab_type="text" id="qIIckwg6Bmxa"
# Here we can see that the indentation pattern can tell the Python interpretor exactly which **else** belong to which **if**.
#
# Python also provides an alternative way to write nested **if** statement. We need to use keyword **elif**. The above example is equivalent to :
# + colab={} colab_type="code" id="F8lwSvUmBmxb"
x = 10
y = 10
if (x < y):
print("x is less than y")
elif (x > y):
print("x is greater than y")
else:
print("x and y must be equal")
# + [markdown] colab_type="text" id="WmO3pOeVBmxe"
# **elif** is an abbreviation of **else if**. With above structure, each condition is checked in order. If one of them is **True**, the corresponding branch executes. Even if more than one condition is **True**, only the first **True** branch executes.
#
# There is no limit of the number of **elif** statements. but only a single final **else** statement is allowed. The **else** statement must be the last branch in the statement.
# + [markdown] colab_type="text" id="bHPF8viiBmxf"
# <a id = "cell_for"></a>
# + [markdown] colab_type="text" id="J5V62gWWBmxg"
# ### 1.2 **For** statements
# Computers are often used to automate repetitive tasks. Repeated execution of a sequence of statements is called iteration. Two language features provided by Python are **while** and **for** statement. We first take a look an example of **for** statement:
# + colab={} colab_type="code" id="JF9gk0PpBmxh"
for name in ["Joe", "Amy", "Brad", "Angelina", "Zuki"]:
print("Hi %s Please come to my party on Saturday!" % name)
# + [markdown] colab_type="text" id="37f33WKrBmxn"
# This example assume we have some friends, and we would like to send them an invitation to our party. With all the name in the list, we can print a message for each friend.
#
# This is how the **for** statement works:
#
# 1. **name** in the **for** statement is called loop variables, and the names in the square brackets is called **list** in Python. We will cover more details on list in next prac. For now, you just need to know how to use simple list in a **for** loop.
#
# 2. The second line in the program is the **loop body**. All the statements in the loop body is indented.
#
# 3. On each iteration of the loop, the loop variable is updated to refer to the next item in the list. In the above case, the loop body is executed 5 times, and each time name will refer to a different friend.
#
# 4. At the end of each execution of the body of the loop, Python returns to the **for** statement to handle the next items. This continues until there are no item left. Then program execution continues at the next statement after the loop body.
# + [markdown] colab_type="text" id="9u5fQrRnBmxo"
# One function commonly used in loop statement is **range()**.
#
# Let us first have a look at the following example:
#
# + colab={} colab_type="code" id="0s5lRD76Bmxp"
for i in [0, 1, 2, 3, 4 ]:
print( 'The count is %d' % i)
# + [markdown] colab_type="text" id="7QhivfwyBmxr"
# Actually generating lists with a specific number of integers is a very common task, especially in **for** loop. For this purpose, Python provides a built-in **range()** function to generate a sequence of values. An alternative way of performing above counting using **range()** is as follows.
# + colab={} colab_type="code" id="N72li4DkBmxs"
for i in range(5):
print( 'The count is %d' % i)
print('Good bye!')
# + [markdown] colab_type="text" id="2dJ4XtVbBmxv"
# Notice **range(5)** generates a list of $5$ values starting with 0 instead of 1. In addition, 5 is not included in the list.
#
# + [markdown] colab_type="text" id="GBp5UBhTBmxw"
# Here is a note on **range()** function: a strange thing happens if you just print a range:
# + colab={} colab_type="code" id="Abz4U4-jBmxx"
range(5)
# + colab={} colab_type="code" id="BbcPEh3zBmx1"
print(range(5))
# + [markdown] colab_type="text" id="SW2y7aHIBmx4"
# In many ways the object returned by **range()** behaves as if it is a list, but in fact it isn’t. It is an object which returns the successive items of the desired sequence when you iterate over it, but it doesn’t really make the list, thus saving space. We say such an object is *iterable*. In the following example, **rangeA** is **iterable**.
#
# There are functions and constructs that expect something from these objects to obtain successive items until the supply is exhausted. The **list()** function can be used to creates lists from iterables:
# + colab={} colab_type="code" id="oQ5qgASnBmx5"
rangeA = range(5)
list(rangeA)
# + [markdown] colab_type="text" id="W7rrW0PEBmx8"
# In this way, we can print the list generated by **range(5)** to check the values closely.
# + [markdown] colab_type="text" id="75BArncGBmx9"
# To count from 1 to 5, we need the following:
# + colab={} colab_type="code" id="-c8s4yqFBmx9"
list(range(1, 6))
# + [markdown] colab_type="text" id="ina-6XSZBmyA"
#
# We can also add another parameter, **step**, in **range()** function. For example, a step of **2** can be used to produce a list of even numbers.
#
# Look at the following example. Think about what will be the output before you run the code to check your understanding.
#
#
# + colab={} colab_type="code" id="15sR2tbEBmyB"
list(range(1, 6))
# + colab={} colab_type="code" id="I2QNYwqLBmyE"
list(range(1, 6))
# + colab={} colab_type="code" id="5yGaHZgWBmyG"
list(range(0, 19, 2))
# + colab={} colab_type="code" id="4GAI_l42BmyJ"
list(range(0, 20, 2))
# + colab={} colab_type="code" id="NvtLk4FsBmyK"
list(range(10, 0, -1))
# + [markdown] colab_type="text" id="x4kncHIyBmyM"
# Let us return to the previous counting example, when **range()** generate the sequence of numbers, each number is assign to the loop variable **i** in each iteration. Then the block of statements is executed for each value of **i**. In the above example, we just print the value in the block of statements.
# + [markdown] colab_type="text" id="LBu_5KrtBmyN"
# <a id = "cell_while"></a>
# + [markdown] colab_type="text" id="ySAgg2-5BmyO"
# ### 1.3 **While** statements
#
# The **while** statement provide a much more general mechanism for iteration. It allows you to repeatedly execute a block of statements as long as a condition is **True**.
#
# Similar to the **if** statement, the **while** statement uses a boolean expression to control the flow of execution. The body of **while** will be repeated as long as the condition of boolean expression equal to **True**.
#
# Let us see how the previous counting program can be implemented by **while** statement.
# + colab={} colab_type="code" id="bIOEmdUiBmyO"
i = 0
while (i < 6):
print('The count is %d' % i)
i = i + 1
print('Good bye!')
# + [markdown] colab_type="text" id="5LAzdVFzBmyQ"
# How the while statement works:
# 1. The **while** block consists of the print and increment statements. They are executed repeatedly until count is no longer less than $6$. With each iteration, the current value of the index count is displayed and then increased by $1$.
#
# 2. Same as **for** loop, this type of flow is also called a loop since **while** statements is executed repeatedly. Notice that if the condition is **False** at the first time through the loop, the statement inside the loop are never executed. Try change the first line into **i = 6**. What is the output?
#
# 2. In this example, we can prove that the loop terminates because **i** start from $0$ and increase by $1$. Eventually, **i** will be great than 5. When the condition becomes **False**, the loop stops.
#
# 3. Sometime, we will have loop that repeats forever. This is called an infinite loop. Although this kind of loop might be useful sometimes, it is often caused by a programming mistake. Try to change the first two lines of previous program into the following code. See what happen?
#
# Note that if you run the following cell, the code will run indefinitely. You will need to go to the menu: **Kernel->Restart**, and then restart the kernel.
#
# Also note that if you run such code in Python at command line or script mode, you will need to use **CTRL-C** to terminate the program.
# + colab={} colab_type="code" id="rBPwIJ1TBmyR"
i = 6
while i > 5 :
print('The count is %d' % i)
i = i + 1
print('Good bye!')
# + [markdown] colab_type="text" id="Sw9P5rAaBmyU"
# <a id = "cell_break"></a>
# + [markdown] colab_type="text" id="zSrk7ffZBmyV"
# ### 1.4 **Break** statements
#
# The **break** statement can be used to break out of a loop statement. It can be used both in **while** loop and **for** loop.
#
# Alternative way of previous counting example with **break** statement is as follows:
# + colab={} colab_type="code" id="kCvV7yovBmyW"
i = 0
while True :
print( 'The count is %d' % i)
i = i + 1
if i > 6:
break
print('Good bye!')
# + [markdown] colab_type="text" id="0F_hkIdqBmya"
# In this program, we repeated print value of **i** and increase it by 1 each time. We provide a special condition to stop the program by checking if **i** is greater than 6. We then break out of the loop and continue executed the statement after the loop.
#
# Note that it is important to decide what the terminate condition should be. We can see from previous counting example that the terminate condition might be different in different loop structures.
# + [markdown] colab_type="text" id="u39CNJdhBmyb"
# <a id = "cell_note"></a>
# + [markdown] colab_type="text" id="Ry-Vx12jBmyb"
# ### 1.5 Notes on **Python 2**
#
# Use of **range()** function:
#
#
# 1. One thing to be noted is: the use of **range()** in for-loop looks the same in both \emph{Python} 2 and 3.
#
# 2. While function **range()** creates an *iterable*, which can be used to produce sequence dynamically in *Python 3*;
# **range()** in *Python 2* creates a list. The use of iterable object in \emph{Python} 3 is more efficient in memory wise. This is especially useful when you need a gigantic range.
#
# 3. As you have noticed, statement **print(range(5))** in *Python 3* will not produce a list. However, this statement is valid in *Python 2*. For example, the following can be used to check values of a range in \emph{Python} 2.
# + [markdown] colab_type="raw" id="XoVHQtD5Bmyc"
# >>>print(range(5))
# [0, 1, 2, 3, 4]
# + [markdown] colab_type="text" id="SGjWRgfOBmyd"
# Please also note that **range()** can be safely used in both versions in a **for**-loop.
# + [markdown] colab_type="text" id="7mpkvTSRBmyd"
# ## Part 2 Using files
#
# ### 2.1 Reading files
#
#
# You can open and use files for reading or writing by creating an object of the *file class*. The *mode* that is specified for the file opening decides what you can do with the file: read, write or both. Then the file object's **read()** or **write()** method can be used to read from or write to the file. Finally, when you are finished with the file, you call the **close()** method to tell Python that you are done using the file.
#
# Here is an example. You can download the data file **score.txt**, which includes data on students' score. The format of the data file is as follows:
# + [markdown] colab_type="raw" id="tw4DG880Bmye"
# Name, Student ID, Score
#
# David 3402 80
# Jane 3403 76
# Sophia 3405 65
# Jane 3447 92
# William 3456 75
# + [markdown] colab_type="text" id="zX0btCtSBmye"
# For Online platforms such as IBM Cloud, it is important for you to get familiar with the provided data storage or cloud data storage function. Alternatively, you might want to directly access the file, and load into your Notebook.
# + colab={} colab_type="code" id="IFG5yfANBmyf"
# !pip install wget
# + [markdown] colab_type="text" id="U5ixu3VUBmyh"
# Then you can download the file into GPFS file system.
# + colab={} colab_type="code" id="mZz9001yBmyh"
import wget
link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/score.txt'
DataSet = wget.download(link_to_data)
print(DataSet)
# + [markdown] colab_type="text" id="D5NLUAxoBmyj"
# The following example read from the **.txt** file and display information on the screen. Please type the code and run it under script mode. Make sure **score.txt** are saved under your **data** folder.
# + colab={} colab_type="code" id="fGWLZazQBmyj"
# scorefile = open('https://raw.githubusercontent.com/tulip-lab/sit742/master/Jupyter/data/score.txt','r')
# Use a with-block so the file is always closed; the original opened the
# file but never called close() on it, leaking the file handle.
with open('score.txt', 'r') as scorefile:
    for line in scorefile:
        # Each record is whitespace-separated: name, student ID, score.
        value = line.split()
        name = value[0]
        id = value[1]
        score = value[2]
        print('%s with ID %s has a score of %s' % (name, id, score))
# + [markdown] colab_type="text" id="TM_kpY_3Bmyl"
# How the program works:
# 1. The **open()** function is used to open a file. You need to specify the name of the file and the mode in which you want to open the file. The mode can be read mode('r'), write mode('w') or append mode('a'). There are actually many more modes available. You can get more details by create a cell and typing ""open?"". Please try this in your notebook. When we finish working on the file, we need to close the file using **close()** method.
#
#
# 2. To process all of the data, we use a **for** loop to iterate over the lines of the file. The **line** variable is a string that is used to store characters in each line.
#
#
# 3. We use the **split()** method to break each line into a list containing all the fields of interest. We can then take the value corresponding to **name**, **id** and **score** and print them in the sentence. To get each data item in a list, we use index with the list. e.g. values[0] will return the item of position 0 in the list. Note that in Python the position of items in a list is starting from $0$.
# + [markdown] colab_type="text" id="WrvvbEHYBmyl"
# ### 2.2. Writing to files
#
# One of the most commonly performed data processing tasks is to read data from a file, manipulate it in some way and then write the resulting data out to a new data file to be used for other purpose later. For creating a new file used for writing, the same **open()** function is used. Instead using 'r' mode, 'w' mode is used as the parameter. When we open a file for writing, a new, empty file with the specified name is created and ready to accept our data.
#
# As an example, consider the **score.txt** data again. Assume we have request to remove the name information in the file for privacy issue. Therefore, the output file need to have student ID with the scores separated by a comma. Here is how we can generate the required file.
# + colab={} colab_type="code" id="WIgxvy2sBmym"
# Copy 'score.txt' to 'id.txt', keeping only "<student id>,<score>" per line.
# with-blocks guarantee both files are closed even if an exception
# interrupts the copy (the original relied on reaching its close() calls).
with open('score.txt', 'r') as infile, open('id.txt', 'w') as outfile:
    for line in infile:
        values = line.split()
        id = values[1]
        score = values[2]
        dataline = id + ',' + score
        # Append the newline explicitly; write() does not add one.
        outfile.write(dataline + '\n')
# + [markdown] colab_type="text" id="dvGy8vFIzRxR"
# You can use the print() to print file content on the console.
# + colab={} colab_type="code" id="MINqoph9XTB1"
# Read the generated file back and print its contents on the console.
# BUG FIX: the original ended with `f.close` (missing parentheses), which
# referenced the method without calling it, so the file was never closed.
# A with-block closes it deterministically.
with open('id.txt', 'r') as f:
    message = f.read()
print(message)
# + [markdown] colab_type="text" id="PmDqWJ_EBmyo"
#
# How the program works:
#
# 1. We have add another **open()** method with 'w' mode. The filename **id.txt** is chosen to store the data. If the file does not exist, it will be created. However, if the file does exist, it will be reinitialized and empty, and any previous contents will be lost
# 2. We have variable **dataline** to store what need to be write in the file. If you like, you can add a line **print(dataline)** to check the string value. We then call function **write()** method to write **dataline** into the file.
# 3. There is one additional part we need to add when writing to file. The newline character **\n** need to be concatenated to the end of the line. Otherwise, the text will be all in one continuous line.
#
# 4. The file needs to be closed at the end.
#
# + [markdown] colab_type="text" id="4Xu7rvCrBmyp"
# <a id = "cell_list"></a>
# + [markdown] colab_type="text" id="ZXxkKq8eBmyq"
# ## Part 3 Advanced Data
#
# ### 3.1 List
#
# **List is a sequence**
#
# Like a string, a **list** is a sequence of values. The values in a list can be any type. We call the values **items** of the list. To create a list, enclose the items in square brackets.
#
# For example,
#
# + colab={} colab_type="code" id="E7e8UuwGBmyq"
shoplist = ['apple', 'mango', 'carrot', 'banana']
l = [10, 20, 30, 40]
empty = [ ] # Initialize an empty list
# + colab={} colab_type="code" id="aY2iD5kNBmys"
shoplist
# + colab={} colab_type="code" id="KE779JH2Bmyu"
l
# + colab={} colab_type="code" id="_IpjFmd4Bmyx"
empty
# + colab={} colab_type="code" id="XBrjhj1iBmyz"
shoplist
# + [markdown] colab_type="text" id="j7ruHjt3Bmy0"
# The elements of a list do not have to be the same type. An another list can also be nested inside a list.
#
# To access the element of a list, use the bracket operator to obtain the value available at the index. Note that the indices of list start at $0$. You can also use negative value as index, if you counts from right. For example, the negative index of last item is $-1$. Try the following examples:
# + colab={} colab_type="code" id="W9akILf7Bmy1"
l = [10, 20, 30, 40]
# + colab={} colab_type="code" id="Z5zPJWxpBmy3"
l[2]
# + colab={} colab_type="code" id="bNH-O27pBmy4"
l[-1]
# + colab={} colab_type="code" id="NtKexic5Bmy7"
l[-3]
# + [markdown] colab_type="text" id="Wpr1v6zrBmy9"
# Here is an example of nested list:
# + colab={} colab_type="code" id="r0xGwqeJBmy-"
l = ['apple', 2.0, 5, [10, 20]]
# + colab={} colab_type="code" id="SP-ntHwtBmy_"
l[1]
# + colab={} colab_type="code" id="WQsbJkhABmzC"
l[3]
# + colab={} colab_type="code" id="qDryScoCBmzD"
l[3][1]
# + [markdown] colab_type="text" id="z-zzr7ICBmzF"
# Unlike strings, lists are mutable, which means they can be altered. We use bracket on the left side of an assignment to assign a value to a list item.
# + colab={} colab_type="code" id="rX_ghZH_BmzF"
l = [10, 20, 30, 40]
l[1] = 200
# + colab={} colab_type="code" id="WaAO_Ir2BmzH"
l
# + [markdown] colab_type="text" id="bu8DphbXBmzI"
# **List operation**
#
# **In** is used to perform membership operation.
# The result of expression equals to **True** if a value exists in the list,
# and equals to **False** otherwise.
#
# + colab={} colab_type="code" id="XbCUkFPFBmzJ"
shoplist = ['apple', 'mango', 'carrot', 'banana']
# + colab={} colab_type="code" id="FLXYQQo9BmzK"
'apple' in shoplist
# + colab={} colab_type="code" id="IjdYHK1bBmzN"
'rice' in shoplist
# + [markdown] colab_type="text" id="UNLrFdonBmzO"
# Similarly, **In** operator also applies to **string** type. Here are some examples:
#
# + colab={} colab_type="code" id="GsfGjlPyBmzP"
'a' in 'banana'
# + colab={} colab_type="code" id="GBe4RSgOBmzQ"
'seed' in 'banana' # Test if 'seed' is a substring of 'banana'
# + [markdown] colab_type="text" id="HhQjXO2lBmzR"
# '**+**' is used for concatenation operation, which repeats a list a given number of times.
# + colab={} colab_type="code" id="X6RqwGxQBmzT"
[10, 20, 30, 40 ] + [50, 60]
# + [markdown] colab_type="text" id="F6tP9GVhBmzU"
# [50, 60]*3
#
#
# + [markdown] colab_type="text" id="Nw0vR9K_BmzV"
# **List slices**
# The slicing operation allows you to retrieve a slice of the list, i.e. a part of the sequence. The slicing operation uses square brackets to enclose an optional pair of numbers separated by a colon. Again, to count the position of items from left(first item), start from $0$. If you count the position from right(last item), start from $-1$.
# + colab={} colab_type="code" id="GqB8ElRVBmzV"
l = [1, 2, 3, 4]
# + colab={} colab_type="code" id="qnnI1lXSBmzW"
l[1:3] # From position 1 to position 3 (excluded)
# + colab={} colab_type="code" id="T1T4ljvqBmzY"
l[:2] # From the beginning to position 2 (excluded)
# + colab={} colab_type="code" id="039r1EDMBmza"
l[-2:] # From the second-to-last item through to the end of the list
# + [markdown] colab_type="text" id="x9vjy8R6Bmzb"
# If you omit both the first and the second indices, the slice is a copy of the whole list.
# + colab={} colab_type="code" id="e6U4CrvhBmzc"
l[:]
# + [markdown] colab_type="text" id="0wMQJlYIBmzf"
# Since lists are mutable, above expression is often useful to make a copy before modifying original list.
# + colab={} colab_type="code" id="BA5iYESzBmzh"
l = [1, 2, 3, 4]
l_org = l[:]
l[0] = 8
# + colab={} colab_type="code" id="qTMISAbJBmzn"
l
# + colab={} colab_type="code" id="Smf9tzSSBmzp"
l_org # the original list is unchanged
# + [markdown] colab_type="text" id="2h-9tDg3Bmzq"
# **List methods**
#
# The methods most often applied to a list include:
# - append()
# - len()
# - sort()
# - split()
# - join()
# + [markdown] colab_type="text" id="qxDlX5huBmzr"
# **append()** method adds a new element to the end of a list.
# + colab={} colab_type="code" id="UUs8PmOiBmzs"
l= [1, 2, 3, 4]
# + colab={} colab_type="code" id="sr7rr13xBmzt"
l
# + colab={} colab_type="code" id="3BhyF427Bmzy"
l.append(5)
l.append([6, 7]) #list [6, 7] is nested in list l
# + colab={} colab_type="code" id="kHh85_Y6Bmzz"
l
# + [markdown] colab_type="text" id="XiLgDfWwBmz1"
# **len()** method returns the number of items of a list.
# + colab={} colab_type="code" id="5wRXT4WcBmz1"
l = [1, 2, 3, 4, 5]
len(l)
# + colab={} colab_type="code" id="LhWFPL_zBmz2"
# A list nested in another list is counted as a single item
l = [1, 2, 3, 4, 5, [6, 7]]
len(l)
# + [markdown] colab_type="text" id="xvuQnOPRBmz3"
# **sort()** arranges the elements of the list from low to high.
# + colab={} colab_type="code" id="v2Me8qqfBmz4"
shoplist = ['apple', 'mango', 'carrot', 'banana']
shoplist.sort()
# + colab={} colab_type="code" id="wNW-D9ifBmz6"
shoplist
# + [markdown] colab_type="text" id="YIPqgrbDBmz8"
#
# It is worth noted that **sort()** method modifies the list in place, and does not return any value. Please try the following:
# + colab={} colab_type="code" id="JauCHCv_Bmz8"
shoplist = ['apple', 'mango', 'carrot', 'banana']
shoplist_sorted = shoplist.sort()
# + colab={} colab_type="code" id="k1eeAAaxBmz_"
shoplist_sorted # No value is returned
# + [markdown] colab_type="text" id="IxkXo9AZBm0A"
#
# There is an alternative way of sorting a list. The build-in function **sorted()** returns a sorted list, and keeps the original one unchanged.
# + colab={} colab_type="code" id="SiS31NXcBm0A"
shoplist = ['apple', 'mango', 'carrot', 'banana']
shoplist_sorted = sorted(shoplist) #sorted() function return a new list
# + colab={} colab_type="code" id="K_eMR23aBm0F"
shoplist_sorted
# + colab={} colab_type="code" id="5c1BtFjFBm0G"
shoplist
# + [markdown] colab_type="text" id="gmabEBPqBm0H"
# There are two frequently-used string methods that convert between lists and strings:
#
# First, **split()** methods is used to break a string into words:
# + colab={} colab_type="code" id="NToLD-lOBm0I"
s = 'I love apples'
s.split(' ')
# + colab={} colab_type="code" id="1uH4tlgdBm0J"
s = 'spam-spam-spam'
# A delimiter '-' is specified here. It is used as word boundary
s.split('-')
# + [markdown] colab_type="text" id="XUuTMK7tBm0K"
# Second, **join()** is the inverse of **split**. It takes a list of strings and concatenates the elements.
# + colab={} colab_type="code" id="HKZrUhUzBm0K"
l = ['I', 'love', 'apples']
s = ' '.join(l)
s
# + [markdown] colab_type="text" id="qFbGRFyoBm0M"
# How it works:
#
# Since **join** is a string method, you have to invoke it on the *delimiter*. In this case, the delimiter is a space character. So **' '.join()** puts a space between words. The list **l** is passed to **join()** as parameter.
#
# For more information on list methods, type "help(list)" in your notebook.
# + [markdown] colab_type="text" id="dOaAysQyBm0M"
# **Traverse a list**
#
# The most common way to traverse the items of a list is with a **for** loop. Try the following code:
# + colab={} colab_type="code" id="2pUz_BuABm0M"
shoplist = ['apple', 'mango', 'carrot', 'banana']
for item in shoplist:
print(item)
# + [markdown] colab_type="text" id="BdoviXGwBm0N"
# This works well if you only need to read the items of the list. However, you will need to use indices if you want to update the elements. In this case, you need to combine the function **range()** and **len()**.
# + colab={} colab_type="code" id="Hw-LhzXhBm0P"
l = [2, 3, 5, 7]
for i in range(len(l)):
l[i] = l[i] * 2
print(l)
# + [markdown] colab_type="text" id="6i0u44sWBm0Q"
# How it works:
#
# **len()** returns the number of items in the list, while **range(n)** returns a list from 0 to n - 1. By combining function **len()** and **range()**, **i** gets the index of the next element in each pass through the loop. The assignment statement then uses **i** to perform the operation.
# + [markdown] colab_type="text" id="PQIrW0NjBm0Q"
# <a id = "cell_tuple"></a>
# + [markdown] colab_type="text" id="kBsd3dhuBm0Q"
# ### 3.2 Tuple
#
# **Tuples are immutable**
#
# A **tuple** is also a sequence of values, and can be any type. Tuples and lists are very similar. The important difference is that tuples are immutable, which means they can not be changed.
#
# Tuples is typically used to group and organizing data into a single compound value. For example,
# + colab={} colab_type="code" id="wyr771UWBm0R"
year_born = ('<NAME>', 1995)
year_born
# + [markdown] colab_type="text" id="jS2uoyGvBm0S"
# To define a tuple, we use a list of values separated by comma.
# Although it is not necessary, it is common to enclose tuples in parentheses.
#
# Most list operators also work on tuples.
# The bracket operator indexes an item of tuples, and the slice operator works in similar way.
#
# Here is how to define a tuple:
# + colab={} colab_type="code" id="sSoZ9rIGBm0S"
t = ( ) # Empty tuple
t
# + colab={} colab_type="code" id="YFukci9YBm0T"
t = (1)
type(t) # Its type is int, since no comma is following
# + colab={} colab_type="code" id="DCYLnFmxBm0U"
t = (1,) # One item tuple; the item needs to be followed by a comma
type(t)
# + [markdown] colab_type="text" id="JL76QtuYBm0V"
# Here is how to access elements of a tuple:
# + colab={} colab_type="code" id="Tz1NCxjyBm0W"
t = ('a', 'b', 'c', 'd')
# + colab={} colab_type="code" id="2iTzh6wwBm0Y"
t[0]
# + colab={} colab_type="code" id="PlIiJg52Bm0Z"
t[1:3]
# + [markdown] colab_type="text" id="35hOHu21Bm0a"
# But if you try to modify the elements of the tuple, you get an error.
# + colab={} colab_type="code" id="_r_paxZnBm0b"
t = ('a', 'b', 'c', 'd')
t[1] = 'B'
# + [markdown] colab_type="text" id="Posmxo-aBm0c"
# ### Tuple assignment
#
# Tuple assignment allows a tuple of variables on the left of an assignment to be assigned values from a tuple on the right of the assignment.
# (We already saw this type of statements in the previous prac)
#
# For example,
#
# + colab={} colab_type="code" id="ChvTjx6qBm0d"
t = ('David', '0233', 78)
(name, id, score) = t
# + colab={} colab_type="code" id="-WDeVp8MBm0e"
name
# + colab={} colab_type="code" id="L4Ey2gDYBm0g"
id
# + colab={} colab_type="code" id="v6i5qFp7Bm0h"
score
# + [markdown] colab_type="text" id="aYAZYDZ3Bm0i"
# Naturally, the number of variables on the left and the number of values on the right have to be the same. Otherwise, you will get an error.
# + colab={} colab_type="code" id="CZWKRkeZBm0j"
(a, b, c, d) = (1, 2, 3)
# + [markdown] colab_type="text" id="svAgKRIMBm0l"
# ### Lists and tuples
#
# It is common to have a list of tuples. For loop can be used to traverse the data. For example,
# + colab={} colab_type="code" id="erQ7rITYBm0l"
t = [('David', 90), ('John', 88), ('James', 70)]
for (name, score) in t:
print(name, score)
# + [markdown] colab_type="text" id="0N9Ozh__Bm0m"
# <a id = "cell_dict"></a>
# + [markdown] colab_type="text" id="P7Te1xa1Bm0o"
# ### 3.3 Dictionary
#
# A **dictionary** is like an address-book where you can find the address or contact details of a person by knowing only his/her name. The way of achieving this is to associate **keys**(names) with **values**(details). Note that the key in a dictionary must be unique. Otherwise we are not able to locate correct information through the key.
#
# Also worth noted is that we can only use immutable objects(strings, tuples) for the keys, but we can use either immutable or mutable objects for the values of the dictionary. This means we can use either a string, a tuple or a list for dictionary values.
#
# The following example defines a dictionary:
# + colab={} colab_type="code" id="uclLe92SBm0o"
# Use a name other than `dict` so the built-in dict type is not shadowed.
scores = {'David': 70, 'John': 60, 'Mike': 85}
scores['David']
scores['Anne'] = 92  # add a new item in the dictionary
scores
# + [markdown] colab_type="text" id="oLay_eiFBm0q"
# ** Traverse a dictionary **
#
# The key-value pairs in a dictionary are **not** ordered in any manner. The following example uses **for** loop to traversal a dictionary. Notice that the keys are in no particular order.
# + colab={} colab_type="code" id="QKfMs36lBm0t"
# Use a name other than `dict` so the built-in dict type is not shadowed.
scores = {'David': 70, 'John': 60, 'Amy': 85}
# Iterating a dict yields its keys.
for key in scores:
    print(key, scores[key])
# + [markdown] colab_type="text" id="vLKSyg5OBm0u"
# However, we can sort the keys of dictionary before using it if necessary. The following example sorts the keys and stored the result in a list **sortedKey**. The **for** loop then iterates through list **sortedKey**. The items in the dictionary can then be accessed via the names in alphabetical order. Note that dictionary's **keys()** method is used to return a list of all the available keys in the dictionary.
# + colab={} colab_type="code" id="hLZi1CgdBm0u"
# Use a name other than `dict` so the built-in dict type is not shadowed.
scores = {'David': 70, 'John': 60, 'Amy': 85}
# sorted() returns the keys in alphabetical order without mutating anything.
sortedKeys = sorted(scores.keys())
for key in sortedKeys:
    print(key, scores[key])
| Jupyter/SIT742P01B-ControlAdvData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import os
# -
dirname = os.getcwd()
dirname = os.path.dirname(dirname)
dataset_path = os.path.join(dirname, 'datasets/')
print(dataset_path)
# +
gloveVectors = pd.read_csv(dataset_path+'glove.42B.300d.txt', sep=' ', header=None )
#gloveVectors = pd.read_csv('/media/kandy/hdd/master-thesis/datasets/glove.42B.100d.txt', sep=' ', header=None )
#gloveVectors = pd.read_csv('/media/kandy/hdd/master-thesis/datasets/glove.42B.10d.txt', sep=' ', header=None )
print(gloveVectors.head())
# -
gloveVectors.shape
gloveVectors.index
gloveVectors.columns
sub = gloveVectors.iloc[2:4, 0:11]
print(sub)
sub_token = gloveVectors.iloc[2:4, 0:1]
sub_vectors = gloveVectors.iloc[2:4, 1:11]
print(sub_token)
print(sub_vectors)
tokens = gloveVectors.iloc[:, 0:1]
tokens = tokens.values.tolist()
print(tokens)
tokens = [token[0].lower() for token in tokens]
words = ['Volume', '*ICH*-2', 'is', 'down', 'out', 'of', 'DCan', 'Francisco', 'DC', 'but', 'not', 'out', 'of', 'the', '11', 'outlying', 'offices', 'DC', 'DC', 'MrDC', 'Daggs', 'added', '*T*-1', 'DC']
# Look up `search_word` in the GloVe vocabulary; if it is absent, print
# tokens that share its prefix as near-matches, otherwise report success.
search_word = 'added'
if(search_word not in tokens):
    for token in tokens:
        if(token.startswith(search_word)):
            print(token)
else:
    print('Found!!!')
# ## Trimming down the size and save the toy and medium size datasets
# #### don't execute this unless you need to create them
toy_glove_vectors = gloveVectors.iloc[:, 0:11]
print(toy_glove_vectors.shape)
toy_glove_vectors.to_csv(dataset_path+'glove.42B.10d.txt',header=False, index=False,sep=' ')
medium_glove_vectors = gloveVectors.iloc[:, 0:101]
print(medium_glove_vectors.shape)
medium_glove_vectors.to_csv(dataset_path+'glove.42B.100d.txt',header=False, index=False,sep=' ')
| pre-processing/glove-word-vectors-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import jax
import jax.numpy as jnp
from jax import jit,vmap
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
means = np.array([-.3,2,3])
sigmas = np.array([1,2.5,.7])
ps = np.array([.2,.5,.3])
def gmm_samples(N):
    """Draw N samples from the 1-D Gaussian mixture defined by the
    module-level arrays `means`, `sigmas` and mixture weights `ps`.

    Returns a length-N numpy array of samples.
    """
    # (Removed an unused local `c = 2` that served no purpose.)
    # Pick a mixture component per sample, then scale/shift standard normals.
    i = np.random.choice(len(means), N, p=ps)
    z = np.random.randn(N)
    return z * sigmas[i] + means[i]
x = gmm_samples(20000)
x = (x+5)/15
x=x[(x<1)&(x>0)]
sns.distplot(x)
# +
default_bits=12
@jit
def data2bits(val,prec=default_bits):
    """ returns the sequence of bits for a number up to 2^(-num_bits) precision (between 0 and 1)"""
    # Repeated binary expansion: emit the leading bit, then shift it out.
    # Under @jit the Python loop is unrolled into `prec` traced steps,
    # so `prec` is fixed at trace time (default bound to `default_bits`).
    labels = []
    for i in range(prec):
        labels.append((val>.5).astype(np.int32))
        val = 2*(val-(val>.5)*.5)
    return jnp.array(labels)
@jit
def bits2samples(bits):
    """Inverse of data2bits: fold a bit sequence back into a float in [0, 1),
    adding a small dither below the precision of the last bit."""
    val = 0.
    for i,b in enumerate(bits):
        val += b/2**(i+1)
    # NOTE(review): np.random.rand() is a host-side call; under @jit (and
    # vmap) it is evaluated once at trace time, so this dither becomes a
    # constant across calls/batch elements -- confirm that is intended.
    val += np.random.rand()/2**(len(bits)+1)
    return val
# -
# +
# b = vmap(data2bits)(x)
# sns.distplot(x)
# sns.distplot(vmap(bits2samples)(b))
# -
def sample_data(N):
    """Draw N GMM samples, rescale them into roughly [0, 1], and discard
    any that fall outside the open interval (0, 1).

    Note: the returned array may therefore have fewer than N elements.
    """
    samples = (gmm_samples(N) + 5) / 15
    in_range = (samples > 0) & (samples < 1)
    return samples[in_range]
z =sample_data(1000)
dataset = vmap(data2bits)(z)
dataset = torch.from_numpy(np.asarray(dataset))
ds =torch.cat([2*torch.ones(dataset.shape[0],1,device=dataset.device,dtype=dataset.dtype),dataset],dim=1).long()
# +
import torch.nn as nn
import torch.nn.functional as F
class GRUautoregressor(nn.Module):
    """Autoregressive bit-sequence model: token embedding -> GRU -> linear.

    Tokens 0/1 are bits; token index 2 (== classes) is used as the start
    token.  Sequence length is taken from the module-level `default_bits`.
    """
    def __init__(self,classes,hidden_units):
        super().__init__()
        self.classes=classes
        # +1 slot for the start token appended beyond the bit classes.
        self.in_embedding = nn.Embedding(classes+1,hidden_units)
        self.gru = nn.GRU(hidden_units,hidden_units,batch_first=True)
        self.out_embedding = nn.Linear(hidden_units,classes+1)
        self.k = hidden_units
    def forward(self,X,h0=None):
        """ [X (bs,N)], [h0 (bs,k)] -> [X_logp (bs,N,classes)] """
        if h0 is None: h0 = torch.zeros(X.shape[0],self.k,device=X.device)
        out = self.in_embedding(X)
        Y,hf = self.gru(out,h0[None])
        X_out_logits = self.out_embedding(Y)
        X_logp = F.log_softmax(X_out_logits,dim=-1)
        return X_logp
    def sample(self,bs):
        # Ancestral sampling: start from the start token (id 2) and feed
        # each sampled bit back in for `default_bits` steps.
        X = [2*torch.ones(bs).long()]
        hi = torch.zeros(bs,self.k)
        for i in range(default_bits):
            inn = self.in_embedding(X[-1])
            Y, hi = self.gru(inn.unsqueeze(1),hi.unsqueeze(0))
            hi = hi.squeeze(0)
            Xprobs = F.softmax(self.out_embedding(Y.squeeze(1))[:,:-1],dim=-1) #(bs,C-1) #exclude stop token
            dist = torch.distributions.categorical.Categorical(Xprobs)
            X.append(dist.sample())
        # Drop the start token; return only the sampled bits (bs, default_bits).
        return torch.stack(X[1:],dim=1)
    def density(self):
        # Exhaustively score all 2^default_bits bit strings and convert the
        # sequence log-probs into a (normalized) density over [0, 1).
        bs = 2**default_bits
        all_seqs = vmap(data2bits)(np.arange(bs)/bs)
        all_seqs = torch.from_numpy(np.asarray(all_seqs)).long()
        # Prefix every sequence with the start token (id 2).
        all_seqs0 = torch.cat([2*torch.ones(all_seqs.shape[0],1,device=all_seqs.device,dtype=all_seqs.dtype),all_seqs],dim=1)
        # Gather the log-prob of each true bit at each position.
        logps = self.forward(all_seqs0)[:,:-1,:-1][np.arange(bs)[:,None],np.arange(default_bits)[None],all_seqs] #(bs,N)
        ps = logps.sum(1).exp()
        return ps*2**default_bits
def loss(model, seq):
    """Mean next-token negative log-likelihood of `seq` under `model`.

    Each position is trained to predict the following token; torch.roll
    wraps around, so the final position's target is the sequence's first
    token (the start token for start-prefixed sequences).
    """
    next_tokens = torch.roll(seq, -1, 1)
    log_probs = model(seq)  # (bs, N, classes)
    num_classes = log_probs.shape[-1]
    return F.nll_loss(log_probs.reshape(-1, num_classes), next_tokens.reshape(-1))
def batches(ds, bs):
    """Yield random mini-batches of `ds` of size `bs` (last may be smaller).

    A fresh permutation of the row order is drawn on every call.
    """
    order = np.random.permutation(len(ds))
    for start in range(0, len(ds), bs):
        yield ds[order[start:start + bs]]
# +
model = GRUautoregressor(2,64)
optim = torch.optim.AdamW(model.parameters(), lr=3e-3,weight_decay=1e-2)
samples = []
losses = []
for epoch in tqdm(range(100)):
for i, x in enumerate(batches(ds,bs=50)):
optim.zero_grad()
L = loss(model,x)
L.backward()
optim.step()
losses.append(L)
if not epoch%10:
samples.append(vmap(bits2samples)(model.sample(10000).data.numpy()))
# +
#plt.plot(np.arange(2**default_bits)/2**default_bits,2**default_bits*model.density().data.numpy())
# -
# +
prec=5
#plt.hist(z,bins=np.arange(2**prec)/2**prec,label='Binned',density=True,color='purple',alpha=.3)
plt.plot(np.arange(2**default_bits)/2**default_bits,model.density().data.numpy(),label='fit',alpha=.3)
#sns.distplot(z,label='Train Data (1k)')
sns.distplot(z,label='GT (1k)')
sns.distplot(samples[-1],label='Fit')
plt.legend()
# +
prec=5
#plt.hist(z,bins=np.arange(2**prec)/2**prec,label='Binned',density=True,color='purple',alpha=.3)
plt.plot(np.arange(2**default_bits)/2**default_bits,np.cumsum(model.density().data.numpy())/2**default_bits,label='fit',alpha=.3,color='purple')
#sns.distplot(z,label='Train Data (1k)')
sorted_z = np.sort(z)
plt.plot(sorted_z,(np.arange(len(z))+.5)/len(z),label='Empirical CDF (1k dataset)',alpha=.3,color='k')
z2 = sample_data(10000)
sorted_z2 = np.sort(z2)
plt.plot(sorted_z2,(np.arange(len(z2))+.5)/len(z2),label='Empirical CDF (10k dataset)',alpha=.3,color='k')
plt.xlabel('X')
plt.ylabel('CDF')
#sns.distplot(samples[-1],label='Fit')
plt.legend()
# -
p = torch.tensor([[.5,.3],[.1,.8],[.3,.6]])
p.shape
d = torch.distributions.categorical.Categorical(p)
d.sample((10,)).shape
d.sample()
| binary_discretizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="head_image.png" width="300"/>
#
# # Image volume slicer
#
# This page demonstrates how to use `jp_doodle.stream_image_array.VolumeImageViewer` to
# explore a 3d image volume.
#
# First make a synthetic 3d volume using numpy.
# +
import numpy as np
(nx, ny, nz) = (230, 200, 240)
#(nx, ny) = (512,512)
x = np.linspace(-3, 3, nx)
y = np.linspace(-2, 3, ny)
z = np.linspace(-2, 2, nz)
xv, yv, zv = np.meshgrid(x, y, z)
blue = np.array([0,0,255]).reshape([1,1,3])
yellow = np.array([255,255,0]).reshape([1,1,3])
def testArray(a=.1, b=.1, c=-1.2, d=-.2, e=.2):
    # Smooth 3-D test pattern: a sinusoid of a quadratic form over the
    # meshgrid (xv, yv, zv), wrapped with cos and taken mod 0.5.
    # NOTE: this definition is immediately shadowed by the parameter-less
    # testArray defined just below.
    level = np.sin(a * xv * xv + b * yv * yv + c * xv * yv + d + e * zv * zv)
    return np.cos(level * 2) % 0.5
points = [(1,1,1),(-1,1,-1),(1,-1,-1),(-1,1,1),]
def testArray():
    """Sum of Coulomb-like bumps centred at the module-level `points`
    over the meshgrid (xv, yv, zv), wrapped into [0, 1) for display."""
    # np.float was deprecated and removed in NumPy 1.24; the builtin float
    # (i.e. float64) is the exact equivalent dtype.
    A = np.zeros(xv.shape, dtype=float)
    for (x, y, z) in points:
        # Squared distance from this bump's centre at every grid point.
        D = (x - xv) ** 2 + (y - yv) ** 2 + (z - zv) ** 2
        A += 10/(1+D)
    A = (2 * np.sin(A)) % 1
    return A
A = testArray()
#A[10:100, 100:120, 50:100] = 0.0
A.shape
# +
# View the volume
from jp_doodle import stream_image_array
from jp_doodle import dual_canvas
# Higher draw_delay values prevents event flooding on slow kernel/browser connections.
# Lower values make the interface smoother.
draw_delay = 0.3
c3 = dual_canvas.SnapshotCanvas("ylm.png", width=820, height=520)
S = stream_image_array.VolumeImageViewer(A, c3, draw_delay=draw_delay)
c3.fit()
c3.display_all()
# -
| notebooks/misc/Volume slicer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DevashishX/CaptchaBreaker/blob/master/CaptchaBreaker_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="EYZefuMKnZtS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="51b0feec-a7aa-444e-c299-0deaad094dc1"
# !rm -rf ./CaptchaBreaker ; git clone https://github.com/DevashishX/CaptchaBreaker.git
# !ls -lh
#remove an image with 6 letter name
# !rm -f /content/CaptchaBreaker/samples/3bnfnd.*
# + id="Oy-If7WEqca1" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.model_selection import train_test_split
import scipy
import os, sys
# + id="U1eWx-ZCsxDe" colab_type="code" colab={}
alphabet = u"<KEY> "
token_classes = len(alphabet)
assert len(alphabet) == 37
def text_to_labels(text):
    """Map each character of `text` to its index in the global `alphabet`
    (str.find semantics: -1 for characters not present)."""
    return [alphabet.find(char) for char in text]
def labels_to_text(labels):
    """Map label indices back to characters, rendering index
    len(alphabet) (the CTC blank) as an empty string."""
    chars = []
    for label in labels:
        label = int(label)
        chars.append("" if label == len(alphabet) else alphabet[label])
    return "".join(chars)
# + id="MZD9IbMnqezr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} outputId="bf36128c-91b6-4971-81b3-196b32aef4ee"
imgdir = "/content/CaptchaBreaker/samples" # path to where images are stored
imgnames = os.listdir(imgdir)
print(imgnames, len(imgnames), sep="\n")
# + id="6W3FGvSjqiPZ" colab_type="code" colab={}
def imagereader(filename, imgdir):
    """Load `imgdir/filename` as a (H, W, 1) float32 luminance array
    with values scaled into [0, 1]."""
    path = os.path.join(imgdir, filename)
    grayscale = Image.open(path).convert('LA')
    # Keep only the luminance channel of the LA image, drop alpha.
    return np.array(grayscale, np.float32)[:, :, :1] / 255.0
# + id="tE1-NjGUzdAu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e515d01d-9924-4823-b253-3ec60798d52f"
img1 = imagereader(imgnames[0], imgdir)
print(img1.shape)
# + id="kNFUS0tXrE_w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="54efa3dd-6173-42c5-c763-6bc4959f06d9"
datadict = {"imgpath":[], "label":[], "label_len":[]}
for name in imgnames:
imgpath = os.path.join(imgdir, name)
label = name.split(".")[0]
label_len = len(label)
datadict["imgpath"].append(imgpath)
datadict["label"].append(label)
datadict["label_len"].append(label_len)
data = pd.DataFrame(datadict)
max_label_length = max(data["label_len"])
print(max_label_length)
# print(data["label_len"][0])
print(data.head())
del(datadict)
# + id="hGn6rurYst-y" colab_type="code" colab={}
train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)
train_data = train_data.reset_index(drop=True)
val_data = val_data.reset_index(drop=True)
# + id="KPuS66fx_VM0" colab_type="code" colab={}
# + id="FmyETQ4QvpDA" colab_type="code" colab={}
def image_array(data, img_wt=200, img_ht=50, img_ch=1):
    """Load every image referenced by data["imgpath"] into a single batch.

    Images are converted to grayscale+alpha, resized to (img_wt, img_ht)
    when needed, and scaled into [0, 1].

    Returns:
        np.ndarray of shape (n_img, img_ht, img_wt, img_ch), float32.
    """
    n_img = len(data)
    PIL_size = (img_wt, img_ht)  # PIL uses (width, height) ordering
    images = np.zeros((n_img, img_ht, img_wt, img_ch), dtype=np.float32)
    imgpath = data["imgpath"].tolist()
    for n in range(n_img):
        img = Image.open(imgpath[n]).convert('LA')
        if img.size != PIL_size:
            print("resize: {} to {}".format(img.size, PIL_size))
            # Image.ANTIALIAS was deprecated and removed in Pillow 10;
            # Image.LANCZOS is the same filter under its current name.
            img = img.resize(PIL_size, Image.LANCZOS)
        # Keep the first img_ch channel(s), scale into [0, 1].
        img = np.array(img, np.float32)[:,:,:img_ch]/255.0
        images[n] = img
    return images
# + id="GtKlLdNKxtFZ" colab_type="code" colab={}
def label_array(data, max_label_length):
    """Encode the captcha labels of `data` as fixed-width token-id rows.

    Returns:
        (label_emb, label_len): a (n, max_label_length) float32 array of
        alphabet indices and a (n, 1) int array of label lengths.
    """
    n_labels = len(data)
    label_emb = np.zeros((n_labels, max_label_length), dtype=np.float32)
    # np.int was deprecated and removed in NumPy 1.24; the builtin int
    # (platform default integer) is the equivalent dtype here.
    label_len = np.zeros((n_labels, 1), dtype=int)
    for i in range(len(data["label"])):
        label_emb[i] = text_to_labels(data["label"][i])
        label_len[i] = data["label_len"][i]
    return label_emb, label_len
# + id="5R9ZyB0w_WJf" colab_type="code" colab={}
import tensorflow as tf
import math
# + id="vwuyzyVK3IzY" colab_type="code" colab={}
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence producing (inputs, dummy_targets) batches for CTC training.

    Each batch's `inputs` dict holds:
        X_images     (samples, img_ht, img_wt, img_ch) float32 image batch
        y_true       (samples, max_label_length) ground-truth token ids
        input_length (samples, 1) softmax time steps available per item (for y_pred)
        label_length (samples, 1) number of tokens in each y_true label
    The labels travel through the inputs so the loss can be computed inside
    the model's final CTC Lambda layer; the second element returned by
    __getitem__ is y_true, which the `lambda y_true, y_pred: y_pred` loss
    ignores.
    """
    # NOTE(review): the defaults df=train_data and max_label_length=... are
    # module-level globals captured once at class-definition time -- confirm
    # that is intended.
    def __init__(self,
                 df=train_data,
                 batch_size=32,
                 img_wt=200,
                 img_ht=50,
                 img_ch=1,
                 softmax_time_steps=47,
                 downsample=4,
                 max_label_length=max_label_length,
                 shuffle=False
                 ):
        self.df = df
        self.batch_size = batch_size
        self.img_wt = img_wt
        self.img_ht = img_ht
        self.img_ch = img_ch
        self.softmax_time_steps = softmax_time_steps #Number of time slices which will be generated by final softmax layer
        self.downsample = downsample
        self.max_label_length = max_label_length
        self.shuffle = shuffle
        self.indices = np.arange(len(df))
        self.on_epoch_end()
        pass
    def __len__(self):
        # Number of batches per epoch; the last batch may be smaller.
        return math.ceil(len(self.df) / self.batch_size)
    def on_epoch_end(self):
        # Optionally reshuffle the sample order between epochs.
        if self.shuffle:
            np.random.shuffle(self.indices)
    def __getitem__(self, idx):
        # Resolve this batch's row indices into the dataframe.
        global_idx = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
        batch_len = len(global_idx)
        X_images = np.zeros((batch_len, self.img_ht, self.img_wt, self.img_ch), dtype=np.float32)
        y_true = np.zeros((batch_len, self.max_label_length), dtype=np.float32)
        input_length = np.ones((batch_len, 1)) # for y_pred - number of total timesteps generated by final softmax layer will go here
        label_length = np.ones((batch_len, 1)) # for y_true - number of tokens in y_true labels
        local_data = self.df.iloc[global_idx].reset_index(drop=True)
        X_images = image_array(local_data, self.img_wt, self.img_ht, self.img_ch)
        y_true, label_length = label_array(local_data, self.max_label_length)
        # Every item gets the full softmax sequence length as its budget.
        input_length = input_length*self.softmax_time_steps
        batch_inputs = {
            'X_images': X_images,
            'y_true': y_true,
            'input_length': input_length,
            'label_length': label_length,
        }
        return batch_inputs, y_true
    pass
# + id="QRjEIFIJ3bZ8" colab_type="code" colab={}
batch_size=64
img_wt=200
img_ht=50
img_ch=1
softmax_time_steps=47
downsample=4
max_label_length=max_label_length
shuffle=False
# + id="RoYMRZO44C0r" colab_type="code" colab={}
train_generator = DataGenerator(train_data,
batch_size=batch_size,
img_wt=img_wt,
img_ht=img_ht,
img_ch=img_ch,
softmax_time_steps=softmax_time_steps,
downsample=downsample,
max_label_length=max_label_length,
shuffle=shuffle
)
val_generator = DataGenerator(val_data,
batch_size=batch_size,
img_wt=img_wt,
img_ht=img_ht,
img_ch=img_ch,
softmax_time_steps=softmax_time_steps,
downsample=downsample,
max_label_length=max_label_length,
shuffle=shuffle
)
# + id="otohHn7E_LEs" colab_type="code" colab={}
# item = train_generator.__getitem__(1)
# print([(d.shape, d[0]) for d in item[0].values()])
# print(item[1].shape, item[1][0])
# + [markdown] id="HKDNTC1-F1sz" colab_type="text"
# # Create Model
# + id="s5kmT5Q4gPTR" colab_type="code" colab={}
# the actual loss calc occurs here despite it not being
# an internal Keras loss function
def ctc_lambda_func(args):
    """Unpack the Lambda-layer inputs and return the per-sample CTC loss.

    args: (y_pred, y_true, input_length, label_length) as packed by getCRNN.
    """
    y_pred, y_true, input_length, label_length = args
    # the 2 is critical here since the first couple outputs of the RNN
    # tend to be garbage:
    # y_pred = y_pred[:, 2:, :]
    return tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
# + id="p0d128Ua_hQX" colab_type="code" colab={}
def getCRNN(img_height=img_ht, img_width=img_wt, img_chan=img_ch, token_classes=token_classes, max_label_length=max_label_length):
    """Build the CRNN captcha model: CNN feature extractor -> BiLSTM stack
    -> per-timestep softmax, wired to a CTC-loss Lambda as the model output.

    Returns (model, y_true, input_length, label_length) so callers can
    reuse the auxiliary input tensors.
    NOTE(review): the defaults are module-level globals bound at
    definition time.
    """
    image = tf.keras.Input(shape=(img_height, img_width, img_chan), name = "X_images", dtype="float32")
    # print((img_height, img_width, img_chan))
    # Auxiliary inputs that only feed the CTC loss computation.
    y_true = tf.keras.layers.Input(name='y_true', shape=[max_label_length], dtype='float32')
    input_length = tf.keras.layers.Input(name='input_length', shape=[1], dtype='float32')
    label_length = tf.keras.layers.Input(name='label_length', shape=[1], dtype='float32')
    # Convolutional feature extractor; each MaxPool halves both dimensions.
    x = tf.keras.layers.Conv2D(128, (3, 3), activation="relu", name="conv2d_1")(image)
    x = tf.keras.layers.Conv2D(64, (3, 3), activation="relu", name="conv2d_2")(x)
    x = tf.keras.layers.MaxPool2D((2, 2), name="maxpool2d_1")(x)
    x = tf.keras.layers.Conv2D(128, (3, 3), activation="relu", name="conv2d_3")(x)
    x = tf.keras.layers.Conv2D(64, (3, 3), activation="relu", name="conv2d_4")(x)
    x = tf.keras.layers.MaxPool2D((2, 2), name="maxpool2d_2")(x)
    # 1x1 conv collapses the channel dimension so the map can be squeezed.
    x = tf.keras.layers.Conv2D(1, (1, 1), activation="relu", name="1d_conv2d_1")(x)
    x = tf.squeeze(x, [3])
    # Swap axes so the (former) width axis becomes the RNN time axis.
    x = tf.keras.layers.Permute((2, 1), input_shape=x.get_shape(), name="permute_1")(x)
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128, return_sequences=True, dropout=0.2), name="BiLSTM_1")(x)
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True, dropout=0.2), name="BiLSTM_2")(x)
    # One extra output class for the CTC blank token.
    y_pred = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(token_classes+1, activation="softmax"), name="TimeDense")(x)
    print(f"image: {image.get_shape()}", f"y_true: {y_true.get_shape()}", f"y_pred: {y_pred.get_shape()}",
          f"input_length: {input_length.get_shape()}", f"label_length: {label_length.get_shape()}", sep="\n")
    # output = CTCLayer(name='ctc_loss')(labels, x, input_length, label_length)
    loss_out = tf.keras.layers.Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, y_true, input_length, label_length])
    model = tf.keras.models.Model(inputs=[image,
                                          y_true,
                                          input_length,
                                          label_length],
                                  outputs=[loss_out],
                                  name='CRNN_CTC_Model')
    return model, y_true, input_length, label_length
# + id="dKfr8LxlH5vQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="fca64d38-598d-450b-edc8-1f93147ed4c1"
model, y_true, input_length, label_length = getCRNN(img_height=img_ht, img_width=img_wt,
img_chan=img_ch, token_classes=token_classes,
max_label_length=max_label_length)
# + id="T3DfSFVSTG2g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="06a50f5b-2c2f-4716-8299-151fec6b0d9d"
from tensorflow.keras.layers import Layer
model._layers = [
layer for layer in model._layers if isinstance(layer, Layer)
]
plot = tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=True, show_layer_names=True,
rankdir='TB', expand_nested=False, dpi=96
)
display(plot)
# + id="yTzZo_kHIpK7" colab_type="code" colab={}
def ctc_loss_gen(y_true_tokens, input_length, label_length):
    """Build a Keras-compatible loss closing over fixed CTC arguments.

    The returned callable ignores Keras' own `y_true` argument and uses
    the captured token tensor and length arrays instead.
    """
    def ctc_loss(y_true, y_pred):
        return tf.keras.backend.ctc_batch_cost(
            y_true_tokens, y_pred, input_length, label_length)
    return ctc_loss
# + id="axXw1qf1KIcC" colab_type="code" colab={}
def ctc_decode(y_pred, max_label_length=max_label_length):
    """Greedy CTC decode of softmax outputs back into text strings.

    NOTE(review): tf.keras.backend.ctc_decode returns a
    (decoded_sequences, log_probabilities) tuple, so the loop below passes
    both elements to labels_to_text -- confirm this behaves as intended.
    """
    # One timestep budget per sample: the full softmax sequence length.
    input_length = np.ones((y_pred.shape[0]), dtype=np.float32)*y_pred.shape[1]
    # input_length = max_label_length
    results = tf.keras.backend.ctc_decode(y_pred, input_length,
                                          greedy=True)
    decoded_tokens = []
    for r in results:
        decoded_tokens.append(labels_to_text(r))
    return decoded_tokens
# + id="WR5IJHNyJ0Mg" colab_type="code" colab={}
def ctc_acc_gen(y_true_tokens, batch_size=batch_size, max_label_length=max_label_length):
    """Build a Keras-style exact-match accuracy metric over CTC decodes.

    NOTE(review): `result` from ctc_decode is a (decoded, log_probs) tuple
    and `all(x == y)` compares symbolic tensors, so this metric looks
    broken as written -- verify before relying on its output.
    """
    print(type(max_label_length))
    def ctc_acc(y_true, y_pred):
        print("y_true: ", y_true, y_true.get_shape())
        print("y_pred: ", y_pred, y_pred.get_shape())
        input_length = np.ones((batch_size, 1), dtype=np.float32)*max_label_length
        result = tf.keras.backend.ctc_decode(y_pred, input_length,
                                             greedy=True, beam_width=100, top_paths=1)
        total = 0
        count = 0
        # Count predictions that match the ground truth exactly.
        for x, y in zip(y_true, result):
            if all(x == y):
                total = total + 1
        count = len(y_true)
        return total/count
    return ctc_acc
# + id="TnGG5itqR79i" colab_type="code" colab={}
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=5,
restore_best_weights=True)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer="adam")
# model.compile(optimizer="adam", loss=[ctc_loss_gen(y_true, input_length, label_length)], metrics=[ctc_acc_gen(y_true, batch_size, max_label_length)])
# + id="KuwpAtjVSH2N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="92d4bd1a-7fe7-4905-cc59-b5cd50807c02"
model.fit(train_generator, validation_data=val_generator, epochs=50, callbacks=[es])
# + id="28YK-W4yWGcr" colab_type="code" colab={}
prediction_model = tf.keras.models.Model(model.get_layer(name='X_images').input,
model.get_layer(name='TimeDense').output)
# + id="sa7ZsF8QWHq8" colab_type="code" colab={}
# # Let's check results on some validation samples
# for p, (inp_value, _) in enumerate(val_generator):
# bs = inp_value['X_images'].shape[0]
# X_data = inp_value['X_images']
# labels = inp_value['y_true']
# preds = prediction_model.predict(X_data)
# # print(type(labels), labels.shape, labels)
# pred_texts = ctc_decode(preds)
# print(type(pred_texts), pred_texts.shape, pred_texts)
# orig_texts = []
# for label in labels:
# print(type(label), label.shape, label)
# # text = ''.join([labels_to_text[int(x)] for x in label])
# text = labels_to_text([int(x) for x in label])
# orig_texts.append(text)
# for i in range(bs):
# print(f'Ground truth: {orig_texts[i]} \t Predicted: {pred_texts[i]}')
# break
# + id="68waFx1Nksk6" colab_type="code" colab={}
# A utility to decode the output of the network
def decode_batch_predictions(pred):
    """Greedy CTC-decode a batch of softmax predictions into strings.

    NOTE(review): ctc_decode returns a (decoded, log_probs) tuple, so the
    `results.numpy()` call below looks like it would raise AttributeError;
    confirm against the TF version actually in use.
    """
    # pred = pred[:, :]
    input_len = np.ones(pred.shape[0])*pred.shape[1]
    # Use greedy search. For complex tasks, you can use beam search
    results = tf.keras.backend.ctc_decode(pred,
                                          input_length=input_len,
                                          greedy=True)
    print((results))
    # Iterate over the results and get back the text
    output_text = []
    for res in results.numpy():
        outstr = labels_to_text(res)
        # for c in res:
        #     if c <= len(alphabet) and c >=0:
        #         outstr += labels_to_text([c])
        output_text.append(outstr)
    # return final text results
    return output_text
# + id="UMzrHyi8CPoL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b4e3dd9a-93ef-4cdc-eaa1-2b0bfb47e276"
# Let's check results on some validation samples
for p, (inp_value, _) in enumerate(val_generator):
bs = inp_value['X_images'].shape[0]
X_data = inp_value['X_images']
labels = inp_value['y_true']
preds = prediction_model.predict(X_data)
# print(preds)
pred_texts = decode_batch_predictions(preds)
# print(pred_texts, pred_textx.shape)
orig_texts = []
for label in labels:
text = labels_to_text([int(x) for x in label])
orig_texts.append(text)
count = 0
total = len(orig_texts)
for i in range(bs):
if orig_texts[i] == pred_texts[i]:
count += 1
print(f'Ground truth: {orig_texts[i]} \t Predicted: {pred_texts[i]}')
print(f"Accuracy: {count*100/total}%")
break
# + id="NK3d5j65EII9" colab_type="code" colab={}
model.save("CaptchaModel_WithCTCLayer.h5")
prediction_model.save("CaptchaModel.h5")
# + id="v0xnkgesFx4B" colab_type="code" colab={}
| CaptchaBreaker_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
mnist
mnist.train.images.shape, mnist.train.labels.shape
mnist.train.labels[0]
mnist.test.images.shape, mnist.test.labels.shape
mnist.validation.images.shape
import numpy as np
from matplotlib import pyplot as plt
first_image = mnist.train.images[412]
first_image = np.array(first_image, dtype='float')
first_image = first_image.reshape((28,28))
plt.imshow(first_image)
plt.show()
with tf.Session() as sess:
print(tf.random_normal([784, 256]).eval())
# +
# weights & biases
n_input = 784
n_hidden_1 = 256
n_hidden_2 = 256
n_classes = 10
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'h1': tf.Variable(tf.random_normal([n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# -
tf.trainable_variables()
def forward_propagation(x, weights, biases):
    """Two-hidden-layer MLP forward pass: affine -> ReLU twice, then logits.

    weights/biases are dicts with keys 'h1', 'h2', 'out'.
    """
    hidden1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['h1']))
    hidden2 = tf.nn.relu(tf.add(tf.matmul(hidden1, weights['h2']), biases['h2']))
    # Return raw logits; softmax is applied inside the cross-entropy loss.
    return tf.add(tf.matmul(hidden2, weights['out']), biases['out'])
x = tf.placeholder("float", [None, n_input])
y =tf.placeholder(tf.int32, [None, n_classes])
pred = forward_propagation(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
optimize = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
c, _ = sess.run([cost,optimize], feed_dict={x:mnist.train.images , y:mnist.train.labels})
c
predictions = tf.argmax(pred, 1)
correct_labels = tf.argmax(y, 1)
correct_predictions = tf.equal(predictions, correct_labels)
predictions,correct_predictions = sess.run([predictions, correct_predictions], feed_dict={x:mnist.test.images,
y:mnist.test.labels})
correct_predictions.sum()
| Lecture 25 Tensor Flow/Running the Optimizer/11. MNIST-Tensorflow-Optimizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (spk)
# language: python
# name: spk
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Clustering-Code-Along" data-toc-modified-id="Clustering-Code-Along-1"><span class="toc-item-num">1 </span>Clustering Code Along</a></span><ul class="toc-item"><li><span><a href="#Format-the-Data" data-toc-modified-id="Format-the-Data-1.1"><span class="toc-item-num">1.1 </span>Format the Data</a></span></li><li><span><a href="#Scale-the-Data" data-toc-modified-id="Scale-the-Data-1.2"><span class="toc-item-num">1.2 </span>Scale the Data</a></span></li><li><span><a href="#Train-the-Model-and-Evaluate" data-toc-modified-id="Train-the-Model-and-Evaluate-1.3"><span class="toc-item-num">1.3 </span>Train the Model and Evaluate</a></span></li></ul></li><li><span><a href="#Great-Job!" data-toc-modified-id="Great-Job!-2"><span class="toc-item-num">2 </span>Great Job!</a></span></li></ul></div>
# -
# # Clustering Code Along
#
# We'll be working with a real data set about seeds, from UCI repository: https://archive.ics.uci.edu/ml/datasets/seeds.
# The examined group comprised kernels belonging to three different varieties of wheat: Kama, Rosa and Canadian, 70 elements each, randomly selected for
# the experiment. High quality visualization of the internal kernel structure was detected using a soft X-ray technique. It is non-destructive and considerably cheaper than other more sophisticated imaging techniques like scanning microscopy or laser technology. The images were recorded on 13x18 cm X-ray KODAK plates. Studies were conducted using combine harvested wheat grain originating from experimental fields, explored at the Institute of Agrophysics of the Polish Academy of Sciences in Lublin.
#
# The data set can be used for the tasks of classification and cluster analysis.
#
#
# Attribute Information:
#
# To construct the data, seven geometric parameters of wheat kernels were measured:
# 1. area A,
# 2. perimeter P,
# 3. compactness C = 4*pi*A/P^2,
# 4. length of kernel,
# 5. width of kernel,
# 6. asymmetry coefficient
# 7. length of kernel groove.
# All of these parameters were real-valued continuous.
#
# Let's see if we can cluster them in to 3 groups with K-means!
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('cluster').getOrCreate()
# +
from pyspark.ml.clustering import KMeans
# Loads data.
dataset = spark.read.csv("seeds_dataset.csv",header=True,inferSchema=True)
# -
dataset.head()
dataset.describe().show()
# ## Format the Data
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
dataset.columns
vec_assembler = VectorAssembler(inputCols = dataset.columns, outputCol='features')
final_data = vec_assembler.transform(dataset)
# ## Scale the Data
# It is a good idea to scale our data to deal with the curse of dimensionality: https://en.wikipedia.org/wiki/Curse_of_dimensionality
from pyspark.ml.feature import StandardScaler
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False)
# Compute summary statistics by fitting the StandardScaler
scalerModel = scaler.fit(final_data)
# Normalize each feature to have unit standard deviation.
final_data = scalerModel.transform(final_data)
# ## Train the Model and Evaluate
# Trains a k-means model.
kmeans = KMeans(featuresCol='scaledFeatures',k=3)
model = kmeans.fit(final_data)
# Evaluate clustering by computing Within Set Sum of Squared Errors.
wssse = model.computeCost(final_data)
print("Within Set Sum of Squared Errors = " + str(wssse))
# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
model.transform(final_data).select('prediction').show()
# Now you are ready for your consulting Project!
# # Great Job!
| Given_Material/Spark_for_Machine_Learning/Clustering/Clustering Code Along.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="top"></a>
# <h2 id="loading_liberary">LOADING LIBERARY</h2>
# +
import itertools
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
# %matplotlib inline
from geopy import distance, Nominatim
from uszipcode import SearchEngine, SimpleZipcode, Zipcode
import folium
from scipy import stats
from scipy.stats import norm
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tools.eval_measures import rmse, meanabs
from statsmodels.formula.api import ols
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import FunctionTransformer, quantile_transform, scale, StandardScaler, MinMaxScaler, StandardScaler, Normalizer
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn import metrics
# %run "./project_class.ipynb"
func=Master
# -
# !pwd
# <h2 id="loading_dataset">LOADING DATASET</h2>
# + tags=["dataframe"]
# df.to_csv('./data/kc_house_data.gz', compression='gzip')
# Load the King County housing data; index_col=None keeps a fresh RangeIndex.
df = pd.read_csv("./data/kc_house_data.gz", index_col=None)
df.head()
# -
print("the data set contains {0:,} rows and {1} columns".format(df.shape[0], df.shape[1]))
# Column Names and descriptions for Kings County Data Set
#
# id              unique identifier for a house
# date            Date house was sold
# price           Price is prediction target
# bedrooms        Number of Bedrooms/House
# bathrooms       Number of bathrooms/bedrooms
# sqft_living     square footage of the home
# sqft_lot        square footage of the lot
# floors          Total floors (levels) in house
# waterfront      House which has a view to a waterfront
# view            Has been viewed
# condition       How good the condition is ( Overall )
# grade           overall grade given to the housing unit, based on King County grading system
# sqft_above      square footage of house apart from basement
# sqft_basement   square footage of the basement
# yr_built        Built Year
# yr_renovated    Year when house was renovated
# zipcode         zip
# lat             Latitude coordinate
# long            Longitude coordinate
# sqft_living15   The square footage of interior housing living space for the nearest 15 neighbors
# sqft_lot15      The square footage of the land lots of the nearest 15 neighbors
# <h2 id="descriptive">DESCRIPTIVE STATISTICS</h2>
df.describe()
# + tags=[]
df.info()
# -
# <a href="#top">TOP!</a>
# <h2 id="data_scrubbing">DATA SCRUBBING</h2>
# Check Missing Values: report only columns that actually contain NaNs.
for i in df.columns:
    total_nan = df[i].isnull().sum()
    if total_nan > 0:
        print("total missing value of {0:>15}is: {1:>5}".format(i, total_nan))
del total_nan
# both columns "view" and "waterfront" are categories where the "view" represented with the value 1 if the house has been seen and the "waterfront" represented with 1 if the house has waterfront, those will be filled with zeros
# FIX: assign the filled Series back instead of fillna(inplace=True) on a
# column selection, which is unreliable (chained assignment / copy-on-write).
df['waterfront'] = df['waterfront'].fillna(0)
df['view'] = df['view'].fillna(0)
# the column "yr_renovated" represents the year in which the house was renovated, we noticed that only 744 houses were renovated. since the proportion of the renovated houses is so few compared to the entire column, it would make more sense if we use it as a category where we assign 1 to the renovated houses and 0 to those that are not
df['yr_renovated'] = df['yr_renovated'].fillna(0)
df.loc[df['yr_renovated']!=0, ['yr_renovated']] = 1
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 — cast with
# the builtin int via astype instead of apply(np.int).
df.loc[:,'yr_renovated'] = df['yr_renovated'].astype(int)
df.rename(columns={'yr_renovated': 'renovated'}, inplace=True)
# Drop the column "Unnamed: 0" from axis 1 matching the string by RegEx
un_named_columns = df.iloc[:,df.columns.str.contains('^Unnamed', case=False, regex=True)]
df.drop(un_named_columns, axis=1, inplace=True)
del un_named_columns
# Drop "ID" column — a pure identifier carries no predictive signal.
df.drop(columns=['id'], inplace=True)
# Parse the sale date as a (naive) datetime for later feature work.
df['date'] = pd.to_datetime(df['date'], utc=False)
# <a href="#top">TOP!</a>
# <h2 id="data_exploration">DATA EXPLORATION</h2>
#
# our first goal is to identify the types of variables we will deal with, so we start by iterating the dataframe columns in alphabetical order instead of analyzing each variable based on statistical type and scale of measurement.
#
# define a set that will accumulate variables excluded from modeling
var_excluded = set()
# generate a list labelling each column as quantitative or qualitative
fig, ax1 = plt.subplots(ncols=1, sharey=True)
fig.set_size_inches(7,4)
sns.axes_style()
# NOTE(review): the label string contains typos ("Descrite & continues" should
# read "Discrete & continuous"); left unchanged here as it is runtime output.
variables_type = [ "Quantitative ( Descrite & continues )" if df[col].dtype in ['int64', 'float64'] else "Qualitative (Categorical)" for col in df.columns]
# NOTE(review): plt.figure opens a second, unused figure — the countplot is
# drawn on ax1 from the subplots call above.
plt.figure(figsize=(5,3))
sns.countplot(x=variables_type, ax=ax1)
plt.show()
# <h2 id="bedrooms">BEDROOMS</h2>
# + tags=[]
# Average living area per bedroom-count bucket.
fig, ax1 = plt.subplots(ncols=1, sharey=True)
fig.set_size_inches(7,4)
sns.axes_style()
sns.barplot(x=df.bedrooms.value_counts(), y=df.sqft_living)
# -
# <h2 id="sqft_basement">SQFT_ABOVE & SQFT_BASEMENT</h2>
# regarding the 2 variables "sqft_above" "sqft_basement" we noticed that the sum of both represents the actual square feet of the entire living area
#
# in fact if we take for example line number 3 where "sqft_living" is 1960 the "sqft_above" is 1050 it is easy to come out with the difference of "sqft_basement" which is 910.
#
# in the real world we calculate the house price if the basement is finished; since we do not have sufficient data to determine this, we exclude this variable from our analyses
var_excluded.update(('sqft_above', 'sqft_basement'))
# <h2 id="sqft_lot">SQFT_LOT & SQFT_LIVING</h2>
# Let us examine the importance of having a large lot. We will define a ratio of sqft_living over sqft_lot to understand if there is an ideal trade-off between lot size (presumably garden) and living space.
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
fig.set_size_inches(15,5)
sns.axes_style()
sns.scatterplot(x=df.sqft_lot, y=df.price, color='red', ax=ax1, label='Living Area Size ~ House Price')
sns.scatterplot(x=df.sqft_living, y=df.price, color='blue', ax=ax2, label='Lot Size ~ House Price')
# Count houses whose living area is at least as large as the whole lot.
_ = np.size(np.where((np.array(df.sqft_living/df.sqft_lot)) >= 1))
print('total houses with sqft_living >= sqft_lot:', _)
# unexpectedly we got 788 houses where the square footage of living area "sqft_living" is greater than lot square footage "sqft_lot", it's more likely about detached structures like garage, patios or even basement.
# <h2 id="floors">FLOORS</h2>
# + tags=[]
fig, ax1 = plt.subplots(ncols=1, sharey=True)
# FIX: set_figwidth takes a single width value — the second argument 4 was
# silently consumed as the `forward` flag. The intent (matching every other
# cell in this notebook) is a 7x4-inch figure.
fig.set_size_inches(7,4)
sns.boxplot(x=df['floors'], y=df['price'], ax=ax1)
# -
# <h2 id="waterfront">WATERFRONT</h2>
# the waterfront column describes whether the house has waterfront with the value 1, otherwise the value 0, as observed only 146 houses has a waterfront, that is less than 1% of the entire dataset.
# + tags=[]
df['waterfront'].value_counts().to_frame()
# -
# Mean price of waterfront vs non-waterfront houses.
waterfront_1 = df.loc[df.waterfront==1,'price'].mean()
waterfront_0 = df.loc[df.waterfront==0,'price'].mean()
# FIX: "higher by X%" is the percentage *increase*, (ratio - 1) * 100; the
# original printed the raw ratio * 100 (i.e. "164%" instead of "64%").
print(f"{'the waterfront house prices are higher by'} {((waterfront_1/waterfront_0)-1)*100:.2f}%")
# + tags=[]
fig, ax1 = plt.subplots(ncols=1, sharey=True)
fig.set_size_inches(7,4)
sns.axes_style()
waterfront_bp = sns.boxplot(x=df['waterfront'], y=df['price'], ax=ax1)
# -
# count the unique values of the 'floors' & 'waterfront' to determine the houses type.
df['floors'].value_counts().to_frame()
# <h2 id="view">VIEW</h2>
# the variable "view" describes the times that the house has been seen, however we have noticed that 19485 of the values are equal to zero.
# + tags=[]
fig, ax1 = plt.subplots(ncols=1, sharey=True)
fig.set_size_inches(7,4)
sns.axes_style()
sns.countplot(x=df['view'], ax=ax1)
# -
print("view: zeros value: {0:>10}".format((df.view==0).sum()))
print("renovated: zeros value: {0:>5}".format( (df.renovated==0).sum() ))
# we're going to exclude the 'view' column since almost all of its values are zero.
# <h2 id="condition">CONDITION</h2>
#
# <p>
# Relative to age and grade. Coded 1-5.<br>
# 1 = Poor- Worn out.<br>
# 2 = Fair- Badly worn.<br>
# 3 = Average <br>
# 4 = Good<br>
# 5 = Very Good</p>
# Price spread per condition level.
fig, ax1 = plt.subplots(ncols=1, sharey=True)
fig.set_size_inches(7,4)
sns.axes_style()
sns.violinplot(x=df['condition'], y=df['price'], ax=ax1)
# <h2 id="grade">GRADE</h2>
# Represents the construction quality of improvements. Grades run from grade 1 to 13.:
# <a href="https://info.kingcounty.gov/assessor/esales/Glossary.aspx?type=r#b">King County link</a>
# Left panel: house counts per grade; right panel: mean price per grade.
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=False)
fig.set_size_inches(15,5)
sns.axes_style()
df_by_grade = df.groupby('grade').size().reset_index(name='counts')
g = sns.barplot(x='grade', y='counts', data=df_by_grade, linewidth=3, errcolor='gray', ax=ax1)
# Annotate each bar with its raw count.
for index, row in df_by_grade.iterrows():
    g.text(row.name, row.counts, round(row.counts, 2), color='black', va='bottom', ha="center", fontsize=10)
g2=sns.barplot(x='grade', y='price', data=df, ax=ax2)
# ### let's compare the lot size with the living area and the percentage of living area compared to the lot size
# <h2 id="yr_built">YR_BUILT</h2>
#
# <p>
# Let's plot price against the year of construction; it seems to have a strong correlation.</p>
fig, ax1 = plt.subplots(nrows=1)
with plt.style.context('seaborn-talk'):
    fig.set_size_inches(7,4)
    sns.axes_style()
    # ci='sd' draws a band of one standard deviation around the mean price.
    sns.lineplot(x="yr_built", y="price", ci='sd', markers=True, data=df)
# <h2 id="price_distribution">PRICE DISTRIBUTION</h2>
# <p>simple Seaborn scatterplot shows homes by their latitude and longitude, with price set as the hue.</p>
plt.figure(figsize=(14,8))
sns.scatterplot(x=df.long, y=df.lat, hue=df.price, size=df.price, palette='flag')
plt.xlim(-122.5, -121.7)
plt.title('Price Distribution', fontdict={'fontsize': 20})
plt.show()
# <a href="#top">TOP!</a>
# <p>let's visualize the top 70 zip codes with with an interactive geographic map</p>
# +
# Plot one marker per unique zipcode (70 of them) on an interactive map,
# centered on the bounding box of the sampled coordinates.
dfzip = df.drop_duplicates('zipcode', keep='first')
centerlat = (dfzip['lat'].max() + dfzip['lat'].min()) / 2
centerlong = (dfzip['long'].max() + dfzip['long'].min()) / 2
map = folium.Map(location=[centerlat, centerlong], zoom_start=9)
#icon = folium.Icon(color='blue', icon_color='white', icon='info-sign',angle=0)
for i in range(dfzip.shape[0]):
    # Popup shows the sampled house price for that zipcode.
    pup = '${:,}'.format(dfzip.iloc[i]['price'])
    # FIX: the original tested dfzip.iloc[0] on every iteration, so every
    # marker took its color from the first row instead of the current one.
    if dfzip.iloc[i]['waterfront'] == 1:
        ic = folium.Icon(color='red', icon_color='red')
    else:
        ic = folium.Icon(color='blue', icon_color='white')
    folium.Marker([dfzip.iloc[i]['lat'], dfzip.iloc[i]['long']], icon=ic, popup=pup, radius=3).add_to(map)
# map.save('top_70_zipcode.html')
map
# -
# <h2 id="top">FEATURE ENGINEERING</h2>
# <p>exclude variables that are not relevant to the analysis, we're going to use the scatterplot matrix to evaluate the correlation and the Multicollinearity.</p>
var_excluded.update(('lat','long', 'zipcode', 'bathrooms','date'))
var_predictors = set(df.columns)-var_excluded
# <h2 id="long_lat">LONG & LAT</h2>
# <p>From previous plot we notice that the price seems to go down as houses are further from the center,
# It would be appropriate to create a new feature that represents distance from the center of King County.
# for this feature we used the geopy library, which essentially calculates the distance in miles from specific latitude and longitude points.</p>
lat_long=[(x,y) for x,y in zip(df['lat'], df['long'])]
kc = (47.6062, -122.3321) # king county usa downtown lat long
# Geodesic distance of every house from downtown, rounded to whole miles.
miles = [int(round(distance.distance(i, kc).miles,0)) for i in lat_long ]
df['distance'] = miles
# NOTE: the parentheses here are redundant — this adds the string 'distance'.
var_predictors.add(('distance'))
# NOTE(review): `fig` here is a leftover from an earlier cell; this resizes
# the previous figure, while catplot below creates its own figure.
fig.set_size_inches(12, 10)
# NOTE(review): this rebinds the name `distance` (shadowing geopy.distance
# imported at the top of the file) — any later geodesic call would break.
distance = sns.catplot(x='distance', y='price', data=df, height=5, aspect=1.4)
# <h2 id="cat_var">Categorical Variables</h2>
# <p>once we have identified the category variables, let's take a final visual look at the variables for further confirmation.</p>
# + tags=[]
var_categories = {'condition', 'waterfront', 'floors', 'renovated', 'bedrooms' , 'view', 'grade'}
# -
# One scatter panel per categorical candidate, cycling the color palette.
palette = itertools.cycle(sns.color_palette())
fig, axes = plt.subplots(nrows=1, ncols=len(var_categories), figsize=(20,5))
for xcol, ax in zip(var_categories, axes):
    sns.scatterplot(x=df[xcol], y=df['price'] , ax=ax, label=xcol, color=next(palette))
# <p>as we see from the graph it seems that variables are categoric type so we're going to use the pandas CUT method to segment these values into groups of "bins".</p>
# Bucket the build year into five ~23-year eras.
yr_built_bins = [1900,1923,1946,1969,1992,2015]
yr_built_labels = ['1900_1923','1924_1946','1947_1969','1970_1992','1993_2015']
yr_built_cat = pd.cut(x=df['yr_built'], bins=yr_built_bins, labels=yr_built_labels, include_lowest=True)
# Treat the eras as unordered categories for one-hot encoding below.
df['yr_built'] = yr_built_cat.cat.as_unordered()
var_categories.add(('yr_built'))
yr_built_unique = df.yr_built.unique()
n_construction = [df[df.yr_built == j].size for j in df.yr_built.unique()]
sns.barplot(x=yr_built_unique, y=n_construction)
# <p style="color:Black;">Convert categorical features into Int.' dtype</p>
df.condition = df.condition.astype(int)
df.waterfront = df.waterfront.astype(int)
df.floors = df.floors.astype(int)
df.renovated = df.renovated.astype(int)
df.grade = df.grade.astype(int)
df.view = df.view.astype(int)
# <h2 id="one_hat">ONE-HOT-ENCODING</h2>
# <p style="color:DodgerBlue;"></p>
# + tags=[]
# create dummy data, removing the redundant first level via drop_first=True
df_categories = pd.DataFrame()
for cat in var_categories:
    df_categories[cat]=df[cat].astype('category')
    df_dummy = pd.get_dummies(df_categories[cat], prefix=cat, drop_first=True)
    df_categories = df_categories.join(df_dummy)
    # drop the raw column once its dummies have been joined
    df_categories.drop(labels=cat, axis=1, inplace=True)
# -
# <p>
# using the USZIPCODE library we're going to decode the zip code in order to obtain a list of the corresponding neighborhoods. it's more efficient decoding by zipcode rather than coordinates since the unique values are only 70.
# we noticed that the 70 zip codes present in the dataframe refer to 24 neighborhoods; in other words, the 21597 houses are all concentrated in 24 urban areas.</p>
# +
search = SearchEngine()
# Map every zipcode to its city name, then one-hot encode the result.
neighbourhoods = [search.by_zipcode(c).city for c in df.zipcode]
df['neighbourhoods'] = neighbourhoods
df_neighbourhoods = pd.DataFrame()
df_neighbourhoods = df['neighbourhoods'].astype('category')
df_neighbourhoods = pd.get_dummies(df_neighbourhoods, drop_first=True)
# -
var_categories.add(('neighbourhoods'))
df_categories = df_categories.join(df_neighbourhoods)
# <h2 id="corr_matrix">CORRELATION MATRIX</h2>
# <p></p>
# Pearson correlation heatmap over the modeling features.
# FIX: the exclusion list contained the typo 'lon' — the dataframe column is
# 'long', so longitude was silently left in the matrix.
cor_features = set(df.columns)-set(('zipcode', 'view', 'sqft_basement','sqft_above','long','lat','bathrooms'))
corr = df[cor_features].corr(method='pearson')
# mask = corr[(corr !=1.000)]
# FIX: np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
# Mask the upper triangle so each pair is shown only once.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
plt.figure(figsize=(15, 7))
sns.heatmap(corr, mask=mask, annot=True, linewidth=.1, cmap="RdYlGn", square=True)
plt.title('Correlation between features', fontdict={'fontsize': 16})
# <!--
# predictors_corr = func.correlation(corr,.7,1)
# df[var_predictors].corr()['price'].sort_values(ascending=False)
# -->
# <a href="#top">TOP</a>
# <h2 id="pairplot">CHECK FEATURES DISTRIBUTION</h2>
# <p>
# We would like to investigate the relationship between our target variable price and the continuous feature variables in our dataset. We will make use of Seaborn's jointplot to simultaneously inspect linearity and distributions.
# </p>
df_features = pd.DataFrame(df[var_predictors-var_categories])
sns.pairplot(df_features, diag_kind="kde")
# <h2 id="standard_scaler">Min Max SCALER</h2>
# <p>Using the standard function from sklearn to scale the indepedent variables, so that all the features hold a standard weight towards the depedent variable.</p>
# + tags=[]
x_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
y = y_scaler.fit_transform(df.price.values.reshape(-1,1))
x_scaled = x_scaler.fit_transform(df_features.drop(labels=['price'], axis=1))
df_features = pd.DataFrame(x_scaled, columns=df_features.columns.difference(['price']))
df_features.head()
# -
# <a href="#top">TOP</a>
# <h2 id="standard_scaler">MODELING</h2>
# <p>
# as a starting point, we are trying to establish the following models, each responding to different criteria.</p>
#
#
# <h4 id="standard_scaler">Model A</h4>
# <p>
# our first model aims to establish the correlation between continuous features to obtain as a first result a moderate value of coefficient of determination R2.</p>
# +
x = df_features
x = sm.add_constant(x)
model_a = sm.OLS(y, x).fit()
model_a_pred = model_a.predict(x)
print(str(model_a.summary()))
# -
# <h5 id="pairplot">Model A Scores</h5>
model_a_mae, model_a_mse, model_a_rmse = func.metrics(y, model_a_pred)
# Using Variance Inflation Factor (VIF) we measure of collinearity among predictor variables within Model A.
func.vif(x)
# <h2 id="pairplot">MODEL B</h2>
# <p>the second model would be more accurate and complex given the numerous categorical variables. our goal is to obtain a better performing model with a high R2 value while maintaining a significant P-value below a threshold of 0.05</p>
# Model B: augment the continuous features with the one-hot categoricals.
x = pd.concat([x, df_categories], axis= 1)
# +
x = sm.add_constant(x)
model_b = sm.OLS(y,x).fit()
model_b_pred = model_b.predict(x)
print(str(model_b.summary()))
# -
# <h5 id="pairplot">Model b Scores #1</h5>
model_b_mae, model_b_mse, model_b_rmse = func.metrics(y, model_b_pred)
# <p>
# For an initial fit the model looks good obtaining a R-Squared of 0.785 and as well as Adj. R-squared of 0.785.
# The contribution is attributed to the categorical variables that make the model more stable and positive.</p>
# <p>
# Surprisingly the P-Value of the continuous variables is lower than the threshold of 0.05. Furthermore, some categories exceed the cut-off threshold, so we begin to discard these variables while maintaining those with a value of less than .05.</p>
# Stepwise selection (forward/backward by p-value) to prune insignificant features.
stepwise_result = func.stepwise_selection(x, y, verbose=False)
print('resulting features:')
print(stepwise_result)
# +
# Model C: refit OLS on the stepwise-selected feature subset only.
x = x[stepwise_result]
x = sm.add_constant(x)
model_c = sm.OLS(y,x).fit()
model_c_pred = model_c.predict(x)
print(str(model_c.summary()))
# -
# FIX: copy-paste error — the original recomputed metrics on model_b's
# predictions; after fitting model_c these should score model_c.
model_c_mae, model_c_mse, model_c_rmse = func.metrics(y, model_c_pred)
# <h2 id="pairplot">SKLEARN MODEL</h2>
# <p>
# Regression Model Validation
# </p>
# +
# Hold out 30% of the data for testing with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.3, random_state=7)
linreg = LinearRegression()
linreg.fit(x_train, y_train)
#Calculating predictions on the train set, and test set
y_hat_train = linreg.predict(x_train)
y_hat_test = linreg.predict(x_test)
#Calculating your residuals
train_residuals = y_hat_train - y_train
test_residuals = y_hat_test - y_test
#Calculating the Mean Squared Error
train_mse = mean_squared_error(y_train, y_hat_train)
test_mse = mean_squared_error(y_test, y_hat_test)
# NOTE(review): linreg.score(x, y) scores on the FULL dataset (train + test),
# which is optimistic — linreg.score(x_test, y_test) would be the honest
# generalization estimate.
print("\033[94m"f"{'R^2 Score':<30}{round(linreg.score(x, y),2):>5}")
print(f"{'Train Mean Squarred Error':<30} {train_mse:>5}")
print(f"{'Test Mean Squarred Error':<30} {test_mse:>5}")
# -
model_b_mae, model_b_mse, model_b_rmse = func.metrics(y_test, y_hat_test)
# <h2 id="pairplot">CROSS VALIDATION SCORE</h2>
# <p style="color:DodgerBlue;">
#
# </p>
# +
# 10-fold CV; `msw` (negative MSE per fold) is computed but never used below.
kf = KFold(n_splits=10, shuffle=True, random_state=74)
msw = cross_val_score(linreg, x_test, y_test, scoring='neg_mean_squared_error', cv=kf, n_jobs=1)
scores = cross_val_score(linreg, x, y, scoring='r2', cv=kf, n_jobs=1)
# -
model_b_mae, model_b_mse, model_b_rmse = func.metrics(y_test, y_hat_test)
| kc_house_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# https://www.kaggle.com/kmader/nuclei-overview-to-submission/notebook
# https://www.kaggle.com/keegil/keras-u-net-starter-lb-0-277/notebook
# +
import os
import glob
import warnings
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage.io import imread, imshow
# %matplotlib inline
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
# +
all_path = glob(os.path.join('data', '*_stage1', '*', '*', '*'))
len(all_path)
# +
# Index the dataframe by file path and derive the metadata columns from the
# directory layout: data/<group>_<stage>/<image_id>/<type>/<file>.
path_df = pd.DataFrame(index=all_path)
# NOTE(review): splitting on '/' assumes POSIX separators; on Windows glob
# yields '\\'-separated paths — use os.sep / pathlib if portability matters.
img_type = lambda path: path.split('/')[-2]
img_id = lambda path: path.split('/')[-3]
img_group = lambda path: path.split('/')[-4].split('_')[0]
img_stage =lambda path: path.split('/')[-4].split('_')[1]
path_df['Image_STAGE'] = path_df.index.map(img_stage)
path_df['Image_GROUP'] = path_df.index.map(img_group)
path_df['Image_ID'] = path_df.index.map(img_id)
path_df['Image_TYPE'] = path_df.index.map(img_type)
# -
path_df.head()
path_df.describe()
# ### Train
# +
# %%time
# Collapse the per-file rows into one row per training image, collecting all
# mask paths into a list and keeping the single image path.
train_df = path_df.query('Image_GROUP=="train"')
train_rows = []
group_cols = ['Image_STAGE', 'Image_ID']
for n_group, n_rows in train_df.groupby(group_cols):
    c_row = {col_name: col_value for col_name, col_value in zip(group_cols, n_group)}
    c_row['masks_path'] = n_rows.query('Image_TYPE == "masks"').index.tolist()
    c_row['image_path'] = n_rows.query('Image_TYPE == "images"').index[0]
    train_rows += [c_row]
train_img_df = pd.DataFrame(train_rows)
# +
# # %%time
# Record the raw (pre-resize) image shapes to inspect the size distribution.
train_img_df['shape'] = train_img_df['image_path'].map(lambda x: imread(x).shape)
train_img_df['shape'].value_counts()
# train_img_df['max'] = train_img_df['image_path'].map(lambda x: imread(x).max())
# train_img_df['max'].value_counts()
# train_img_df['min'] = train_img_df['image_path'].map(lambda x: imread(x).min())
# train_img_df['min'].value_counts()
# -
# Target size for every image/mask fed to the network.
img_height, img_width, img_channels = 256, 256, 3
# Train Images
# %%time
# Resize each image and keep only the first 3 channels (drops alpha).
image_array_list = [resize(imread(train_img_df.loc[_, 'image_path']), (img_height, img_width), preserve_range=True) \
                     for _ in range(train_img_df.shape[0])]
image_array = np.stack(image_array_list)[:, :, :, :3]
image_array.shape
# Train Masks
# %%time
# Merge all instance masks of an image into one array by summing, then resize.
# NOTE(review): np.sum can exceed 255 where instance masks overlap, so the
# /255 division below may yield values > 1 there; the commented np.max
# alternative would avoid that — confirm which is intended.
masks_array_list = [resize(np.sum(np.stack([imread(mask_path) for mask_path in train_img_df.loc[_, 'masks_path']]), axis=0), #np.max
                           (img_height, img_width), preserve_range=True) for _ in range(train_img_df.shape[0])]
masks_array = (np.stack(masks_array_list) / 255).astype(int)
masks_array.shape
# Save the preprocessed tensors so later runs skip the resize step.
save_path = 'stage1_train_data_compressed.npz'
np.savez_compressed(save_path, images=image_array, masks=masks_array)
# Load them back to verify the round trip.
load_path = 'stage1_train_data_compressed.npz'
# %%time
with np.load(load_path) as f:
    print(f.files)
    train_images = f['images']
    train_masks = f['masks']
# Spot-check sample 100: reloaded mask, its source mask files, and the image.
imshow(train_masks[100,:,:])
train_img_df.loc[100, :]
# +
s = np.sum(np.stack([imread(mask_path) for mask_path in train_img_df.loc[100, 'masks_path']]), 0)
imshow(s)
# -
imshow(masks_array[100,:,:])
imshow(imread(train_img_df.loc[100, 'image_path']))
# ### Test
# +
# Same grouping as the training set, but test images have no masks.
test_df = path_df.query('Image_GROUP=="test"')
test_rows = []
group_cols = ['Image_STAGE', 'Image_ID']
for n_group, n_rows in test_df.groupby(group_cols):
    c_row = {col_name: col_value for col_name, col_value in zip(group_cols, n_group)}
    c_row['image_path'] = n_rows.query('Image_TYPE == "images"').index[0]
    test_rows += [c_row]
test_img_df = pd.DataFrame(test_rows)
# Keep the original shapes — needed later to un-resize the predictions.
test_img_df['shape'] = test_img_df['image_path'].map(lambda x:imread(x).shape)
# -
test_img_df.head()
test_img_df['shape'].value_counts()
# %%time
# Resize every test image to the model input size, dropping any alpha channel.
test_image_array_list = [resize(imread(test_img_df.loc[_, 'image_path']), (img_height, img_width), preserve_range=True)[:,:,:3] \
                          for _ in range(test_img_df.shape[0])]
test_image_array = np.stack(test_image_array_list)
test_image_array.shape
# Save images together with their original shapes for post-processing.
test_save_path = 'stage1_test_data_compressed.npz'
test_image_shape = test_img_df['shape'].values
np.savez_compressed(test_save_path, images=test_image_array, shapes=test_image_shape)
test_img_df
| experiment/3.data_compress.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import urllib3
import time
# + [markdown] tags=[]
# # How fast can we get to [philosophy](https://en.wikipedia.org/wiki/Philosophy) ?
#
# ## Hypothesis
# In this experiment, I'll test the hypothesis that:
# **By going to the first link on any Wikipedia article, you'll end up on the [philosophy](https://en.wikipedia.org/wiki/Philosophy) article.**
#
# ## Solution
# To do this, I simplified the problem to two smaller problems:
# - Getting links from article (parsing article)
# - Downloading article and building URL tree
#
# For each article I'll enter the first URL, if that URL contains the phrase`Philosophy` the algorithm will end.
#
#
# `get_links_from_wiki` function parses the article. It works by finding a div that contains the whole article, then iterates through all paragraphs and finds all links that match pattern `/wiki/article_name`. Because there's no domain in that pattern, it is added at the end.
# -
def get_links_from_wiki(soup, n=5, prefix="https://en.wikipedia.org"):
    """
    Extract the first `n` internal article links from a parsed Wikipedia
    page, prepending `prefix` to each relative link.

    Parameters
    ----------
    soup : BeautifulSoup
        Parsed Wikipedia page.
    n : int
        Maximum number of links to return.
    prefix : str, default="https://en.wikipedia.org"
        Site prefix prepended to each "/wiki/..." link.

    Returns
    -------
    list
        List of absolute article URLs (at most `n`).
    """
    arr = []
    if n <= 0:
        # Match the original behavior: a non-positive limit yields no links.
        return arr
    # The div holding the article body text.
    div = soup.find("div", class_="mw-parser-output")
    for element in div.find_all("p") + div.find_all("ul"):
        # In each paragraph/list, collect <a href="/wiki/article_name">
        # links; skip media files ("." in href) and parenthesized pages.
        for a in element.find_all("a", href=True):
            if (
                a["href"].startswith("/wiki/")
                and len(a["href"].split("/")) == 3
                and ("." not in a["href"] and ("(" not in a["href"]))
            ):
                arr.append(prefix + a["href"])
                if len(arr) >= n:
                    # PERF FIX: return as soon as the quota is reached —
                    # the original only broke the inner loop and kept
                    # scanning every remaining paragraph.
                    return arr
    return arr
# The crawl function will be recursive: for each URL found on a page, it calls itself again. On each iteration it checks whether any extracted URL contains the target phrase; if so it returns both the site and the link to Philosophy. To control the number of recursive calls, the depth of the created tree is limited by the `deep` parameter.
def crawl(
    pool: urllib3.PoolManager,
    url,
    phrase=None,
    deep=1,
    sleep_time=0.5,
    n=5,
    prefix="https://en.wikipedia.org",
    verbose=False,
):
    """
    Recursively crawl the Wikipedia article at `url` to a maximum depth of
    `deep`. From each page extract up to `n` links; if `phrase` is given,
    stop descending as soon as any extracted link contains it.

    Parameters
    ----------
    pool : urllib3.PoolManager
        Request pool shared by all recursive calls.
    url : str
        Link to a Wikipedia article.
    phrase : str, optional
        Phrase to search for in the extracted urls; None disables the check.
    deep : int
        Remaining crawl depth.
    sleep_time : float
        Sleep time between requests (politeness delay).
    n : int
        Number of links to extract per page.
    prefix : str, default="https://en.wikipedia.org"
        Site prefix passed through to get_links_from_wiki.
    verbose : bool
        If True, print the article name at each recursion level.

    Returns
    -------
    tuple
        (url, children) where children is either a flat list of links (leaf)
        or a list of recursive (url, children) tuples.
    """
    if verbose:
        site = url.split("/")[-1]
        print(f"{deep} Entering {site}")
    # Sleep to avoid getting banned
    time.sleep(sleep_time)
    site = pool.request("GET", url)
    # FIX: the parser name is the second positional argument (`features`);
    # the original passed an unsupported `parser=` keyword.
    soup = BeautifulSoup(site.data, "lxml")
    links = get_links_from_wiki(soup=soup, n=n, prefix=prefix)
    # FIX: check `phrase is not None` FIRST. The original evaluated
    # `phrase in link` before the None check, raising TypeError for the
    # default phrase=None.
    is_phrase_present = phrase is not None and any(phrase in link for link in links)
    if deep > 0 and not is_phrase_present:
        return (
            url,
            [
                crawl(
                    pool=pool,
                    url=url_,
                    phrase=phrase,
                    deep=deep - 1,
                    sleep_time=sleep_time,
                    n=n,
                    prefix=prefix,
                    verbose=verbose,
                )
                for url_ in links
            ],
        )
    return url, links
# ## The experiment
# Instance of PoolManager that each crawler will share
pool = urllib3.PoolManager()
# To test the hypothesis we'll start from page `https://en.wikipedia.org/wiki/Doggart`, look for page `Philosophy` and set the link limit for the crawler to `1` so that it'll only enter the first link on each page.
crawl(pool, "https://en.wikipedia.org/wiki/Doggart", phrase="Philosophy", deep=50, n=1, verbose=True)
# As you can see after 25 iterations indeed we found `Philosophy` page.
# There's a famous Wikipedia phenomena that by clicking the first link in the main text of the article on the English Wikipedia, you'll eventually end up on the [philosophy](https://en.wikipedia.org/wiki/Philosophy) page. An explanation can be found [here](https://en.wikipedia.org/wiki/Wikipedia:Getting_to_Philosophy). Briefly, it's because of Wikipedia [Manual of Style guidelines](https://en.wikipedia.org/wiki/Wikipedia:MOSBEGIN) that recommend that articles begin by telling "what or who the subject is, and often when and where".
#
# This was true for roughly 97% of articles, so there's a big chance that by entering a random Wikipedia page and following the procedure you'll indeed end up on Philosophy. I could test this by hand, but this wouldn't be a dev.to article without writing some code. We'll start with how to download Wikipedia articles.
#
# ## How to get data
#
# It's simple - just request contents of and article with `urllib3`. Wikipedia follows a convenient pattern for naming its articles. After the usual `en.wikipedia.org/` there's a `/wiki` and then `/article_name` (or media! we'll deal with that later) for example, `en.wikipedia.org/wiki/Data_mining`.
#
# Firstly, I'll create a pool from which I'll make requests to Wikipedia.
#
# ```python
# import urllib3
# from bs4 import BeautifulSoup
#
# pool = urllib3.PoolManager()
# ```
#
# From now on, I can download the articles one by one. To automate the process of crawling through the site, the crawler will be recursive. Each iteration of it will return `(current_url, [crawler for link_on site])`, the recursion will stop, at given depth. In the end, I'll end up with tree structure.
#
# ```python
# def crawl(
# pool: urllib3.PoolManager,
# url,
# phrase=None,
# deep=1,
# sleep_time=0.5,
# n=5,
# prefix="https://en.wikipedia.org",
# verbose=False,
# ):
# """
# Crawls given Wikipedia `url` (article) with max depth `deep`. For each page
# extracts `n` urls and if `phrase` is given check if `phrase` in urls.
#
# Parameters
# ----------
# pool : urllib3.PoolManager
# Request pool
# phrase : str
# Phrase to search for in urls.
# url : str
# Link to wikipedia article
# deep : int
# Depth of crawl
# sleep_time : float
# Sleep time between requests.
# n : int
# Number of links to return
# prefix : str, default="https://en.wikipedia.org""
# Site prefix
#
# Returns
# -------
# tuple
# Tuple of url, list
# """
# if verbose:
# site = url.split("/")[-1]
# print(f"{deep} Entering {site}")
#
# # Sleep to avoid getting banned
# time.sleep(sleep_time)
# site = pool.request("GET", url)
# soup = BeautifulSoup(site.data, parser="lxml")
#
# # Get links from wiki (I'll show it later)
# links = get_links_from_wiki(soup=soup, n=n, prefix=prefix)
#
# # If phrase was given check if any of the links have it
# is_phrase_present = any([phrase in link for link in links]) and phrase is not None
# if deep > 0 and not is_phrase_present:
# return (
# url,
# [
# crawl(
# pool=pool,
# url=url_,
# phrase=phrase,
# deep=deep - 1,
# sleep_time=sleep_time,
# n=n,
# prefix=prefix,
# verbose=verbose,
# )
# for url_ in links
# ],
# )
# return url, links
# ```
#
# If you read the code carefully, you'd notice a function `get_links_from_wiki`. `get_links_from_wiki` function parses the article. It works by finding a div that contains the whole article, then iterates through all paragraphs (or lists) and finds all links that match pattern `/wiki/article_name`. Because there's no domain in that pattern, it is added at the end.
#
# ```python
# def get_links_from_wiki(soup, n=5, prefix="https://en.wikipedia.org"):
# """
# Extracts `n` first links from wikipedia articles and adds `prefix` to
# internal links.
#
# Parameters
# ----------
# soup : BeautifulSoup
# Wikipedia page
# n : int
# Number of links to return
# prefix : str, default="https://en.wikipedia.org""
# Site prefix
# Returns
# -------
# list
# List of links
# """
# arr = []
#
# # Get div with article contents
# div = soup.find("div", class_="mw-parser-output")
#
# for element in div.find_all("p") + div.find_all("ul"):
# # In each paragraph find all <a href="/wiki/article_name"></a> and
# # extract "/wiki/article_name"
# for i, a in enumerate(element.find_all("a", href=True)):
# if len(arr) >= n:
# break
# if (
# a["href"].startswith("/wiki/")
# and len(a["href"].split("/")) == 3
# and ("." not in a["href"] and ("(" not in a["href"]))
# ):
# arr.append(prefix + a["href"])
# return arr
# ```
#
# Now we have everything to check the phenomena. I'll set max depth to 50 and set `n=1` (to only expand first link in the article).
#
# ```python
# crawl(pool, "https://en.wikipedia.org/wiki/Doggart", phrase="Philosophy", deep=50, n=1, verbose=True)
# ```
#
# Output:
# ```output
# 50 Entering Doggart
# 49 Entering Caroline_Doggart
# 48 Entering Utrecht
# 47 Entering Help:Pronunciation_respelling_key
# ...
# 28 Entering Mental_state
# 27 Entering Mind
# 26 Entering Thought
# 25 Entering Ideas
#
# ('https://en.wikipedia.org/wiki/Doggart',
# [('https://en.wikipedia.org/wiki/Caroline_Doggart',
# [('https://en.wikipedia.org/wiki/Utrecht',
# [('https://en.wikipedia.org/wiki/Help:Pronunciation_respelling_key',
# [('https://en.wikipedia.org/wiki/Pronunciation_respelling_for_English',
#
# ...
# [('https://en.wikipedia.org/wiki/Ideas',
# ['https://en.wikipedia.org/wiki/Philosophy'])])])])])])])])])])])])])])])])])])])])])])])])])])
# ```
# As you can see after 25 iterations indeed we found `Philosophy` page.
#
# Found this post interesting? Check out my [Github](https://github.com/finloop) @finloop
#
#
#
#
#
#
#
| wikipedia/wikipedia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vectors
# ##### Citation: This notebook contains notes from the spring 2007 Topic in Applied Mathmatics I taught by Dr. <NAME> and selected problems from Williamson & Trotter Multivariable Mathmatics Fourth Edition. Python help can be found here:
# https://www.python-course.eu/index.php
#
# 1. Coordinate Vectors
# 2. Geometric Vectors
# 3. Lines and Planes
# 4. Dot Products
# 5. Euclidean Geometry
# 6. The Cross Product
import numpy as np
import scipy as sp
from scipy import stats
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.interpolate import *
import matplotlib.pyplot as plt
import pandas as pd
import datetime, math
import matplotlib.dates as dates
# ### Originally vectors were conceived of as geometric objects with magnitude and direction, suitable for representing physical quantities such as displacements, velocites, or forces. A more algebraic concept of the vector can be used to simplify various topics in applied mathematics. In Python a vecor is simply 1D array
#
# #### Scalars = numerical quantity like mass or temperature
# #### Let's start by adding scalars to arrays:
# Build a 1-D NumPy array (a vector) from a Python list, then add the
# scalar 2: NumPy broadcasts the scalar, adding it to every component.
lst = [2, 3, 7.9, 3.3, 6.9, 0.11, 10.3, 12.9]
v = np.array(lst) + 2
print(v)
# #### Multiplication, Subtraction, Division and exponentiation are as easy as the previous addition:
print(v * 2.2)   # element-wise scaling
print(v - 1.38)  # element-wise subtraction
print(v ** 2)    # element-wise square
print(v ** 1.5)  # element-wise power with a non-integer exponent
# ### Arithmetic Operations with two Arrays
# #### If we use another array instead of a scalar, the elements of both arrays will be component-wise combined:
A = np.array([ [11, 12, 13], [21, 22, 23], [31, 32, 33] ])
B = np.ones((3,3))
print("Adding to arrays: ")
print(A + B)
print("\nMultiplying two arrays: ")
print(A * (B + 1))
# #### "A * B" in the previous example shouldn't be mistaken for matrix multiplication. The elements are solely component-wise multiplied.
# ### Matrix Multiplication
# #### For this purpose, we can use the dot product. The dot product of two vectors x = (x1, ..., xn) and y = (y1, ..., yn) is the number given by the formula x . y = x1*y1 + ... + xn*yn.
#
# #### Using the previous arrays, we can calculate the matrix multiplication:
np.dot(A, B)  # true matrix product of the 3x3 arrays, not element-wise
# ### Definition of the dot Product
# #### The dot product is defined like this:
#
# #### dot(a, b, out=None)
#
# #### For 2-D arrays the dot product is equivalent to matrix multiplication. For 1-D arrays it is the same as the inner product of vectors (without complex conjugation). For N dimensions it is a sum product over the last axis of 'a' and the second-to-last of 'b'::
| InverseProblems/Primer1_vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <marquee scrollamount="10"
# direction="left"
# behavior="alternate">
# <IMG align="left" src="https://cdn1.iconfinder.com/data/icons/virus-3/512/virus-bacteria-microorganism-12-512.png" width="100"/>
# </marquee>
# <center>
# <font size="5" color="blue">Data Analysis on Covid19 dataset in India</font>
# </center>
# From World Health Organization on 31 December 2019 alerted to several cases of pneumonia in Wuhan City, Hubei Province of China. The virus did not match any other known virus. This raised concern because when a virus is new, we do not know how it affects people.
#
# 2019 Novel Coronavirus (2019-nCoV) is a virus (more specifically, a coronavirus) identified as the cause of an outbreak of respiratory illness first detected in Wuhan, China. Early on, many of the patients in the outbreak in Wuhan, China reportedly had some link to a large seafood and animal market, suggesting animal-to-person spread. However, a growing number of patients reportedly have not had exposure to animal markets, indicating person-to-person spread is occurring. At this time, it’s unclear how easily or sustainably this virus is spreading between people.
#
# Following analysis is for India using the data available in public domain. [Click here](https://www.covid19india.org) to get the interactive dashboard on Covid19 in India. There are four APIs giving all the data necessary for doing analysis.
# * https://api.covid19india.org/data.json
# * https://api.covid19india.org/state_district_wise.json
# * https://api.covid19india.org/travel_history.json
# * https://api.covid19india.org/raw_data.json
# **API** is the acronym for *Application Programming Interface*. It is a tool that helps in the interaction between computer programs and web services.
#
# **JSON** stands for *JavaScript Object Notation*. It is lightweight format for storing and transporting data and often used when data is sent from a server to a web page.
# %reset -f
import warnings
warnings.filterwarnings("ignore")# To ignore the warnings
# +
# %%writefile corona.py
import pandas as pd # Library to read and write the data in structure format
import numpy as np # Library to deal with vector, array and matrices
import requests # Library to read APIs
import re # Library for regular expression
import json # Library to read and write JSON file
from bs4 import BeautifulSoup # Library for web scraping
####################################### APIs to be scraped to get real-time Corona data ############################
moh_link = "https://www.mohfw.gov.in/"                                   # MoHFW website (HTML table, scraped)
url_state = "https://api.covid19india.org/state_district_wise.json"      # district-wise counts
data_data = "https://api.covid19india.org/data.json"                     # statewise, time series and test data
travel_history="https://api.covid19india.org/travel_history.json"        # patient travel history
raw_data="https://api.covid19india.org/raw_data.json"                    # patient-level raw records
class COVID19India(object):
    """Client for fetching COVID-19 statistics for India.

    Data sources: the Ministry of Health and Family Welfare (MoHFW)
    website, scraped with ``pandas.read_html``, and the public
    api.covid19india.org JSON endpoints.  Relies on the module-level
    URL constants defined above (``moh_link``, ``url_state``,
    ``data_data``, ``travel_history``, ``raw_data``).
    """

    def __init__(self):
        self.moh_url = moh_link                   # MoHFW website (HTML table)
        self.url_state = url_state                # district-wise JSON
        self.data_url = data_data                 # all-India JSON (statewise, time series, tests)
        self.travel_history_url = travel_history  # patient travel history JSON
        self.raw_data_url = raw_data              # patient-level raw data JSON

    def __request(self, url):
        """GET *url* and decode the JSON response body."""
        return requests.get(url).json()

    def moh_data(self):
        """Return the MoHFW state-wise table as a DataFrame.

        The last HTML table on the page holds the data; its final row
        (a totals row) and the serial-number column are dropped.
        """
        df = pd.read_html(self.moh_url)[-1].iloc[:-1]
        del df['S. No.']
        return df

    def statewise(self):
        """Return the (statewise, cases_time_series, tested) DataFrames."""
        data = self.__request(self.data_url)
        statewise = pd.DataFrame(data.get('statewise'))
        cases_time_series = pd.DataFrame(data.get('cases_time_series'))
        tested = pd.DataFrame(data.get('tested'))
        return (statewise, cases_time_series, tested)

    def state_district_data(self):
        """Return a DataFrame of confirmed cases per (state, district)."""
        state_data = self.__request(self.url_state)
        rows = []
        for state in state_data:
            for district, counts in state_data[state]['districtData'].items():
                values = list(counts.values())
                # The first value in each district record is the confirmed count.
                rows.append([state, district, values[0]])
        return pd.DataFrame(rows, columns=['State_UT', 'District', 'Confirmed'])

    def travel_history(self):
        """Return the patients' travel history as a DataFrame."""
        history_data = self.__request(self.travel_history_url)
        return pd.DataFrame(history_data.get("travel_history"))

    def raw_data_info(self):
        """Return the patient-level raw data as a DataFrame."""
        raw = self.__request(self.raw_data_url)
        return pd.DataFrame(raw.get("raw_data"))
# -
# To delete the memory in notebook to utilize the corona library
# %reset -f
import warnings
warnings.filterwarnings("ignore")# To ignore the warnings
import matplotlib.pyplot as plt# Python library for plotting graphs
# Default figure size for the plots below (width, height in inches).
plot_size = plt.rcParams["figure.figsize"]
plot_size[0] = 12
plot_size[1] = 3
# NOTE(review): this line only reads the value back and discards it; the
# explicit assignment `plt.rcParams["figure.figsize"] = plot_size` appears
# later in this notebook — confirm whether it was intended here too.
plt.rcParams["figure.figsize"]
# %matplotlib notebook
from corona import COVID19India
coviddata=COVID19India()
# # [Ministry of Health and Family welfare](https://www.mohfw.gov.in/) data analysis
# importing the libraries
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Scrape the MoHFW page directly to show the headline status text.
url="https://www.mohfw.gov.in/"
html_content = requests.get(url).text
# Parse the html content
soup = BeautifulSoup(html_content, "lxml")
name = soup.find_all('div', { "class" :"status-update"})
value = soup.find_all('h2')
print([row.get_text() for row in value][0])  # first <h2> carries the headline
# Fetch the MoHFW state-wise table; drop the last two (summary) rows.
mohwf=coviddata.moh_data()[:-2]
mohwf.iloc[:,1:4]=mohwf.iloc[:,1:4].astype(int)  # numeric columns arrive as strings
mohwf
print("Ministry of Health and Family welfare report")
mohwf.sum()[1:]  # national totals across states
mohwf.iloc[:,1:4].astype(int).describe()
# Spearman rank correlation between confirmed / cured / death counts.
corr = mohwf.iloc[:,1:4].astype(int).corr(method ='spearman')
corr.style.background_gradient(cmap='coolwarm')
# This shows there is strong relationship between the cases on Indian national and the Cured/Discharged/Migrated. So, It can be infer that medical facility is better for Indian citizens than that of Foreigners or Indians migrated from taking medical treatment.
import matplotlib.pyplot as plt
# %matplotlib inline
plot_size = plt.rcParams["figure.figsize"]
plot_size[0] = 15
plot_size[1] = 4
plt.rcParams["figure.figsize"] = plot_size
mohwf.iloc[:,1:].plot(kind='density', subplots=True, layout=(1,4), sharex=True)
plt.show()
# The peaks of a Density Plot help display where values are concentrated over the interval. These graphs suggest that India is still in [stage 2](https://www.connectedtoindia.com/the-four-stages-of-covid-19-explained-7280.html) of Covid19.
#
# `Probability density is the probability per unit on the x-axis`
# **Stage 1**: When cases are only imported from affected countries and therefore only those who have travelled abroad test positive. At this stage there is no spread of the disease locally.
#
# **Stage 2**: When there is local transmission from infected persons. This will usually be relatives or acquaintances of those who travelled abroad who test positive after close contact with the infected person.
#
# At this stage, fewer people are affected, the source of the virus is known and is therefore easier to perform contact tracing and contain the spread via self-quarantining. Countries like India are currently in Stage 2.
#
# **Stage 3**: This is the stage of community transmission. In this stage those who have not been exposed to an infected person or anyone who has a travel history to affected countries, still test positive. In other words, people are unable to identify where they might have picked up the virus from. Countries like Singapore, Australia are currently in Stage 3.
#
# **Stage 4**: This is the worst stage of the infection where it takes on the form of an epidemic. Massive numbers are infected and it is very difficult to control and contain the spread. This is what China dealt with. USA, Italy and Spain is under this stage.
# Three stacked bar charts: one per MoHFW numeric column (confirmed, cured,
# deaths), each sorted descending so the worst-hit states appear first.
fig, axs = plt.subplots(3,1,figsize=(15,10))
mohwf.sort_values([list(mohwf.columns)[1]], ascending=[False]).plot(x=list(mohwf.columns)[0], y=list(mohwf.columns)[1], kind = 'bar',color='k',ax=axs[0])
axs[0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
ax0=mohwf.sort_values([list(mohwf.columns)[2]], ascending=[False]).plot(x=list(mohwf.columns)[0], y=list(mohwf.columns)[2], kind = 'bar',color='g',ax=axs[1])
axs[1].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
ax0=mohwf.sort_values([list(mohwf.columns)[3]], ascending=[False]).plot(x=list(mohwf.columns)[0], y=list(mohwf.columns)[3], kind = 'bar',color='r',ax=axs[2])
axs[2].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
plt.tight_layout()
# # [covid19india](https://www.covid19india.org) data analysis
# Pull the three covid19india.org DataFrames in one call.
statewise,cases_time_series,tested=coviddata.statewise()
statewise.iloc[0,:]   # row 0 is the all-India "Total" entry
statewise.iloc[1:,:]  # remaining rows are individual states
statewise[["active","confirmed","deaths","recovered"]]=statewise[["active","confirmed","deaths","recovered"]].astype(int)
from pandas.plotting import scatter_matrix
scatter_matrix(statewise)
plt.show()
corr = statewise.corr(method ='spearman')
corr.style.background_gradient(cmap='coolwarm')
# This shows the recovery rate is very low in the case of India, as active and confirmed cases are highly correlated.
# Four bar charts: confirmed / active / recovered / deaths per state,
# each sorted descending.
fig, axs = plt.subplots(2,2,figsize=(15,10))
statewise.sort_values(['confirmed'], ascending=[False]).plot(x='state', y='confirmed', kind = 'bar',color='k',ax=axs[0,0])
axs[0,0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
statewise.sort_values(['active'], ascending=[False]).plot(x='state', y='active', kind = 'bar',color='b',ax=axs[0,1])
axs[0,1].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
ax0=statewise.sort_values(['recovered'], ascending=[False]).plot(x='state', y='recovered', kind = 'bar',color='g',ax=axs[1,0])
axs[1,0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
ax0=statewise.sort_values(['deaths'], ascending=[False]).plot(x='state', y='deaths', kind = 'bar',color='r',ax=axs[1,1])
axs[1,1].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode="expand", borderaxespad=0.)
plt.tight_layout()
# Time-series columns also arrive as strings; cast to int for plotting.
cases_time_series[["dailyconfirmed","dailydeceased","dailyrecovered","totalconfirmed","totaldeceased","totalrecovered"]]=cases_time_series[["dailyconfirmed","dailydeceased","dailyrecovered","totalconfirmed","totaldeceased","totalrecovered"]].astype(int)
cases_time_series
# Daily new counts as bar charts.
fig, axs = plt.subplots(3,1,figsize=(15,10))
cases_time_series.plot(x='date', y='dailyconfirmed',kind = 'bar',color='k',ax=axs[0])
cases_time_series.plot(x='date', y='dailydeceased',kind = 'bar',color='r',ax=axs[1])
cases_time_series.plot(x='date', y='dailyrecovered',kind = 'bar',color='g',ax=axs[2])
plt.tight_layout()
# Cumulative counts as line charts.
fig, axs = plt.subplots(3,1,figsize=(15,10))
cases_time_series.plot(x='date', y='totalconfirmed',color='k',ax=axs[0])
cases_time_series.plot(x='date', y='totaldeceased',color='r',ax=axs[1])
cases_time_series.plot(x='date', y='totalrecovered',color='g',ax=axs[2])
cases_time_series[["dailyconfirmed","dailydeceased","dailyrecovered","totalconfirmed","totaldeceased","totalrecovered"]].plot(kind='density', subplots=True, layout=(3,2), sharex=True)
plt.tight_layout()
corr = cases_time_series[["dailyconfirmed","dailydeceased","dailyrecovered"]].corr(method ='spearman')
corr.style.background_gradient(cmap='coolwarm')
# A **Pearson correlation** is a measure of a linear association between 2 normally distributed random variables. A **Spearman rank correlation** describes the monotonic relationship between 2 variables.
tested
# Clean the "tested" frame: empty strings become 0 so the columns can be
# cast to int below.  (replace with inplace=True matches whole-cell values.)
tested['totalindividualstested'].replace('', 0, inplace=True)
tested['totalpositivecases'].replace('', 0, inplace=True)
tested['totalsamplestested'].replace('', 0, inplace=True)
tested['testsconductedbyprivatelabs'].replace('', 0, inplace=True)
tested['positivecasesfromsamplesreported'].replace('', 0, inplace=True)
tested['samplereportedtoday'].replace('', 0, inplace=True)
# Strip thousands separators before the int cast.
# NOTE(review): only these two columns are comma-stripped, yet six columns
# are cast to int below — confirm the other four never contain commas.
tested['samplereportedtoday'] =tested['samplereportedtoday'].replace(',', '', regex=True)
tested['totalpositivecases'] =tested['totalpositivecases'].replace(',', '', regex=True)
tested
import pandas as pd
# tested["updatetimestamp"]=tested["updatetimestamp"].apply(pd.to_datetime)
tested[['positivecasesfromsamplesreported','samplereportedtoday','testsconductedbyprivatelabs','totalindividualstested','totalpositivecases','totalsamplestested']]=tested[['positivecasesfromsamplesreported','samplereportedtoday','testsconductedbyprivatelabs','totalindividualstested','totalpositivecases','totalsamplestested']].astype(int)
# One bar chart per testing metric over time.
fig, axs = plt.subplots(6,1,figsize=(15,20))
tested.plot(kind='bar',x='updatetimestamp', y='totalindividualstested',color='k',ax=axs[0])
tested.plot(kind='bar',x='updatetimestamp', y='totalpositivecases',color='r',ax=axs[1])
tested.plot(kind='bar',x='updatetimestamp', y='totalsamplestested',color='b',ax=axs[2])
tested.plot(kind='bar',x='updatetimestamp', y='positivecasesfromsamplesreported',color='k',ax=axs[3])
tested.plot(kind='bar',x='updatetimestamp', y='samplereportedtoday',color='k',ax=axs[4])
tested.plot(kind='bar',x='updatetimestamp', y='testsconductedbyprivatelabs',color='k',ax=axs[5])
plt.tight_layout()
# Patient-level raw data; agebracket holds strings, including ranges and
# decimals, so a few malformed values are filtered out before the int cast.
data=coviddata.raw_data_info()
data
data.query("agebracket != '' & agebracket != '28-35' & agebracket != '1.5' & agebracket != '0.4' & agebracket != '0.3'").agebracket.astype(int).plot(kind='density', subplots=True, layout=(3,2), sharex=True,color="k")
plt.tight_layout()
# Most of the infected people are in the 27-53 age group
data.query("agebracket != '' & agebracket != '28-35' & agebracket != '1.5' & agebracket != '0.4' & agebracket != '0.3'").agebracket.astype(int).describe()
data1=data.query("agebracket != '' & agebracket != '28-35' & agebracket != '1.5' & agebracket != '0.4' & agebracket != '0.3'")
data1.agebracket=data1.agebracket.astype(int)
print("Statistics for age group 27-53")
# NOTE(review): the query bounds are 25..53 while the printed label says
# 27-53 — confirm which range was intended.
data1.query('agebracket >=25 and agebracket <=53').currentstatus.value_counts()
print("Statistics for age less than 27")
data1.query('agebracket <25').currentstatus.value_counts()
print("Statistics for age more than 53")
data1.query('agebracket >=53').currentstatus.value_counts()
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
# Extend the default stopword list with pandas repr artefacts (Name, dtype,
# Length, NaN, ...) so they do not dominate the cloud.
stopwords =list(STOPWORDS)
stopwords.extend(["_d180g","Object","Name","NaN","dtype","Length","backupnotes","contact","history"])
def show_wordcloud(data, title = None):
    """Render a word cloud of str(data), skipping the module-level stopwords.

    data  : any object; its string representation is tokenised.
    title : optional figure title.
    """
    wordcloud = WordCloud(background_color='white',stopwords=stopwords,max_words=200,max_font_size=40, relative_scaling = 1,random_state=1).generate(str(data))
    fig = plt.figure(1, figsize=(10,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=15)
        fig.subplots_adjust(top=1.25)
    plt.imshow(wordcloud)
    plt.show()
show_wordcloud(data['backupnotes'],title = "Wordcloud of travel history")
data
# Most of the visitors are from Austria, Singapore, Dubai and Wuhan(China)
# +
# from TM import Topic_modeling
# tm=Topic_modeling(data,"backupnotes")# Put your DataFrame as first argument and columns name whose topic modelling you want to do as second argument
# tm.modeling()
# -
# <font color="green">Text mining results of object dtypes columns</font>
# * <a href="https://ashishcssom.github.io/Covid19-data-analysis-on-Indian-dataset/notes.html">notes</a>
# * <a href="https://ashishcssom.github.io/Covid19-data-analysis-on-Indian-dataset/backupnotes.html">backupnotes</a>
# Keep only rows where gender is recorded.
data2=data.query('gender != ""')
# Stacked bar charts of gender split by nationality, detected state and
# current status, plus current status per state.
fig, axs = plt.subplots(2,2,figsize=(15,10))
data2.groupby(['nationality',"gender"]).size().unstack().plot(kind='bar', stacked=True,title="Gender ratio nation wise",ax=axs[0,0])
data2.groupby(['detectedstate',"gender"]).size().unstack().plot(kind='bar', stacked=True,title="Gender ratio detected state wise",ax=axs[0,1])
data2.groupby(['currentstatus',"gender"]).size().unstack().plot(kind='barh', stacked=True,title="Current status",ax=axs[1,0])
data2.groupby(['detectedstate','currentstatus']).size().unstack().plot(kind='bar', stacked=True,title="Current status state wise",ax=axs[1,1])
plt.tight_layout()
data2.gender.describe()
# Males are more affected by this virus. Possible reasons could be lower immunity, more travel, or respiratory illness due to smoking habits.
coviddata.travel_history()
coviddata.state_district_data()
# ## Conclusion
#
# In this analysis the data is retrieved from two sources: the Ministry of Health and Family Welfare website and the covid19india API. The inferences derived from the analysis are:
# * The outbreak is exotic: it was brought to India by people with travel history to Wuhan, Italy, Austria, Dubai, and Singapore, and then spread from person to person
# * Most of the cases are observed in densely populated areas
# * The outbreak is in Stage 2, so a lockdown is one of the containment measures that can be taken
# * The rate of screening should increase, at a higher temporal and spatial resolution
# * Males are more affected by this virus
# * Older people are less immune to this disease, which is why more deaths happened in the older age group [>53]
# ***Several more conclusions can be drawn from this analysis***
| Covid19 data analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import numpy as np
import matplotlib.pyplot as plt
from simulation import *
# Names of the five spectral diagnostics returned by simulation().
spec_names=('gap', 'shifted_gap', 'log10_gap', 'log10_shifted_gap', 'ratio')
dim_loc=3   # local Hilbert-space dimension (Z3 clock)
L=5         # chain length
n_dis=100   # number of disorder realizations
simdict={'dim_loc': dim_loc, 'L': L, 'n_dis': n_dis}
phi=np.pi/6
# Measurement times: powers of two, 2^0 .. 2^39 driving periods.
time_set= np.power(2, np.arange(40))
idata={'JZZ': 1.0, 'hZ': 0.3, 'hX': 0.1, 'alphas': np.array([np.exp(1j*phi), np.exp(-1j*phi)]),\
'betas': np.ones(dim_loc-1), 'lambdas': np.ones(dim_loc-1), 'phi': phi}
filename='clock3_'+str(L)+'.txt'
# +
# Run the disorder-averaged clock-model simulation and write the results
# to a text file: "# key value" header lines followed by a five-column
# numeric table (time, Re Z, Im Z, Var Re Z, Var Im Z).
clockH, clockK, clockZ = clock(dim_loc, L)
Z_mean, Z_var, spectral_data, spectral_data_var = simulation(dim_loc, L, n_dis, idata, clockH, clockK, clockZ, time_set)
with open(filename, 'wb') as f:
    for key, value in simdict.items():
        f.write(('\n# '+key+' '+str(value)).encode('utf-8'))
    for key, value in idata.items():
        f.write(('\n# '+key+' '+str(value)).encode('utf-8'))
    for i in range(5):
        f.write(('\n# '+spec_names[i]+' '+str(spectral_data[i])+' '+str(spectral_data_var[i])).encode('utf-8'))
    # Fixed: the column header was missing the tab escape before Im(Z)
    # ("\Im(Z)" wrote a literal backslash-I instead of a tab separator).
    f.write('\n# time\tRe(Z)\tIm(Z)\tVar(Re(Z))\tVar(Im(Z))\n'.encode('utf-8'))
    np.savetxt(f, np.stack((time_set, np.real(Z_mean), np.imag(Z_mean), np.real(Z_var), np.imag(Z_var)), axis=-1))
# +
import re
lenarr=len(time_set)
dt = np.dtype([('JZZ', np.float64), ('hZ', np.float64), ('hX', np.float64),
('FILENAME', np.unicode_, 32), ('phi', np.float64), ('dim_loc', np.int32),
('n_dis', np.int32), ('L', np.int32), ('time_set', np.float64, (lenarr,)),
('ReZ', np.float64, (lenarr,)), ('ImZ', np.float64, (lenarr,)), ('ReVarZ', np.float64, (lenarr,)),
('ImVarZ', np.float64, (lenarr,))])
def get_data(file):
    """Read one simulation output file into a structured numpy record."""
    d = np.zeros([], dtype=dt)
    d['FILENAME'] = file
    # Header lines look like "# <name> <value>"; try the value formats in
    # order (scientific float, plain float, integer).  A failed match or an
    # unknown field name simply falls through to the next format, and a line
    # matching none of them is skipped — same behavior as the original
    # nested try/except chain.
    header_formats = (
        (r"# (\S+) ([-+]?\d+.\d+e[+-]?\d+)", float),
        (r"# (\S+) ([-+]?\d+.\d+)", float),
        (r"# (\S+) (\d+)", int),
    )
    with open(file) as f:
        for line in f:
            for pattern, convert in header_formats:
                try:
                    m = re.match(pattern, line)
                    d[m.group(1)] = convert(m.group(2))
                    break
                except Exception:
                    continue
    # The non-comment body of the file holds the five data columns.
    d['time_set'], d['ReZ'], d['ImZ'], d['ReVarZ'], d['ImVarZ'] = np.genfromtxt(file, unpack=True, dtype=np.complex128)
    return d
# -
# Load each system size L = 2..5 into one structured-array dataset.
# (np.append copies on every call, but the loop is only four iterations.)
dataset=np.empty(0, dtype=dt)
for L in range(2,6):
    d=get_data('clock3_%d.txt' %L)
    dataset=np.append(dataset, d)
# +
# Plot Re(Z) against log10(time) for each loaded simulation.
f, ax = plt.subplots(1,1,figsize=(10,6))
for sim in dataset:
    # Fixed: the label used the stale loop-leftover global L, so every
    # curve was labelled with the same value; use each record's own size
    # (same as the later copy of this plot below).
    ax.plot(np.log10(sim['time_set']), sim['ReZ'],label= r'$L=$%d' % sim['L'])
plt.show()
# -
def get_data(file):
    """Read a simulation output file with a FOUR-column numeric body.

    NOTE: this redefinition shadows the earlier get_data above; the only
    difference is that the data table holds (time, ReZ, ImZ, Varz) instead
    of five columns.  Header parsing is identical: each "# name value" line
    is tried against three value formats in turn, and any failure (no
    regex match or unknown field name) silently falls through.
    """
    d = np.zeros([], dtype=dt)
    d['FILENAME']=file
    with open(file) as f:
        for line in f:
            try:
                # scientific-notation float, e.g. 1.23e-04
                m = re.match("# (\S+) ([-+]?\d+.\d+e[+-]?\d+)", line)
                d[m.group(1)]=float(m.group(2))
            except:
                try:
                    # plain float, e.g. 0.30
                    m = re.match("# (\S+) ([-+]?\d+.\d+)", line)
                    d[m.group(1)]=float(m.group(2))
                except:
                    try:
                        # bare integer, e.g. 100
                        m = re.match("# (\S+) (\d+)", line)
                        d[m.group(1)]=int(m.group(2))
                    except: pass
    # Varz is discarded; only the first three columns are stored in d.
    d['time_set'], d['ReZ'], d['ImZ'], Varz=np.genfromtxt(file, unpack=True, dtype=np.complex128)
    return d
# Reload with the four-column reader: perturbed-kick runs for L = 2..6.
dataset=np.empty(0, dtype=dt)
for L in range(2,7):
    d=get_data('pertkick/Z_02_0%d.dat' %L)
    dataset=np.append(dataset, d)
# +
# |Z(t)| versus log10(t/T), one curve per system size.
f, ax = plt.subplots(1,1,figsize=(5,3))
for sim in dataset:
    ax.plot(np.log10(sim['time_set']), sim['ReZ'],label= r'$L=$%d' % sim['L'])
ax.set_xlabel(r'$\log_{10} (t/T)$')
ax.set_ylabel(r'$|Z(t)|$')
ax.legend()
plt.tight_layout()
#plt.show()
#plt.savefig('pertkick/pertkick2.eps')
# +
# Spectral-gap scaling plot: log10 of the (shifted) gap versus log10 of the
# perturbation strength epsilon, one color per system size, squares for the
# shifted gap and circles for the bare gap.
f2, ax2 = plt.subplots(1,1,figsize=(7,4))
# NOTE(review): phivec, phinum and handles below are assigned but unused.
phivec=np.array([np.pi/24, np.pi/12, np.pi/8, np.pi/6, -np.pi/24, -np.pi/12, -np.pi/8, -np.pi/6])
phinum=2
linesym=[]
for L in range(2,6):
    epsilon, gaps, shifted_gaps, log10_gaps, log10_shifted_gaps, ratio,\
    var_gaps, var_shifted_gaps, var_log10_gaps, var_log10_shifted_gaps, var_ratio\
    = np.loadtxt('pertkick_spectrum/spectrum_07_0%d.dat' %L, unpack=True)
    ax2.plot(np.log10(epsilon), log10_shifted_gaps,label= r'$L=$%d' % L, color='C%d' %(L-2),marker='s', linestyle='')
    ax2.plot(np.log10(epsilon), log10_gaps, marker='o', color='C%d' %(L-2), linestyle='')
    linesym.append(plt.Line2D((0,1),(0,0), color='C%d' %(L-2)))
#Get artists and labels for legend and chose which ones to display
handles, labels = ax2.get_legend_handles_labels()
labels, linesym = zip(*sorted(zip(labels, linesym), key=lambda t: t[0]))
#Create custom artists
symo = plt.Line2D((0,1),(0,0), color='k', marker='o', linestyle='')
syms = plt.Line2D((0,1),(0,0), color='k', marker='s', linestyle='')
#Create legend from custom artist/label lists
#figleg, axleg=plt.subplots(nrows=1, figsize=(2,2.5))
#axleg.axis('off')
#legend=axleg.legend(list(linesym)+[symo, syms], list(labels)+['$\log_{10}\Delta_0$', '$\log_{10}\Delta$'], loc='upper center', shadow=False, numpoints=1)
legend=ax2.legend(list(linesym)+[symo, syms], list(labels)+['$\log_{10}\Delta_0$', '$\log_{10}\Delta$'], loc='lower right', shadow=False, numpoints=1)
ax2.set_xlabel(r'$\log_{10} \epsilon$')
plt.tight_layout()
| 2018/Z3 clock tc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fenotebook
# language: python
# name: fenotebook
# ---
# # Missing value imputation: CategoricalImputer
#
#
# CategoricalImputer performs imputation of categorical variables. It replaces missing values by an arbitrary label "Missing" (default) or any other label entered by the user. Alternatively, it imputes missing data with the most frequent category.
#
# **For this demonstration, we use the Ames House Prices dataset produced by Professor <NAME>:**
#
# <NAME> (2011) Ames, Iowa: Alternative to the Boston Housing
# Data as an End of Semester Regression Project, Journal of Statistics Education, Vol.19, No. 3
#
# http://jse.amstat.org/v19n3/decock.pdf
#
# https://www.tandfonline.com/doi/abs/10.1080/10691898.2011.11889627
#
# The version of the dataset used in this notebook can be obtained from [Kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data)
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from feature_engine.imputation import CategoricalImputer
# -
data = pd.read_csv('houseprice.csv')
data.head()
# +
# let's separate into training and testing set
# Hold out 30% of the rows as a test set; drop the identifier and the
# target from the feature matrix.
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(['Id', 'SalePrice'], axis=1), data['SalePrice'], test_size=0.3, random_state=0)
X_train.shape, X_test.shape
# +
# these are categorical variables with missing data
X_train[['Alley', 'MasVnrType']].isnull().mean()
# +
# number of observations per category
X_train['MasVnrType'].value_counts().plot.bar()
# -
# ## Imputation with string missing
# +
# Impute the two NA-containing categorical variables with the default
# placeholder string (a new "Missing" category).
imputer = CategoricalImputer(
    imputation_method='missing',
    variables=['Alley', 'MasVnrType'])
imputer.fit(X_train)
# -
# here we find the string that will be used for the imputation
imputer.fill_value
# +
# same string for all variables
imputer.imputer_dict_
# +
# perform imputation
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)
# +
# we can observe in the below plot, the presence of a
# new category, where before there were NA
test_t['MasVnrType'].value_counts().plot.bar()
# -
test_t['Alley'].value_counts().plot.bar()
# ## Frequent Category Imputation
#
# We can also replace missing values with the most frequent category
# +
imputer = CategoricalImputer(
imputation_method='frequent',
variables=['Alley', 'MasVnrType'])
imputer.fit(X_train)
# +
# in this attribute we find the most frequent category
# per variable to impute
imputer.imputer_dict_
# +
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)
# let's count the number of observations per category
# in the original train set
X_train['MasVnrType'].value_counts()
# +
# note that we have a few more observations in the
# most frequent category, which for this variable
# is 'None'
train_t['MasVnrType'].value_counts()
# -
# See how the number of observations for None in MasVnrType has increased from 609 to 614, thanks to replacing the NA with this label in the dataset.
# ## Automatically select categorical variables
#
# When no variable list is passed when calling the imputer, all categorical variables will be selected by the imputer
# +
# create an instance to impute all categorical variables
# with the most frequent category
imputer = CategoricalImputer(imputation_method='frequent')
# with fit, the transformer identifies the categorical variables
# in the train set
imputer.fit(X_train)
# here we find a dictionary with the categorical variables
# to imputation value: most frequent category
imputer.imputer_dict_
# +
# with transform we remove missing data
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)
# missing values in categorical variables in original train set
X_train[imputer.variables_].isnull().mean()
# -
# no NA after the imputation in the categorical variables
train_t[imputer.variables_].isnull().mean()
# ## Impute with user defined string
#
# The user can also enter a specific string for imputation (instead of the default 'Missing'
# +
imputer = CategoricalImputer(variables='MasVnrType',fill_value="this_is_missing")
# we can fit and transform the train set
train_t = imputer.fit_transform(X_train)
# and then transform the test set
test_t = imputer.transform(X_test)
# -
# the string to be used for the imputation
imputer.fill_value
# +
# after the imputation we see the new category
test_t['MasVnrType'].value_counts().plot.bar()
# +
# which was not present in the original dataset
X_test['MasVnrType'].value_counts().plot.bar()
# -
| imputation/CategoricalImputer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
agents = pd.read_csv("/Users/alexanderkell/Documents/SGI/Projects/11-starter-kits/data/external/muse_data/default/technodata/Agents.csv")
agents
# +
# Describe the expected MUSE input layout as one nested mapping:
# folder -> (file | sector -> csv name).  Built as a single literal so the
# whole structure is visible at a glance; key insertion order matches the
# original step-by-step construction.
muse_data = {
    "inputs": {"GlobalCommodities": "global_commodities"},
    "technodata": {
        "Agents": "agents",
        "power": {
            "ExistingCapacity": "existing_capacity",
            "Technodata": "technodata",
            "CommIn": "commin",
            "CommOut": "commout",
        },
        "oil": {
            "Technodata": "technodata",
            "CommIn": "commin",
            "CommOut": "commout",
            "ExistingCapacity": "existingcaapcity",  # (sic) original spelling preserved
        },
    },
}
muse_data
# -
results_data = muse_data
# Walk the nested mapping: an entry whose value is a plain string is a
# single file ("hi"); a dict value is a sector whose members are iterated
# ("Ho").  Placeholder prints mark the two branches.
for folder in results_data:
    for sector in results_data[folder]:
        entry = results_data[folder][sector]
        if isinstance(entry, str):  # idiomatic type check (was `type(...) is str`)
            print("hi")
        else:
            for csv in entry:
                print("Ho")
| notebooks/.ipynb_checkpoints/5.0-ajmk-agents-file-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the IMDB top-rated movies sample dataset from its short URL.
movies=pd.read_csv("http://bit.ly/imdbratings")
movies.head()
movies.shape
movies[movies['duration']>=200]                  # boolean-mask filter: movies 200+ minutes long
movies.loc[movies.duration>=200,'title']         # same filter, selecting only the title column
movies['duration'].sort_values()                 # durations in ascending order
| Lecture Assignments/Lec8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="3yFpBwmNz70v"
# # Regresión con Árboles de Decisión
# + [markdown] colab_type="text" id="v8OxSXXSz-OP"
# # Cómo importar las librerías
#
# + colab={} colab_type="code" id="edZX51YLzs59"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] colab_type="text" id="8XfXlqtF0B58"
# # Importar el data set
#
# + colab={} colab_type="code" id="-nnozsHsz_-N"
dataset = pd.read_csv('Position_Salaries.csv')
# Column 1 (level) as a 2-D feature matrix; column 2 (salary) as the target.
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# -
dataset.head(10)
# Solo tenemos 10 datos en el dataset.
print(X)
print(y)
# + [markdown] colab_type="text" id="SsVEdPzf4XmV"
# # Dividir el data set en conjunto de entrenamiento y conjunto de testing
#
# + colab={} colab_type="code" id="v9CtwK834bjy"
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
"""
# + [markdown] colab_type="text" id="5AH_uCEz68rb"
# # Escalado de variables
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="oeuAy8LI69vi" outputId="10346439-d6ac-4abd-b5bb-033e9a284716"
"""
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
"""
# + [markdown] colab_type="text" id="cgweTaJ67BOB"
# # Ajustar la regresión con el dataset
#
# [DecisionTreeRegressor()](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
# + colab={} colab_type="code" id="2AwTWELX7DZQ"
from sklearn.tree import DecisionTreeRegressor
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="9xFOzJL77Fm4" outputId="4a6494d7-5a6e-4eed-e3f4-efa27b8317f8"
regression = DecisionTreeRegressor(random_state = 0)
regression.fit(X, y)
# -
# No pusimos ninguna cota para el número de hojas ni para la cantidad de datos que tiene que contener cada hoja.
regression.fit(X, y).get_n_leaves()
# Se aprecia que el número de hojas o niveles es igual al número de datos que contiene el dataset.
# + [markdown] colab_type="text" id="je3kcRlG7JV5"
# # Predicción de nuestros modelos
# + colab={} colab_type="code" id="HS-M9s587Kj3"
y_pred = regression.predict([[6.5]])
print(y_pred)
# + [markdown] colab_type="text" id="JnMLSqzW8NH7"
# # Visualización de los resultados del Modelo Polinómico
#
# + colab={} colab_type="code" id="1qZ3wRR08Oar"
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color = "red")
plt.plot(X, regression.predict(X), color = "blue")
plt.title("Modelo de Regresión")
plt.xlabel("Posición del empleado")
plt.ylabel("Sueldo (en $)")
plt.show()
# -
# Vemos que el modelo pasa por todos los puntos, pero en realidad, no vemos las fronteras de las hojas.
#
# El problema es qué ocurre cuando queremos predecir el salario de un empleado con un nivel mayor a 10.5, por ejemplo 11; habrá problemas.
plt.scatter(X, y, color = "red")
plt.plot(X_grid, regression.predict(X_grid), color = "blue")
plt.title("Modelo de Regresión")
plt.xlabel("Posición del empleado")
plt.ylabel("Sueldo (en $)")
plt.show()
# Intuitivamente, vemos que las fronteras son los puntos medios entre los puntos del dataset.
#
# [plot.tree()](https://scikit-learn.org/stable/modules/generated/sklearn.tree.plot_tree.html)
from sklearn import tree
clf = regression.fit(X, y)
text_representation = tree.export_text(clf)
print(text_representation)
fig = plt.figure(figsize=(25,20))
fig = tree.plot_tree(clf,
filled=True)
| datasets/Part 2 - Regression/Section 8 - Decision Tree Regression/decission_tree_regression_new_version.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# pip install Keras
# +
# pip install tensorflow
# +
# Import libraries
import pandas as pd
import librosa
import numpy as np
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
import os
from tqdm import tqdm
import IPython.display as ipd # To play sound in the notebook
import random
import json
# ignore warnings
import warnings
warnings.simplefilter('ignore')
# -
# Keras
import keras
from keras import regularizers
from keras.preprocessing import sequence
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Embedding, LSTM
from keras.layers import Input, Flatten, Dropout, Activation, BatchNormalization
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.utils import np_utils, to_categorical
from keras.callbacks import ModelCheckpoint
from keras.callbacks import History, ReduceLROnPlateau, CSVLogger
# sklearn
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
df = pd.read_csv("Data_path.csv")
print(df.shape, '\n')
print(df.labels.value_counts(), '\n')
df.head()
plt.figure(figsize=(8, 8))
sns.countplot('labels', data=df)
plt.title('Balanced Labels')
plt.show()
# ## Undersample the data
# +
# Shuffle the Dataset
# Shuffle the dataset so the per-class draws below are random.
shuffled_df = df.sample(frac=1,random_state=4)
# Keep every positive and neutral observation (the minority classes).
positive_df = shuffled_df.loc[shuffled_df['labels'] == 'positive']
neutral_df = shuffled_df.loc[shuffled_df['labels'] == 'neutral']
# Randomly select 2575 observations from the negative label (majority class)
# so the three classes end up roughly balanced.
negative_df = shuffled_df.loc[shuffled_df['labels'] == 'negative'].sample(n=2575,random_state=42)
# Concatenate all three dataframes again
normalized_df = pd.concat([negative_df, positive_df, neutral_df])
# Plot the class distribution after the undersampling.
# NOTE(review): seaborn >= 0.12 requires countplot(x='labels', ...); the
# positional form below only works on older seaborn versions -- confirm pin.
plt.figure(figsize=(8, 8))
sns.countplot('labels', data=normalized_df)
plt.title('Balanced Labels')
plt.show()
# -
print(normalized_df.shape)
#normalized_df.to_csv("NormData_path.csv",index=False)
# ## Getting the features of audio files using librosa
# +
df = pd.read_csv("NormData_path.csv")
data = pd.DataFrame(columns=['feature'])
input_duration = 4
for i in tqdm(range(len(df))):
X, sample_rate = librosa.load(df.path[i],
res_type='kaiser_fast',
duration=input_duration,
sr=22050*2,
offset=0.5)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
feature = mfccs
data.loc[i] = [feature]
# -
data.head()
df_new = pd.DataFrame(data['feature'].values.tolist())
labels = df.labels
df_new.head()
newdf = pd.concat([df_new,labels], axis=1)
rnewdf = newdf.rename(index=str, columns={"0": "label"})
len(rnewdf)
rnewdf.head(10)
rnewdf.isnull().sum().sum()
rnewdf = rnewdf.fillna(0)
print(rnewdf.shape)
rnewdf.head()
# ## Data Augmentation
# +
def plot_time_series(data):
    """Plot raw audio amplitude against a normalized 0-1 time axis."""
    plt.figure(figsize=(14, 8))
    plt.title('Raw wave ')
    plt.ylabel('Amplitude')
    # x-axis is normalized time: one tick per sample, spread over [0, 1]
    plt.plot(np.linspace(0, 1, len(data)), data)
    plt.show()
def noise(data):
    """Add white noise to a signal; the result is always float64.

    The noise amplitude is 0.5% of the signal's peak, scaled by a random
    factor drawn uniformly from [0, 1).
    """
    amplitude = 0.005 * np.random.uniform() * np.amax(data)
    white = np.random.normal(size=data.shape[0])
    return data.astype('float64') + amplitude * white
def shift(data):
    """Circularly shift a signal by a random offset of up to ~2500 samples."""
    offset = int(np.random.uniform(low=-5, high=5) * 500)
    # np.roll wraps around, so no samples are lost
    return np.roll(data, offset)
def stretch(data, rate=0.8):
    """Time-stretch an audio signal by `rate` without changing its pitch.

    rate < 1 slows the audio down (longer output); the default 0.8 yields a
    signal roughly 1/0.8 times as long.

    NOTE(review): librosa >= 0.10 requires the keyword form
    time_stretch(data, rate=rate); the positional call below only works on
    older librosa versions -- confirm the pinned version.
    """
    data = librosa.effects.time_stretch(data, rate)
    return data
def pitch(data, sample_rate):
    """Pitch-shift an audio signal upward by a random amount.

    With 12 bins per octave, n_steps is in semitones; the shift drawn here
    is pitch_pm * 2 * U[0,1) = up to +4 semitones.

    NOTE(review): librosa >= 0.10 makes `sr` keyword-only for pitch_shift;
    the positional call below only works on older librosa versions.
    """
    bins_per_octave = 12
    pitch_pm = 2
    pitch_change = pitch_pm * 2*(np.random.uniform())
    data = librosa.effects.pitch_shift(data.astype('float64'),
                                       sample_rate, n_steps=pitch_change,
                                       bins_per_octave=bins_per_octave)
    return data
def dyn_change(data):
    """Scale the whole signal by one random gain factor drawn from [1.5, 3)."""
    gain = np.random.uniform(low=1.5, high=3)
    return data * gain
def speedNpitch(data):
    """Speed-and-pitch augmentation: resample by a random factor in
    (1.0, 1.25] and zero-pad back to the original length.

    Fixes over the original version:
    * no longer mutates the caller's array in place (`data *= 0` used to
      clobber the input buffer),
    * resampled values are kept as float64 instead of being truncated when
      the input array has an integer dtype.

    Parameters
    ----------
    data : np.ndarray
        1-D audio samples.

    Returns
    -------
    np.ndarray
        New float64 array with the same length as `data`.
    """
    # speed factor is 1/U with U ~ Uniform[0.8, 1) -> resampled signal is
    # never longer than the input
    length_change = np.random.uniform(low=0.8, high=1)
    speed_fac = 1.0 / length_change
    tmp = np.interp(np.arange(0, len(data), speed_fac),
                    np.arange(0, len(data)), data)
    out = np.zeros(data.shape[0], dtype=np.float64)
    minlen = min(data.shape[0], tmp.shape[0])
    out[:minlen] = tmp[:minlen]
    return out
# +
# Augmentation Method 1
syn_data1 = pd.DataFrame(columns=['feature', 'labels'])
for i in tqdm(range(len(df))):
X, sample_rate = librosa.load(df.path[i],
res_type='kaiser_fast',
duration=input_duration,
sr=22050*2,
offset=0.5)
if df.labels[i]:
X = noise(X)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
feature = mfccs
a = random.uniform(0, 1)
syn_data1.loc[i] = [feature, df.labels[i]]
# +
# Augmentation Method 2
syn_data2 = pd.DataFrame(columns=['feature', 'labels'])
for i in tqdm(range(len(df))):
X, sample_rate = librosa.load(df.path[i],
res_type='kaiser_fast',
duration=input_duration,
sr=22050*2,
offset=0.5)
if df.labels[i]:
X = pitch(X, sample_rate)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
feature = mfccs
a = random.uniform(0, 1)
syn_data2.loc[i] = [feature, df.labels[i]]
# -
len(syn_data1), len(syn_data2)
syn_data1 = syn_data1.reset_index(drop=True)
syn_data2 = syn_data2.reset_index(drop=True)
df2 = pd.DataFrame(syn_data1['feature'].values.tolist())
labels2 = syn_data1.labels
syndf1 = pd.concat([df2,labels2], axis=1)
syndf1 = syndf1.rename(index=str, columns={"0": "label"})
syndf1 = syndf1.fillna(0)
len(syndf1)
syndf1.head()
df2 = pd.DataFrame(syn_data2['feature'].values.tolist())
labels2 = syn_data2.labels
syndf2 = pd.concat([df2,labels2], axis=1)
syndf2 = syndf2.rename(index=str, columns={"0": "label"})
syndf2 = syndf2.fillna(0)
len(syndf2)
syndf2.head()
# Combining the Augmented data with original
combined_df = pd.concat([rnewdf, syndf1, syndf2], ignore_index=True)
combined_df = combined_df.fillna(0)
combined_df.head()
# +
# Stratified Shuffle Split
X = combined_df.drop(['labels'], axis=1)
y = combined_df.labels
xxx = StratifiedShuffleSplit(1, test_size=0.2, random_state=12)
for train_index, test_index in xxx.split(X, y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
# -
y_train.value_counts()
# Checking for NaN values
X_train.isna().sum().sum()
# +
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
lb = LabelEncoder()
y_train = np_utils.to_categorical(lb.fit_transform(y_train))
y_test = np_utils.to_categorical(lb.fit_transform(y_test))
print(X_train.shape)
print(lb.classes_)
# +
# Pickel the lb object for future use
#filename = 'labels'
#outfile = open(filename,'wb')
#pickle.dump(lb,outfile)
#outfile.close()
# -
# ## Changing dimension for CNN model
x_traincnn = np.expand_dims(X_train, axis=2)
x_testcnn = np.expand_dims(X_test, axis=2)
# +
# Set up Keras util functions
from keras import backend as K
def precision(y_true, y_pred):
    """Batch-wise precision: true positives / predicted positives (Keras backend)."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon guards against division by zero when nothing is predicted positive
    return true_pos / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives / actual positives (Keras backend)."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero when there are no positives
    return true_pos / (actual_pos + K.epsilon())
#def fscore(y_true, y_pred):
# if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
# return 0
#
# p = precision(y_true, y_pred)
# r = recall(y_true, y_pred)
# f_score = 2 * (p * r) / (p + r + K.epsilon())
# return f_score
def get_lr_metric(optimizer):
    """Build a Keras metric that reports the optimizer's current learning rate.

    The inner function is deliberately named `lr`: Keras uses the function's
    __name__ as the metric's display name in the training logs.
    """
    def lr(y_true, y_pred):
        # the (y_true, y_pred) signature is required by Keras; both are ignored
        return optimizer.lr
    return lr
# -
# New model
model = Sequential()
model.add(Conv1D(256, 8, padding='same',input_shape=(X_train.shape[1],1)))
model.add(Activation('relu'))
model.add(Conv1D(256, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
# Edit according to target class no.
model.add(Dense(3))
model.add(Activation('softmax'))
opt = keras.optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
# Plotting Model Summary
model.summary()
# Compiling the Model
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# +
# Model Training
lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=20, min_lr=0.000001)
# Please change the model name accordingly.
model_save = ModelCheckpoint('Augmented_Model.h5', save_best_only=True, monitor='val_loss', mode='min')
cnnhistory=model.fit(x_traincnn, y_train, batch_size=16, epochs=500,
validation_data=(x_testcnn, y_test), callbacks=[model_save, lr_reduce])
# +
# Plotting the Train Valid Loss Graph
plt.plot(cnnhistory.history['loss'])
plt.plot(cnnhistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# +
# Saving the model.json
import json
model_json = model.to_json()
with open("MLModel.json", "w") as json_file:
json_file.write(model_json)
# +
# loading json and creating model
from keras.models import model_from_json
json_file = open('MLModel.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("Augmented_Model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
score = loaded_model.evaluate(x_testcnn, y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
# +
preds = loaded_model.predict(x_testcnn,
batch_size=16,
verbose=1)
preds=preds.argmax(axis=1)
preds
# -
# predictions
preds = preds.astype(int).flatten()
preds = (lb.inverse_transform((preds)))
preds = pd.DataFrame({'predicted_values': preds})
# Actual labels
test_valid_lb = np.array(df.labels)
lb = LabelEncoder()
test_valid_lb = np_utils.to_categorical(lb.fit_transform(test_valid_lb))
actual=test_valid_lb.argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (lb.inverse_transform((actual)))
actual = pd.DataFrame({'actual_values': actual})
# Lets combined both of them into a single dataframe
finaldf = actual.join(preds)
finaldf[170:180]
finaldf.groupby('predicted_values').count()
| audio_analysis/Notebooks/ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests as req
from bs4 import BeautifulSoup
url = "https://www.flipkart.com/search?q=tshirt&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off"
page = req.get(url)
soup = BeautifulSoup(page.text,"html.parser")
container = soup.find('div',{"class":"_1HmYoV _35HD7C"})
p_rows = container.find_all('div',{"class":"bhgxx2"})
images = container.find_all('img')
images = soup.find_all('img')
images[14]
# +
## Selenium Way
# -
from selenium import webdriver
import time
# + active=""
# browser = webdriver.Chrome(executable_path =r"C:\Users\lodha\Downloads\Software Installers\chromedriver.exe")
# -
browser.get(url)
images = browser.find_elements_by_tag_name('img')
len(images)
import pandas as pd
df = pd.read_csv("flipkart_img_urls.csv")
image_urls = list(df['0'].values)
# +
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
browser = webdriver.Chrome(executable_path =r"C:\Users\lodha\Downloads\Software Installers\chromedriver.exe")
image_urls = []
url_tshirt = "https://www.flipkart.com/search?q=tshirt&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off&page={}"
url_croptop = "https://www.flipkart.com/womens-clothing/pr?sid=2oq%2Cc1r&q=tshirt&p[]=facets.serviceability%5B%5D%3Dtrue&p[]=facets.type%255B%255D%3DCrop%2BTop&otracker=categorytree&page={}"
url_shirt = "https://www.flipkart.com/men/shirts/pr?sid=2oq%2Cs9b%2Cmg4&otracker=nmenu_sub_Men_0_Shirts&sort=popularity&p%5B%5D=facets.serviceability%5B%5D%3Dtrue&page={}"
browser.get("https://www.flipkart.com/men/shirts/pr?sid=2oq%2Cs9b%2Cmg4&otracker=nmenu_sub_Men_0_Shirts&sort=popularity&p%5B%5D=facets.serviceability%5B%5D%3Dtrue&page=2")
# -
for i in range(1,40):
url = url_shirt.format(i)
browser.get(url)
browser.implicitly_wait(60)
images = browser.find_elements_by_tag_name('img')
for img in images:
img_url = img.get_property("src")
if(img_url.find(".svg")==-1 and img_url.find("data:")==-1):
image_urls.append(img_url)
print(len(image_urls),end='\r')
# +
import pandas as pd
name=r"D:\Projects\Fashion.io\Flipkart_Scrapper\crop_top.csv"
df = pd.DataFrame(image_urls)
df.to_csv(name)
# -
# ### Actually Downloading images
# +
import pandas as pd
name=r"D:\Projects\Fashion.io\Flipkart_Scrapper\crop_top.csv"
df = pd.read_csv(name)
# -
print("Number of Image Samples",len(df))
df.drop_duplicates(inplace=True)
print("Number of Unique Image Samples",len(df))
links = df["0"].values
links = [link for link in links if link.find("jpeg")!=-1]
links = [[i,link] for i,link in enumerate(links)]
# +
from multiprocessing.pool import ThreadPool
import shutil
from time import time as timer
import requests
from keras_retinanet.utils.image import read_image_bgr
def download_image(link):
    """Download one image URL and save it as <index>.jpeg in the crop_top folder.

    Parameters
    ----------
    link : tuple
        (index used for the filename, image URL).

    Returns
    -------
    str
        The local path the image was (or would have been) written to; the
        file is only created when the server answers 200.
    """
    path, url = link
    img_path = r"D:\Projects\Fashion.io\Flipkart_Scrapper\crop_top\{}.jpeg".format(path)
    r = requests.get(url)
    if r.status_code == 200:
        # context manager guarantees the handle closes even if the write fails
        with open(img_path, 'wb') as f:
            f.write(r.content)
    return img_path
start = timer()
results = ThreadPool(8).imap_unordered(download_image, links)
for path in results:
print(path,end='\r')
print(f"Elapsed Time: {timer() - start}")
# -
| Flipkart Scraper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''hyeon'': virtualenv)'
# metadata:
# interpreter:
# hash: 28792d561f34874995b7b45a59a55237c48166ddd3475ee9b4e7eab648147b06
# name: python3
# ---
# pyforest demo: common data-science libraries are registered lazily and
# only actually imported on first use.
import pyforest
all_libs = dir(pyforest)
active_libs = pyforest.active_imports()
laze_libs = pyforest.lazy_imports()
print('==================')
print('All imported libs \n',all_libs)
print('==================')
print('==================')
print('Active libs \n',len(active_libs))
print('==================')
print('Laze libs \n',len(laze_libs))
print('==================')
# Touching np / plt / pd / sns below triggers the real imports.
np_array = np.random.randn(100)
plt.plot(np_array)
pd_dataframe = pd.DataFrame(np_array.reshape(10,10))
sns.heatmap(pd_dataframe)
# After use, those libraries move from the lazy list to the active list.
active_libs = pyforest.active_imports()
laze_libs = pyforest.lazy_imports()
print('==================')
print('Active libs \n',len(active_libs))
print('==================')
print('Laze libs \n',len(laze_libs))
print('==================')
| assets/examples/pyforest_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from operator import mul
from scipy.special import binom
from functools import reduce
from itertools import product
from math import factorial
def num_partitions(k, n):
    """Count the partitions of n into exactly k positive parts.

    Classic recurrence p(k, n) = p(k, n-k) + p(k-1, n-1): either every part
    is >= 2 (subtract 1 from each of the k parts), or some part equals 1
    (drop it).
    """
    if k == 0:
        return 1 if n == 0 else 0
    if k < 0 or n <= 0:
        return 0
    return num_partitions(k, n - k) + num_partitions(k - 1, n - 1)
def partitions(n, max_length):
    """Enumerate compositions (ordered partitions) of n with 2..max_length parts.

    Grows compositions of increasing total one unit at a time: each step
    either bumps the last part or (if room remains) opens a new part of 1.
    """
    current = [[1]]
    for _ in range(n - 1):
        grown = []
        for comp in current:
            # absorb the unit into the last part...
            grown.append(comp[:-1] + [comp[-1] + 1])
            # ...or start a fresh part of size 1
            if len(comp) < max_length:
                grown.append(comp + [1])
        current = grown
    # single-part compositions are deliberately excluded
    return [comp for comp in current if len(comp) > 1]
def fixed_length_partitions(n, num_partitions, min_value=1):
    """Enumerate ordered ways to split n into exactly num_partitions parts.

    The first num_partitions-1 parts are each at least min_value; the bound
    only reserves 1 unit per remaining slot, so the final (remainder) part
    can fall below min_value.
    """
    assert n >= num_partitions
    prefixes = [[]]
    for slot in range(num_partitions - 1):
        grown = []
        for prefix in prefixes:
            budget = n - sum(prefix)
            # keep at least one unit for each slot still to be filled
            upper = budget + 1 - (num_partitions - 1 - slot)
            grown.extend(prefix + [part] for part in range(min_value, upper))
        prefixes = grown
    # the last part is whatever remains
    for prefix in prefixes:
        prefix.append(n - sum(prefix))
    return prefixes
def ordered_subsets(n, max_length):
    """Yield every strictly increasing sequence over 1..n with up to
    max_length elements, in breadth-first order by length."""
    frontier = [[]]
    while True:
        extendable = [seq for seq in frontier if len(seq) < max_length]
        if not extendable:
            return
        frontier = []
        for seq in extendable:
            # children must stay strictly increasing
            start = seq[-1] + 1 if seq else 1
            for nxt in range(start, n + 1):
                yield seq + [nxt]
                frontier.append(seq + [nxt])
def count_programs(arities, predicates_with_arity, num_variables, num_constants, max_num_nodes, max_num_clauses):
    """Count candidate logic programs for the given signature.

    Arguments (as used below and in the calling cell):
      arities: list of available predicate arities.
      predicates_with_arity: predicates_with_arity[i] = number of predicates
        having arity arities[i]; head predicates are indexed across all of them.
      num_variables / num_constants: symbols available as arguments.
      max_num_nodes: max size of a clause-body expression tree (see T/C).
      max_num_clauses: max total number of clauses in a program.

    NOTE(review): the exact combinatorial model (what T, P and C count) is
    inferred from structure only -- confirm against the accompanying notes.
    """
    num_predicates = sum(predicates_with_arity)
    def arity(a):
        'The arity of predicate indexed at a'
        # walk the per-arity counts until index a lands in bucket i
        i = 0
        while (a + 1 > predicates_with_arity[i]):
            a -= predicates_with_arity[i]
            i += 1
        return arities[i]
    def P(n):
        # number of ways to fill n argument slots: all-constants, plus each
        # placement of an increasing variable subset among the slots, with
        # constants (or earlier variables) filling the gaps
        t = num_constants**n
        for s in ordered_subsets(n, num_variables):
            s = [0] + s + [n+1]
            t += reduce(mul, [(num_constants + i)**(s[i+1] - s[i] - 1) for i in range(len(s) - 1)], 1)
        #print('P(' + str(n) + ') =', t)
        return t
    def T(n, a):
        # number of body trees with n nodes and total arity a
        if n == 1:
            return predicates_with_arity[arities.index(a)] if a in arities else 0
        s = 0
        # split the remaining n-1 nodes and the arity budget across children.
        # NOTE(review): a / min(arities) is FLOAT division; it is only used
        # as an upper bound compared against len(...), so the code still
        # works, but an integer bound (a // min(arities)) was likely intended.
        for partition1 in partitions(n-1, a / min(arities) if min(arities) > 0 else float('inf')):
            for partition2 in fixed_length_partitions(a, len(partition1), min(arities)):
                s += reduce(mul, [T(k, l) for k, l in zip(partition1, partition2)], 1)
        return T(n-1, a) + 2 * s
    def C(a):
        # bodies of any size up to max_num_nodes with total arity a
        if a == 0:
            return 1
        return sum(T(n, a) for n in range(1, max_num_nodes + 1))
    # sum over every way to distribute the clause budget across head predicates
    s = 0
    for n in range(num_predicates, max_num_clauses + 1):
        for partition in fixed_length_partitions(n, num_predicates):
            m = 1
            for i,h in enumerate(partition):
                t = 0
                for a in range(max(arities) * max_num_nodes + 1):
                    foo = int(C(a) * P(a + arity(i)))
                    #print('arity', a, 'gets', foo, 'possibilities')
                    t += foo
                # choose h distinct clauses for head predicate i
                m *= int(binom(t, h))
                #print(partition, m)
            s += m
    return s
# +
arities = [[1], [2], [3], [2, 1], [4], [3, 1]]
r = list(range(1, 5))
predicates_with_arity = {1: r, 2: list(product(r, r)), 3: list(product(r, r, r)), 4: list(product(r, r, r, r))}
num_variables = r
num_constants = range(0, 4)
max_num_nodes = r
MAX = 100000
f = open('../data/program_counts.csv', 'w+')
for arity in arities:
for pred in predicates_with_arity[len(arity)]:
if isinstance(pred, tuple):
pred = list(pred)
elif not isinstance(pred, list):
pred = [pred]
num_pred = sum(pred)
for num_var in num_variables:
for num_const in num_constants:
for max_nodes in max_num_nodes:
for max_clauses in range(num_pred, num_pred + 6):
count = count_programs(arity, pred, num_var, num_const, max_nodes, max_clauses)
if count > MAX:
break
d = [arity, pred, num_var, num_const, max_nodes, max_clauses, count]
s = ';'.join([str(t) for t in d])
f.write(s+'\n')
f.close()
# -
# Smoke checks for the helpers defined above.
count_programs([1], [1], 1, 1, 1, 2)
num_partitions(3, 2)
# Fixed NameError: the generator defined above is `ordered_subsets` (plural).
list(ordered_subsets(3, 2))
| scripts/Counting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2 # computer vision library
import helper
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# -
# Image data directories
image_dir_training = "../Data/Day-Night-Train/"
image_dir_test = "../Data/Day-Night-Test/"
# Using the load_dataset function in helper.py
# Load training data
IMAGE_LIST = helper.load_dataset(image_dir_training)
# Standardize all training images
STANDARDIZED_LIST = helper.standardize(IMAGE_LIST)
# +
# Display a standardized image and its label
# Select an image by index
image_num = 0
selected_image = STANDARDIZED_LIST[image_num][0]
selected_label = STANDARDIZED_LIST[image_num][1]
# Display image and data about it
plt.imshow(selected_image)
print("Shape: "+str(selected_image.shape))
print("Label [1 = day, 0 = night]: " + str(selected_label))
# -
# Find the average Value or brightness of an image
def avg_brightness(rgb_image):
    """Return the average V-channel (HSV "Value") of an RGB image.

    NOTE(review): the area is hard-coded to 600x1100 pixels, so this is a
    true per-pixel mean only for images standardized to that size -- confirm
    against helper.standardize().
    """
    # Convert image to HSV
    hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
    # Add up all the pixel values in the V channel
    sum_brightness = np.sum(hsv[:,:,2])
    area = 600*1100.0 # pixels
    # find the avg
    avg = sum_brightness/area
    return avg
# +
# Testing average brightness levels
# Look at a number of different day and night images and think about
# what average brightness value separates the two types of images
# As an example, a "night" image is loaded in and its avg brightness is displayed
image_num = 190
test_im = STANDARDIZED_LIST[image_num][0]
avg = avg_brightness(test_im)
print('Avg brightness: ' + str(avg))
plt.imshow(test_im)
# -
# ## Testing the classifier
#
# Here is where we test your classification algorithm using our test set of data that we set aside at the beginning of the notebook!
#
# Since we are using a pretty simple brightness feature, we may not expect this classifier to be 100% accurate. We'll aim for around 75-85% accuracy using this one feature.
#
#
# ### Test dataset
#
# Below, we load in the test dataset, standardize it using the `standardize` function you defined above, and then **shuffle** it; this ensures that order will not play a role in testing accuracy.
# +
import random
# Using the load_dataset function in helpers.py
# Load test data
TEST_IMAGE_LIST = helper.load_dataset(image_dir_test)
# Standardize the test data
STANDARDIZED_TEST_LIST = helper.standardize(TEST_IMAGE_LIST)
# Shuffle the standardized test data
random.shuffle(STANDARDIZED_TEST_LIST)
# -
# ## Determine the Accuracy
#
# Compare the output of your classification algorithm (a.k.a. your "model") with the true labels and determine the accuracy.
#
# This code stores all the misclassified images, their predicted labels, and their true labels, in a list called `misclassified`.
# Constructs a list of misclassified images given a list of test images and their labels
def get_misclassified_images(test_images):
    """Collect every test image the classifier labels incorrectly.

    Each element of `test_images` is indexed as (image, true_label, ...);
    the result is a list of (image, predicted_label, true_label) triples.

    NOTE(review): depends on estimate_label(), defined in another notebook
    cell -- confirm that cell runs before this one.
    """
    wrong = []
    for entry in test_images:
        img, truth = entry[0], entry[1]
        guess = estimate_label(img)
        # keep only the disagreements
        if guess != truth:
            wrong.append((img, guess, truth))
    return wrong
# +
# Find all misclassified images in a given test set
MISCLASSIFIED = get_misclassified_images(STANDARDIZED_TEST_LIST)
# Accuracy calculations
total = len(STANDARDIZED_TEST_LIST)
num_correct = total - len(MISCLASSIFIED)
accuracy = num_correct/total
print('Accuracy: ' + str(accuracy))
print("Number of misclassified images = " + str(len(MISCLASSIFIED)) +' out of '+ str(total))
# -
# ---
# <a id='task9'></a>
# ### TO-DO: Visualize the misclassified images
#
# Visualize some of the images you classified wrong (in the `MISCLASSIFIED` list) and note any qualities that make them difficult to classify. This will help you identify any weaknesses in your classification algorithm.
# +
# Visualize misclassified example(s)
num = 0
test_mis_im = MISCLASSIFIED[num][0]
## TODO: Display an image in the `MISCLASSIFIED` list
## TODO: Print out its predicted label -
## to see what the image *was* incorrectly classified as
# -
# ---
# <a id='question2'></a>
# ## (Question): After visualizing these misclassifications, what weaknesses do you think your classification algorithm has?
# **Answer:** Write your answer, here.
# # 5. Improve your algorithm!
#
# * (Optional) Tweak your threshold so that accuracy is better.
# * (Optional) Add another feature that tackles a weakness you identified!
# ---
#
| Image-Representation-and-Classification/Day-Night-Classifier-Part-5.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # Getting started
#
# Topics:
# 1. How to print
# 2. How to assign variables
# 3. How to comment
# 4. Syntax for basic math
# ## How to print
#
# In Julia we usually use `println()` to print
println("I'm excited to learn Julia!")
# ## How to assign variables
#
# All we need is a variable name, value, and an equal's sign!<br>
# Julia will figure out types for us.
my_answer = 42
typeof(my_answer)
my_pi = 3.14159
typeof(my_pi)
😺 = "smiley cat!"
typeof(😺)
# To type a smiley cat, use tab completion to select the emoji name and then tab again
# +
# \:smi + <tab> --> select with down arrow + <enter> ---> <tab> + <enter> to complete
# -
# After assigning a value to a variable, we can reassign a value of a different type to that variable without any issue.
😺 = 1
typeof(😺)
# Note: Julia allows us to write super generic code, and 😺 is an example of this.
#
# This allows us to write code like
😀 = 0
😞 = -1
😺 + 😞 == 😀
# ## How to comment
# +
# You can leave comments on a single line using the pound/hash key
# +
#=
For multi-line comments,
use the '#= =#' sequence.
=#
# -
# ## Syntax for basic math
sum = 3 + 7
difference = 10 - 3
product = 20 * 5
quotient = 100 / 10
power = 10 ^ 2
modulus = 101 % 2
# ### Exercises
#
# #### 1.1
# Look up docs for the `convert` function.
# #### 1.2
# Assign `365` to a variable named `days`. Convert `days` to a float and assign it to variable `days_float`
days=365
days_float =float(365)
# + deletable=false editable=false hide_input=true nbgrader={"checksum": "a2dc243275e0310c3b29a745b952f321", "grade": true, "grade_id": "cell-715f78016beb0489", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert days == 365
@assert days_float == 365.0
# -
# #### 1.3
# See what happens when you execute
#
# ```julia
# convert(Int64, "1")
# ```
# and
#
# ```julia
# parse(Int64, "1")
# ```
convert(Int64,'1')
parse(Int64, "1")
# Please click on `Validate` on the top, once you are done with the exercises.
| Julia102 - Getting started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (geodeep)
# language: python
# name: geodeep
# ---
# +
# Standard library
import collections
import math
import os
import pickle
import random

# Third-party
import numpy as np  # used by create_graph (np.ones); previously missing -> NameError
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from scipy.sparse import coo_matrix
from sklearn.neighbors import NearestNeighbors
import ipywidgets as widgets
from ipywidgets import interact, fixed
# +
def create_df(tumorList, stromaList, TILList1, TILList2, NK, MP,
              numtumor=500, numstroma=500, numTIL1=0, numTIL2=0, numNK=0, numMP=0):
    """Sample disjoint cell populations and return their positions and labels.

    Tumor and stroma cells are drawn first; each immune population (TIL1,
    TIL2, NK, MP) is then drawn from its candidate region minus every
    position already taken, so no two cells share a coordinate.

    Fix over the original: random.sample() refuses a set on Python 3.11+,
    so each set difference is materialized with sorted() first (sorted also
    keeps the population order deterministic for a given seed).

    Returns
    -------
    (df, pos): DataFrame with columns x / y / label, plus the flat list of
    (x, y) position tuples in the same row order.
    """
    df = pd.DataFrame(columns=['x', 'y', 'label'])
    pos = []
    x = []
    y = []
    label = []
    tumor = random.sample(tumorList, numtumor)
    stroma = random.sample(stromaList, numstroma)
    TIL1 = random.sample(sorted(set(TILList1) - set(tumor) - set(stroma)), numTIL1)
    TIL2 = random.sample(sorted(set(TILList2) - set(tumor) - set(stroma) - set(TIL1)), numTIL2)
    NK = random.sample(sorted(set(NK) - set(tumor) - set(stroma) - set(TIL1) - set(TIL2)), numNK)
    MP = random.sample(sorted(set(MP) - set(tumor) - set(stroma) - set(TIL1) - set(TIL2) - set(NK)), numMP)
    # keep only the non-empty populations, in a fixed label order
    groups = []
    names = []
    for cells, name in zip([tumor, stroma, TIL1, TIL2, NK, MP],
                           ['Tumor', 'Stroma', 'TIL1', 'TIL2', 'NK', 'MP']):
        if cells:
            groups.append(cells)
            names.append(name)
    for cells, labelName in zip(groups, names):
        pos.extend(cells)
        # transpose the (x, y) tuples into the x and y columns
        for idx, coords in enumerate(zip(*cells)):
            [x, y][idx].extend(coords)
        label.extend([labelName] * len(cells))
    df['x'] = x
    df['y'] = y
    df['label'] = label
    return df, pos
def create_graph(df, pos):
    """Build a radius-neighbourhood cell graph from the positions in df.

    Cells within 60 units of each other get an edge; each node carries its
    (x, y) position and its cell-type label as attributes.

    NOTE(review): this uses `np`, which this notebook's import cell does not
    bring in -- ensure `import numpy as np` has run first. Also,
    nx.from_scipy_sparse_matrix was removed in networkx 3.0
    (from_scipy_sparse_array is its replacement there) -- confirm the pin.
    """
    dfXY = df[['x', 'y']].copy()
    N = len(dfXY)
    # all neighbours within a 60-unit radius of every cell
    nn = NearestNeighbors(radius=60)
    nn.fit(dfXY)
    dists, ids = nn.radius_neighbors(dfXY)
    dists_ = [j for i in dists for j in i]
    ids_ = [j for i in ids for j in i]
    # generate row indices
    rows = [i for i, j in enumerate(ids) for k in j]
    # number of edges
    M = len(rows)
    w = np.ones(M)
    # complete matrix according to positions
    _W = coo_matrix((w, (rows, ids_)), shape=(N, N))
    coo_matrix.setdiag(_W, 0)
    # symmetrize so an edge is kept regardless of which endpoint found it
    _W = 1/2*(_W + _W.T)
    # create networkx graph
    G = nx.from_scipy_sparse_matrix(_W)
    for i in range(len(G.nodes)):
        G.nodes[i]['pos'] = pos[i]
        G.nodes[i]['cell_types'] = df['label'][i]
    return G
def add_data(id_, range_, nums=(1500, 1500, 0, 0, 0, 0), count=1):
    """Create `count` random tissue graphs for patient `id_` and store them.

    Args:
        id_: key into the module-level patientDict.
        range_: four [x_min, x_max, y_min, y_max] rectangles giving the
            candidate regions for TIL1, TIL2, NK and MP cells respectively.
        nums: per-type cell counts (tumor, stroma, TIL1, TIL2, NK, MP).
            Default is now an immutable tuple to avoid the mutable-default
            pitfall; callers passing lists are unaffected (it is only indexed).
        count: number of graphs to generate and append.

    Side effects:
        Appends `count` networkx graphs to patientDict[id_]; reads the
        module-level tumorList and stromaList.
    """
    TILList1 = [(x+1, y+1) for x in range(range_[0][0], range_[0][1]) for y in range(range_[0][2], range_[0][3])]
    TILList2 = [(x+1, y+1) for x in range(range_[1][0], range_[1][1]) for y in range(range_[1][2], range_[1][3])]
    NK = [(x+1, y+1) for x in range(range_[2][0], range_[2][1]) for y in range(range_[2][2], range_[2][3])]
    MP = [(x+1, y+1) for x in range(range_[3][0], range_[3][1]) for y in range(range_[3][2], range_[3][3])]
    for _ in range(count):
        df, pos = create_df(tumorList, stromaList, TILList1, TILList2, NK, MP,
                            numtumor=nums[0], numstroma=nums[1], numTIL1=nums[2], numTIL2=nums[3],
                            numNK=nums[4], numMP=nums[5])
        G = create_graph(df, pos)
        patientDict[id_].append(G)
# -
# # Data creation
# +
# set a fixed random seed for training (123) / val (124) / test (125)
random.seed(123)
# patient ids '0001' .. '0010'; each accumulates a list of graphs
patientKeys = [('{:0>4d}'.format(i+1)) for i in range(10)]
patientDict = collections.defaultdict(list)
# tumor cells occupy the left half of the 1000x1000 grid, stroma the right half
tumorList = [(x+1,y+1) for x in range(0, 500) for y in range(0, 1000)]
stromaList = [(x+1,y+1) for x in range(500, 1000) for y in range(0, 1000)]
# add similar graphs
# (3 baseline graphs per patient with tumor/stroma only, no immune cells)
for i in patientKeys:
    add_data(i, [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0] , [0, 0, 0, 0]], \
             nums=[500, 500, 0, 0, 0, 0], count=3)
# Each patch lists four [x_min, x_max, y_min, y_max] rectangles,
# one per immune cell type (TIL1, TIL2, NK, MP).
patch1 = [[425, 575, 0, 1000], [425, 575, 0, 1000], [0, 0, 0, 0], [0, 0, 0, 0]]
patch2 = [[0, 500, 0, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
patch3 = [[0, 500, 0, 1000], [0, 500, 0, 1000], [0, 0, 0, 0], [0, 0, 0, 0]]
patch4 = [[0, 500, 0, 1000], [0, 500, 0, 1000], [0, 0, 0, 0], [0, 1000, 0, 1000]]
patch5 = [[0, 500, 0, 1000], [0, 500, 0, 1000], [0, 1000, 0, 1000], [0, 1000, 0, 1000]]
# Matching per-patch cell counts (tumor, stroma, TIL1, TIL2, NK, MP).
num1, num2, num3, num4, num5 = [400, 400, 100, 100, 0, 0], [300, 300, 400, 0, 0, 0], \
                               [300, 300, 200, 200, 0, 0], [300, 300, 150, 150, 0, 100], \
                               [300, 300, 100, 100, 100, 100]
for fold in range(1):
    # add discriminative graphs
    # Each patient receives one of the 10 unordered pairs of the 5
    # (patch, num) configurations, enumerated in combination order.
    add_data(patientKeys[10*fold], patch1, num1)
    add_data(patientKeys[10*fold], patch2, num2)
    add_data(patientKeys[10*fold+1], patch1, num1)
    add_data(patientKeys[10*fold+1], patch3, num3)
    add_data(patientKeys[10*fold+2], patch1, num1)
    add_data(patientKeys[10*fold+2], patch4, num4)
    add_data(patientKeys[10*fold+3], patch1, num1)
    add_data(patientKeys[10*fold+3], patch5, num5)
    add_data(patientKeys[10*fold+4], patch2, num2)
    add_data(patientKeys[10*fold+4], patch3, num3)
    add_data(patientKeys[10*fold+5], patch2, num2)
    add_data(patientKeys[10*fold+5], patch4, num4)
    add_data(patientKeys[10*fold+6], patch2, num2)
    add_data(patientKeys[10*fold+6], patch5, num5)
    add_data(patientKeys[10*fold+7], patch3, num3)
    add_data(patientKeys[10*fold+7], patch4, num4)
    add_data(patientKeys[10*fold+8], patch3, num3)
    add_data(patientKeys[10*fold+8], patch5, num5)
    add_data(patientKeys[10*fold+9], patch4, num4)
    add_data(patientKeys[10*fold+9], patch5, num5)
# -
# # Visualization
# +
# import pickle
# with open(r'./data/patient_gumbel_test.pickle', 'rb') as handle:
# patientDict = pickle.load(handle)
# +
# Create widgets
# Dropdown selects the patient; slider selects one of that patient's graphs.
id_ = \
widgets.Dropdown(
    options = patientDict.keys(),
    description='Patient ID: '
)
graphs = widgets.IntSlider(
    min=0,
    max=len(patientDict[id_.value])-1,
    step=1,
    description='Graph Index: ',
    orientation='horizontal',
    continuous_update = False
)
# Update graph options based on patient id
def update_graphs(*args):
    # Keep the slider range in sync with the selected patient's graph count.
    graphs.max = len(patientDict[id_.value])-1
# Tie graph options to patient id
id_.observe(update_graphs, 'value')
# One matplotlib color per cell type, reused for nodes and the legend.
nodeColorsDict = {'Tumor': 'c', 'Stroma': 'y', 'TIL1': 'r', 'TIL2': 'b', 'NK': 'g', 'MP': 'orange'}
def graph_visualization(id_, graphs):
    """Draw graph `graphs` of patient `id_`, coloring nodes by cell type."""
    plt.figure(figsize = (8, 8))
    G = patientDict[id_][graphs]
    posDict = nx.get_node_attributes(G, 'pos')
    # Invisible one-point plots create legend handles for each cell type.
    for label in nodeColorsDict:
        plt.plot([0], [0], color=nodeColorsDict[label], label=label)
    nodeColorList = [nodeColorsDict[i] for i in list(nx.get_node_attributes(G, 'cell_types').values())]
    nx.draw_networkx(G, pos=posDict, with_labels=False, node_size=10, node_color=nodeColorList)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
_ = interact(graph_visualization, id_=id_, graphs=graphs)
# -
# # Save data
# +
# choose one out of three
# if not os.path.exists(r'./data/patient_gumbel_train.pickle'):
# with open(r'./data/patient_gumbel_train.pickle', 'wb') as handle:
# pickle.dump(patientDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# if not os.path.exists(r'./data/patient_gumbel_val.pickle'):
# with open(r'./data/patient_gumbel_val.pickle', 'wb') as handle:
# pickle.dump(patientDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# if not os.path.exists(r'./data/patient_gumbel_test.pickle'):
# with open(r'./data/patient_gumbel_test.pickle', 'wb') as handle:
# pickle.dump(patientDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
| Data-Generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Задача 1
# * Копирование файла, в текущей папке есть изображение img1.jpg, нужно скопировать это изображение и назвать img2.jpg
#
# Режимы открытия файла, можно прочитать здесь: https://www.w3schools.com/python/ref_func_open.asp
# +
# Task 1: copy img1.jpg -> img2.jpg.
# (shutil.copy() is the idiomatic one-liner, but copying by hand demonstrates
# the binary open modes.)
with open('img1.jpg', 'br') as fi:
    with open('img2.jpg', 'bw') as fo:
        fo.write(fi.read())

# Copy test.png -> test-processed.png in fixed-size chunks.
# The original read one byte at a time, which makes a read() call per byte;
# 64 KiB chunks produce a byte-identical output file far faster.
# (Dead experimental code kept as string literals has been removed.)
CHUNK_SIZE = 64 * 1024
with open('test.png', 'br') as fi:
    with open('test-processed.png', 'bw') as fo:
        chunk = fi.read(CHUNK_SIZE)
        while chunk:
            fo.write(chunk)
            chunk = fi.read(CHUNK_SIZE)
# -
# # Задача 2
# * В текущей директории лежит файл addresses.csv, прочитайте этот файл, выведите информацию о том сколько в нем записей(строк) и сколько признаков(колонок), добавьте каждой записи новую колонку country и запишите информацию в новый файл addresses2.csv
#
# Информация о writer - https://docs.python.org/3/library/csv.html
# +
import csv
import random

def _read_csv_rows(path):
    """Return every row of a CSV file as a list of lists.

    newline='' lets the csv module handle line endings itself, as the
    csv documentation requires for correct cross-platform behavior.
    """
    with open(path, 'r', newline='') as fin:
        return list(csv.reader(fin, delimiter=','))

data = _read_csv_rows('addresses.csv')
print('Rows: ', len(data), ', columns: ', len(data[0]), sep='')
print(data)

# Append a randomly chosen country to every record and write a new file.
countries = ['USA', 'Brazil', 'Germany', 'France']
with open('addresses2.csv', 'w', newline='') as fout:
    # lineterminator='\n' preserved so the output keeps Unix line endings
    csv_writer = csv.writer(fout, delimiter=',', lineterminator='\n')
    for row in data:
        # random.choice is the idiomatic way to pick one element
        csv_writer.writerow(row + [random.choice(countries)])

# Re-read the new file to confirm the extra column was added.
data = _read_csv_rows('addresses2.csv')
print('Rows: ', len(data), ', columns: ', len(data[0]), sep='')
print(data)
| module_001_python/lesson_005_os_files/student_tasks/HomeWork.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing and Analyzing Jigsaw
import pandas as pd
import re
import numpy as np
# In the previous section, we explored how to generate topics from a textual dataset using LDA. But how can this be used as an application?
#
# Therefore, in this section, we will look into the possible ways to read the topics as well as understand how it can be used.
# We will now import the preloaded data of the LDA result that was achieved in the previous section.
# Load the per-topic word table produced by the LDA notebook (fetched over HTTP).
df = pd.read_csv("https://raw.githubusercontent.com/dudaspm/LDA_Bias_Data/main/topics.csv")
df.head()
# We will visualize these results to understand what major themes are present in them.
# + tags=["hide-input"] language="html"
#
# <iframe src='https://flo.uri.sh/story/941631/embed' title='Interactive or visual content' class='flourish-embed-iframe' frameborder='0' scrolling='no' style='width:100%;height:600px;' sandbox='allow-same-origin allow-forms allow-scripts allow-downloads allow-popups allow-popups-to-escape-sandbox allow-top-navigation-by-user-activation'></iframe><div style='width:100%!;margin-top:4px!important;text-align:right!important;'><a class='flourish-credit' href='https://public.flourish.studio/story/941631/?utm_source=embed&utm_campaign=story/941631' target='_top' style='text-decoration:none!important'><img alt='Made with Flourish' src='https://public.flourish.studio/resources/made_with_flourish.svg' style='width:105px!important;height:16px!important;border:none!important;margin:0!important;'> </a></div>
# -
# ### An Overview of the analysis
# From the above visualization, an anomaly that we come across is that the dataset we are examining is supposed to be related to people with physical, mental, and learning disabilities. But unfortunately, based on the topics that were extracted, we notice just a small subset of words that are related to this topic.
# Topic 2 has words that address the themes we expected the dataset to contain. However, the major themes noticed in the Top 5 topics are predominantly political terms.
# (The Top 10 topics show themes related to Religion as well, which is quite interesting.)
# LDA hence helped in understanding what the conversations the dataset consisted.
# From the word collection, we also notice that there are certain words, such as 'kill', that can be categorized as 'Toxic'. To analyze this further, we can score each word with an NLP toxicity classifier.
# To demonstrate an example of a toxic analysis framework, the below code shows the working of the Unitary library in python. {cite}`Detoxify`
#
# This library provides a toxicity score (from a scale of 0 to 1) for the sentence that is passed through it.
# + tags=["remove-input"]
import os

# SECURITY: never hard-code API tokens in a notebook — the previous revision
# embedded (and thereby leaked) a live HuggingFace key. Read the token from
# the environment instead; set HF_API_TOKEN before running this cell.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
# -
# To get access to this software, you will need to get an API KEY at https://huggingface.co/unitary/toxic-bert
# Here is an example of what this would look like.
# ```python
# headers = {"Authorization": f"Bearer api_XXXXXXXXXXXXXXXXXXXXXXXXXXX"}
# ```
# +
import requests

API_URL = "https://api-inference.huggingface.co/models/unitary/toxic-bert"

def query(payload):
    """POST `payload` to the hosted toxic-bert endpoint and return the decoded JSON."""
    resp = requests.post(API_URL, headers=headers, json=payload)
    return resp.json()
# -
# Score one word; the API returns a list of {label, score} toxicity ratings.
query({"inputs": "addict"})
# You can input words or sentences in \<insert word here>, in the code, to look at the results that are generated through this.
#
# This example can provide an idea as to how ML can be used for toxicity analysis.
# Replace the placeholder with any word or sentence to get its toxicity scores.
query({"inputs": "<insert word here>"})
# + tags=["hide-input"] language="html"
#
# <iframe src='https://flo.uri.sh/story/941681/embed' title='Interactive or visual content' class='flourish-embed-iframe' frameborder='0' scrolling='no' style='width:100%;height:600px;' sandbox='allow-same-origin allow-forms allow-scripts allow-downloads allow-popups allow-popups-to-escape-sandbox allow-top-navigation-by-user-activation'></iframe><div style='width:100%!;margin-top:4px!important;text-align:right!important;'><a class='flourish-credit' href='https://public.flourish.studio/story/941681/?utm_source=embed&utm_campaign=story/941681' target='_top' style='text-decoration:none!important'><img alt='Made with Flourish' src='https://public.flourish.studio/resources/made_with_flourish.svg' style='width:105px!important;height:16px!important;border:none!important;margin:0!important;'> </a></div>
# -
# #### The Bias
# The visualization shows how contextually toxic words are derived as important words within various topics related to this dataset. These toxic words can lead to any Natural Language Processing kernel learning this dataset to provide skewed analysis for the population in consideration, i.e., people with mental, physical, and learning disabilities. This can lead to very discriminatory classifications.
# ##### An Example
# To illustrate the impact better, we will be taking the most associated words to the word 'mental' from the results. Below is a network graph that shows the commonly associated words. It is seen that words such as 'Kill' and 'Gun' appear with the closest association. This can lead to the machine contextualizing the word 'mental' to be associated with such words.
# + tags=["hide-input"] language="html"
# <iframe src='https://flo.uri.sh/visualisation/6867000/embed' title='Interactive or visual content' class='flourish-embed-iframe' frameborder='0' scrolling='no' style='width:100%;height:600px;' sandbox='allow-same-origin allow-forms allow-scripts allow-downloads allow-popups allow-popups-to-escape-sandbox allow-top-navigation-by-user-activation'></iframe><div style='width:100%!;margin-top:4px!important;text-align:right!important;'><a class='flourish-credit' href='https://public.flourish.studio/visualisation/6867000/?utm_source=embed&utm_campaign=visualisation/6867000' target='_top' style='text-decoration:none!important'><img alt='Made with Flourish' src='https://public.flourish.studio/resources/made_with_flourish.svg' style='width:105px!important;height:16px!important;border:none!important;margin:0!important;'> </a></div>
# -
# It is hence essential to be aware of the dataset that is being used to analyze a specific population. With LDA, we were able to understand that this dataset cannot be used as a good representation of the disabled community. To bring about a movement of unbiased AI, we need to perform such preliminary analysis and more not to cause unintended discrimination.
# ## The Dashboard
#
# Below is the complete data visualization dashboard of the topic analysis. Feel free to experiment and compare various labels to your liking.
# + tags=["hide-input"] language="html"
#
# <iframe src='https://flo.uri.sh/visualisation/6856937/embed' title='Interactive or visual content' class='flourish-embed-iframe' frameborder='0' scrolling='no' style='width:100%;height:600px;' sandbox='allow-same-origin allow-forms allow-scripts allow-downloads allow-popups allow-popups-to-escape-sandbox allow-top-navigation-by-user-activation'></iframe><div style='width:100%!;margin-top:4px!important;text-align:right!important;'><a class='flourish-credit' href='https://public.flourish.studio/visualisation/6856937/?utm_source=embed&utm_campaign=visualisation/6856937' target='_top' style='text-decoration:none!important'><img alt='Made with Flourish' src='https://public.flourish.studio/resources/made_with_flourish.svg' style='width:105px!important;height:16px!important;border:none!important;margin:0!important;'> </a></div>
# -
# ## Thank you!
#
# We thank you for your time!
| docs/_sources/Visualizing and Analyzing Jigsaw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="tTuYGOlnh117"
# # Tutorial Part 7: Going Deeper On Molecular Featurizations
#
# One of the most important steps of doing machine learning on molecular data is transforming the data into a form amenable to the application of learning algorithms. This process is broadly called "featurization" and involves turning a molecule into a vector or tensor of some sort. There are a number of different ways of doing that, and the choice of featurization is often dependent on the problem at hand. We have already seen two such methods: molecular fingerprints, and `ConvMol` objects for use with graph convolutions. In this tutorial we will look at some of the others.
#
# ## Colab
#
# This tutorial and the rest in this sequence can be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
#
# [](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/07_Going_Deeper_on_Molecular_Featurizations.ipynb)
#
# ## Setup
#
# To run DeepChem within Colab, you'll need to run the following installation commands. This will take about 5 minutes to run to completion and install your environment. You can of course run this tutorial locally if you prefer. In that case, don't run these cells since they will download and install Anaconda on your local machine.
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="tS3siM3Ch11-" outputId="3a96e0a7-46c1-4baa-91da-f98ca5a33d6d"
# !curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
# Bootstrap a conda environment inside Colab (no-op / unnecessary when run locally).
import conda_installer
conda_installer.install()
# !/root/miniconda/bin/conda info -e
# + colab={"base_uri": "https://localhost:8080/", "height": 188} colab_type="code" id="D43MbibL_EK0" outputId="e7b205ae-9962-4089-d49a-6d0ebe4c8430"
# !pip install --pre deepchem
# Confirm DeepChem imported successfully by echoing its version string.
import deepchem
deepchem.__version__
# + [markdown] colab_type="text" id="omxBgQVDh12B"
# ## Featurizers
#
# In DeepChem, a method of featurizing a molecule (or any other sort of input) is defined by a `Featurizer` object. There are three different ways of using featurizers.
#
# 1. When using the MoleculeNet loader functions, you simply pass the name of the featurization method to use. We have seen examples of this in earlier tutorials, such as `featurizer='ECFP'` or `featurizer='GraphConv'`.
#
# 2. You also can create a Featurizer and directly apply it to molecules. For example:
# + colab={} colab_type="code" id="Sp5Hbb4nh12C"
import deepchem as dc
# Featurize three SMILES strings into circular (ECFP-style) fingerprint vectors.
featurizer = dc.feat.CircularFingerprint()
print(featurizer(['CC', 'CCC', 'CCO']))
# + [markdown] colab_type="text" id="_bC1mPM4h12F"
# 3. When creating a new dataset with the DataLoader framework, you can specify a Featurizer to use for processing the data. We will see this in a future tutorial.
#
# We use propane (CH<sub>3</sub>CH<sub>2</sub>CH<sub>3</sub>, represented by the SMILES string `'CCC'`) as a running example throughout this tutorial. Many of the featurization methods use conformers of the molecules. A conformer can be generated using the `ConformerGenerator` class in `deepchem.utils.conformers`.
# + [markdown] colab_type="text" id="4D9z0slLh12G"
# ### RDKitDescriptors
# + [markdown] colab_type="text" id="oCfATWYIh12H"
# `RDKitDescriptors` featurizes a molecule by using RDKit to compute values for a list of descriptors. These are basic physical and chemical properties: molecular weight, polar surface area, numbers of hydrogen bond donors and acceptors, etc. This is most useful for predicting things that depend on these high level properties rather than on detailed molecular structure.
#
# Intrinsic to the featurizer is a set of allowed descriptors, which can be accessed using `RDKitDescriptors.allowedDescriptors`. The featurizer uses the descriptors in `rdkit.Chem.Descriptors.descList`, checks if they are in the list of allowed descriptors, and computes the descriptor value for the molecule.
#
# Let's print the values of the first ten descriptors for propane.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="3dt_vjtXh12N" outputId="c6f73232-0765-479c-93b0-ba18cbf6f33a"
# Compute RDKit descriptor values for propane ('CCC') and show the first ten,
# paired with their descriptor names.
rdkit_featurizer = dc.feat.RDKitDescriptors()
features = rdkit_featurizer(['CCC'])[0]
for feature, descriptor in zip(features[:10], rdkit_featurizer.descriptors):
    print(descriptor, feature)
# -
# Of course, there are many more descriptors than this.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="KfyDpE81h12Q" outputId="46673131-c504-48ca-db35-5d689e218069"
# features holds one value per RDKit descriptor computed above.
print('The number of descriptors present is: ', len(features))
# + [markdown] colab_type="text" id="41RwzbTth12U"
# ### WeaveFeaturizer and MolGraphConvFeaturizer
#
# We previously looked at graph convolutions, which use `ConvMolFeaturizer` to convert molecules into `ConvMol` objects. Graph convolutions are a special case of a large class of architectures that represent molecules as graphs. They work in similar ways but vary in the details. For example, they may associate data vectors with the atoms, the bonds connecting them, or both. They may use a variety of techniques to calculate new data vectors from those in the previous layer, and a variety of techniques to compute molecule level properties at the end.
#
# DeepChem supports lots of different graph based models. Some of them require molecules to be featurized in slightly different ways. Because of this, there are two other featurizers called `WeaveFeaturizer` and `MolGraphConvFeaturizer`. They each convert molecules into a different type of Python object that is used by particular models. When using any graph based model, just check the documentation to see what featurizer you need to use with it.
# + [markdown] colab_type="text" id="SF3l5yJ4h12f"
# ### CoulombMatrix
#
# All the models we have looked at so far consider only the intrinsic properties of a molecule: the list of atoms that compose it and the bonds connecting them. When working with flexible molecules, you may also want to consider the different conformations the molecule can take on. For example, when a drug molecule binds to a protein, the strength of the binding depends on specific interactions between pairs of atoms. To predict binding strength, you probably want to consider a variety of possible conformations and use a model that takes them into account when making predictions.
#
# The Coulomb matrix is one popular featurization for molecular conformations. Recall that the electrostatic Coulomb interaction between two charges is proportional to $q_1 q_2/r$ where $q_1$ and $q_2$ are the charges and $r$ is the distance between them. For a molecule with $N$ atoms, the Coulomb matrix is a $N \times N$ matrix where each element gives the strength of the electrostatic interaction between two atoms. It contains information both about the charges on the atoms and the distances between them. More information on the functional forms used can be found [here](https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.108.058301).
#
# To apply this featurizer, we first need a set of conformations for the molecule. We can use the `ConformerGenerator` class to do this. It takes a RDKit molecule, generates a set of energy minimized conformers, and prunes the set to only include ones that are significantly different from each other. Let's try running it for propane.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="evLPEI6mh12g" outputId="c0895d51-a38d-494e-d161-31ce5c421fb3"
from rdkit import Chem
# Generate up to 5 energy-minimized conformers for propane and count survivors.
generator = dc.utils.ConformerGenerator(max_conformers=5)
propane_mol = generator.generate_conformers(Chem.MolFromSmiles('CCC'))
print("Number of available conformers for propane: ", len(propane_mol.GetConformers()))
# -
# It only found a single conformer. This shouldn't be surprising, since propane is a very small molecule with hardly any flexibility. Let's try adding another carbon.
# Butane has a rotatable central C-C bond, so more conformers may survive pruning.
butane_mol = generator.generate_conformers(Chem.MolFromSmiles('CCCC'))
print("Number of available conformers for butane: ", len(butane_mol.GetConformers()))
# Now we can create a Coulomb matrix for our molecule.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="pPIqy39Ih12i" outputId="ca7b18b3-cfa4-44e8-a907-cbffd4e65364"
# Compute a (20, 20) Coulomb matrix for propane; rows/cols beyond its atoms are zero.
coulomb_mat = dc.feat.CoulombMatrix(max_atoms=20)
features = coulomb_mat(propane_mol)
print(features)
# + [markdown] colab_type="text" id="Uyq3Xk3sh12l"
# Notice that many elements are 0. To combine multiple molecules in a batch we need all the Coulomb matrices to be the same size, even if the molecules have different numbers of atoms. We specified `max_atoms=20`, so the returned matrix has size (20, 20). The molecule only has 11 atoms, so only an 11 by 11 submatrix is nonzero.
# + [markdown] colab_type="text" id="P-sGs7W2h12p"
# ### CoulombMatrixEig
# + [markdown] colab_type="text" id="9NTjtDUzh12p"
# An important feature of Coulomb matrices is that they are invariant to molecular rotation and translation, since the interatomic distances and atomic numbers do not change. Respecting symmetries like this makes learning easier. Rotating a molecule does not change its physical properties. If the featurization does change, then the model is forced to learn that rotations are not important, but if the featurization is invariant then the model gets this property automatically.
#
# Coulomb matrices are not invariant under another important symmetry: permutations of the atoms' indices. A molecule's physical properties do not depend on which atom we call "atom 1", but the Coulomb matrix does. To deal with this, the `CoulumbMatrixEig` featurizer was introduced, which uses the eigenvalue spectrum of the Coulumb matrix and is invariant to random permutations of the atom's indices. The disadvantage of this featurization is that it contains much less information ($N$ eigenvalues instead of an $N \times N$ matrix), so models will be more limited in what they can learn.
#
# `CoulombMatrixEig` inherits from `CoulombMatrix` and featurizes a molecule by first computing the Coulomb matrices for different conformers of the molecule and then computing the eigenvalues for each Coulomb matrix. These eigenvalues are then padded to account for variation in number of atoms across molecules.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="ga1-nNiWh12t" outputId="2df3163c-6808-49e6-dba8-282ddd7fa3c4"
# Eigenvalue-spectrum variant: invariant to atom-index permutations,
# padded to max_atoms entries.
coulomb_mat_eig = dc.feat.CoulombMatrixEig(max_atoms=20)
features = coulomb_mat_eig(propane_mol)
print(features)
# + [markdown] colab_type="text" id="wssi6cBmh12z"
# # Congratulations! Time to join the Community!
#
# Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:
#
# ## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)
# This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.
#
# ## Join the DeepChem Gitter
# The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
| examples/tutorials/07_Going_Deeper_on_Molecular_Featurizations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 3 Assessment: Orthogonal Projections
# ## Learning Objectives
#
# In this week, we will write functions which perform orthogonal projections.
#
# By the end of this week, you should be able to
#
# 1. Write code that projects data onto lower-dimensional subspaces.
# 2. Understand the real world applications of projections.
#
# We highlight some tips and tricks which would be useful when you implement numerical
# algorithms that you have never encountered before.
# You are invited to think about these concepts when you
# write your program.
#
# The important thing is to learn to map from mathematical equations to code. It is not always
# easy to do so, but you will get better at it with more practice.
#
# We will apply this to project high-dimensional face images onto lower dimensional basis which we call "eigenfaces". We will also revisit the problem of linear regression, but from the perspective of solving normal equations,
# the concept which you apply to derive the formula for orthogonal projections. We will apply this to predict housing
# prices for the Boston housing dataset, which is a classic example in machine learning.
# PACKAGE: DO NOT EDIT
import matplotlib
# The Agg backend must be selected before pyplot is imported (headless rendering).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
from sklearn.datasets import fetch_olivetti_faces, fetch_lfw_people
from ipywidgets import interact
# %matplotlib inline
# Each face image is a flattened 64x64 grayscale array.
image_shape = (64, 64)
# Load faces data
# (downloads the Olivetti faces dataset on first use)
dataset = fetch_olivetti_faces()
faces = dataset.data
# ### Advice for testing numerical algorithms
#
# Testing machine learning algorithms (or numerical algorithms in general)
# is sometimes really hard as it depends on the dataset
# to produce an answer, and you will never be able to test your algorithm on all the datasets
# we have in the world. Nevertheless, we have some tips for you to help you identify bugs in
# your implementations.
#
# #### 1. Test on small dataset
# Test your algorithms on small dataset: datasets of size 1 or 2 sometimes will suffice. This
# is useful because you can (if necessary) compute the answers by hand and compare them with
# the answers produced by the computer program you wrote. In fact, these small datasets can even have special numbers,
# which will allow you to compute the answers by hand easily.
#
# #### 2. Find invariants
# Invariants refer to properties of your algorithm and functions that are maintained regardless
# of the input. We will highlight this point later in this notebook where you will see functions,
# which will check invariants for some of the answers you produce.
#
# Invariants you may want to look for:
# 1. Does your algorithm always produce a positive/negative answer, or a positive definite matrix?
# 2. If the algorithm is iterative, do the intermediate results increase/decrease monotonically?
# 3. Does your solution relate with your input in some interesting way, e.g. orthogonality?
#
# When you have a set of invariants, you can generate random inputs and make
# assertions about these invariants. This is sometimes known as [fuzzing](https://en.wikipedia.org/wiki/Fuzzing), which has proven to be a very effective technique for identifying bugs in programs.
#
# Finding invariants is hard, and sometimes there simply isn't any invariant. However, DO take advantage of them if you can find them. They are the most powerful checks when you have them.
# ## 1. Orthogonal Projections
# Recall that for projection of a vector $x$ onto a 1-dimensional subspace $U$ with basis vector $\boldsymbol b$ we have
#
# $${\pi_U}(\boldsymbol x) = \frac{\boldsymbol b\boldsymbol b^T}{{\lVert \boldsymbol b \rVert}^2}\boldsymbol x $$
#
# And for the general projection onto an M-dimensional subspace $U$ with basis vectors $\boldsymbol b_1,\dotsc, \boldsymbol b_M$ we have
#
# $${\pi_U}(\boldsymbol x) = \boldsymbol B(\boldsymbol B^T\boldsymbol B)^{-1}\boldsymbol B^T\boldsymbol x $$
#
# where
#
# $$\boldsymbol B = (\boldsymbol b_1|...|\boldsymbol b_M)$$
#
#
# Your task is to implement orthogonal projections. We can split this into two steps
# 1. Find the projection matrix $\boldsymbol P$ that projects any $\boldsymbol x$ onto $U$.
# 2. The projected vector $\pi_U(\boldsymbol x)$ of $\boldsymbol x$ can then be written as $\pi_U(\boldsymbol x) = \boldsymbol P\boldsymbol x$.
# Note that for orthogonal projections, we have the following invariants:
# +
import numpy.testing as np_test
def test_property_projection_matrix(P):
"""Test if the projection matrix satisfies certain properties.
In particular, we should have P @ P = P, and P = P^T
"""
np_test.assert_almost_equal(P, P @ P)
np_test.assert_almost_equal(P, P.T)
def test_property_projection(x, p):
"""Test orthogonality of x and its projection p."""
np_test.assert_almost_equal(np.dot(p-x, p), 0)
# +
# GRADED FUNCTION: DO NOT EDIT THIS LINE
# Projection 1d
# ===YOU SHOULD EDIT THIS FUNCTION===
def projection_matrix_1d(b):
"""Compute the projection matrix onto the space spanned by `b`
Args:
b: ndarray of dimension (D,), the basis for the subspace
Returns:
P: the projection matrix
"""
D, = b.shape
P = np.outer(b, b.T)/np.square(np.linalg.norm(b)) # EDIT THIS
return P
# ===YOU SHOULD EDIT THIS FUNCTION===
def project_1d(x, b):
    """Project `x` onto the 1-D subspace spanned by `b`.

    Args:
        x: ndarray of dimension (D,), the vector to be projected
        b: ndarray of dimension (D,), the basis for the subspace

    Returns:
        p: ndarray of dimension (D,), the orthogonal projection of x
           onto span{b}
    """
    # The original had a dead `p = np.zeros(3)` assignment (and a
    # hard-coded dimension of 3); the projection matrix alone suffices.
    P = np.outer(b, b) / (b @ b)
    return P @ x
# Projection onto general subspace
# ===YOU SHOULD EDIT THIS FUNCTION===
def projection_matrix_general(B):
    """Compute the projection matrix onto the space spanned by `B`.

    Args:
        B: ndarray of dimension (D, M), the basis for the subspace

    Returns:
        P: ndarray of shape (D, D), the projection matrix
           B (B^T B)^{-1} B^T
    """
    # The original assigned P = np.eye(...) and immediately overwrote it.
    # Solving B^T B X = B^T is numerically preferable to forming the
    # explicit inverse of B^T B.
    return B @ np.linalg.solve(B.T @ B, B.T)
# ===YOU SHOULD EDIT THIS FUNCTION===
def project_general(x, B):
    """Project `x` onto the subspace spanned by the columns of `B`.

    (The original docstring wrongly said this computes the projection
    matrix and did not document `x`.)

    Args:
        x: ndarray of dimension (D,), the vector to be projected
        B: ndarray of dimension (D, M), the basis for the subspace

    Returns:
        p: ndarray of dimension (D,), the orthogonal projection of x
           onto span(columns of B)
    """
    # Solve the normal equations B^T B z = B^T x instead of inverting
    # B^T B explicitly, then map back through B.
    return B @ np.linalg.solve(B.T @ B, B.T @ x)
# -
# We have included some unittest for you to test your implementation.
# Orthogonal projection in 2d
# define basis vector for subspace
b = np.array([2,1]).reshape(-1,1)
# point to be projected later
x = np.array([1,2]).reshape(-1, 1)
# +
# Test 1D
# Projection of (1,1,1) onto span{(1,2,2)}: b (b.x) / ||b||^2 = 5/9 * b.
np_test.assert_almost_equal(projection_matrix_1d(np.array([1, 2, 2])), 
                            np.array([[1, 2, 2],
                                      [2, 4, 4],
                                      [2, 4, 4]]) / 9)
np_test.assert_almost_equal(project_1d(np.ones(3),
                                       np.array([1, 2, 2])),
                            np.array([5, 10, 10]) / 9)
B = np.array([[1, 0],
              [1, 1],
              [1, 2]])
# Test General
# Known closed-form projection matrix for this 3x2 basis.
np_test.assert_almost_equal(projection_matrix_general(B), 
                            np.array([[5, 2, -1],
                                      [2, 2, 2],
                                      [-1, 2, 5]]) / 6)
np_test.assert_almost_equal(project_general(np.array([6, 0, 0]), B), 
                            np.array([5, 2, -1]))
print('correct')
# -
# ## 2. Eigenfaces (optional)
#
# Next, we will take a look at what happens if we project some dataset consisting of human faces onto some basis we call
# the "eigenfaces".
from sklearn.datasets import fetch_olivetti_faces, fetch_lfw_people
from ipywidgets import interact
# %matplotlib inline
image_shape = (64, 64)
# Load faces data
dataset = fetch_olivetti_faces()
faces = dataset.data
# Standardise each pixel across the dataset (zero mean, unit variance).
mean = faces.mean(axis=0)
std = faces.std(axis=0)
faces_normalized = (faces - mean) / std
# The data for the basis has been saved in a file named `eigenfaces.npy`; first we load it into the variable B.
B = np.load('eigenfaces.npy')[:50] # we use the first 50 dimensions of the basis, you should play around with the dimension here.
print("the eigenfaces have shape {}".format(B.shape))
# Along the first dimension of B, each instance is a `64x64` image, an "eigenface". Let's visualize
# a few of them.
plt.figure(figsize=(10,10))
plt.imshow(np.hstack(B[:5]), cmap='gray');
# Take a look at what happens if we project our faces onto the basis spanned by these "eigenfaces". This requires
# us to reshape B into the same shape as the matrix representing the basis as we have done earlier. Then we can
# reuse the functions we implemented earlier to compute the projection matrix and the projection. Complete the code below to visualize the reconstructed faces that lie on the subspace spanned by the "eigenfaces".
@interact(i=(0, 10))
def show_eigenface_reconstruction(i):
    """Show face `i` next to its reconstruction from the eigenface basis.

    The original cell contained `???` placeholders (a SyntaxError as
    written); this fills in the intended orthogonal projection.
    """
    original_face = faces_normalized[i].reshape(64, 64)
    # Flatten each 64x64 eigenface into a column, giving a basis matrix
    # of shape (4096, n_eigenfaces) as expected by project_general.
    B_basis = B.reshape(B.shape[0], -1).T
    # Orthogonally project the flattened face onto span(B_basis), then
    # reshape the result back into a 64x64 image for display.
    face_reconstruction = project_general(faces_normalized[i], B_basis).reshape(64, 64)
    plt.figure()
    plt.imshow(np.hstack([original_face, face_reconstruction]), cmap='gray')
    plt.show()
# __Question__:
#
# What would happen to the reconstruction as we increase the dimension of our basis?
#
# Modify the code above to visualize it.
# ## 3. Least square for predicting Boston housing prices (optional)
# Consider the case where we have a linear model for predicting housing prices. We are predicting the housing prices based on features in the
# housing dataset. If we collect the features in a vector $\boldsymbol{x}$, and the price of the houses as $y$. Assuming that we have
# a prediction model in the way such that $\hat{y}_i = f(\boldsymbol {x}_i) = \boldsymbol \theta^T\boldsymbol{x}_i$.
#
# If we collect the dataset of $n$ datapoints $\boldsymbol x_i$ in a data matrix $\boldsymbol X$, we can write down our model like this:
#
# $$
# \begin{bmatrix}
# \boldsymbol {x}_1^T \\
# \vdots \\
# \boldsymbol {x}_n^T
# \end{bmatrix} \boldsymbol {\theta} = \begin{bmatrix}
# y_1 \\
# \vdots \\
# y_n
# \end{bmatrix}.
# $$
#
# That is,
#
# $$
# \boldsymbol X\boldsymbol{\theta} = \boldsymbol {y}.
# $$
# where $\boldsymbol y$ collects all house prices $y_1,\dotsc, y_n$ of the training set.
#
# Our goal is to find the best $\boldsymbol \theta$ that minimizes the following (least squares) objective:
#
# $$
# \begin{eqnarray}
# &\sum^n_{i=1}{\lVert \boldsymbol \theta^T\boldsymbol {x}_i - y_i \rVert^2} \\
# &= (\boldsymbol X\boldsymbol {\theta} - \boldsymbol y)^T(\boldsymbol X\boldsymbol {\theta} - \boldsymbol y).
# \end{eqnarray}
# $$
# Note that we aim to minimize the squared error between the prediction $\boldsymbol \theta^T\boldsymbol {x}_i$ of the model and the observed data point $y_i$ in the training set.
#
# To find the optimal (maximum likelihood) parameters $\boldsymbol \theta^*$, we set the gradient of the least-squares objective to $\boldsymbol 0$:
# $$
# \begin{eqnarray}
# \nabla_{\boldsymbol\theta}(\boldsymbol X{\boldsymbol \theta} - \boldsymbol y)^T(\boldsymbol X{\boldsymbol \theta} - \boldsymbol y) &=& \boldsymbol 0 \\
# \iff \nabla_{\boldsymbol\theta}(\boldsymbol {\theta}^T\boldsymbol X^T - \boldsymbol y^T)(\boldsymbol X\boldsymbol {\theta} - \boldsymbol y) &=& \boldsymbol 0 \\
# \iff \nabla_{\boldsymbol\theta}(\boldsymbol {\theta}^T\boldsymbol X^T\boldsymbol X\boldsymbol {\theta} - \boldsymbol y^T\boldsymbol X\boldsymbol \theta - \boldsymbol \theta^T\boldsymbol X^T\boldsymbol y + \boldsymbol y^T\boldsymbol y ) &=& \boldsymbol 0 \\
# \iff 2\boldsymbol X^T\boldsymbol X\boldsymbol \theta - 2\boldsymbol X^T\boldsymbol y &=& \boldsymbol 0 \\
# \iff \boldsymbol X^T\boldsymbol X\boldsymbol \theta &=& \boldsymbol X^T\boldsymbol y.
# \end{eqnarray}
# $$
#
# The solution, which gives zero gradient, solves the __normal equation__
#
# $$\boldsymbol X^T\boldsymbol X\boldsymbol \theta = \boldsymbol X^T\boldsymbol y.$$
#
# If you recall from the lecture on projection onto n-dimensional subspace, this is exactly the same as the normal equation we have for projection (take a look at the notes [here](https://www.coursera.org/teach/mathematics-machine-learning-pca/content/edit/supplement/fQq8T/content) if you don't remember them).
#
# This means our optimal parameter vector, which minimizes our objective, is given by
# $$\boldsymbol \theta^* = (\boldsymbol X^T\boldsymbol X)^{-1}\boldsymbol X^T\boldsymbol y.$$
# Let's put things into perspective and try to find the best parameter $\theta^*$
# of the line $y = \theta x$, where $x,\theta\in\mathbb{R}$ for a given a training set $\boldsymbol X\in\mathbb{R}^n$ and $\boldsymbol y\in\mathbb{R}^n$.
#
# Note that in our example, the features $x_i$ are only scalar, such that the parameter $\theta$ is also only a scalar. The derivation above holds for general parameter vectors (not only for scalars).
#
# Note: This is exactly the same problem as linear regression which was discussed in [Mathematics for Machine Learning: Multivariate Calculus](https://www.coursera.org/teach/multivariate-calculus-machine-learning/content/edit/lecture/74ryq/video-subtitles). However, rather than finding the optimal $\theta^*$ with gradient descent, we can solve this using the normal equation.
# +
x = np.linspace(0, 10, num=50)
random = np.random.RandomState(42) # we use the same random seed so we get deterministic output
theta = random.randn() # we use a random theta, our goal is to perform linear regression which finds theta_hat that minimizes the objective
y = theta * x + random.rand(len(x)) # our theta is corrupted by some noise, so that we do not get (x,y) on a line
plt.scatter(x, y);
plt.xlabel('x');
plt.ylabel('y');
# +
X = x.reshape(-1,1)
Y = y.reshape(-1,1)
theta_hat = np.linalg.solve(X.T @ X,
X.T @ Y)
# -
# We can show how our $\hat{\theta}$ fits the line.
fig, ax = plt.subplots()
ax.scatter(x, y);
xx = [0, 10]
yy = [0, 10 * theta_hat[0,0]]
ax.plot(xx, yy, 'red', alpha=.5);
ax.set(xlabel='x', ylabel='y');
print("theta = %f" % theta)
print("theta_hat = %f" % theta_hat)
# What would happen to $\lVert {\theta^*} - \theta \rVert$ if we increased the number of datapoints?
#
# Make your hypothesis, and write a small program to confirm it!
# Candidate sample sizes for the |theta* - theta| convergence experiment
# (left as an exercise below).
N = np.arange(10, 10000, step=10)
# Your code here which calculates θ* for different sample size.
N
# We see how we can find the best $\theta$. In fact, we can extend our methodology to higher dimensional dataset. Let's now try applying the same methodology to the boston housing prices dataset.
# +
# NOTE(review): load_boston was deprecated and removed from scikit-learn
# (>= 1.2) for ethical reasons — this cell only runs on older versions.
from sklearn.datasets import load_boston
boston = load_boston()
boston_X, boston_y = boston.data, boston.target
print("The housing dataset has size {}".format(boston_X.shape))
# Bug fix: the prices print previously formatted boston_X.shape.
print("The prices has size {}".format(boston_y.shape))
# Solve the normal equations X^T X theta = X^T y for the full feature set.
boston_theta_hat = np.linalg.solve(boston_X.T @ boston_X , boston_X.T @ boston_y) ## EDIT THIS to predict boston_theta_hat
# -
| Secret/maths-for-ml/pca/week3/orthogonal_projections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.6 64-bit
# language: python
# name: python3
# ---
# # PAS
# # Install Python dependencies
# %pip install pandas matplotlib
# # BD model vs BIDE model
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
# Result folders for the BD (birth-death) and BIDE simulation runs.
bd_vs_bide_folder = "bd_vs_bide"
bd_results_folder = "bd_results"
bide_results_folder = "bide_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
# +
# Plot total population of both models with a ±1 SD band.
# NOTE(review): the literal '{}' appears to be part of the on-disk file
# name (not a format placeholder) — verify against the result files.
bd = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_P_tot{}.csv'), sep=',', names=columns, header=None)
bide = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(bd['Time'], bd['Mean'], label='BD model')
ax.fill_between(bd['Time'], bd['Mean']-bd['SD'], bd['Mean']+bd['SD'], alpha=0.3)
ax.plot(bide['Time'], bide['Mean'], label='BIDE model')
ax.fill_between(bide['Time'], bide['Mean']-bide['SD'], bide['Mean']+bide['SD'], alpha=0.3)
ax.legend()
plt.show()
# +
# Compare each simulated total population against its analytic equation,
# with a dashed line at the initial population N0.
bd = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_P_tot{}.csv'), sep=',', names=columns, header=None)
bide = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_P_tot{}.csv'), sep=',', names=columns, header=None)
bd_equation = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), 'bd_BD{}.csv'), sep=',', names=columns, header=None)
bide_equation = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), 'bide_BIDE{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population and BD equation')
ax.plot(bd['Time'], bd['Mean'], label='BD model')
ax.plot(bd_equation['Time'], bd_equation['Mean'], label='BD equation')
n0 = bd['Mean'][0]
plt.hlines(y=n0, xmin=0, xmax=len(bd['Mean']), linestyles='dashed', label=f'N0 = {n0}')
ax.legend(loc='upper left')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population and BIDE equation')
ax.plot(bide['Time'], bide['Mean'], label='BIDE model')
ax.plot(bide_equation['Time'], bide_equation['Mean'], label='BIDE equation')
n0 = bide['Mean'][0]
plt.hlines(y=n0, xmin=0, xmax=len(bide['Mean']), linestyles='dashed', label=f'N0 = {n0}')
ax.legend(loc='upper left')
plt.show()
# +
# Per-city equation curves (model curves are loaded but plotting is
# commented out below).
bd_pop = [None for i in range(n_cities)]
bide_pop = [None for i in range(n_cities)]
bd_equation = [None for i in range(n_cities)]
bide_equation = [None for i in range(n_cities)]
for i in range(n_cities):
    bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
    bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
    bd_equation[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_SINGLE_BD{{i={i}.0}}.csv'), sep=',', names=columns, header=None)
    bide_equation[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_SINGLE_BIDE{{i={i}.0}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population and BD equation')
for i in range(n_cities):
    # ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
    ax.plot(bd_equation[i]['Time'], bd_equation[i]['Mean'], label=f'BD equation city {i+1}')
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population and BIDE equation')
for i in range(n_cities):
    # ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
    ax.plot(bide_equation[i]['Time'], bide_equation[i]['Mean'], label=f'BIDE equation city {i+1}')
ax.legend()
plt.show()
# +
# Per-city mean populations of both models on a single figure.
bd_pop = [None for i in range(n_cities)]
bide_pop = [None for i in range(n_cities)]
for i in range(n_cities):
    bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
    bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
    ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
for i in range(n_cities):
    ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
ax.legend()
plt.show()
# +
# Per-city BD populations with ±1 SD bands.
bd_pop = [None for i in range(n_cities)]
for i in range(n_cities):
    bd_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
    ax.plot(bd_pop[i]['Time'], bd_pop[i]['Mean'], label=f'BD model city {i+1}')
    ax.fill_between(bd_pop[i]['Time'], bd_pop[i]['Mean'] - bd_pop[i]['SD'], bd_pop[i]['Mean'] + bd_pop[i]['SD'], label=f'SD City {i+1}', alpha=0.3)
ax.legend(loc='upper left')
plt.show()
# +
# Same plot for the BIDE model.
bide_pop = [None for i in range(n_cities)]
for i in range(n_cities):
    bide_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
    ax.plot(bide_pop[i]['Time'], bide_pop[i]['Mean'], label=f'BIDE model city {i+1}')
    ax.fill_between(bide_pop[i]['Time'], bide_pop[i]['Mean'] - bide_pop[i]['SD'], bide_pop[i]['Mean'] + bide_pop[i]['SD'], label=f'SD City {i+1}', alpha=0.3)
ax.legend(loc='upper left')
plt.show()
# +
# Totals per species/event type; each character of the string is one
# species key (e.g. 'P', 'B', 'D').
species = 'PBD'
n_species = len(species)
bd_data = {}
for s in species:
    bd_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bd_results_folder), f'bd_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
    ax.plot(bd_data[s]['Time'], bd_data[s]['Mean'], label=f'#{s}')
ax.legend()
plt.show()
# +
# Same per-species plot for the BIDE run.
species = 'PBIDE'
n_species = len(species)
bide_data = {}
for s in species:
    bide_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(bd_vs_bide_folder, bide_results_folder), f'bide_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-I-D-E')
for s in species:
    ax.plot(bide_data[s]['Time'], bide_data[s]['Mean'], label=f'#{s}')
ax.legend()
plt.show()
# -
# # Balanced vs Unbalanced
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
# Result folders for the balanced vs unbalanced system comparison.
balanced_vs_unbalanced_folder = "balanced_vs_unbalanced"
balanced_results_folder = "balanced_results"
unbalanced_results_folder = "unbalanced_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
# +
# Total population of both systems with ±1 SD bands.
balanced = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, balanced_results_folder), 'balanced_P_tot{}.csv'), sep=',', names=columns, header=None)
unbalanced = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, unbalanced_results_folder), 'unbalanced_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(balanced['Time'], balanced['Mean'], label='Balanced system')
ax.fill_between(balanced['Time'], balanced['Mean']-balanced['SD'], balanced['Mean']+balanced['SD'], alpha=0.3)
ax.plot(unbalanced['Time'], unbalanced['Mean'], label='Unbalanced system')
ax.fill_between(unbalanced['Time'], unbalanced['Mean']-unbalanced['SD'], unbalanced['Mean']+unbalanced['SD'], alpha=0.3)
ax.legend()
plt.show()
# +
# Per-city populations of both systems.
balanced_pop = [None for i in range(n_cities)]
unbalanced_pop = [None for i in range(n_cities)]
for i in range(n_cities):
    balanced_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, balanced_results_folder), f'balanced_#P[{i}].csv'), sep=',', names=columns, header=None)
    unbalanced_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(balanced_vs_unbalanced_folder, unbalanced_results_folder), f'unbalanced_#P[{i}].csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population')
for i in range(n_cities):
    ax.plot(balanced_pop[i]['Time'], balanced_pop[i]['Mean'], label=f'Balanced system city {i+1}')
for i in range(n_cities):
    ax.plot(unbalanced_pop[i]['Time'], unbalanced_pop[i]['Mean'], label=f'Unbalanced system city {i+1}')
ax.legend()
# plt.axis([0, 2000, 0, 200])
plt.show()
# -
# # Emigrate to Next vs Biggest vs Smallest city
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
# Result folders for the three emigration strategies.
next_vs_biggest_vs_smallest_folder = "next_vs_biggest_vs_smallest"
next_results_folder = "next_results"
biggest_results_folder = "biggest_results"
smallest_results_folder = "smallest_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
# +
# Renamed `next` -> `next_df` / `next_strategy` so this module no longer
# shadows the builtin next().
next_df = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, next_results_folder), 'next_P_tot{}.csv'), sep=',', names=columns, header=None)
biggest = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, biggest_results_folder), 'biggest_P_tot{}.csv'), sep=',', names=columns, header=None)
smallest = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, smallest_results_folder), 'smallest_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(next_df['Time'], next_df['Mean'], label='Emigrate to Next')
ax.fill_between(next_df['Time'], next_df['Mean']-next_df['SD'], next_df['Mean']+next_df['SD'], alpha=0.3)
ax.plot(biggest['Time'], biggest['Mean'], label='Emigrate to Biggest')
ax.fill_between(biggest['Time'], biggest['Mean']-biggest['SD'], biggest['Mean']+biggest['SD'], alpha=0.3)
ax.plot(smallest['Time'], smallest['Mean'], label='Emigrate to Smallest')
ax.fill_between(smallest['Time'], smallest['Mean']-smallest['SD'], smallest['Mean']+smallest['SD'], alpha=0.3)
ax.legend()
plt.show()
# -
# Next vs Smallest only (no SD bands).
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(next_df['Time'], next_df['Mean'], label='Emigrate to Next')
ax.plot(smallest['Time'], smallest['Mean'], color='green', label='Emigrate to Smallest')
ax.legend()
plt.show()
# +
# Per-city curves for each strategy, one figure per strategy.
next_pop = [None for i in range(n_cities)]
biggest_pop = [None for i in range(n_cities)]
smallest_pop = [None for i in range(n_cities)]
for i in range(n_cities):
    next_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, next_results_folder), f'next_#P[{i}].csv'), sep=',', names=columns, header=None)
    biggest_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, biggest_results_folder), f'biggest_#P[{i}].csv'), sep=',', names=columns, header=None)
    smallest_pop[i] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(next_vs_biggest_vs_smallest_folder, smallest_results_folder), f'smallest_#P[{i}].csv'), sep=',', names=columns, header=None)
next_strategy = ['Emigrate to Next', next_pop]
biggest_strategy = ['Emigrate to Biggest', biggest_pop]
smallest_strategy = ['Emigrate to Smallest', smallest_pop]
for strategy in [next_strategy, biggest_strategy, smallest_strategy]:
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    fig.suptitle(strategy[0])
    for i in range(n_cities):
        ax.plot(strategy[1][i]['Time'], strategy[1][i]['Mean'], label=f'City {i+1}')
        ax.fill_between(strategy[1][i]['Time'], strategy[1][i]['Mean'] - strategy[1][i]['SD'], strategy[1][i]['Mean'] + strategy[1][i]['SD'], label=f'City {i+1}', alpha=0.3)
    ax.legend(loc='upper left')
    plt.show()
# -
# # Child vs Children
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
# Result folders for the single-child vs multiple-children comparison.
child_vs_children_folder = "child_vs_children"
child_results_folder = "child_results"
children_results_folder = "children_results"
n_cities = 3
columns = ['Time', 'Mean', 'SD', 'CI']
# +
# Total population of both scenarios with ±1 SD bands.
child = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, child_results_folder), 'child_P_tot{}.csv'), sep=',', names=columns, header=None)
children = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, children_results_folder), 'children_P_tot{}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Total population')
ax.plot(child['Time'], child['Mean'], label='Child')
ax.fill_between(child['Time'], child['Mean']-child['SD'], child['Mean']+child['SD'], alpha=0.3)
ax.plot(children['Time'], children['Mean'], label='Children')
ax.fill_between(children['Time'], children['Mean']-children['SD'], children['Mean']+children['SD'], alpha=0.3)
ax.legend()
plt.show()
# +
# Per-species totals for both scenarios.
species = 'PBD'
n_species = len(species)
child_data = {}
children_data = {}
for s in species:
    child_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, child_results_folder), f'child_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
    children_data[s] = pd.read_csv(filepath_or_buffer=os.path.join(os.path.join(child_vs_children_folder, children_results_folder), f'children_{s}_tot{{}}.csv'), sep=',', names=columns, header=None)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
    ax.plot(child_data[s]['Time'], child_data[s]['Mean'], label=f'Child #{s}')
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('Population, B-D')
for s in species:
    ax.plot(children_data[s]['Time'], children_data[s]['Mean'], label=f'Children #{s}')
ax.legend()
plt.show()
| analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: scweetkernel
# language: python
# name: scweetkernel
# ---
# ### Instance of TwitterAPITools
# * <NAME> Mar 2021
from TwitterAPITools.APIScraper import TimelineScraper, UserFolScraper
# ### Timeline
# Input directory of user files, Twitter API bearer token, and output
# file path — fill these in before running.
fdir = ""
btoken = ""
ofile = ""
tscraper = TimelineScraper(btoken)
# + tags=[]
# Scrape the timeline for every user file found under fdir into ofile.
tscraper.workFromUserDir(fdir, ofile)
# -
| TwAPIClassInstance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Gradient Descent (Gradienten-Abstieg):
#
# $x = x - \eta \cdot \nabla f(x)$
# $f(x)$ ist die Funktion (z.B. Fehlerfunktion)
# $x$ ist der Parameter (z.B. Gewicht im NN)
# +
def f(x):
    """Quadratic test function: f(x) = x^2."""
    return x * x

def f_prime(x):
    """Derivative of f: f'(x) = 2x."""
    return x * 2
# Global minimum of f(x) = x^2 is at x = 0 (the original comment
# wrongly said (1, 1)); the descent starts from x = 10.
print("Minimum: ", f(0))
print("Starte bei: ", f(10))
# +
import matplotlib.pyplot as plt
import numpy as np
# Plot f with the minimum (red star) and the starting point (green dot).
x = np.arange(-20, 20, 0.1)
y = [f(val) for val in x]
plt.scatter(0, f(0), color="red", marker="*", s=200)
plt.scatter(10, f(10), color="green", marker="o", s=200)
plt.plot(x, y, color="blue")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# +
# Plain gradient descent with three stop criteria: convergence
# (y small enough), divergence (y too large), and an iteration cap.
eta = 1e-3
x = 10
y = f(x)
stop_conv = 1e-6
stop_div = 1e+6
stop_iter = 1e4
it = 0
downhill_points = []
while y > stop_conv and y < stop_div and it < stop_iter:
    x = x - eta * f_prime(x)
    it += 1
    y = f(x)
    # Record every 100th iterate to visualise the trajectory later.
    if it % 100 == 0:
        downhill_points.append(x)
print("Solution: ", y)
print("X = ", x)
# +
# Plot f with the recorded iterates shrinking in size as they approach
# the minimum.
x = np.arange(-20, 20, 0.1)
y = [f(val) for val in x]
for index, point in enumerate(downhill_points):
    plt.scatter(point, f(point), color="green", marker="o", s=200 / (index+1))
plt.scatter(0, f(0), color="red", marker="*", s=200)
plt.scatter(10, f(10), color="green", marker="o", s=200)
plt.plot(x, y, color="blue")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# +
# Run the same descent for several learning rates and record where each
# run ends up.
xs = []
ys = []
etas = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
for eta in etas:
    x = 10
    y = f(x)
    stop_conv = 1e-6
    stop_div = 1e+6
    stop_iter = 1e4
    it = 0
    while y > stop_conv and y < stop_div and it < stop_iter:
        x = x - eta * f_prime(x)
        it += 1
        y = f(x)
    xs.append(x)
    ys.append(y)
# +
# Annotate each final point with the learning rate that produced it.
for index, (x, y) in enumerate(zip(xs, ys)):
    plt.scatter(x, y, color="green", marker="o")
    plt.annotate("$\eta=$"+str(etas[index]), xy=(x, y))
x = np.arange(-1, 15, 0.1)
y = [f(val) for val in x]
plt.plot(x, y, color="blue", alpha=0.5)
plt.scatter(0, 0, color="red", marker="*")
plt.show()
# -
# #### Teste andere Funktion
# +
def f(x):
    """Quartic test function: f(x) = x^4 - 3x^3 + 2."""
    return x**3 * (x - 3) + 2

def f_prime(x):
    """Derivative of f: f'(x) = 4x^3 - 9x^2."""
    return x**2 * (4 * x - 9)
# +
# Global minimum of f(x) = x^4 - 3x^3 + 2 is at x = 9/4, since
# f'(x) = x^2 (4x - 9) = 0 there (the original comment wrongly said
# (1, 1)). The descent will start from x = -1.
minimum = 9/4
start = -1
print("Minimum: ", f(minimum))
print("Starte bei: ", f(start))
# +
x = np.arange(-2, 5, 0.1)
y = [f(val) for val in x]
plt.scatter(minimum, f(minimum), color="red", marker="*", s=200)
plt.scatter(start, f(start), color="green", marker="o", s=200)
plt.plot(x, y, color="blue")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# -
def lr(eta, it, it_max, decay=True):
    """Learning-rate schedule.

    With decay=True the rate shrinks linearly from eta (at it=0) down
    to 0 (at it=it_max); with decay=False the rate stays constant.
    """
    if not decay:
        return eta
    return eta * ((it_max - it) / it_max)
# +
import random
from collections import Counter

# Gradient descent on the quartic f with a linearly decaying learning
# rate, starting from a random integer point in [-10, 10].
eta = 1e-3
start = random.randint(-10, 10)
x = start
y = f(x)
# Typo fix: the message previously read "Satring x = ".
print("Starting x = ", x)
stop_conv = 1e-6
stop_div = 1e+6
stop_iter = 1e6
it = 0
downhill_points = []
while y > stop_conv and y < stop_div and it < stop_iter:
    x = x - lr(eta, it, stop_iter, True) * f_prime(x)
    it += 1
    y = f(x)
    # Record every 1000th iterate to visualise the trajectory below.
    if it % 1000 == 0:
        downhill_points.append(x)
print("Solution: ", y)
print("X = ", x)
print("Found ", len(Counter(downhill_points)), " better points.")
x = np.arange(-10, 10, 0.1)
y = [f(val) for val in x]
for index, point in enumerate(downhill_points):
    plt.scatter(point, f(point), color="green", marker="o", s=200 / (index+1))
plt.scatter(minimum, f(minimum), color="red", marker="*", s=200)
plt.scatter(start, f(start), color="green", marker="o", s=200)
plt.plot(x, y, color="blue")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
| Chapter12_NN/GradientDescent/GradientDescent_part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 9.3.2 Reading Data from a Text File
# Read accounts.txt (whitespace-separated account, name, balance per
# line) and print it as an aligned three-column table.
with open('accounts.txt', mode='r') as accounts:
    print(f'{"Account":<10}{"Name":<10}{"Balance":>10}')
    for record in accounts:
        account, name, balance = record.split()
        print(f'{account:<10}{name:<10}{balance:>10}')
# ### File Method `readlines`
# ### Seeking to a Specific File Position
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| examples/ch09/snippets_ipynb/09_03.02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LINEAR REGRESSION
# ## Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load dataset
# Load the (x, y) training data and prepend a column of ones to x so
# the first model parameter acts as the intercept.
data = np.loadtxt("data.txt",delimiter=",")
x = data[:,0]
y = data[:,1].reshape(x.size,1)
x = np.vstack((np.ones((x.size, )), x)).T
print(x.shape)
print(y.shape)
# ## Plot graph to show relationship between x and y
plt.scatter(x[:, 1], y)
plt.show()
# We know that in the linear regression we make predictions by plotting a straight line that approximately fits our data set. It also makes the use of the cost function which will determine the error between the predicting value and the actual value. We want this cost function which is the representation of the error to be minimum. For the minimum value of the cost function we need to use a gradient descent algorithm. And we're going to run this in a loop which will in every iteration decrease the cost values and reach to our local minimum.
# 
def model(X, Y, learning_rate, iteration):
    """Fit ordinary least squares by batch gradient descent.

    Args:
        X: ndarray of shape (m, n) — design matrix (include a column of
           ones for an intercept term).
        Y: ndarray of shape (m, 1) — target values.
        learning_rate: step size for each gradient update.
        iteration: number of gradient-descent iterations to run.

    Returns:
        theta: ndarray of shape (n, 1), the fitted parameters.
        cost_list: list with the cost value at every iteration.
    """
    m = Y.size
    # Generalized from a hard-coded (2, 1) so the model works for any
    # number of features, not just [intercept, x].
    theta = np.zeros((X.shape[1], 1))
    cost_list = []
    for _ in range(iteration):
        y_pred = np.dot(X, theta)
        # Half mean squared error; the 1/2 cancels when differentiating.
        cost = (1/(2*m))*np.sum(np.square(y_pred - Y))
        d_theta = (1/m)*np.dot(X.T, y_pred - Y)
        theta = theta - learning_rate*d_theta
        cost_list.append(cost)
    return theta, cost_list
# Train for 100 iterations; the tiny learning rate compensates for the
# large (unscaled) square-footage feature values.
iteration = 100
learning_rate = 0.00000005
theta, cost_list = model(x, y, learning_rate = learning_rate,
                         iteration = iteration)
# Each row is [1 (intercept), square footage] for a new house.
new_houses = np.array([[1, 1547], [1, 1896], [1, 1934], [1,
2800], [1, 3400], [1, 5000]])
for house in new_houses :
    print("Our model predicts the price of house with",
    house[1], "sq. ft. area as : $", round(np.dot(house, theta)[0],
    2))
# Plot the training curve: cost on the y-axis against iteration number
# on the x-axis. (The original call had the arguments swapped, putting
# cost on the x-axis.)
rng = np.arange(0, iteration)
plt.plot(rng, cost_list)
plt.show()
| Datascience_With_Python/Machine Learning/Tutorials/Mathematics behind Machine Learning models/Machine Learning Algorithms/Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welter issue #2
# ## Spot Check the Pipeline Spectra
# ### Notebook 01
#
# <NAME>
# Wednesday, November 25, 2015
#
# We will make plots of the pipeline spectra.
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
% matplotlib inline
% config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_context('notebook')
# ## Raw standard star spectrum: `20151117/SDCH_20151117_0199.spec.fits`
# Read in the `.fits` files. The `.spec.` are the 1D spectra.
hdu_raw = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec.fits')
hdu_raw.info()
# Header/Data Unit 0 is the $N_{pix} \times N_{orders}$ **spectrum**.
# Header/Data Unit 1 is the $N_{pix} \times N_{orders}$ **wavelength solution**.
# The **metadata** about the observations are saved in the header of the spectrum.
# +
#np.array(list(hdu[0].header.keys()))[0:40]
# -
# Summarize the key observation metadata from the raw spectrum's header.
hdr = hdu_raw[0].header
template = ('This spectrum is of the source {OBJECT}.\n'
            ' The object type is listed as: "{OBJTYPE}".\n'
            'The spectra were acquired at {ACQTIME1} UTC. \n'
            ' The units of the raw spectrum are {UNITS}. \n'
            'The exposure time was {EXPTIME} seconds. \n'
            ' The airmass was {AMSTART}.')
metadata = {key: hdr[key] for key in
            ('OBJECT', 'OBJTYPE', 'ACQTIME1', 'UNITS', 'EXPTIME', 'AMSTART')}
formatted_string = template.format(**metadata)
print(formatted_string)
# ## Single order plot.
# We'll pick a single order and make a plot.
o=10
plt.plot(hdu_raw[1].data[o, :], hdu_raw[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Raw signal (ADU)");
# ...what we really want is the `.spec_flattened.` file.
# ## Flattened A0V Star: 20151117/SDCH_20151117_0199.spec_flattened.fits
hdu_f = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec_flattened.fits')
hdu_f.info()
# The header info for the flattened file is the same as the header for the raw file.
# +
#hdu_f['SPEC_FLATTENED'].header[0:10]
# -
o=10
plt.plot(hdu_raw[1].data[o, :], hdu_f[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Normalized signal");
plt.title('{OBJECT} flattened spectrum'.format(OBJECT=hdr['OBJECT']));
# ## Science data file: `SDCH_20151117_0205.spec.fits`
hdu_tar = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0205.spec.fits')
hdu_tar.info()
# Summarize the key observation metadata from the science target's header.
hdr = hdu_tar[0].header
template = ('This spectrum is of the source {OBJECT}.\n'
            ' The object type is listed as: "{OBJTYPE}".\n'
            'The spectra were acquired at {ACQTIME1} UTC. \n'
            ' The units of the raw spectrum are {UNITS}. \n'
            'The exposure time was {EXPTIME} seconds. \n'
            ' The airmass was {AMSTART}.')
metadata = {key: hdr[key] for key in
            ('OBJECT', 'OBJTYPE', 'ACQTIME1', 'UNITS', 'EXPTIME', 'AMSTART')}
formatted_string = template.format(**metadata)
print(formatted_string)
o=10
plt.plot(hdu_tar[1].data[o, :], hdu_tar[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Raw signal (ADU)");
plt.title('{OBJECT} raw spectrum'.format(OBJECT=hdr['OBJECT']));
# The next steps are:
#
# - Estimate and divide out the spectral shape attributable to instrument response.
# - Estimate and divide out the telluric transmission.
#
# See [the next notebook](welter_issue001-Spectral_Response_Functions_01.ipynb) for dealing these next steps.
# ### The end.
| notebooks/welter_issue002-01_Spot_Check_the_Pipeline_Spectra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# # 5.7 Priority Queues
#
# A priority queue is a container data structure that manages a set of
# records with totally-ordered keys (for example, a numeric weight
# value) to provide quick access to the record with the smallest or largest
# key in the set.
# ### list – Maintaining a Manually Sorted Queue
#
# * insert -> O(n)
# * Maintaining the order by appending to the list and re-sorting -> O(n log n)
#
# +
# Seed the queue with (priority, task) records.
q = [(2, 'code'), (1, 'eat'), (3, 'sleep')]
# NOTE: Remember to re-sort every time
# a new element is inserted, or use
# bisect.insort().
q.sort()  # ascending, so the smallest key sits at the front of the list
while q:
    next_item = q.pop(0)
    print(next_item)
# -
# ### heapq – List-Based Binary Heaps
#
# This is a binary heap implementation usually backed by a plain list ,
# and it supports insertion and extraction of the smallest element in
# O(log n)
# +
import heapq
# Push the (priority, task) records in the same order as before; heapq
# maintains the heap invariant on the backing list after every push.
q = []
for task in [(2, 'code'), (1, 'eat'), (3, 'sleep')]:
    heapq.heappush(q, task)
# Pop always yields the smallest remaining key.
while q:
    next_item = heapq.heappop(q)
    print(next_item)
# Result:
# (1, 'eat')
# (2, 'code')
# (3, 'sleep')
# -
# ### queue.PriorityQueue – Beautiful Priority Queues
#
# This priority queue implementation uses heapq internally and shares
# the same time and space complexities
#
# +
from queue import PriorityQueue
# PriorityQueue wraps heapq with locking; same insertion order as before.
q = PriorityQueue()
for task in ((2, 'code'), (1, 'eat'), (3, 'sleep')):
    q.put(task)
# get() always returns the entry with the smallest key.
while not q.empty():
    next_item = q.get()
    print(next_item)
# Result:
# (1, 'eat')
# (2, 'code')
# (3, 'sleep')
| 2022/Python-Tricks-Book/Chapter5-Data-Structures/priority-queue.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Use pandas .read_csv() method to read in classified dataset
# index_col -> argument assigns the index to a particular column
df = pd.read_csv('all_data1all3.csv')
# Use the .head() method to display the first few rows
df
#Label encoding for "ocean_proximity" column, as that column has string values
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
label = le.fit_transform(df["label"])
df["label"] = label
df
# Import module to standardize the scale
from sklearn.preprocessing import StandardScaler
# Create instance (i.e. object) of the standard scaler
scaler = StandardScaler()
scaler.fit(df.drop('label', axis=1))
# Use scaler object to conduct a transforms
scaled_features = scaler.transform(df.drop('label',axis=1))
# Review the array of values generated from the scaled features process
scaled_features
# Creating data frame using features
df_features = pd.DataFrame(scaled_features, columns = df.columns[:-1])
df_features
# Dropping ZCR_Mag column
df_features=df_features.drop('ZCR_Mag',axis=1)
df_features
# Import module to split the data
from sklearn.model_selection import train_test_split
# Set the X and ys
X = df_features
y = df['label']
# Use the train_test_split() method to split the data into respective sets
# test_size -> argument refers to the size of the test subset
# random_state -> argument ensures guarantee that the output of Run
# 1 will be equal to the output of Run 2, i.e. your split will be always the same
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# Import module for KNN
from sklearn.neighbors import KNeighborsClassifier
# Create KNN instance
# n_neighbors -> argument identifies the amount of neighbors used to ID classification
knn = KNeighborsClassifier(n_neighbors=8)
# Fit (i.e. traing) the model
knn.fit(X_train, y_train)
# Use the .predict() method to make predictions from the X_test subset
pred = knn.predict(X_test)
# Review the predictions
pred
# Import classification report and confusion matrix to evaluate predictions
from sklearn.metrics import classification_report, confusion_matrix
# Print out classification report and confusion matrix
print(classification_report(y_test, pred))
# Print out confusion matrix
cmat = confusion_matrix(y_test, pred)
#print(cmat)
# NOTE(review): the cell labels below assume a binary (2x2) confusion
# matrix with class 0 = negative and class 1 = positive; if `label` was
# encoded with more than two classes, these names do not apply.
# Fix: the first line was mislabelled 'TP' — cmat[0,0] is the True
# Negative count.
print('TN - True Negative {}'.format(cmat[0,0]))
print('FP - False Positive {}'.format(cmat[0,1]))
print('FN - False Negative {}'.format(cmat[1,0]))
print('TP - True Positive {}'.format(cmat[1,1]))
# Accuracy = (TN + TP) / total; misclassification = (FP + FN) / total.
print('Accuracy Rate: {}'.format(np.divide(np.sum([cmat[0,0],cmat[1,1]]),np.sum(cmat))))
print('Misclassification Rate: {}'.format(np.divide(np.sum([cmat[0,1],cmat[1,0]]),np.sum(cmat))))
# Import module to split the data
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
# Set the X and ys
X = df_features
y = df['label']
# Hold out 20% for validation; random_state pins the split for repeatability.
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=101)
# Candidate models to compare (only KNN here, but the loop generalizes).
models = []
models.append(('KNN', KNeighborsClassifier()))
results = []
names = []
# Evaluate each candidate with stratified 10-fold cross-validation on the
# training portion; stratification keeps class proportions per fold.
for name, model in models:
    kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    # Report mean accuracy and its standard deviation across folds.
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
| machinelearning/fall/KNN-Model-for-Fall-Data-Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import time
from xgboost import XGBClassifier
from sklearn.model_selection import KFold
from ml_metrics import rmsle
# suppres some notifications
pd.options.mode.chained_assignment = None
# +
train = pd.read_hdf('../input/diabetic_train.h5')
test = pd.read_hdf('../input/diabetic_test.h5')
test['readmitted'] = -1
df = pd.concat([train, test])
# -
# # Feature Engineering
# +
def present_with_higher_id(x):
    """Row-wise flag: 1 if this row's patient appears again with a higher `id`.

    Relies on the module-level `df` (train + test concatenated) and
    `global_patients_frequent` (a value_counts Series; the `in` test hits
    its index of patient numbers seen more than once).

    NOTE(review): for every frequent patient this rescans the whole frame
    — O(n) per row, O(n^2) over `df.apply` — a precomputed
    groupby('patient_nbr')['id'].max() lookup table would be much cheaper.
    """
    if x['patient_nbr'] in global_patients_frequent:
        # is frequent
        max_id = df[ df['patient_nbr'] == x['patient_nbr'] ]['id'].max()
        if max_id > x['id']:
            return 1
        else:
            return 0
    return 0
drug_keys_important = [ 'metformin' , 'repaglinide', 'nateglinide', 'glimepiride',
'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin']
top_specialities = ['notSet', 'InternalMedicine', 'Emergency/Trauma',
'Family/GeneralPractice', 'Cardiology', 'Surgery-General']
# IDs of the patients that exists more then once in the train set
train_patients_frequent = train['patient_nbr'].value_counts()
train_patients_frequent = train_patients_frequent[ train_patients_frequent > 1 ]
# IDs of the patients that exists more then once in the test set
test_patients_frequent = test['patient_nbr'].value_counts()
test_patients_frequent = test_patients_frequent[ test_patients_frequent > 1 ]
# IDs of the patients that exists more then once in the whole dataset
global_patients_frequent = df['patient_nbr'].value_counts()
global_patients_frequent = global_patients_frequent[ global_patients_frequent > 1 ]
# dataframe of the frequent IDs
gpdf = global_patients_frequent.to_frame()
# IDs of the patients that are frequent in the train or test sets (combined)
full_frequent = (train_patients_frequent + test_patients_frequent)
#############################################
df['race'] = df['race'].map(lambda x: 'notSet' if x == '?' else x)
df['race_cat'], labels = pd.factorize(df['race'])
df['gender_cat'] = df['gender'].map(lambda x: 1 if x == 'Male' else 0)
df['age'] = pd.to_numeric(df['age'].map(lambda x: x.split('-')[1].split(')')[0]))
df['weight'] = pd.to_numeric(df['weight'].map(lambda x: 0 if x == '?' else x.split('-')[1].split(')')[0]))
df['payer_code_cat'], labels = pd.factorize(df['payer_code'])
df['readmission_not_possible'] = df['discharge_disposition_id'].map(lambda x: 1 if x == 11 else 0)
df['medical_specialty'] = df['medical_specialty'].map(lambda x: 'notSet' if x == '?' else x)
df['med_top'] = df['medical_specialty'].copy()
df.loc[ ~df['med_top'].isin(top_specialities), 'med_top' ] = 'Other'
df_cat = pd.get_dummies(df[ ['med_top'] ], drop_first = True)
df = pd.concat([df, df_cat], axis = 1)
df['medical_specialty_cat'], labels = pd.factorize(df['medical_specialty'])
df['diag_1_cat'], labels = pd.factorize(df['diag_1'])
df['diag_2_cat'], labels = pd.factorize(df['diag_2'])
df['diag_3_cat'], labels = pd.factorize(df['diag_3'])
df['max_glu_serum_cat'], labels = pd.factorize(df['max_glu_serum'])
df['A1Cresult_cat'], labels = pd.factorize(df['A1Cresult'])
for col in drug_keys_important:
colname = str(col) + 'temp'
df[colname] = df[col].apply(lambda x: 0 if (x == 0 or x == 1) else 1)
df['important_drugs_change'] = 0
for col in drug_keys_important:
colname = str(col) + 'temp'
df['important_drugs_change'] = df['important_drugs_change'] + df[colname]
del df[colname]
df['important_drugs_count'] = 0
for col in drug_keys_important:
df['important_drugs_count'] = df['important_drugs_count'] + df[col].map(lambda x: 0 if x == 0 else 1)
df['change'] = pd.to_numeric(df['change'].map(lambda x: 1 if x else 0))
df['diabetesMed'] = pd.to_numeric(df['diabetesMed'].map(lambda x: 1 if x else 0))
df['sum_visits'] = df['number_outpatient'] + df['number_emergency'] + df['number_inpatient']
df['is_frequent'] = df['patient_nbr'].map(lambda x: 1 if x in (full_frequent) else 0)
df['is_global_frequent'] = df['patient_nbr'].map(lambda x: 1 if x in (global_patients_frequent) else 0)
df['global_frequency'] = df['patient_nbr'].map(lambda x: gpdf.loc[x].at['patient_nbr'] if x in (global_patients_frequent) else 1)
df['present_with_higher_id'] = df.apply(present_with_higher_id, axis=1)
#
# cleanup
#
columns_to_remove = [ 'race', 'gender', 'payer_code', 'med_top',
'medical_specialty', 'diag_1', 'diag_2', 'diag_3', 'max_glu_serum', 'A1Cresult',
'payer_code_cat', 'nateglinide', 'glimepiride', 'pioglitazone', 'rosiglitazone',
'chlorpropamide', 'acetohexamide', 'tolbutamide', 'acarbose', 'miglitol', 'troglitazone',
'tolazamide', 'examide', 'citoglipton', 'glyburide-metformin', 'glipizide-metformin',
'glimepiride-pioglitazone', 'metformin-rosiglitazone', 'metformin-pioglitazone',
'metformin' , 'repaglinide', 'nateglinide', 'glimepiride',
'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone',
]
for col_to_remove in columns_to_remove:
if col_to_remove in df: del df[col_to_remove]
# -
train = df[ df.readmitted != -1 ]
test = df[ df.readmitted == -1 ]
train.shape, test.shape
# # Prepare features
# +
black_list = ['readmitted', 'id', 'encounter_id', 'patient_nbr']
bool_features = train.select_dtypes(include=[np.bool]).columns.values.tolist()
cat_feats = [feat for feat in train.columns if 'cat' in feat]
numeric_features = train.select_dtypes(include=[np.float64, np.int64, np.int16, np.int8, np.uint8]).columns.values
numeric_features = [feat for feat in numeric_features if feat not in (black_list + cat_feats) ]
feats = bool_features + numeric_features + cat_feats
feats = [feat for feat in feats if feat not in (black_list)]
X = train[ feats ].values
y = train[ 'readmitted' ].values
print("Selected features: ", feats)
# -
# ### Cross validation
# +
cv = KFold(n_splits=3, shuffle=True, random_state=2018)
xgb_params = {
'n_jobs': 8,
'max_depth': 4,
'n_estimators': 600,
'learning_rate': 0.035,
'random_state': 2019,
}
scores = {
'x1_RMSLE' : []
}
def perform_scoring(m, y_true, y_pred):
    """Compute RMSLE for one CV fold and record it under the model key *m*.

    Prints the fold score and appends it to the module-level `scores` dict
    under the key ``m + '_RMSLE'``.

    Fix: score against the `y_true` argument instead of reaching for the
    loop-scope globals ``y``/``test_idx``. The caller already passes
    ``y[test_idx]``, so results are unchanged, but the function no longer
    silently ignores its own parameter (and now works outside the loop).
    """
    score = rmsle(y_true, y_pred)
    print(m + " RMSLE: ", score)
    scores[m + '_RMSLE'].append(score)
fold = 0
for train_idx, test_idx in cv.split(X):
fold += 1
print("fold: ", fold)
# first model
model = XGBClassifier(**xgb_params)
model.fit(X[train_idx], y[train_idx])
y_pred = model.predict(X[test_idx])
perform_scoring('x1', y[test_idx], y_pred)
for s in scores:
print(s, np.mean(scores[s]), np.std(scores[s]))
# -
# #### The result of the cross validation should be: x1_RMSLE 1.26014412708368 0.024035814289689532
# # Training of the final model
# +
#
# take the full dataset
#
XX = train[ feats ].values
yy = train[ 'readmitted' ].values
Xt = test[feats].values
#
# prepare the final model
#
model = XGBClassifier(**xgb_params)
# %time model.fit(XX, yy)
y_pred = model.predict(Xt)
# -
test['readmitted'] = y_pred
test['readmitted'].value_counts()
# additional adjustment
test['readmitted'] = test.apply(lambda x: 100 if x['present_with_higher_id'] == 1 else x['readmitted'], axis=1)
test['readmitted'] = test.apply(lambda x: 0 if x['readmission_not_possible'] == 1 else x['readmitted'], axis=1)
test['readmitted'].value_counts()
# # Save to the submission file
test[ ['id', 'readmitted'] ].to_csv('../output/predictions.csv', index=False)
| Diabetes readmission predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### setup
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '../src/')
from video_tools import *
from image_tools import *
from notebook_tools import *
# %load_ext autoreload
# %autoreload 1
# %aimport video_tools
# %aimport image_tools
# %aimport notebook_tools
# normal size plots
plt.rcParams['figure.dpi'] = 150
plt.rcParams['figure.figsize'] = (6,6)
video = '../test/Game_20210408T225110.avi'
# video = '../vid_out/Game_20210407T150943.avi'
frame = grab_frame(200, video)
mask = np.load('./manual_mask.npy')
# ### showcasing filters
# +
# Filter callbacks for the playback/display pipeline below. The helpers
# they call (apply_mask, canny_edge, find_keypoints, ...) presumably come
# from the image_tools/video_tools star-imports above — confirm in ../src/.
def identity(img):
    """Return the frame unchanged (baseline filter for comparisons)."""
    return img
def mask_wrapper(callback):
    """Wrap *callback* so it runs on the frame with the manual mask applied."""
    return lambda img: callback(apply_mask(img, mask))
def blur_mask_wrapper(callback):
    """Wrap *callback* so it runs on the frame masked with a blurred edge."""
    return lambda img: callback(apply_mask_blur(img, mask))
def merge_sobel_canny(img):
    """Combine the Sobel and Canny edge maps of the frame into one image."""
    return merge(sobel_edge(img), canny_edge(img))
def find_and_draw_keypoints(img):
    """Detect keypoints and draw them on a copy of the frame."""
    pts = find_keypoints(img)
    return draw_pts(img.copy(), pts)
def mask_around_keypoints(img):
    """Mask patches around the detected keypoints, then draw the points."""
    pts = find_keypoints(img)
    masked_around = mask_patches_around_pts(img, pts)
    return draw_pts(masked_around, pts)
def keypoints_on_just_y(img):
    """Detect and draw keypoints using only the luma (Y) channel."""
    y = split_channels(to_ycrcb(img))[0]
    rgb = to_3channels(y)
    pts = find_keypoints(rgb)
    return draw_pts(rgb, pts)
def white_keypoints(img):
    """Render the detected keypoints as points on a black canvas."""
    pts = find_keypoints(img)
    return pts_on_black(pts, img.shape)
def white_patches(img):
    """Render patches around the detected keypoints on a black canvas."""
    pts = find_keypoints(img)
    return patches_around_pts_on_black(pts, img.shape)
# +
filters = [
identity,
reduce_colors,
canny_edge,
sobel_edge,
lambda img: merge(canny_edge(img), sobel_edge(img)),
lambda img: gradients(img, axis=0),
lambda img: threshold(gradients(img, axis=0), 10),
lambda img: gradients(img, axis=1),
lambda img: threshold(gradients(img, axis=1), 10),
lambda img: merge(gradients(img, axis=0), gradients(img, axis=1)),
find_and_draw_keypoints,
mask_around_keypoints,
white_keypoints,
white_patches,
lambda img: threshold(gradients(img, axis=1), 10) & threshold(gradients(img, axis=0), 10),
]
filters = [resize_wrapper(blur_mask_wrapper(f)) for f in filters]
filters.insert(0, identity) # plain first example, not resized or masked
# -
imshow(*[ f(frame) for f in filters ])
# +
# for f in filters:
# filter_playback(video, f)
# filter_playback(video, resize_wrapper(mask_wrapper(white_keypoints)))
filter_playback(video, resize_wrapper(mask_wrapper(find_and_draw_keypoints)))
# -
# ### showcasing many images
# +
plt.rcParams['figure.figsize'] = (20,10)
imshow(*[
apply_mask_blur(downscale_max_to_min_res(frame), mask)
for frame in range_of_frames(video, range(200,300,5))
])
# +
frames = [
apply_mask_blur(downscale_max_to_min_res(frame), mask)
for frame in range_of_frames(video, range(1000, 1020))
]
plt.rcParams['figure.figsize'] = (60,5)
plot_a_lot(frames, imshow_bgr2rgb, nrows=1)
# -
plt.rcParams['figure.figsize'] = (4,4) # back to normal for others
| notebooks/filters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("Python Spark regression example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# -
import numpy as np
# +
from scipy.stats import norm
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, DoubleType
def rnorm(n,mean,sd):
    """
    Spark analogue of R's rnorm(n, mean, sd): an RDD of n normal draws.

    The samples are generated on the driver via scipy.stats.norm.rvs and
    then parallelized; `spark` is the module-level SparkSession.
    """
    return spark.sparkContext.parallelize(norm.rvs(loc=mean,scale=sd,size=n))
def dnorm(x,mean,sd, log=False):
    """
    Spark analogue of R's dnorm(x, mean, sd, log=FALSE).

    `x` is expected to be an RDD of numbers. The densities are computed on
    the driver — x.collect() pulls the whole RDD into driver memory, so
    this only suits small RDDs — and redistributed as a new RDD. With
    log=True the natural-log densities are returned, as in R.
    """
    if log:
        y = np.log(norm.pdf(x=x.collect(),loc=mean,scale=sd))
        return spark.sparkContext.parallelize(y)
    else:
        y = norm.pdf(x=x.collect(),loc=mean,scale=sd)
        return spark.sparkContext.parallelize(y)
def runif(n,min=0, max=1):
    """
    Spark analogue of R's runif(n, min=0, max=1): an RDD of n draws from
    Uniform(min, max), generated on the driver with numpy.

    Note: the parameter names shadow the built-ins min/max inside this
    function body (harmless here, but worth renaming if extended).
    """
    return spark.sparkContext.parallelize(np.random.uniform(min,max,size=n))
def dnorm_np(x, mean, sd, log=False):
    """
    Plain-NumPy analogue of R's dnorm(x, mean, sd, log=FALSE): the normal
    density at x — or its natural log when log=True — with no Spark involved.
    """
    density = norm.pdf(x=x, loc=mean, scale=sd)
    if log:
        return np.log(density)
    return density
# -
import pyspark.sql.functions as F
rnorm(5,10,1).collect()
rnorm(5,10,1).sum()
s2 = 1
t2 = 10
mu = 5
n = 5
y = rnorm(n,10,1)
y.collect()
y.mean()
# mean of the normal posterior
mu_n = (y.mean()*n/s2 + mu/float(t2))/(n/float(s2)+1/float(t2))
mu_n
# +
# variance of the normal posterior
# t2.n<-1/(n/s2+1/t2)
t2_n = 1.0/(n/float(s2)+1.0/t2)
t2_n
# +
# defining the data
# y<-c(9.37, 10.18, 9.16, 11.60, 10.33)
y = spark.sparkContext.parallelize([9.37, 10.18, 9.16, 11.60, 10.33])
# -
type(y.collect())
mu_n = (y.mean()*n/s2 + mu/float(t2))/(n/float(s2)+1/float(t2))
mu_n
# +
####metropolis part####
##S = total num of simulations
# theta<-0 ; delta<-2 ; S<-10000 ; THETA<-NULL ; set.seed(1)
theta = 0
delta = 2
S = 10000
theta_v = []
# -
theta_star = norm.rvs(theta,np.sqrt(delta),1)
theta_star
dnorm(y,theta_star,np.sqrt(s2),log=True).sum()
dnorm(theta_star.rdd,mu,np.sqrt(t2),log=True).sum()
# Metropolis random-walk sampler for the posterior of the normal mean.
for s in range(S):
    # Propose theta* ~ Normal(theta, sqrt(delta)); norm.rvs returns a
    # length-1 ndarray.
    theta_star = norm.rvs(theta,np.sqrt(delta),1)
    # Log acceptance ratio:
    #   [log p(y|theta*) + log p(theta*)] - [log p(y|theta) + log p(theta)]
    # NOTE(review): `theta_star.rdd` and `theta.rdd` look wrong — theta is
    # initialised to the int 0 and theta_star is a NumPy array, and neither
    # has an `.rdd` attribute; dnorm() also expects an RDD argument here.
    # Confirm this cell runs as written — dnorm_np on plain values was
    # likely intended for the prior terms.
    logr = (dnorm(y,theta_star,np.sqrt(s2),log=True).sum() +\
        dnorm(theta_star.rdd,mu,np.sqrt(t2),log=True).sum())- \
        (dnorm(y,theta,np.sqrt(s2),log=True).sum() + \
        dnorm([theta.rdd],mu,np.sqrt(t2),log=True).sum())
    #print(logr)
    # Accept the proposal with probability min(1, r).
    # NOTE(review): runif(1) returns an RDD, so np.log(runif(1)) will not
    # yield a comparable scalar — probably needs a plain uniform draw.
    if np.log(runif(1))<logr:
        theta = theta_star
    #print(theta)
    # Record the current state (accepted or not) in the trace.
    theta_v.append(theta)
theta_v[1:10]
# +
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.plot(theta_v,'b-.')
plt.subplot(1, 2, 2)
#bins = np.arange(0, S, 10)
plt.hist(theta_v, density=True,bins='auto')
x = np.linspace(min(theta_v),max(theta_v),100)
y = norm.pdf(x,mu_n,np.sqrt(t2_n))
plt.plot(x,y,'y-.')
plt.xlim(right=12) # adjust the right leaving left unchanged
plt.xlim(left=8) # adjust the left leaving right unchanged
plt.show()
# -
| doc/code/mcmc/MetropolisPySpark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['MKL_THREADING_LAYER'] = 'GNU'
import theano
import theano.tensor as T
# -
x1 = T.dscalar('x1')
y1 = T.dscalar('y1')
type(x1), type(y1)
z1 = x1 + y1
u1 = T.exp(z1)
from IPython.display import SVG
SVG(theano.printing.pydotprint(z1, return_image=True, format='svg'))
SVG(theano.printing.pydotprint(u1, return_image=True, format='svg'))
# +
X = T.dmatrix('X')
w = T.dvector('w')
b = T.dvector('b')
y = T.dot(X, w) + b
f1 = theano.function([X, w, b], y)
# -
iris = sns.load_dataset("iris")
f1(iris.iloc[:,:-1].values, [1, 2, 1, 2], iris.iloc[:, 1])
s1 = 1 / (1 + T.exp(-x1))
logistic = theano.function([x1], s1)
logistic(1)
SVG(theano.printing.pydotprint(s1, return_image=True, format='svg'))
# ### theano.In으로 default값 설정
x1, y1 = T.dscalars('x1', 'y1')
z1 = x1 + y1
f1 = theano.function([theano.In(x1, value=1), theano.In(y1, value=2)], z1)
f1()
# update
#
# 가중치 업데이트를 할 때 사용.
#
# 그래프로 사용이 불가하기 때문.
x1 = T.dscalar('x1')
y1 = T.dscalar('y1')
# +
from theano.tensor.shared_randomstreams import RandomStreams
srng = RandomStreams(0)
rv_u = srng.uniform()
rv_n = srng.normal((2,))
f_rv_u = theano.function([], rv_u)
g_rv_n = theano.function([], rv_n, no_default_updates=True)
w1 = theano.shared(0.0, name="w1")
update = theano.function([x1], y1, givens=[(y1, w1)], updates=[(w1, w1 + x1)])
# -
SVG(theano.printing.pydotprint(update, return_image=True, format='svg'))
w1.set_value(f_rv_u())
w1.get_value()
# +
# %matplotlib inline
from sklearn.datasets import load_iris
iris = load_iris()
idx = np.in1d(iris.target, [0, 2])
X_data = iris.data[idx, 0:2]
y_data = iris.target[idx] - 1
plt.figure(figsize=(12, 8), dpi=60)
plt.scatter(X_data[:, 0], X_data[:, 1], c=y_data, s=100, edgecolor='k')
plt.show()
# +
X = T.dmatrix('X')
y = T.dvector('y')
np.random.seed(0)
# w, b는 업데이트 해야되서 메모리값으로 넣어줌, +초깃값
w = theano.shared(0.001 * np.random.randn(2), name="w")
b = theano.shared(0.001 * np.random.randn(1)[0], name="b")
a = T.tanh(T.dot(X, w) + b) # y값 1,-1을 받는 하이퍼탄젠트
cost = T.sum(T.maximum(0, -y * a)) # 퍼셉트론 에러
# -
gw, gb = T.grad(cost, [w, b]) # 미분
eta = 0.0000001 # step size / mu 로 쓰기도 함
gradient = theano.function([X, y], cost, updates=[(w, w - eta * gw), (b, b - eta * gb)])
SVG(theano.printing.pydotprint(gradient, return_image=True, format='svg'))
for i in range(100):
c = gradient(X_data, y_data)
if i % 10 == 0:
print(c)
w.get_value()
b.get_value()
y_pred = T.sgn(a)
y_pred_func = theano.function([X], y_pred)
y_pred = y_pred_func(X_data)
y_pred
# +
from sklearn.metrics import confusion_matrix
confusion_matrix(y_data, y_pred)
| Past/DSS/Neural_Network/01.Theano.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Ray-Pi
#
# A Monte Carlo estimate of Pi in the spirit of Spark-Pi, using Ray with literate programming in Jupyter
# +
#pip install matplotlib
# -
# Ray library comes pre-installed on this notebook image:
import os
import random
import ray
import ray.util
from ray.util.client import ray as rayclient
# Connect to the Ray Server,
# Unless we are already connected
# +
headhost = os.environ['RAY_CLUSTER']
if not rayclient.is_connected():
ray.util.connect('{ray_head}:10001'.format(ray_head=headhost))
# -
# One easy way to estimate the value of pi is to generate some random points on the "unit square", or `[-1,1] X [-1,1]`, and then count the number of those points that landed inside the corresponding unit circle.
# In the illustration below, the points that landed in our circle are red:
#
# 
# The formula for our estimate is simply 4 times the number of points inside the unit circle, divided by the total number of points we created:
#
# 
# In Spark that computation typically looks something like this:
#
# ```python
# c = sc.parallelize(range(n*k), k) \
# .map(lambda _: (random.uniform(-1,1), random.uniform(-1,1))) \
# .filter(lambda p: p[0]*p[0] + p[1]*p[1] <= 1) \
# .count()
# pi = 4 * c / (n*k)
# ```
#
# You can see that we first generate some seed data and push it over the network to our Spark cluster,
# as the starting point of our computation.
#
# The core of our Ray-Pi implementation can be written in a single, equally short function.
# In Ray, we can supply our random points using a python generator, so that we do not need to use `ray.put` across the network.
# Instead we only send individual integer counts back over the network!
@ray.remote
def count_in_circle(n):
    """Ray task: sample n points uniformly on the square [-1,1] x [-1,1]
    and count how many land inside the unit circle (pi ~= 4 * count / n).

    Each point consumes two uniform draws (x, then y), matching the
    original generator-based implementation draw for draw.
    """
    inside = 0
    for _ in range(n):
        px = random.uniform(-1, 1)
        py = random.uniform(-1, 1)
        if px * px + py * py <= 1:
            inside += 1
    return inside
# Invoking our ray function returns a Ray compute node
c = count_in_circle.remote(1000)
c
# To estimate pi, we `ray.get` our count of points, and apply the formula above:
pi = 4 * ray.get(c) / 1000
print(f"pi= {pi}")
# We can compute our estimate at larger scale by sending replicated calls to our function to Ray
# and letting Ray schedule these replications in parallel:
# %%time
# the number of replications (or partitions):
k = 10
# the number of points to compute at each replication:
n = 1000000
x = [count_in_circle.remote(n) for _ in range(k)]
parts = ray.get(x)
c = sum(parts)
pi = 4 * c / (n * k)
print(f"pi= {pi}")
# Each partition is its own estimate of pi, and we can print these out easily:
parts = [4 * c / n for c in ray.get(x)]
parts
# We can plot our individual estimates and see how closely they agree with each other
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
n = 100000
x = [count_in_circle.remote(n) for _ in range(20)]
parts = [4 * c / n for c in ray.get(x)]
ax.bar(range(len(parts)),parts)
plt.show()
# The differences between each sample are hard to see in the plot above.
#
# To see better, we can narrow the range on our y-axis, and plot the true value of pi as a comparison:
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
n = 100000
x = [count_in_circle.remote(n) for _ in range(20)]
parts = [4 * c / n for c in ray.get(x)]
ax.bar(range(len(parts)),parts)
# zoom in on the y axis:
plt.ylim(3.12, 3.16)
# mark the true value of pi as a line:
plt.axhline(y=3.14159265, linestyle="--", color="red")
plt.show()
| source/ray-rdd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fourier Transforms in Python
#
# A fundamental skill for anyone working on signal/image related data is the ability to analyze the frequencies (and the strength of those frequencies) making up a signal. There are a few assumptions that we have to consider before taking a Fourier Transform.
#
# 1. The underlying signal is periodic.
# 2. The integral overal the entire input space (from $-\infty$ to $\infty$) is finite.
#
# If you need a primer to remind you about Fourier Transforms, the [Wikipedia](https://en.wikipedia.org/wiki/Fourier_transform) and [Math World](https://mathworld.wolfram.com/FourierTransform.html) articles are a good place to start. The Fourier Transform and Inverse Fourier Transform are defined as
#
# \begin{align}
# H(\omega) &=
# \mathcal{F}\left[h(t)\right] &=
# \int_{-\infty}^{\infty} h(t) e^{-i \omega t} dt \\
# h(t) &=
# \mathcal{F}^{-1}\left[H(\omega)\right] &=
# \frac{1}{2\pi} \int_{-\infty}^{\infty} H(\omega) e^{i \omega t} dt \\
# \end{align}
#
# respectively.
#
# Now, when it comes to numerical programming and data analysis, we do not have a *continuous* signal to analyze (for which the equations above are derived). Instead, we have a *discrete* signal for which we collect data at regular intervals. Therefore, we likewise need a *discrete* Fourier Transform (DFT), which is defined as
#
# \begin{align}
# F_n &=
# \sum_{k=0}^{N-1} f_k e^{-2 \pi i n k / N} \\
# f_k &=
# \frac{1}{N} \sum_{n=0}^{N-1} F_n e^{2 \pi i n k / N} \\
# \end{align}
#
# where $f_k$ and $F_n$ are the signals in the two different domains, respectively (such as time and frequency domains).
#
# The final piece of information that we will need is the definition of the power spectrum, which is what we will use to measure the strength of each given frequency. For the discrete transforms, the power spectrum is defined as
#
# \begin{equation}
# S = F_n^* F_n.
# \end{equation}
#
# Perhaps this will be more convenient to understand with an example. Let's dive right in.
#
# ## Imports
# +
# Python Imports
# 3rd Party Imports
import numpy as np
import pandas as pd
from scipy.signal import periodogram
from matplotlib import pyplot as plt
# -
# ## Fourier Transform Example
#
# ### Signal Creation
#
# Let's begin by creating a signal to analyze. I'll define the underlying signal as
#
# \begin{equation}
# x(t) = 5 \sin\left( 2 \pi f_1 t \right) + 7 \sin\left( 2 \pi f_2 t \right)
# \end{equation}
#
# where $f_1=2$ Hz and $f_2=5$ Hz. Again, since this is a *discrete* domain, we will also have to define the time step size which we will choose $\Delta t = 0.01$ s and we'll plot the underlying signal below.
# +
# Define the Variables
f1 = 2     # first component frequency (Hz)
f2 = 5     # second component frequency (Hz)
dt = 0.01  # sample period (s) -> 100 Hz sampling rate
t = np.arange(0, 2, dt)  # 2 seconds of samples
# Two-tone test signal: amplitudes 5 cm and 7 cm at f1 and f2.
x = 5 * np.sin(2*np.pi*f1*t) + 7 * np.sin(2*np.pi*f2*t)
# Plot the Signal
_ = plt.plot(t, x, linewidth=2)
_ = plt.xlabel('Time (s)')
_ = plt.ylabel('Position (cm)')
_ = plt.title('Underlying Signal')
# -
# Now, to make this a little more realistic, let's add in some random Gaussian noise to this signal.
# +
# Get the Random Number Generator (seeded so the notebook is reproducible)
rng = np.random.default_rng(0)
# Add zero-mean Gaussian noise with standard deviation 4 to the signal
x += 4*rng.standard_normal(x.shape)
# Plot the Noisy Signal
_ = plt.plot(t, x, linewidth=2)
_ = plt.xlabel('Time (s)')
_ = plt.ylabel('Position (cm)')
# Fix: this cell plots the noisy signal, but the title was left as
# 'Underlying Signal' from the previous cell.
_ = plt.title('Noisy Signal')
# -
# ### Signal Analysis
#
# At this point we are ready to start analyzing the signal. For this, we will use the Numpy Fast Fourier Transform (FFT) library.
# Get the Fourier Transform.  rfft is the one-sided FFT for a real-valued
# input; it returns complex coefficients for the non-negative frequencies.
xT = np.fft.rfft(x)
# Numpy provides several helper functions to parse through this data. We will use `rfftfreq` to get the frequencies of the transformed signal `xT`.
# Get the measured frequencies: one bin center (Hz) per rfft coefficient,
# given the number of samples and the sample spacing dt.
f = np.fft.rfftfreq(x.size, dt)
# Now, if you attempted to plot this signal that has been transformed, you would receive a Numpy warning. This would arise due to the complex nature of the data. Due to the definition of the Fourier transform, the outputs are going to be, in general, complex. Therefore, we need a way to represent the overall magnitude of the transform. To do that, we will compute the square root of the power spectrum.
#
# Now, the [rfft](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html) and [rfftfreq](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfftfreq.html#numpy.fft.rfftfreq) have a few nuances that we have to consider.
#
# 1. The Fourier Transform is defined over all space (positive and negative frequencies), but each of these functions only returns values in the positive frequencies (i.e., half of the possible values). Therefore, we will have to multiply all of the non-zero frequencies by 2.
# 2. The DFT as defined grows with the number of data points. Therefore, we will have to divide the transformed signal by $N$, where $N$ is the number of data points in $x$.
#
# +
# Get the Transform Magnitudes
xT[1:] *= 2 # Multiply the non-zero frequencies by 2 (one-sided spectrum carries half the energy).
magT = np.abs(xT/x.size) # Normalize by N, then take the complex magnitude.
# Plot the single-sided amplitude spectrum
_ = plt.plot(f, magT)
_ = plt.title('Signal Magnitude')
_ = plt.ylabel('Magnitude (cm)')
_ = plt.xlabel('Frequency (Hz)')
# -
# Scipy provides a convenient function that calculates the RMS Power Spectrum. Therefore, we can use this function to wrap all the steps above into a single function call. However, since this is the *RMS* Power Spectrum, we will have to multiply this by two and take the square root to get the magnitudes we seek.
# +
# Get the Power Spectrum via scipy; fs = 1/dt is the sampling rate.
f, spec = periodogram(x, 1/dt, scaling='spectrum')
# Plot the Magnitudes: sqrt(2 * power) recovers the amplitude from the
# RMS power spectrum, matching the manual FFT result above.
_ = plt.plot(f, np.sqrt(spec*2))
_ = plt.title('Signal Magnitude')
_ = plt.ylabel('Magnitude (cm)')
_ = plt.xlabel('Frequency (Hz)')
# -
# Note that the signal we originally created was of the form
#
# \begin{equation}
# x(t) = 5 \sin\left( 2 \pi f_1 t \right) + 7 \sin\left( 2 \pi f_2 t \right)
# \end{equation}
#
# where $f_1=2$ Hz and $f_2=5$ Hz. From the figure you can see that we recovered the frequencies and amplitudes that were used to create this signal. On both of the figures above, there is a peak approximately equal to 5 cm at $f=2$ Hz, and there is a peak approximately equal to 7 cm at $f=5$ Hz.
#
#
# ## Assignment
#
# Your assignment is to study the periodicity of the total number of sunspots. I have provided the data, input lines to read in the data and the lines needed to clean the data below. I downloaded this [data](http://www.sidc.be/silso/INFO/sndtotcsv.php) from the [Sunspot Index and Long-term Solar Observations Website](http://sidc.be/silso/home).
# +
# Read in the Values as a Numpy array (only YearFraction and nSpots columns)
ssDat = pd.read_csv(
    'SN_d_tot_V2.0.csv',
    sep=';',
    header=0,
    names=['Year', 'Month', 'Day', 'YearFraction', 'nSpots', 'std', 'nObs', 'Prov'],
    usecols=[3, 4],
    skiprows=6
).values
# Indicate -1 as missing data
ssN = ssDat[:, 1]
# Fix: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
ssN[ssN == -1] = np.nan
ssDat[:, 1] = ssN
# Interpolate Missing Data linearly, using only the finite samples
msk = np.isfinite(ssDat[:, 1])
ssDat[:, 1] = np.interp(ssDat[:, 0], ssDat[msk, 0], ssDat[msk, 1])
# Get the Data into the form used above
dt = np.diff(ssDat[:, 0]).mean()  # mean sample spacing, in fractions of a year
t = ssDat[:, 0]
x = ssDat[:, 1]
# Plot the Data
_ = plt.plot(t, x, linewidth=1)
_ = plt.xlabel('Year')
_ = plt.ylabel('Number of Sunspots')
_ = plt.title('Sunspot Data')
# -
# ### Plot the Magnitude of the Fourier Transform
# +
# Get the Fourier Transform of the (interpolated) sunspot counts
xT = np.fft.rfft(x)
# Get the measured frequencies (in 1/year, since t is in years)
f = np.fft.rfftfreq(x.size, dt)
# Get the Transform Magnitudes
xT[1:] *= 2 # Multiply the non-zero frequencies by 2 (one-sided spectrum).
magT = np.abs(xT/x.size) # Get the Magnitude of the scaled transform.
# Plot the first 100 frequency bins (the low-frequency region of interest)
_ = plt.plot(f[:100], magT[:100])
_ = plt.title('Sunspot Spectral Analysis')
_ = plt.ylabel('Magnitude')
_ = plt.xlabel('Frequency (Yr$^{-1}$)')
# -
# ### Plot the Signal Magnitude using Scipy
# +
# Get the Power Spectrum of the sunspot series
f, spec = periodogram(x, 1/dt, scaling='spectrum')
# Plot the Magnitudes on log-log axes; bin 0 (DC) is skipped because
# log(0) is undefined on the frequency axis.
_ = plt.loglog(f[1:], np.sqrt(spec*2)[1:])
_ = plt.title('Signal Magnitude')
_ = plt.ylabel('Magnitude')
_ = plt.xlabel('Frequency (Yr$^{-1}$)')
# -
# In the cell below, insert the fundamental period (the inverse of the frequency with the highest magnitude) for the sunspot oscillations. If you are having a difficult time determining the correct frequency, you may want to plot a smaller window of data.
11
| 07-FourierTransforms/FourierTransforms-Complete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
df = pd.read_csv('QVI_data.csv')
df.head()
# ## Set metrics for the dataset
#
# +
# Set Month ID as a 'YYYYMM' string (month zero-padded to two digits)
df['DATE'] = pd.to_datetime(df['DATE'])
df.loc[:, 'MONTH_ID'] = df['DATE'].dt.year.astype('str') + df['DATE'].dt.month.apply(lambda x: str(x).zfill(2))
# Set $ per Unit (average sale price per chip packet in the transaction)
df.loc[:, 'price_per_u'] = df['TOT_SALES'] / df['PROD_QTY']
# -
# Keep only transactions before the trial start (2019-02-01).
# NOTE: "trail" is the notebook's (misspelled) name for the trial period;
# the names are kept because later cells reference them.
trail_time = pd.to_datetime('20190201', format='%Y%m%d')
df_pre_trail = df[df['DATE'] < trail_time]
df_pre_trail
# +
# Aggregation spec: per (store, month) metrics, later averaged per store.
feat_eng ={
    'TOT_SALES' : [np.sum], # Monthly overall sales revenue by stores
    'LYLTY_CARD_NBR' : [pd.Series.nunique], # Monthly number of customers
    'TXN_ID' : [pd.Series.count], # Monthly number of transactions per customer - First step (raw count; divided by customers below)
    'PROD_QTY' : [lambda x: round(np.mean(x),2)], # Chips Qty per Transaction
    'price_per_u' : [lambda x: round(np.mean(x),1)] # Average Price per Unit
}
# Step 1: aggregate per store per month.
store_metrics = pd.DataFrame(df_pre_trail.groupby(['STORE_NBR','MONTH_ID']).agg(feat_eng))
store_metrics.reset_index(inplace=True)
store_metrics.drop('MONTH_ID',axis=1, inplace=True)
# Step 2: average the monthly values over the pre-trial months per store.
store_metrics = pd.DataFrame(store_metrics.groupby(['STORE_NBR']).agg(lambda x: round(np.mean(x),2)))
store_metrics.columns = [
    'ALL_SALES',
    'NUM_CUST',
    'NUM_TRAN_CUST',
    'QTY_TRANS',
    'AVG_PRICE_U'
]
# Monthly number of transactions per customer - Second step
store_metrics['NUM_TRAN_CUST'] = (store_metrics['NUM_TRAN_CUST'] / store_metrics['NUM_CUST']).apply(lambda x: round(x,2))
store_metrics.reset_index(inplace=True)
# Store numbers as strings so they can be used as string labels/keys below.
store_metrics['STORE_NBR'] = store_metrics['STORE_NBR'].astype(str)
store_metrics
# -
# ## Find the control store for trial stores 77, 86 and 88
# +
# Similarity between stores - Using KNN purely as a nearest-neighbour
# search: the all-zero labels are dummies, only kneighbors() output is used.
train_x = store_metrics[['ALL_SALES','NUM_CUST']]
train_y = np.zeros(store_metrics.shape[0])
knn = KNeighborsClassifier(n_neighbors=1, algorithm='ball_tree', p=6)
knn.fit(train_x, train_y)
# Two neighbours per store: column 0 is expected to be the store itself
# (distance 0), column 1 the closest other store.
distance, similar_points = knn.kneighbors(
    train_x, n_neighbors=2, return_distance=True
)
distance = distance.astype("int", copy=False)
similar_points = pd.DataFrame(similar_points)
similar_points.index = store_metrics['STORE_NBR']
# Nearest other store (column 1) for the three trial stores.
control_store = similar_points.loc[['77','86','88'], 1]
for i in range(3):
    print(f'The most similar store to Store {control_store.index.values[i]} is Store {store_metrics.iloc[control_store[i],0]}')
# -
# ## Check the trial store and control store before the trial period
# ### Store 77
# +
# Visualize the trial store (77) and its control store before the trial period
store77 = store_metrics[(store_metrics['STORE_NBR'] == '77') |\
                        (store_metrics['STORE_NBR'] == f'{store_metrics.iloc[control_store[0],0]}')]
# For total dollars of sales
store77_bar = sns.barplot(data=store77, x='STORE_NBR', y='ALL_SALES')
plt.ylim(ymax= 300)
store77_bar.bar_label(store77_bar.containers[0])
plt.show()
# For monthly number of customers
store77_bar = sns.barplot(data=store77, x='STORE_NBR', y='NUM_CUST')
plt.ylim(ymax= 60)
store77_bar.bar_label(store77_bar.containers[0])
plt.show()
# -
# ### Store 86
# +
# Visualize the trial store (86) and its control store before the trial period
store86 = store_metrics[(store_metrics['STORE_NBR'] == '86') |\
                        (store_metrics['STORE_NBR'] == f'{store_metrics.iloc[control_store[1],0]}')]
# For total dollars of sales
store86_bar = sns.barplot(data=store86, x='STORE_NBR', y='ALL_SALES')
plt.ylim(ymax= 1000)
store86_bar.bar_label(store86_bar.containers[0])
plt.show()
# For monthly number of customers
store86_bar = sns.barplot(data=store86, x='STORE_NBR', y='NUM_CUST')
plt.ylim(ymax= 120)
store86_bar.bar_label(store86_bar.containers[0])
plt.show()
# -
# ### Store 88
# +
# Visualize the trial store (88) and its control store before the trial period
store88 = store_metrics[(store_metrics['STORE_NBR'] == '88') |\
                        (store_metrics['STORE_NBR'] == f'{store_metrics.iloc[control_store[2],0]}')]
# For total dollars of sales
# Fix: the bar handles were left named `store86_bar` by copy-paste from the
# previous cell; renamed to `store88_bar` for consistency.
store88_bar = sns.barplot(data=store88, x='STORE_NBR', y='ALL_SALES')
plt.ylim(ymax= 1600)
store88_bar.bar_label(store88_bar.containers[0])
plt.show()
# For monthly number of customers
store88_bar = sns.barplot(data=store88, x='STORE_NBR', y='NUM_CUST')
plt.ylim(ymax= 140)
store88_bar.bar_label(store88_bar.containers[0])
plt.show()
# -
| Quantium Data Analytics Project/Forage quantium project part 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="01f361ddc47e0b386595316fe3d7f4dabbd260db"
# # Dealing with Class Imbalance with SMOTE
#
# ### In this kernel, I will use a simple Deep Learning model and compare its performance on normal data and data augmented with SMOTE
#
# > Check https://arxiv.org/pdf/1106.1813.pdf
#
# I use SMOTE to add **sentence level** noise to our data.
#
# #### The model is the following one :
# * GloVe Embedding
# * Bidirectional GRU
# * MaxPool
# * Dense
# * Probably some Dropouts
#
#
# #### Feel free to give any feedback, it is always appreciated.
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np
import pandas as pd
import keras
import seaborn as sns
import matplotlib.pyplot as plt
from time import time
from collections import Counter
# + [markdown] _uuid="2837c162d484eca77c55913e8e1106643f33db34"
# ## How does SMOTE work ?
#
# > " The minority class is over-sampled by taking each minority class sample and introducing synthetic examples along the line segments joining any/all of the k minority class nearest neighbors "
#
# > " Synthetic samples are generated in the following way: Take the difference between the feature vector (sample) under consideration and its nearest neighbor. Multiply this difference by a random number between 0 and 1, and add it to the feature vector under consideration. This causes the selection of a random point along the line segment between two specific features. This approach effectively forces the decision region of the minority class to become more general. "
#
# I am using the class from imblearn, see https://imbalanced-learn.org/en/stable/generated/imblearn.over_sampling.SMOTE.html
# + _uuid="9342385261f03ece86a8aea59cb24c2e59387d30"
from imblearn.over_sampling import SMOTE
# -
sum([0.2, 0.6, 0.15, 0.05])
# + _uuid="cad772858c76e97b7e590088a99ab9be47ea7d7c"
from sklearn.datasets import make_classification
# Toy 4-class dataset with deliberate imbalance (weights 20/60/15/5 %)
# used only to illustrate what SMOTE does.
X, y = make_classification(n_samples=600, n_features=4, n_informative=4, n_redundant=0, n_classes=4, weights=[0.2, 0.6, 0.15, 0.05], class_sep=0.95, random_state=0)
# + _uuid="bba6b36c5d42c0caef6771bda9f7afa2370050da"
# Scatter the first two features, one colour per class, before resampling.
plt.figure(figsize=(12, 8))
plt.title('Repartition before SMOTE')
plt.scatter(X[y==3][:, 0], X[y==3][:, 1], label='class 3')
plt.scatter(X[y==2][:, 0], X[y==2][:, 1], label='class 2')
plt.scatter(X[y==1][:, 0], X[y==1][:, 1], label='class 1')
plt.scatter(X[y==0][:, 0], X[y==0][:, 1], label='class 0')
plt.legend()
plt.grid(False)
plt.show()
# + _uuid="f4e7f65cd8ec7394585f5a6d87a5e7418b87b5a6"
# Default SMOTE: oversample every minority class up to the majority count.
smt = SMOTE()
X_smote, y_smote = smt.fit_resample(X, y)
# + _uuid="c3543fc2e288975b0592a14875c987b2ffc4d953"
# Same scatter after resampling: synthetic points fill in the minority classes.
plt.figure(figsize=(12, 8))
plt.title('Repartition after SMOTE')
plt.scatter(X_smote[y_smote==3][:, 0], X_smote[y_smote==3][:, 1], label='class 3')
plt.scatter(X_smote[y_smote==2][:, 0], X_smote[y_smote==2][:, 1], label='class 2')
plt.scatter(X_smote[y_smote==1][:, 0], X_smote[y_smote==1][:, 1], label='class 1')
plt.scatter(X_smote[y_smote==0][:, 0], X_smote[y_smote==0][:, 1], label='class 0')
plt.legend()
plt.grid(False)
plt.show()
# + [markdown] _uuid="63530632439404a85540565eb31c6390bddb33e9"
# ## Loading data
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Load the pre-processed training texts (one row per text, with a 'label').
df = pd.read_csv("./data/Train_process.csv")
print("Number of texts: ", df.shape[0])
# -
df.head()
# + _uuid="96012bbe09075239c432cf48a204ca2a35f85c16"
df = df.sample(616)
# -
df.head()
# + [markdown] _uuid="a1eccf4f87c29c6216d3d6d4f971bffea1641e44"
# ## Class imbalance
# + _uuid="d7d54c0edc5228654daa5d1bbb3f7e17b509249c"
# Show the class imbalance in the raw labels.
plt.figure(figsize = (10, 8))
sns.countplot(df['label'])
plt.show()
# + _uuid="9dc7a4b01b8118224a6346527007a786fa4d24e4"
print(Counter(df['label']))
# + [markdown] _uuid="2e42542a1bb153dd51f845a76d82b7fe29e78ab8"
# There is way more 0s than 1s in our dataset, data is very unbalanced and one should consider using oversampling or undersampling.
#
# I don't recommend undersampling in Kaggle competitions, because you want to have as much data as possible for your training.
# + [markdown] _uuid="b7c5a37c7151305c44764243c4e097b135b1afc2"
# ## Making Data for the network
# We apply the following steps :
# * Splitting
# * Tokenizing
# * Padding
# + _uuid="07b3163ffb71e5f94cd67075028dd75dac24cc3b"
max_len = 50   # sequences are padded/truncated to this many tokens
len_voc = 620  # vocabulary size kept by the tokenizer / embedding matrix
# + [markdown] _uuid="7b23b1f6cebdb65ad5ea2627a2958f5810c447f1"
# ### Train/Test split
# It is important to split before oversampling !
# + _uuid="324de03daf7579588961f5bc10aa91e2d65a420f"
from sklearn.model_selection import train_test_split
# 50/50 split done BEFORE oversampling, so no synthetic points leak into test.
# NOTE(review): no random_state or stratify is set, so the split is not
# reproducible across runs — confirm whether that is intended.
df_train, df_test = train_test_split(df, test_size=0.5)
# + [markdown] _uuid="87fc62fa686f54ac3531eddfadedb639d15c0ffd"
# ### Tokenizing
# + _uuid="363b9ece53b8e1800d03b188a50ddf84d0cb12a5"
def make_tokenizer(texts, len_voc):
    """Fit a Keras Tokenizer on *texts*, keeping the len_voc most frequent words."""
    from keras.preprocessing.text import Tokenizer

    tokenizer = Tokenizer(num_words=len_voc)
    tokenizer.fit_on_texts(texts)
    return tokenizer
# + _uuid="017eb151a1d1e927a491879bf068fabd0a7edc4d"
tokenizer = make_tokenizer(df['text'], len_voc)
# + _uuid="35dbfe75cd164f9623acce773ed217533a848414"
# Map each text to a sequence of integer word indices.
X_train = tokenizer.texts_to_sequences(df_train['text'])
X_test = tokenizer.texts_to_sequences(df_test['text'])
# + [markdown] _uuid="fbc447350c0ff1e2a8c736a4a1e9214ee0b53663"
# ### Padding
# + _uuid="cd157c268771f2ee711d7fd6b10314322b882ab6"
from keras.preprocessing.sequence import pad_sequences
# Pad/truncate at the END of each sequence to a fixed length of max_len.
X_train = pad_sequences(X_train, maxlen=max_len, padding='post', truncating='post')
X_test = pad_sequences(X_test, maxlen=max_len, padding='post', truncating='post')
# + [markdown] _uuid="8e82910797b3369bb821a1641e3bbeee9b015c2f"
# ### Targets
# + _uuid="7b14b15365d6e3ec8b86a8e4705754cbb3e78c5f"
# Target arrays aligned with the padded sequences above.
y_train = df_train['label'].values
y_test = df_test['label'].values
# + [markdown] _uuid="fce47b803d2f5b9261cc5c4720ff502a1a3f9f8a"
# ### Embeddings
# + _uuid="be89625f116b26209f996d1b7ad1eb5d2ed8269f"
def get_coefs(word, *arr):
    """Split one embedding-file row into (word, float32 vector)."""
    return word, np.asarray(arr, dtype='float32')


def load_embedding(file):
    """Load a word-embedding text file into a {word: vector} dict.

    The GloVe 300d file gets a length filter (len(o) > 100) that skips
    short/degenerate lines; any other file is read with latin encoding.
    Fix: the original iterated over a bare open(file), leaking the file
    handle until garbage collection; ``with`` closes it deterministically.
    """
    if file == './glove.6B.300d.txt':
        with open(file) as f:
            embeddings_index = dict(get_coefs(*o.split(" ")) for o in f if len(o) > 100)
    else:
        with open(file, encoding='latin') as f:
            embeddings_index = dict(get_coefs(*o.split(" ")) for o in f)
    return embeddings_index
# + _uuid="1a0f98cd5cd6dcd656673e7fee48f3bc514cdf9f"
def make_embedding_matrix(embedding, tokenizer, len_voc):
    """Build a (len_voc, embed_size) matrix of pretrained word vectors.

    Rows for words present in *embedding* are copied from it; every other
    row is drawn from a normal distribution matching the pretrained
    vectors' mean and std, so unknown words start with plausible values.
    """
    stacked = np.stack(list(embedding.values()))
    mean, std = stacked.mean(), stacked.std()
    n_dims = stacked.shape[1]

    # Random init first, then overwrite the rows we have vectors for.
    matrix = np.random.normal(mean, std, (len_voc, n_dims))
    for word, idx in tokenizer.word_index.items():
        if idx >= len_voc:
            continue  # word falls outside the kept vocabulary
        vector = embedding.get(word)
        if vector is not None:
            matrix[idx] = vector
    return matrix
# + _uuid="3effb5c72b4cecfe756d167d16f095dc548dff7e"
glove = load_embedding('./glove.6B.300d.txt')
# + _uuid="64a13c6fd58e00a17e0c9f6a517c5b1e7ba43422"
embed_mat = make_embedding_matrix(glove, tokenizer, len_voc)
# + _uuid="e044b352fb3310e16e8e666efbddf3ab93585a91"
X_train_emb = embed_mat[X_train]
X_test_emb = embed_mat[X_test]
# -
X_train_emb.shape
# + [markdown] _uuid="46e8882712c42236e854b6017933ca423381b712"
# ## Oversampling
# + _uuid="88ea22938e3f0ee22490172e2135b7e95ee3f1b1"
# Flatten each (max_len, embed_size) sentence matrix into one long feature
# vector, because SMOTE works on 2-D (n_samples, n_features) input.
train_size, max_len, embed_size = X_train_emb.shape
X_train_emb_r = X_train_emb.reshape(train_size, max_len*embed_size)
# +
from sklearn import preprocessing
# Encode the raw labels as consecutive integers for the resampler.
le = preprocessing.LabelEncoder()
le.fit(y_train)
y_train_ = le.transform(y_train)
# le.inverse_transform(y_) == train.label
# +
# SMOTE?
# + _uuid="cb0de2db258f119045b8ee1595a186bcbe29f15d"
# Oversample every non-majority class up to the majority-class count.
# smt = SMOTE(sampling_strategy=0.2)
smote = SMOTE("not majority")
# Fix: fit_sample was deprecated and then removed in imbalanced-learn 0.8;
# fit_resample is the supported name (already used earlier in this notebook).
X_smote, y_smote = smote.fit_resample(X_train_emb_r, y_train_)
# + _uuid="587161813116583afa38da043b43665ef0be27d9"
# Restore the (n_samples, max_len, embed_size) shape expected by the RNN.
X_smote = X_smote.reshape((X_smote.shape[0], max_len, embed_size))
# + _uuid="e896da77df8b9df281a588134c7b341adaa90d6c"
# Side-by-side class counts before and after SMOTE.
plt.figure(figsize = (10, 8))
plt.subplot(1, 2, 1)
sns.countplot(y_train)
plt.title('Reparition before SMOTE')
plt.subplot(1, 2, 2)
sns.countplot(y_smote)
plt.title('Reparition after SMOTE')
plt.show()
# -
X_smote.shape
# + [markdown] _uuid="57533e8b1621dd25eff1196e9a183e2ef94a13bd"
# ## Now let us train a model
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from pymagnitude import *
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import log_loss
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from keras.preprocessing.text import Tokenizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
import ipdb
# -
X_smote.shape
# +
# Cross-validated evaluation of a classifier on the SMOTE-balanced embeddings.
X = X_smote
y = y_smote

from sklearn.model_selection import KFold, StratifiedKFold

# StratifiedKFold keeps the class proportions identical in every fold.
# kfold=KFold(n_splits=5)
kfold = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)

losses = []     # kept for parity with earlier experiments (unused here)
acc = []        # per-fold accuracy
precision = []  # per-fold macro-averaged precision

for i, (train_index, validate_index) in enumerate(kfold.split(X, y)):
    X_train = X[train_index]
    y_train = y[train_index]
    X_valid = X[validate_index]
    y_valid = y[validate_index]

    # Candidate models; only one is fitted per run (selected below).
    # NOTE(review): SGDClassifier's loss='log' was renamed 'log_loss' in
    # scikit-learn 1.3 — confirm the installed version if that entry is used.
    models = [
        RandomForestClassifier(n_estimators=100, max_depth=100, random_state=0),
        LinearSVC(fit_intercept=True, intercept_scaling=2),
        LogisticRegression(random_state=0),
        SVC(kernel='linear', C=1.0, degree=15, max_iter=1000), #'linear', 'poly', 'rbf', 'sigmoid'
        XGBClassifier(max_depth=2,
                      gamma=2,
                      eta=0.8,
                      reg_alpha=0.5,
                      reg_lambda=0.5),
        SGDClassifier(loss = 'log', alpha = 1e-4,
                      n_jobs = -1, penalty = 'l2'),
        KNeighborsClassifier(n_jobs = -1),
        RandomForestClassifier(n_jobs = -1)]

    # models[-4] selects the XGBClassifier from the list above.
    clf = models[-4].fit(X_train, y_train)
    y_pred = clf.predict(X_valid)

    acc.append(accuracy_score(y_valid, y_pred))
    precision.append(precision_score(y_valid, y_pred, average='macro'))

    print(f"Accuracy epoch {i+1} : {accuracy_score(y_valid, y_pred)}")
    print(f"Accuracy precision {i+1} : {precision_score(y_valid, y_pred, average='macro')}")

# Fix: the summary line printed mean accuracy but labelled it "Average loss".
print(f"\n#############################\nAverage accuracy {np.mean(acc)}")
# -
# + [markdown] _uuid="2a5f324273d8e4726a6f0f9206170845d5ead890"
# ### Making model
# -
# + _uuid="14a2bfd9832b4f7f7fb89a8bd987f93e759f3cdb"
from keras.models import Model
# from keras.layers import Dense, Bidirectional, GlobalMaxPool1D, Input, Dropout
from tensorflow.compat.v1.keras.layers import CuDNNGRU, Dense, Bidirectional, GlobalMaxPool1D, Input, Dropout
from keras.optimizers import Adam
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# + _uuid="4584596a85f8380a0c25eea1e6364f672b114e25"
def make_model(max_len, len_voc=50000, embed_size=300):
    """Build and compile a BiGRU binary classifier over pre-embedded sequences.

    Parameters
    ----------
    max_len : length of each input sequence.
    len_voc : kept for signature compatibility; unused because the input
        is already embedded upstream (no Embedding layer here).
    embed_size : dimensionality of the input embeddings (default 300).

    Returns a compiled Keras model with a single sigmoid output.
    """
    # Generalization: use embed_size instead of the hard-coded 300
    # (default unchanged, so existing callers behave identically).
    inp = Input(shape=(max_len, embed_size))
    x = Bidirectional(CuDNNGRU(64, return_sequences=True))(inp)
    x = GlobalMaxPool1D()(x)
    x = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    # Fix: a single sigmoid unit is a binary output, so the loss must be
    # binary_crossentropy; categorical_crossentropy over one "class" is
    # degenerate (always ~0) and the model would not learn.
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model
# + _uuid="54d59f934f48046cca805f2d7684dba540e7eaa1"
# One model per training regime so the comparison starts from the same architecture.
model = make_model(max_len)
model_smote = make_model(max_len)
# + _uuid="032df52e96dda468647fc10a138c3005127a01b6"
model.summary()
# + [markdown] _uuid="d2fcaba4e1d4c7e02f21bad4d3eb7440d17090e5"
# ### Callbacks
# + _uuid="6d516d861a3a0c347401d688ca394b1333233c90"
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
# LR decay on validation-accuracy plateaus, plus best-weights checkpointing,
# duplicated per model so the two runs do not share state or files.
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=2, verbose=1, min_lr=0.000001)
checkpoints = ModelCheckpoint('weights.hdf5', monitor="val_acc", mode="max", verbose=True, save_best_only=True)
reduce_lr_smote = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=2, verbose=1, min_lr=0.000001)
checkpoints_smote = ModelCheckpoint('smote_weights.hdf5', monitor="val_acc", mode="max", verbose=True, save_best_only=True)
# + [markdown] _uuid="80580a6745b44b8b63ab725672c8ffafbab57e75"
# ### Fitting
# + _uuid="8e1e47353ba3171ba1d798ee7c7c3e36577729c8"
# Train the baseline model on the un-augmented embeddings.
# NOTE(review): validation_data is a list here; Keras documents a tuple —
# confirm it is accepted by the installed version.
model.fit(X_train_emb, y_train, batch_size=128, epochs=3, validation_data=[X_test_emb, y_test], callbacks=[checkpoints, reduce_lr])
# + _uuid="028cf6ef1b94a8ec4c5120d42d1fa6ae861205c8"
model_smote.fit(X_smote, y_smote, batch_size=128, epochs=3, validation_data=[X_test_emb, y_test], callbacks=[checkpoints_smote, reduce_lr_smote])
# + _uuid="42d8b19a31626ec522acd15575754f073d662d80"
model.load_weights('weights.hdf5')
model_smote.load_weights('smote_weights.hdf5')
# + [markdown] _uuid="2aebfe6e5889091b2d97f446373f5a1e2d4f015e"
# ### Predictions
# + _uuid="35bd2078e02117d7fca467cc7068be4e3ae41822"
# Score both trained models on the same held-out embedded test set.
pred_test = model.predict([X_test_emb], verbose=1)
pred_test_smote = model_smote.predict([X_test_emb], batch_size=256, verbose=1)
# + [markdown] _uuid="603d53b9394c43f2afefc6f46e0abee52d8755fd"
# ### Tweaking threshold
# + _uuid="217d8ac6f655b35c7c0c82bffcc76ba50fee51dd"
def tweak_threshold(pred, truth):
    """Sweep decision thresholds 0.10..0.50 (step 0.01) and return the best F1, rounded to 4 dp."""
    from sklearn.metrics import f1_score

    thresholds = np.round(np.arange(0.1, 0.501, 0.01), 2)
    scores = [
        f1_score(truth, (pred > t).astype(int))
        for t in thresholds
    ]
    return round(np.max(scores), 4)
# + _uuid="77bdaceb60a5840c1561d8b9560ad7b29316f9b3"
print(f"Scored {tweak_threshold(pred_test, y_test)} without SMOTE (test data)")
# + _uuid="3347d752a8b671019b448d092234bd786fa06bfb"
print(f"Scored {tweak_threshold(pred_test_smote, y_test)} with SMOTE (test data)")
# + [markdown] _uuid="50698a4b9b2cf21d6370d8e88d90d265fb3af1fc"
# ## Conclusion
#
# It appears that SMOTE does not help improve the results. However, it makes the network learning faster.
#
# **Moreover, there is one big problem: this method is not compatible with larger datasets.**
#
# You have to apply SMOTE on embedded sentences, which takes way too much memory.
#
# A solution is to use a generator for our training, which realizes oversampling on batches. I've tried it, but my generator was very slow.
#
# So I'm going to stick with these results for now, and try another data augmentation technique.
#
# If you have any improvement idea feel free to let me know.
#
# #### Thanks for reading !
#
| Competitions/Basic-Needs-Basic-Rights-Kenya---Tech4MentalHealth/Salomon/dealing-with-class-imbalance-with-smote.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NECESSARY IMPORTS
#NECESSARY IMPORTS
import json
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#To show all columns
pd.options.display.max_columns = None
# !pip install nltk
import nltk
#to import all functions from functions_t.ipnyb created file
# !pip install nbimporter
import nbimporter
from functions_t import *
# ## GETTING THE DATA
# ## BARCELONA
# +
# Load the scraped Barcelona projects and build the working DataFrame.
OUTPUT_FILENAME = "./Data/barcelonian_data/BarcelonianProjects.json"
with open(OUTPUT_FILENAME, 'r', encoding='utf-8') as f:
    # Fix: the original bound the parsed JSON to the name `list`, shadowing
    # the builtin for the rest of the notebook session.
    projects = json.load(f)
dataframe_BCN = pd.DataFrame.from_dict(projects)
dataframe_BCN[:3]
# -
#ADD ID COLUMN (INDEX) and drop url columns
dataframe_BCN=dataframe_BCN.reset_index()
#dataframe_BCN=dataframe_BCN.drop(columns=["bcn_url", "official_url"])
# ## Text mining: analysing the description fields: getting the wordcloud, the length distribution and the most relevant words by using tf-idf
# +
# Clean the free-text description (clean_text comes from functions_t).
dataframe_BCN['description_clean'] = dataframe_BCN.description.apply(lambda text: clean_text(text))
# Get the nltk.Text object
description_tokens = convert_to_nltk_text(dataframe_BCN, "description_clean")
description_std_tokens = standardize_text(description_tokens, "spanish")
dataframe_BCN['description_clean'] = dataframe_BCN.description_clean.apply(lambda text: standardize_text(text, "spanish"))
# Frequency plot / top-20 words of the standardized tokens.
plt.figure(figsize = (8,5))
nltk.FreqDist(description_std_tokens).plot(50)
nltk.FreqDist(description_std_tokens).most_common(20)
description_std_tokens.concordance("educ")
plot_text_length_distribution(dataset = dataframe_BCN,
                               text_field = 'description_clean')
bag_of_words_description, stemmized_description=get_bagofwords(dataframe_BCN, "description", "spanish")
plot_wordcloud("BCN projects stemmed descriptions wordcloud", stemmized_description)
#sorted(stemmized_description.items(), key= lambda x: x[1], reverse=True)
# -
# +
# TF-IDF over all project descriptions (TFIDF class comes from functions_t).
tfidfs = []
# One empty list per row, to collect that row's top words below.
dataframe_BCN["description_resultwords"]=np.empty((len(dataframe_BCN), 0)).tolist()
document_size=len(dataframe_BCN)
# calculate tf
for index, row in dataframe_BCN.iterrows():
    tfidf = TFIDF(row["description_clean"])
    tfidf.tf()
    tfidfs.append(tfidf)
# calculate idf, then store each document's 5 highest-tfidf words.
# NOTE(review): the inner loop below reuses the name `index`, shadowing the
# outer loop variable — harmless as written but fragile; confirm before edits.
all_text = [tfidf.text for tfidf in tfidfs]
for i, tfidf in enumerate(tfidfs):
    tfidf.idf(all_text, document_size)
    tfidf.tf_idf()
    result_df = tfidf.text_df.sort_values('tfidf', ascending=False).head(5) # sort and take top 5
    result_words = []
    for index, row in result_df.iterrows():
        result_words.append((row.word, row.tfidf))
        dataframe_BCN.loc[i, "description_resultwords"].append((row.word, row.tfidf))
# ## DEDUCE if they require a phone
# +
# Flag projects whose description mentions a phone / mobile app.
keywords = ["dispositivo", "móvil", "aplicación", "app"]

dataframe_BCN["phone"] = False
n_matches = 0
for idx, project in dataframe_BCN.iterrows():
    description = project['description'].lower()
    if any(kw.lower() in description for kw in keywords):
        dataframe_BCN.loc[idx, ["phone"]] = True
        n_matches = n_matches + 1

print(n_matches, " out of ", len(dataframe_BCN), " projects mention phones.")
# -
# ### TOPICS
dataframe_BCN.topic.unique()
plot_pie(dataframe_BCN, "topic", "BCN projects topics")
# ### STATUS
plot_pie(dataframe_BCN, "status", "BCN projects status")
# ### ACTIVITIES
# +
#dataframe_BCN['activities'] = dataframe_BCN.activities.apply(lambda text: re.sub(".", "", text))
# Build a per-activity frequency table from the comma-separated "activities" column.
df_actividades_bcn = pd.DataFrame(columns=['act', 'num_times_appears', 'list_projects'])
for index, row in dataframe_BCN.iterrows():
    # Normalize the raw activities string in place (drop dots, fix a known typo).
    dataframe_BCN.loc[index, "activities"]= dataframe_BCN.loc[index, "activities"].replace('.', '')
    dataframe_BCN.loc[index, "activities"]= dataframe_BCN.loc[index, "activities"].replace('Programa los Barrios', 'Programa en los Barrios')
    for act in dataframe_BCN.loc[index, "activities"].split(", "):
        if act not in df_actividades_bcn['act'].unique():
            # First sighting: start a new row for this activity.
            new_row={'act':act, 'num_times_appears':1, 'list_projects':[row["index"]]}
            # Fix: DataFrame.append was removed in pandas 2.0; pd.concat is
            # the supported equivalent.
            df_actividades_bcn = pd.concat([df_actividades_bcn, pd.DataFrame([new_row])], ignore_index=True)
        else:
            # Fix: use a dedicated name instead of clobbering the outer loop
            # variable `index`.
            act_idx = df_actividades_bcn.index[df_actividades_bcn['act'] == act]
            df_actividades_bcn.loc[act_idx,['num_times_appears']] = df_actividades_bcn[df_actividades_bcn['act'] == act].num_times_appears +1
df_actividades_bcn = df_actividades_bcn.sort_values(by=['num_times_appears'], ascending=False)
df_actividades_bcn
# -
#EXPORT TO EXCEL
# NOTE(review): the `options=` keyword was removed from pandas' ExcelWriter in
# newer releases (replaced by engine_kwargs) -- confirm the pinned pandas version.
with pd.ExcelWriter('dataframe_BCN.xlsx', options={'strings_to_urls': False}) as writer:
    dataframe_BCN.to_excel(writer, 'dataframe_BCN')
dataframe_BCN[:3]
| scripts/analysing/Barcelonian_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bias on Wikipedia
#
# The aim of this experiment is to analyze the bias in Wikipedia in data about countries, especially in data about their politicians. We analyze both quantitatively, where we compare the count of articles relative to the country's population, and qualitatively, where we compare the relative quality of the articles as returned by the ORES service.
#
# # Data Acquisition
# The first source of our data is the Wikipedia data for politicians by their country and the Revision ID of the latest edit of the Wikipedia article on them. This data can be downloaded at https://figshare.com/articles/Untitled_Item/5513449
#
# We download this data and read the csv (page_data.csv) present in country/data folder from the downloaded ZIP. This code is the original given to us as part of the assignment from the course Data 512 as it does what we need perfectly.
# +
## getting the data from the CSV files and converting into a list
import csv
import pandas as pd
# Each row of page_data.csv is [page, country, rev_id]; keep them as-is.
data = []
with open('page_data.csv', encoding='utf8') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        data.append([row[0],row[1],row[2]])
# -
# The next source of our data is ORES API: https://ores.wikimedia.org/v3/scores/{project}/{revid}/{model}
# This API gives the quality of the Wikipedia article as shown here in this example.
# +
import requests
import json
#The endpoint for ORES API
endpoint = 'https://ores.wikimedia.org/v3/scores/{project}/{revid}/{model}'
# NOTE(review): `headers` is defined but never passed to requests.get below --
# confirm whether it should be supplied via headers=headers.
headers = {'User-Agent' : 'https://github.com/SalikWarsi', 'From' : '<EMAIL>'}
params = {'project' : 'enwiki',
          'model' : 'wp10',
          'revid' : '797882120'
          }
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
print(json.dumps(response, indent=4, sort_keys=True))
# -
# We can query this API and parse the JSON response to get the predicted Quality Score of the revision edit from ORES as shown next in this example.
# +
endpoint = 'https://ores.wikimedia.org/v3/scores/{project}/{revid}/{model}'
headers = {'User-Agent' : 'https://github.com/SalikWarsi', 'From' : '<EMAIL>'}
params = {'project' : 'enwiki',
          'model' : 'wp10',
          'revid' : '797882120'
          }
response = requests.get(endpoint.format(**params))
json_data = json.loads(response.text)
# Drill into the JSON to the predicted quality class (e.g. "Start").
print(json_data['enwiki']['scores'][params['revid']]['wp10']['score']['prediction'])
# -
# We can see that we were able to extract the prediction as "Start"
#
# Now we read the CSV we read earlier (except first row which is the column titles) and for each of the Revision Ids in that table we query the ORES API and after parsing the JSON store the result (Quality) in a new list.
#
# Finally we combine all the data into a pandas data frame and write it to a csv file: **en-wikipedia_politician_article.csv**
#
# **Note: This takes hours to run so be careful running it.** The stored CSV file is there so we can directly start after this step. This is also why I put it under try-except, as I was occasionally getting an error. This error was due to 2 revision IDs (807367030: Jalal Movaghar of Iran, 807367166: Mohsen Movaghar of Iran) for which ORES reported that the revision IDs couldn't be identified. These IDs I have skipped.
# +
#Now we read each data from csv row (except the first which is the header)
#and query the ORES service for Quality score finally converting it into a csv
countries, politicians, revids, quality = [], [], [], []

# Fix: rowcount was used before ever being assigned. The resulting NameError on
# the first row was swallowed by the bare except, whose own `rowcount+1` then
# raised NameError again and crashed the loop. Initialise it explicitly.
rowcount = 0

#Reading each row from the csv file earlier read
#We start from row 1 not 0 as row 0 is the heading of the columns
# NOTE(review): `countries` actually holds page_data column 0 (article title)
# and `politicians` holds column 1 (country) -- the DataFrame column names
# below compensate for the swap, so the output columns are correct.
for row in data[1:]:
    try:
        # We call the ORES API with the revision ID
        params = {'project' : 'enwiki',
                  'model' : 'wp10',
                  'revid' : row[2]
                  }
        response = requests.get(endpoint.format(**params))
        json_data = json.loads(response.text)
        #This step parses the JSON and extracts the score and adds it to the list
        quality.append(json_data['enwiki']['scores'][params['revid']]['wp10']['score']['prediction'])
        countries.append(row[0])
        politicians.append(row[1])
        revids.append(row[2])
        rowcount = rowcount+1
    except Exception:  # narrowed from a bare except so Ctrl-C still interrupts
        rowcount = rowcount+1
        print("Exception Occured for rev_ID", row[2])

#Write to a file
#First convert to data frame
print("Writing to File")
dataFrame = pd.DataFrame([countries, politicians, revids, quality]).T
dataFrame.columns = ["politician", "country", "revid", "quality"]
#Write Data frame to csv
dataFrame.to_csv('en-wikipedia_politician_article.csv',index=False)
print("Written")
# -
# The Third data source is the Population data by Country from the Population Referece Bureau: http://www.prb.org/DataFinder/Topic/Rankings.aspx?ind=14
#
# It can be downloaded directly from that website using the Excel button on top right. This is stored as: **Population Mid-2015.csv**
# # Data Processing
# We now have 2 csv files that we will combine and clean. These are
# 1. en-wikipedia_politician_article.csv
# 2. Population Mid-2015.csv
# +
#Reading Politician article data
Politicians = pd.read_csv('en-wikipedia_politician_article.csv')
#Reading Population CSV. Here the first line is not part of the table so we skip it
# Also by defining the separator and thousands we read the population as an integer
Population = pd.read_csv('Population Mid-2015.csv', header=1, sep= ",", thousands=',')
# Keeping only the columns we need: country and data (Population of the country in mid 2015)
Population = Population[['Location', 'Data']]
# We combine the 2 data sets
# We use an inner join as we only want those rows where we have data from both tables
Combined = Politicians.merge(Population, left_on='country', right_on='Location', how='inner')
# -
# Here we notice that some politicians are part of multiple countries. However it is possible that 2 different people have the same name. Therefore we use Revision ID of the wikipedia article as a unique identifier.
#
# We count number of rows per Revision ID and we only take those revision ids that have 1 row. Then we do an inner join to only include those which are unique
# +
# Counting rows per Revision ID
rowsPerPolitician = pd.DataFrame({'count' : Combined.groupby( ['revid'] ).size()}).reset_index()
#Only keeping those revision ID with 1 row only
uniquePoliticians = rowsPerPolitician[rowsPerPolitician['count'] == 1]
#Merging with Combined with an inner join to keep only unique politicians
Combined = Combined.merge(uniquePoliticians, left_on='revid', right_on='revid', how='inner')
# -
# Finally we extract only the columns we need and rename them before writing to a csv file: **PoliticiansArticleQualityWithCountryPopulation.csv**
# +
Combined = Combined[['country', 'politician', 'revid', 'quality', 'Data']]
Combined.columns = ['country', 'article_name', 'revision_id', 'article_quality', 'population']
Combined.to_csv('PoliticiansArticleQualityWithCountryPopulation.csv',index=False, encoding='utf8')
# -
# # Data Analysis
#
# The first thing we do is read the data into a panda dataframe
# +
#Importing Library
import pandas as pd
#Reading the csv produced by the processing step above
data = pd.read_csv('PoliticiansArticleQualityWithCountryPopulation.csv', encoding='utf8')
# -
# Now we try to find number of Politician Articles divided by the population of the country for each country.
# As earlier during data processing we have removed any article that exists more than once, by counting the number of rows in our data set per country we get number of Politician Articles for that country.
#
# Similarly we could find the mean population value per country as for each country we already have a unique population value.
# +
articlesPerCountry = pd.DataFrame({'articles' : data.groupby( ['country'] ).size(),
                        'population' : data.groupby( ['country'] )['population'].mean()}).reset_index()
# Find the ratio of Articles per Population
articlesPerCountry['PoliticianArticlesPerPopulation'] = articlesPerCountry['articles'] / articlesPerCountry['population']
# -
# We now sort the data according to this calculated ratio
articlesPerCountry.sort_values(by='PoliticianArticlesPerPopulation', ascending=False, inplace=True)
# Now as its sorted in descending order we take the first 10 values as the top 10 ratios and plot a bar chart
# +
import matplotlib.pyplot as plt
# %pylab inline
pylab.rcParams['figure.figsize'] = (15, 8)
#Taking top 10 values
articlesPerCountryTop = articlesPerCountry[:10]
#Creating plot
articlesPerCountryTop.plot.bar(x='country', y = 'PoliticianArticlesPerPopulation',fontsize = 8)
plt.subplots_adjust(bottom=0.5)
plt.xlabel('Country') # Add a label to the x-axis
plt.ylabel('Politician Articles per Capita') # Add a label to the y-axis
plt.title('English Wikipedia Articles about politicians per capita (Top 10 Countries)') # Add a plot title
plt.savefig('Top10ArticlesPerCapita.png')
plt.show()
# -
# Similarly we create a barchart for the bottom 10 countries for that ratio and also show the sorted list
# +
articlesPerCountryBottom = articlesPerCountry[-10:]
articlesPerCountryBottom.plot.bar(x='country', y = 'PoliticianArticlesPerPopulation',fontsize = 8)
plt.subplots_adjust(bottom=0.5,left=0.2)
plt.xlabel('Country') # Add a label to the x-axis
plt.ylabel('Politician Articles per Capita') # Add a label to the y-axis
plt.title('English Wikipedia Articles about politicians per capita (Bottom 10 Countries)') # Add a plot title
plt.savefig('Bottom10ArticlesPerCapita.png')
plt.show()
# -
# ### Analysis
#
# We can see the highest ranked countries for those are some of the smallest countries in population while most of the countries at the bottom of this ranking have a high population. Even so countries like USA which have a very high population (3rd highest) still don't figure at the very bottom while some countries like Uzbekistan ( which has a relatively small population) still have a very low ratio. This could indicate bias against countries like Uzbekistan indicating that they might have a lesser number of politicians listed in English Wikipedia than they should.
#
# However it is important to note that the size of a country's parliament (or other governing body) which might indicate number of famous politicians is not directly proportional to their population. For example Tuvalu's population is less than 12000 while that of India is more than 1.2 billion. However, as seen in https://en.wikipedia.org/wiki/Parliament_of_Tuvalu while their parliament has 15 members, India's doesn't have 150,000 members of parliaments. This could indicate why smaller countries tend to have higher ratios.
# ## Quality Articles
#
# We then say that a article is high quality if it's quality rating is **FA** or **GA**.
#
# Then for each country we calculate how much proportion of the articles are highquality and we plot the top 10 and the bottom 10.
#
# We first create a new Variable in our data that denotes this articles is high quality
data['IsHighQuality'] = (data['article_quality'] == 'FA') | (data['article_quality'] == 'GA')
# We then calculate the proportion of articles which are high quality for each country and we sort by this proportion. We use population of the country to break ties.
qualityPerCountry = pd.DataFrame({'population' : data.groupby( ['country'] )['population'].mean(),
                        'HighQualityProportion' : data.groupby( ['country'] )['IsHighQuality'].mean()}).reset_index()
qualityPerCountry.sort_values(by=['HighQualityProportion', 'population'], ascending=[False,True], inplace=True)
# Now we take the top 10 and plot a bar chart
# +
qualityPerCountryTop = qualityPerCountry[:10]
qualityPerCountryTop.plot.bar(x='country', y = 'HighQualityProportion',fontsize = 8)
plt.subplots_adjust(bottom=0.5)
plt.xlabel('Country') # Add a label to the x-axis
plt.ylabel('Proportion of Hiqh Quality Articles') # Add a label to the y-axis
plt.title('Proportion of English Wikipedia Articles about politicians that are high quality (Top 10 Countries)') # Add a plot title
plt.savefig('Top10QualityArticleRatio.png')
plt.show()
# -
# Instead of showing a barchart, I observed that 39 countries have 0 High Quality articles about their politicians.
# These are listed here
bottomCountries = qualityPerCountry[qualityPerCountry['HighQualityProportion'] == 0 ]
print("The countries with 0 high quality articles are:")
print(bottomCountries['country'])
# ### Analysis
#
# Here we see that while for some countries almost 1/8th of the articles about their politicians have a high quality, for many others there are 0 articles that have a high quality. This indicates the imbalance in coverage in English Wikipedia articles and shows bias
#
# # Possible Issues
# There could be some issues in this analysis. As shown in https://figshare.com/articles/Untitled_Item/5513449 only 1 level of nesting was used to get Wikipedia articles from English Wikipedia. This might have led to us missing some articles and therefore corrupted our analysis.
#
# Also when joining using countries between the data sources some countries had minor difference in names, which meant that they were removed from the analysis. Also there are some countries in either data source, not present in the other.
| hcds-a2-bias_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import signal
from scipy.fft import fft, fftfreq
import pylab
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.metrics import mean_squared_error
# ## Import Data
# Available paths:
#
# EMG DATA:
# - subject1_open_5.xlsx XXX
# - subject1_smile_1.xlsx Spike in right ocul.
# - subject1_smile_4.xlsx Spike in right ocul.
# FAME DATA:
# - subject1_smile_1_landmarks.csv
# - subject1_smile_4_landmarks.csv
# ## Sensor Labels
# EMG 1 = a; left side of face
# EMG 2 = b; right side of face
# Ch a-d = 1-4; a-d, indicated by sensor color
# Input recordings for this session and their acquisition rates.
path_emg = 'subject1_smile_1.xlsx'
path_kin = 'subject1_smile_1_landmarks.csv'
fs = 2000   # EMG sampling rate (Hz)
fps = 100   # video frame rate of the landmark data (frames/s)
# +
# Muscle names per channel index (m1..m4) and per side (a = left, b = right).
m1 = "Dep. anguli oris"
m2 = "Orbicularis Oris (l)"
m3 = "Zyg. Major"
m4 = "Orbiculis Oculi"
a1 = "L.Dep. anguli oris"
a2 = "L.Orbicularis Oris (l)"
a3 = "L.Zyg. Major"
a4 = "L.Orbiculis Oculi"
b1 = "R.Dep. anguli oris"
b2 = "R.Orbicularis Oris (l)"
b3 = "R.Zyg. major"
b4 = "R.Orbiculis Oculi"
l = "left"
r = "right"
# +
def import_emg(path_emg = path_emg):
    """Load a raw EMG recording from its Excel export into a DataFrame."""
    return pd.read_excel(path_emg)

def import_kin(path_kin = path_kin):
    """Load facial-landmark kinematic data and reverse its row order."""
    frames = pd.read_csv(path_kin)
    # Rows arrive last-frame-first; reindexing with the reversed index flips them.
    return frames.reindex(frames['Frame_number'].index[::-1])
# -
# ## Characteristics
# +
def print_char_emg(df, fs = fs):
    """Print a duration/sample-count summary of an EMG recording (returns None)."""
    t_t_emg = df['time'].iloc[-1]
    N_samples = int(fs*t_t_emg)
    # The unused `char` dict that was built here was dead code and has been removed.
    intro = "EMG data:"+"\n"+"%s seconds long with a total of %s samples at %s Hz."%(t_t_emg,N_samples,fs)
    print(intro)

def print_char_kin(kin, fps = fps):
    """Print a duration/frame-count summary of the landmark data (returns None)."""
    # NOTE(review): the /100 hard-codes 100 fps even when the fps argument
    # differs -- confirm whether this should be /fps.
    t_t_frames = kin['Frame_number'].iloc[-1]/100
    N_frames = int(fps*t_t_frames)
    intro = "Kinematic data:"+"\n"+"%s seconds long with a total of %s frames at %s Hz."%(t_t_frames,N_frames,fps)
    print(intro)
# -
# ## Call and Assign variables for EMG and convert to microV
def set_params_emg(df, mag = 6):
    """Scale the EMG channels a1..a4 / b1..b4 by 10**mag in place and return df.

    The default mag=6 converts volts to microvolts.
    """
    scale = 10 ** mag
    for ch in range(1, 5):
        for side in ('a', 'b'):
            df[f'{side}{ch}'] = df[f'{side}{ch}'] * scale
    return df
# ## Plot Raw EMG Data
def plot_raw_emg(df):
    """Plot raw left (a1..a4) vs right (b1..b4) EMG channels side by side.

    Fix: the body previously ignored ``df`` and always plotted the module-level
    ``raw_emg`` frame; it now plots the frame it was given.
    """
    for i in range(1, 5):
        fig, ax = plt.subplots(1, 2, figsize=(8, 4))
        ax[0].plot(df['time'], df[f'a{i}'], color='blue', label = '%s'%l)
        ax[0].set_title("Raw Signal:" + "\n" + "%s"%f'm{i}')
        ax[0].set_ylabel("Voltage (microV)")
        ax[0].set_xlabel("time(s)")
        ax[0].grid()
        ax[0].autoscale()
        leg = ax[0].legend()
        ax[1].plot(df['time'], df[f'b{i}'], color='green', label = '%s'%r)
        ax[1].set_title("Raw Signal:" + "\n" + "%s"%f'm{i}')
        ax[1].set_ylabel("Voltage (microV)")
        ax[1].set_xlabel("time(s)")
        ax[1].grid()
        ax[1].autoscale()
        leg = ax[1].legend()
# ## Design Bandpass
def bandpass_create(low_band = 3, high_band = 400, order_bp = 2, fs = fs):
    """Design a digital Bessel bandpass filter.

    Returns (b, a, low_band, high_band): the transfer-function coefficients
    plus the cutoff frequencies in Hz, so callers can label plots with them.
    """
    num, den = sp.signal.bessel(order_bp, [low_band, high_band], btype='bandpass',
                                analog=False, output='ba', fs=fs)
    return num, den, low_band, high_band
def bandpass_bode(z1 ,w1,low_band,high_band, order_bp = 2,fs = 2000):
    """Plot the Bode diagram (magnitude + phase) of the bandpass filter."""
    sys_bp = signal.dlti(z1, w1, dt = 1/fs)
    # NOTE(review): this first dbode result is immediately overwritten by the
    # sys_bp.bode call on the next line -- confirm which frequency grid is intended.
    w_bp, mag_bp, phase_bp = signal.dbode(sys_bp, w = np.logspace(-2,3,100)*2*np.pi)
    w_bp, mag_bp, phase_bp = sys_bp.bode(w = (np.logspace(-2,2.7,100)*2*np.pi)/(fs/2))
    # low_cut/high_cut are currently unused beyond documentation of the band edges.
    low_cut = low_band
    high_cut = high_band
    freq_bp = w_bp/(2*np.pi)
    fig, ax = plt.subplots(2,1,figsize = (6,8))
    ax[0].semilogx(freq_bp, mag_bp)
    ax[0].set_title(r'Bode Diagram:'+ '\n' + '%snd Order %s and %s Hz BP'%(order_bp,low_band,high_band), fontsize=16)
    ax[0].set_xlabel('Frequency (Hz)')
    ax[0].set_ylabel('Magnitude (dB)')
    #ax[0].autoscale()
    ax[0].grid()
    ax[1].semilogx(freq_bp, phase_bp)
    ax[1].set_ylabel("Phase (degree)")
    ax[1].set_xlabel("Freq (hz)")
    #ax[1].autoscale()
    ax[1].grid()
    plt.show()
def bandpass_apply(df, z1, w1):
    """Zero-phase bandpass-filter every EMG channel of df.

    Filters columns a1..a4 and b1..b4 with filtfilt. Note: ``band`` is an
    alias of ``df`` (not a copy), so the input frame is modified in place
    and then returned.
    """
    band = df
    for ch in range(1, 5):
        for side in ('a', 'b'):
            col = f'{side}{ch}'
            band[col] = sp.signal.filtfilt(z1, w1, band[col])
    return band
def plot_bandpassed(df):
    """Plot bandpass-filtered left vs right EMG channels side by side.

    NOTE(review): the subplot titles say "Rectified Signal" although the data
    plotted here is the bandpassed (not yet rectified) signal -- confirm.
    """
    for i in range(1,5):
        fig, ax = plt.subplots(1, 2, figsize=(8, 4))
        ax[0].plot(df['time'],df[f'a{i}'] , color='blue',label = '%s'%l)
        ax[0].set_title("Rectified Signal:" + "\n" + "%s"%f'm{i}')
        ax[0].set_ylabel("Voltage (microV)")
        ax[0].set_xlabel("time(s)")
        ax[0].grid()
        ax[0].autoscale()
        leg = ax[0].legend()
        ax[1].plot(df['time'],df[f'b{i}'] , color='green', label = '%s'%r)
        ax[1].set_title("Rectified Signal:" + "\n" + "%s"%f'm{i}')
        ax[1].set_ylabel("Voltage (microV)")
        ax[1].set_xlabel("time(s)")
        ax[1].grid()
        ax[1].autoscale()
        leg = ax[1].legend()
# ## Rectify signal
def rectify(df):
    """Full-wave rectify every EMG channel (absolute value), in place.

    Mutates columns a1..a4 / b1..b4 of ``df`` and returns the same frame.
    """
    for ch in range(1, 5):
        for side in ('a', 'b'):
            col = f'{side}{ch}'
            df[col] = np.abs(df[col])
    rectified = df
    return rectified
def plot_rectified(df):
    """Plot rectified left vs right EMG channels side by side (one figure per muscle)."""
    for i in range(1,5):
        fig, ax = plt.subplots(1, 2, figsize=(8, 4))
        ax[0].plot(df['time'],df[f'a{i}'] , color='blue',label = '%s'%l)
        ax[0].set_title("Rectified Signal:" + "\n" + "%s"%f'm{i}')
        ax[0].set_ylabel("Voltage (microV)")
        ax[0].set_xlabel("time(s)")
        ax[0].grid()
        ax[0].autoscale()
        leg = ax[0].legend()
        ax[1].plot(df['time'],df[f'b{i}'] , color='green', label = '%s'%r)
        ax[1].set_title("Rectified Signal:" + "\n" + "%s"%f'm{i}')
        ax[1].set_ylabel("Voltage (microV)")
        ax[1].set_xlabel("time(s)")
        ax[1].grid()
        ax[1].autoscale()
        leg = ax[1].legend()
# ## Linear Envelope
def linear_envelope(df, window_size = 100):
    """Moving-RMS envelope of every EMG channel, in place.

    Each channel is replaced by sqrt(moving average of the squared signal)
    over a ``window_size``-sample boxcar ('same'-length convolution).
    Returns the mutated frame.
    """
    kernel = np.full(window_size, 1.0 / window_size)
    for ch in range(1, 5):
        for side in ('a', 'b'):
            col = f'{side}{ch}'
            df[col] = np.sqrt(np.convolve(df[col] ** 2, kernel, 'same'))
    rms = df
    return rms
def plot_rms(df):
    """Plot the RMS envelope, left channel on top and right channel below, per muscle."""
    for i in range(1,5):
        fig, ax = plt.subplots(2,1,figsize = (8,6))
        ax[0].plot(df['time'], df[f'a{i}'], label = '%s'%l)
        ax[0].set_title(r'RMS:'+ '\n' + '%s'%(f'm{i}'), fontsize=16)
        ax[0].set_xlabel('time(s)')
        ax[0].set_ylabel('EMG (microV)')
        ax[0].autoscale()
        ax[0].grid()
        leg = ax[0].legend()
        ax[1].plot(df['time'], df[f'b{i}'],'g',label = '%s'%r)
        ax[1].set_ylabel("V (microV)")
        ax[1].set_xlabel("time (s)")
        ax[1].autoscale()
        ax[1].grid()
        leg = ax[1].legend()
# ## Full EMG Processing
def process_emg(df):
    """Full EMG pipeline: bandpass -> rectify -> moving-RMS envelope (in place).

    Fix: the old body called ``bandpass_apply(df)`` without the required
    (z1, w1) filter coefficients -- a guaranteed TypeError -- and ran the
    whole pipeline once per channel side. Design the filter once (with the
    module defaults from ``bandpass_create``) and process all channels in a
    single pass instead.
    """
    z1, w1, _, _ = bandpass_create()
    output = linear_envelope(rectify(bandpass_apply(df, z1, w1)))
    return output
def plot_emg(df):
    """Overlay left vs right RMS traces on one axis per muscle.

    NOTE(review): the y-label says 'Distance (px)' although the plotted data
    is EMG in microvolts -- confirm the intended label.
    """
    for i in range(1,5):
        fig, ax = plt.subplots()
        ax.plot(df['time'], df[f'a{i}'], label = '%s'%l)
        ax.plot(df['time'], df[f'b{i}'],'g',label = '%s'%r)
        ax.set_title(r'Left vs. Right RMS: Smile' +'\n'+'%s'%f'm{i}', fontsize=16)
        ax.set_xlabel('time(s)')
        ax.set_ylabel('Distance (px)')
        ax.grid()
        leg = ax.legend()
def compare_to_rect(df, rect, window_size = 100, fs = 2000):
    """Overlay the processed (filtered/RMS) EMG against the rectified signal.

    One twin-axis figure per left channel a1..a4.

    Fixes: the subplot title referenced the undefined name ``i`` (the loop
    variable is ``idx``); the unused moving-average kernel and dead ``color``
    assignments were removed.
    """
    r = "Right"
    l = "Left"
    for idx in range(1, 5):
        fig, ax1 = plt.subplots()
        ax1.set_xlabel('time (s)')
        ax1.set_ylabel('EMG (microV)')
        ax1.plot(df['time'], df[f'a{idx}'], label = 'Filtered', color = 'r')
        ax1.set_title('RMS vs. Raw EMG:' + '\n' + '%s'%f'a{idx}')
        ax1.grid()
        leg1 = ax1.legend()
        ax2 = ax1.twinx()  # second y-axis sharing the time axis
        ax2.set_ylabel('Mouth (px)')
        ax2.plot(df['time'], rect[f'a{idx}'], label = 'Rect', color = 'b', alpha = 0.2)
        ax2.tick_params(axis='y', labelcolor='r')
        leg2 = ax2.legend()
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        plt.show()
def plot_rect(df,rect):
    """Process df through the full EMG pipeline and overlay it on the rectified signal.

    Note: process_emg mutates and reassigns df locally; the caller's frame may
    still be modified in place by the pipeline steps.
    """
    df = process_emg(df)
    for i in range(1,5):
        fig, ax = plt.subplots()
        ax.plot(df['time'], df[f'a{i}'], label = '%s'%'Final')
        ax.plot(df['time'], rect[f'a{i}'],'g',label = '%s'%'Rect')
        ax.set_title(r'Left vs. Right RMS: Smile' +'\n'+'%s'%f'm{i}', fontsize=16)
        ax.set_xlabel('time(s)')
        ax.set_ylabel('Distance (px)')
        ax.grid()
        leg = ax.legend()
# ## Power EMG
def emg_power(df, fs = 2000):
    """Convert each EMG channel to power: x**2 / (2*N + 1), in place.

    N is the total number of samples implied by the recording length and fs.

    Fix: the b-channels previously squared ``df[f'a{i}']`` -- i.e. the
    *already power-converted* a-channel -- instead of their own data. Each
    side now uses its own channel.
    """
    t_t_emg = df['time'].iloc[-1]
    N_samples = int(fs * t_t_emg)
    for i in range(1, 5):
        df[f'a{i}'] = (df[f'a{i}'] ** 2) / (2 * N_samples + 1)
        df[f'b{i}'] = (df[f'b{i}'] ** 2) / (2 * N_samples + 1)
    Power = df
    return Power
# ## Introducing Kinematic Data - Low Pass
# +
def rawVert(df=None):
    """Vertical mouth opening per frame: distance between landmarks 62 (top lip)
    and 66 (bottom lip).

    Fix: the old default ``df=raw_kin`` was evaluated at definition time and
    raised NameError because ``raw_kin`` is only assigned later in the
    notebook. Use the None-sentinel idiom instead (same effective default).
    """
    if df is None:
        df = raw_kin
    pos_top = df[['landmark_62_x', 'landmark_62_y']].to_numpy()
    pos_bottom = df[['landmark_66_x', 'landmark_66_y']].to_numpy()
    dispVertical = np.linalg.norm(pos_top - pos_bottom, axis=1)
    return dispVertical
def rawHoriz(df=None):
    """Horizontal mouth width per frame: distance between mouth-corner
    landmarks 51 and 57.

    Fix: the old default ``df=raw_kin`` was evaluated at definition time and
    raised NameError because ``raw_kin`` is only assigned later in the
    notebook. Use the None-sentinel idiom instead (same effective default).
    """
    if df is None:
        df = raw_kin
    pos_derecha = df[['landmark_51_x', 'landmark_51_y']].to_numpy()
    pos_izquierda = df[['landmark_57_x', 'landmark_57_y']].to_numpy()
    dispHorizontal = np.linalg.norm(pos_derecha - pos_izquierda, axis=1)
    return dispHorizontal
# -
def filter_kin_create(low_pass_frames = 20, fps = 100, lp_order = 2):
    """Design a digital Bessel low-pass for the kinematic (landmark) data.

    The cutoff is normalised to the Nyquist frequency fps/2. Returns (b, a).
    """
    cutoff = low_pass_frames / (fps / 2)
    num, den = sp.signal.bessel(lp_order, Wn=cutoff, btype='lowpass')
    return num, den
# + slideshow={"slide_type": "-"}
def filterHoriz(df, z3, w3):
    """Zero-phase low-pass the horizontal mouth-distance signal (returns a new array)."""
    return sp.signal.filtfilt(z3, w3, df)

def filterVert(df, z3, w3):
    """Zero-phase low-pass the vertical mouth-distance signal (returns a new array)."""
    return sp.signal.filtfilt(z3, w3, df)
# -
def lowpass_bode(z3, w3, low_pass_frames = 20):
    """Plot the Bode diagram of the kinematic low-pass filter.

    NOTE(review): z3/w3 are *digital* coefficients (from filter_kin_create)
    but signal.TransferFunction/bode treats them as an analog system -- the
    plotted frequency axis may not correspond to real Hz; confirm.
    """
    sys = signal.TransferFunction(z3, w3)
    w, mag, phase = signal.bode(sys)
    fig, ax = plt.subplots(2,1,figsize = (6,8))
    ax[0].semilogx(w, mag)
    ax[0].set_title(r'Bode Diagram:'+ '\n' + ' %s Hz 2nd Order Bessel LP'%low_pass_frames, fontsize=16)
    ax[0].set_xlabel('Frequency (rad/s)')
    ax[0].set_ylabel('Magnitude (dB)')
    ax[0].autoscale()
    ax[0].grid()
    ax[1].semilogx(w, phase)
    ax[1].set_ylabel("Phase (degree)")
    ax[1].set_xlabel("Frequency (rad/s)")
    ax[1].autoscale()
    ax[1].grid()
    plt.show()
def compare_kin(df,raw, window_size = 100, fps = 100, t_low = 1, t_high = 27):
    """Plot raw vs filtered vertical mouth distance on one time axis.

    NOTE(review): relies on a global ``mouthFrame`` that is never assigned in
    this notebook (it only exists inside rawVert/rawHoriz); calling this as-is
    raises NameError -- confirm the intended frame-index source. The
    window_size/t_low/t_high parameters are currently unused.
    """
    fig, ax = plt.subplots()
    ax.plot(mouthFrame/fps, raw,'--b',label = "Raw" )
    ax.plot(mouthFrame/fps, df,'--r',label = "Filtered")
    ax.set_title(r'Raw vs Filtered Vertical Distance: Smile ', fontsize=16)
    ax.set_xlabel('time(s)')
    ax.set_ylabel('Distance (px)')
    ax.grid()
    leg = ax.legend()
# ## Plotting RMS vs Landmarks (Mouth Horizontal)
def plot_emg_horiz(df,horiz,fps = 100):
    """Plot RMS EMG (left axis) against horizontal mouth distance (right axis).

    NOTE(review): uses the global ``mouthFrame``, which is never assigned at
    module level in this notebook -- confirm the intended frame-index source.
    """
    for i in range(1,5):
        fig, ax1 = plt.subplots()
        color = 'tab:red'
        ax1.set_xlabel('time (s)')
        ax1.set_ylabel('EMG (microV)')
        ax1.plot(df['time'], df[f'a{i}'],label = '%s'%m3)
        ax1.set_title('RMS vs. Mouth Horizontal distance:' +'\n' + '%s'%f'm{i}')
        ax1.autoscale()
        ax1.tick_params(axis='y')
        ax1.grid()
        leg1 = ax1.legend()
        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax2.set_ylabel('Mouth (px)')  # we already handled the x-label with ax1
        ax2.plot(mouthFrame/fps, horiz,'--o',label = 'Dist', color = 'r')
        ax2.tick_params(axis='y', labelcolor='r')
        leg2 = ax2.legend()
# z1, w1, low_band, high_band = bandpass_create()
# filtered = bandpass_apply(raw_emg)
# rectified = rectify(filtered)
# emg = linear_envelope(rectified)
# ## Plot Power, RMS, and filtered Kinematic Data: Whole period (Mouth Horizontal)
def plot_all(df,Power,kin):
    """Plot RMS EMG, EMG power, and kinematic distance on one triple-axis figure per channel.

    NOTE(review): uses globals ``mouthFrame`` and ``fps``; ``mouthFrame`` is
    never assigned at module level in this notebook -- confirm before running.
    """
    for i in range(1,5):
        fig, ax1 = plt.subplots()
        color = 'tab:red'
        ax1.set_xlabel('time (s)')
        ax1.set_ylabel('EMG (microV)')
        ax1.plot(df['time'], df[f'a{i}'],label = 'RMS', color = 'g')
        ax1.set_title('RMS vs. Mouth Horizontal distance:' +'\n' + '%s'%a3)
        # ax1.tick_params(axis='y')
        leg1 = ax1.legend()
        ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax2.set_ylabel('Mouth (px)') # we already handled the x-label with ax1
        ax2.plot(mouthFrame/fps, kin,'--o',label = 'Dist', color = 'r')
        ax2.tick_params(axis='y', labelcolor='r')
        leg2 = ax2.legend()
        ax3 = ax1.twinx() # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax3.set_ylabel('Mouth (px)') # we already handled the x-label with ax1
        ax3.plot(Power['time'], Power[f'a{i}'], label = 'Power', color = 'b',alpha = 0.3)
        ax3.tick_params(axis='y', labelcolor='r')
        ax1.grid()
        leg3 = ax3.legend()
        fig.tight_layout() # otherwise the right y-label is slightly clipped
        plt.show()
def find_thresholds(df, threshold=71):
    """Scan a 1-D signal against a threshold and return two index lists.

    var1 collects indices i where df[i] <= threshold and df[i-1] < threshold;
    var2 collects upward crossings (df[i-1] <= threshold and df[i] > threshold).

    The threshold is generalized to a parameter; its default 71 is the value
    that was previously hard-coded, so existing calls behave identically.

    NOTE(review): at i == 0 the df[i-1] lookup wraps to the *last* sample, and
    the first condition fires for every below-threshold sample rather than
    only at crossings -- confirm both are intended.
    """
    var1 = []
    var2 = []
    for i in range(len(df)):
        if ((df[i] <= threshold) and (df[i-1] < threshold)):
            var1.append(i)
        if ((df[i-1] <= threshold) and (df[i] > threshold)):
            var2.append(i)
    return var1, var2
# NOTE(review): `Horiz` is never assigned in this notebook -- presumably the
# filtered horizontal mouth distance (filterHoriz output); confirm before running.
var1, var2 = find_thresholds(Horiz)
def vertical_displacement(df, var1, var2, baseline_len=10, half_window=10):
    """Per-event displacement: mid-event mean minus onset-baseline mean.

    For each (start, end) index pair from var1/var2, the baseline is the mean
    of the first ``baseline_len`` samples after ``start`` and the peak level is
    the mean of a ``2*half_window``-sample window centred between start and end.

    Fix: the old body indexed with the *lists* var1/var2 (``df[var1:var1+10]``)
    and assigned into undefined names low_pos/high_pos, so it raised on the
    first iteration; this reconstructs the evident per-event intent.
    """
    displacement = []
    for start, end in zip(var1, var2):
        low = df[start:start + baseline_len].mean()
        mid = int((end - start) / 2 + start)
        high = df[mid - half_window:mid + half_window].mean()
        displacement.append(high - low)
    return displacement
# NOTE(review): `Vert` is never assigned in this notebook -- presumably the
# filtered vertical mouth distance (filterVert output); confirm before running.
v = vertical_displacement(df = Vert,var1 = var1,var2 = var2)
# distanceVertical_average = pd.DataFrame({'Displacement': distanceVertical_average})
#
# Power_average = pd.DataFrame({'Power': Power_average})
#
# # the default behaviour is join='outer'
# # inner join
#
# result = pd.concat([Power_average, distanceVertical_average], axis=1, join='inner')
# display(result)
from scipy import sparse
from scipy.sparse.linalg import spsolve
def baseline_als_optimized(y, lam, p, niter=10):
    """Asymmetric-least-squares baseline estimate of a 1-D signal.

    lam controls smoothness (second-difference penalty), p the asymmetry
    (points above the baseline get weight p, points below 1-p). Returns the
    estimated baseline z after niter reweighting iterations.
    """
    n = len(y)
    diff2 = sparse.diags([1, -2, 1], [0, -1, -2], shape=(n, n - 2))
    # The penalty matrix does not depend on the weights, so build it once.
    penalty = lam * diff2.dot(diff2.transpose())
    w = np.ones(n)
    W = sparse.spdiags(w, 0, n, n)
    for _ in range(niter):
        W.setdiag(w)  # update the diagonal in place instead of rebuilding W
        z = spsolve(W + penalty, w * y)
        w = p * (y > z) + (1 - p) * (y < z)
    return z
# Estimate the baseline of the horizontal mouth signal and plot it.
z = baseline_als_optimized(Horiz,lam = 10**2,p = 0.01)
# %matplotlib qt
plt.plot(z)
plt.grid()
# ## WORKING
# End-to-end driver: load the recordings, design the bandpass, filter and plot.
raw_emg = import_emg()
raw_kin = import_kin()
z1, w1, low_band, high_band = bandpass_create()
band = bandpass_apply(raw_emg, z1, w1)
plot_bandpassed(band)
# NOTE(review): this compares the DataFrame against the *function object*
# import_emg (always elementwise False); `import_emg()` was probably intended.
print(band==import_emg)
| main-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reads multiple files
# This jupyter notebook shows how to use the **open_mfdataset** function of **GOES** package, to read and get the information from **multiple** GOES-16/17 files of **GLM sensor**.
# \
# Please, keep in mind that **open_mfdataset** is intended to open multiple files of GLM sensor, whereas [open_dataset](https://github.com/joaohenry23/GOES/blob/master/examples/v3.1/reads_files.ipynb) open just one file no matter if is a file from ABI or GLM sensor.
#
# Index:
# - [Reads multiple files](#reads_multiple_files)
# - [Gets attribute from file](#gets_attribute)
# - [Gets variable from file](#gets_variable)
# - [Gets dimension from file](#gets_dimension)
# <a id='reads_multiple_files'></a>
# ## Reads multiple files
# Sets path and name of file that will be read.
# Three consecutive 20-second GLM L2 granules from GOES-16.
files = ['/home/joao/Downloads/OR_GLM-L2-LCFA_G16_s20200802030000_e20200802030200_c20200802030227.nc',
         '/home/joao/Downloads/OR_GLM-L2-LCFA_G16_s20200802030200_e20200802030400_c20200802030430.nc',
         '/home/joao/Downloads/OR_GLM-L2-LCFA_G16_s20200802030400_e20200802031000_c20200802031031.nc']
# Import the GOES package.
import GOES
# Reads files.
ds = GOES.open_mfdataset(files)
# Display the content of files.
print(ds)
# <a id='gets_attribute'></a>
# ## Gets attribute from files
# **Attribute** is a string parameters with information about the files. To get a concatenated attribute from multiple files, write the follow:
# Attributes are concatenated across the three granules.
title = ds.attribute('title')
print(title)
orbital = ds.attribute('orbital_slot')
print(orbital)
resol = ds.attribute('spatial_resolution')
print(resol)
# <a id='gets_variable'></a>
# ## Gets variable from file
# **Variable** is a python class that content a parameter with theirs attributes. To get a concatenated variable from multiple files, write the follow:
ptime = ds.variable('product_time')
print(ptime)
# Print one attribute of parameter.
print(ptime.long_name)
# Print the parameter value:
print(ptime.data)
# **The information shown above is the result of concatenating the values of each file.** Use the position index to select one of them.
print(ptime.data[0])
# \
# **Getting other variable:**
flash_lon = ds.variable('flash_lon')
print(flash_lon)
# Print one attribute of parameter.
print(flash_lon.long_name)
# Print the parameter value:
print(flash_lon.data)
# **The information shown above is the result of concatenating the values of each file.**
# <a id='gets_dimension'></a>
# ## Gets dimension from file
# **Dimension** is a class with the spatial attributes of the variables. To get a concatenated dimension from multiple files, write the follow:
num_flashes = ds.dimension('number_of_flashes')
print(num_flashes)
# Theirs attributes are **name** and **size**.
print(num_flashes.name)
print(num_flashes.size)
| examples/v3.2/reads_multiple_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np #linear algebra
import pandas as pd # a data processing and CSV I/O library
import warnings # current version of seaborn generates a bunch of warnings that will be ignored
warnings.filterwarnings('ignore')

# Data Visualization
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set(style='white', color_codes=True)
# -
# load Iris Flower dataset
iris = pd.read_csv('Iris.csv')
iris.head(10)

iris['Species'].value_counts()

iris.plot(kind='scatter',x='SepalLengthCm', y='SepalWidthCm') # use this to make a scatterplot of the Iris features.

# A seaborn jointplot shows bivariate scatterplots and univariate histograms in the same figure
sns.jointplot(x='SepalLengthCm',y='SepalWidthCm', data=iris, size=5)

iris.shape

iris.info()

# use seaborn's FacetGrid to color the scatterplot by species
sns.FacetGrid(iris, hue = 'Species', size=5) \
    .map(plt.scatter, 'SepalLengthCm','SepalWidthCm') \
    .add_legend()

# We can look at an individual feature in Seaborn through a boxplot
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)

# One way we can extend this plot is adding a layer of individual points on top of
# it through Seaborn's striplot
#
# use jitter=True so that all the points don't fall in single vertical lines
# above the species
#
# Saving the resulting axes as ax each time causes the resulting plot to be shown
# on top of the previous axes
ax = sns.boxplot(data=iris, x = 'Species',y = 'PetalLengthCm')
ax = sns.stripplot(data=iris, x='Species', y='PetalLengthCm', jitter=True, edgecolor='green')

# A violin plot combines the benefits of the previous two plots and simplifies them
# Denser regions of the data are fatter, and sparser thinner in a violin plot
sns.violinplot(x='Species',y='PetalLengthCm', data=iris, size=6)

# +
# A useful seaborn plot for looking at univariate relations is the kdeplot,
# which creates and visualizes a kernel density estimate of the underlying feature
sns.FacetGrid(iris, hue="Species", size=6) \
    .map(sns.kdeplot, "PetalLengthCm") \
    .add_legend()
# -

iris.head()

sns.pairplot(iris.drop('Id', axis=1), hue='Species', size=3)
# Another useful seaborn plot is the pairplot, which shows the bivariate relation
# between each pair of features
#
# From the pairplot, we'll see that the Iris-setosa species is separataed from the other
# two across all feature combinations
# +
# The diagonal elements in a pairplot show the histogram by default
# We can update these elements to show other things, such as a kde
sns.pairplot(iris.drop('Id', axis=1), hue='Species', size=3, diag_kind='kde')
# +
# make a boxplot with Pandas on each feature split out by species
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12,6))
# -
# One cool more sophisticated technique pandas has available is called Andrews Curves
# Andrews Curves involve using attributes of samples as coefficients for Fourier series
# and then plotting these
# FIX: pandas.tools.plotting was removed in pandas 0.20; the plotting
# helpers now live in pandas.plotting.
from pandas.plotting import andrews_curves
andrews_curves(iris.drop("Id", axis=1), "Species")
# Another multivariate visualization technique pandas has is parallel_coordinates
# Parallel coordinates plots each feature on a separate column & then draws lines
# connecting the features for each data sample
from pandas.tools.plotting import parallel_coordinates
parallel_coordinates(iris.drop("Id", axis=1), "Species")
# A final multivariate visualization technique pandas has is radviz
# Which puts each feature as a point on a 2D plane, and then simulates
# having each sample attached to those points through a spring weighted
# by the relative value for that feature
# FIX: import from pandas.plotting (pandas.tools.plotting was removed in pandas 0.20)
from pandas.plotting import radviz
radviz(iris.drop("Id", axis=1), "Species")
# NOTE(review): sns.factorplot was renamed sns.catplot in seaborn 0.9 and
# removed in 0.11 — update this call if running on a modern seaborn.
sns.factorplot('SepalLengthCm', data=iris, hue='Species', kind='count' )
| iris data/github project completed/Machine-Learning-with-Iris-Dataset-master/Iris Species Dataset Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # Weather Data Analytics
# This notebook performs some basic weather data analytics using the PySpark RDD interface.
# ## Helper Methods
# First we need some helper methods for converting the raw data into something that we can work with. We decide to use Python dictionaries instead of classes, since custom classes cannot be used within Zeppelin due to serialization issues
# +
def _get_float(str):
"""
Helper method for converting a string to a float. If this is not possible, None will be returned instead
"""
if len(str) == 0:
return None
try:
return float(str)
except ValueError:
return None
def extract_station(line):
    """
    Parse one CSV line of the ISD station master list into a dict.

    Quotes are stripped from every field; latitude, longitude and
    elevation are converted to floats (None when missing/invalid).
    """
    values = [field.replace('"', '') for field in line.split(',')]
    # First six columns are plain identifier strings.
    record = dict(zip(('usaf', 'wban', 'name', 'country', 'state', 'icao'),
                      values[:6]))
    # Geographic columns may be empty — _get_float maps those to None.
    record['latitude'] = _get_float(values[6])
    record['longitude'] = _get_float(values[7])
    record['elevation'] = _get_float(values[8])
    record['date_begin'] = values[9]
    record['date_end'] = values[10]
    return record
def extract_weather(line):
    """
    Parse one fixed-width ISD weather record into a dict.

    Field positions follow the raw record layout used throughout this
    notebook; temperature and wind speed are stored in tenths of units,
    and a quality flag of '1' marks a valid measurement.
    """
    return {
        'date': line[15:23],
        'time': line[23:27],
        'usaf': line[4:10],
        'wban': line[10:15],
        'airTemperatureQuality': line[92] == '1',
        'airTemperature': float(line[87:92]) / 10,
        'windSpeedQuality': line[69] == '1',
        'windSpeed': float(line[65:69]) / 10,
    }
# -
# ## Test extraction methods
# +
# Load stations from 's3://dimajix-training/data/weather/isd-history'.
# Transform the data into Python dictionary using extract_station
# NOTE: `sc` is the SparkContext provided by the notebook runtime.
stations = sc.textFile('s3://dimajix-training/data/weather/isd-history').map(
    extract_station
)

# Print a couple of elements from the transformed RDD
# (take(5) pulls only five records to the driver, so this is cheap)
for s in stations.take(5):
    print(s)
# +
# Load weather from 's3://dimajix-training/data/weather/2014'.
# Transform the data into Python dictionary using extract_weather
weather = sc.textFile('s3://dimajix-training/data/weather/2014').map(extract_weather)

# Print a couple of elements from the transformed RDD
for w in weather.take(5):
    print(w)
# -
# # Join Data Sets
#
# In order to analyse the data, we need to join the weather data with the station data, so we can get more detailed information where the weather actually was recorded.
# +
# Create a key for every weather station using the values for 'usaf' and 'wban' from every record.
# This can be done using the keyBy method.
station_index = stations.keyBy(lambda data: data['usaf'] + data['wban'])

# Create a key for every weather measurement element using the values for 'usaf' and 'wban' from every record.
# This can be done using the keyBy method.
weather_index = weather.keyBy(lambda data: data['usaf'] + data['wban'])

# Now join weather and stations together using the keyed data. This can be done using the join method.
# Resulting elements have the shape (key, (weather_dict, station_dict)).
joined_weather = weather_index.join(station_index)

# Print some elements from joined_weather.
for d in joined_weather.take(5):
    print(d)
# -
# ## Caching Data
#
# The join was really expensive. Before continuing you might want to cache the data and give it a nice name (for example "joined weather data") before continuing with the next steps.
# Cache the data for next operations — the join above is expensive and
# joined_weather is reused by several downstream actions
joined_weather.setName("joined weather data").cache()
# ## Create appropriate Keys
# We want to analyze the data grouped by country and year. So we need to create appropriate keys.
#
# This will be done using a helper methid extract_country_year_weather, which should return a tuple
#
# ((country, year), weather)
#
# for every record in joined_weather.
#
# Pay attention to the layout of the elements in joined_weather, as can been see from the output above
# +
def extract_country_year_weather(data):
    """
    Re-key one joined record as ((country, year), weather_dict).

    *data* has the shape (join_key, (weather_dict, station_dict)); the
    country comes from the station, the year from the measurement date.
    """
    _, (weather, station) = data
    # The date field is 'YYYYMMDD', so the first four characters are the year.
    return ((station['country'], weather['date'][:4]), weather)
# Re-key every joined record as ((country, year), weather) for the aggregation below
weather_per_country_and_year = joined_weather.map(extract_country_year_weather)
# -
# ## Perform Aggregation
# We want to extract minimum and maximum of wind speed and of temperature per year and country (i.e. using the joined data above). We also want to consider cases where data is not valid (i.e. windSpeedQuality is False or airTemperature is False).
#
# We will implement custom aggregation functions that work on dictionaries
# +
def nullsafe_min(a, b):
    """
    Minimum of *a* and *b* where either may be None.

    A None argument is ignored and the other value returned
    (None when both are None).
    """
    if a is None or b is None:
        return a if b is None else b
    return min(a, b)
def nullsafe_max(a, b):
    """
    Maximum of *a* and *b* where either may be None.

    A None argument is ignored and the other value returned
    (None when both are None).
    """
    if a is None or b is None:
        return a if b is None else b
    return max(a, b)
# Neutral value used in aggregation: all extrema start as None so the
# nullsafe min/max helpers adopt the first valid measurement they see
zero_wmm = {
    'minTemperature': None,
    'maxTemperature': None,
    'minWindSpeed': None,
    'maxWindSpeed': None,
}
def reduce_wmm(wmm, data):
    """
    Fold a single weather measurement into a running min/max record.

    :param wmm: dict with minTemperature/maxTemperature/minWindSpeed/maxWindSpeed
    :param data: dict for one weather measurement (see extract_weather)
    :returns: a new min/max dict; neither input is modified
    """
    # Start from the previous extrema and only fold in fields whose
    # quality flag marks the measurement as valid.
    result = {
        'minTemperature': wmm['minTemperature'],
        'maxTemperature': wmm['maxTemperature'],
        'minWindSpeed': wmm['minWindSpeed'],
        'maxWindSpeed': wmm['maxWindSpeed'],
    }
    if data['airTemperatureQuality']:
        temperature = data['airTemperature']
        result['minTemperature'] = nullsafe_min(result['minTemperature'], temperature)
        result['maxTemperature'] = nullsafe_max(result['maxTemperature'], temperature)
    if data['windSpeedQuality']:
        speed = data['windSpeed']
        result['minWindSpeed'] = nullsafe_min(result['minWindSpeed'], speed)
        result['maxWindSpeed'] = nullsafe_max(result['maxWindSpeed'], speed)
    return result
def combine_wmm(left, right):
    """
    Merge two min/max records element-wise into a new record.

    :param left: first min/max dict
    :param right: second min/max dict
    :returns: a new dict with the combined extrema; inputs unmodified
    """
    return {
        'minTemperature': nullsafe_min(left['minTemperature'], right['minTemperature']),
        'maxTemperature': nullsafe_max(left['maxTemperature'], right['maxTemperature']),
        'minWindSpeed': nullsafe_min(left['minWindSpeed'], right['minWindSpeed']),
        'maxWindSpeed': nullsafe_max(left['maxWindSpeed'], right['maxWindSpeed']),
    }
# +
# Aggregate min/max information per year and country.
# aggregateByKey folds the measurements of each partition with reduce_wmm
# and merges the per-partition results with combine_wmm, starting from zero_wmm.
weather_minmax = weather_per_country_and_year.aggregateByKey(
    zero_wmm, reduce_wmm, combine_wmm
)

for m in weather_minmax.take(5):
    print(m)
# -
# # Format Output
#
# We want to create CSV data, so we need to reformat the Python dicts to nicely looking strings
# +
def format_result(row):
    """
    Render one ((country, year), minmax_dict) record as a UTF-8 encoded
    CSV line: country,year,minTemperature,maxTemperature,minWindSpeed,maxWindSpeed.

    Missing (None) extrema are rendered as 0.0.
    """
    (country, year), stats = row
    numbers = [
        stats['minTemperature'] or 0.0,
        stats['maxTemperature'] or 0.0,
        stats['minWindSpeed'] or 0.0,
        stats['maxWindSpeed'] or 0.0,
    ]
    line = "%s,%s,%f,%f,%f,%f" % (country, year, *numbers)
    # Encode as UTF-8, or we might experience some problems downstream
    return line.encode('utf-8')
# Collect the formatted CSV lines to the driver and print them
result = weather_minmax.map(format_result).collect()
for l in result:
    print(l)
# -
# # Bonus: Process all Years
# +
def load_year(year):
    """Load and parse the raw weather records for a single *year*."""
    dirname = 'data/weather/%d' % year
    return sc.textFile(dirname).map(extract_weather)


# Build one RDD spanning all years 2004..2014 and repeat the whole pipeline
years_data = [load_year(year) for year in range(2004, 2015)]
all_weather_data = sc.union(years_data)

weather_index = all_weather_data.keyBy(lambda data: data['usaf'] + data['wban'])

# Now join weather and stations together using the keyed data. This can be done using the join method
joined_weather = weather_index.join(station_index)

weather_per_country_and_year = joined_weather.map(extract_country_year_weather)

weather_minmax = weather_per_country_and_year.aggregateByKey(
    zero_wmm, reduce_wmm, combine_wmm
)

result = weather_minmax.map(format_result).collect()
for l in result:
    print(l)
# -
| spark-training/spark-python/jupyter-weather-rdd/Weather Analysis Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
import pickle
import sklearn

# +
# Pre-trained artifacts: Haar cascade face detector plus the pickled SVM
# gender classifier and the PCA (eigenface) transform it was trained with.
haar=cv2.CascadeClassifier('D:/data science/Module-2/model/haarcascade_frontalface_default.xml')
model_svm=pickle.load(open('D:/data science/Module-2/data/model_svm.pickle','rb'))
model_pca=pickle.load(open('D:/data science/Module-2/data/pca_50.pickle','rb'))
# -

data1=np.load('D:/data science/Module-2/data/data_10000_norm.npz')
data1.files
d=data1['arr_0']
# NOTE(review): missing call parentheses — this binds the *method* d.mean,
# not the mean value; probably intended d.mean(). Unused below (the
# pickled mean is loaded instead).
mean1=d.mean
mean=pickle.load(open('D:/data science/Module-2/model/mean_preprocess.pickle','rb'))
gender_pre=['Male','Female']
font=cv2.FONT_HERSHEY_SIMPLEX
def pipeline_model(img,color='bgr'):
    """Detect faces in *img* and annotate each with a predicted gender.

    Pipeline: grayscale -> Haar face detection -> crop -> normalize to
    [0,1] -> resize to 100x100 -> flatten -> subtract training mean ->
    PCA (eigenface) transform -> SVM probability -> draw box + label.
    Returns the annotated image (drawn on in place).
    """
    #convert to grayscale
    if color=='bgr':
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    else:
        gray=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    #step 3 crop using haar classifier
    faces=haar.detectMultiScale(gray,1.5,3)
    for x,y,w,h in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
        roi=gray[y:y+h,x:x+w]
        #step4 Normalization(0-1)
        roi=roi/255.0
        #step5: Resize Image(100,100) — shrink with INTER_AREA, enlarge with INTER_CUBIC
        if roi.shape[1]>100:
            roi_resize=cv2.resize(roi,(100,100),cv2.INTER_AREA)
        else:
            roi_resize=cv2.resize(roi,(100,100),cv2.INTER_CUBIC)
        #step6 Flattening (1x10,000)
        roi_reshape=roi_resize.reshape(1,10000)
        #step7 subtract with mean
        roi_mean=roi_reshape-mean
        #step 8 get eigen image
        eigen_image=model_pca.transform(roi_mean)
        #step 9 pass to ml model
        results=model_svm.predict_proba(eigen_image)[0]
        #step10: index of the most probable class and its probability
        predict=results.argmax()
        score=results[predict]
        #step 11: draw the label above the face box
        text='%s:%0.2f'%(gender_pre[predict],score)
        cv2.putText(img,text,(x,y),font,1,(0,255,0),2)
    return img
from PIL import Image

#test data
test_data=('D:/data science/Module-2/data/male_000281.jpg')
color='bgr'
#step1 read image
img=Image.open(test_data)
#convert into array
img=np.array(img)
#pass into pipeline model
img=pipeline_model(img)
plt.imshow(img)

# +
# Run the same pipeline frame-by-frame over a video file
cap = cv2.VideoCapture('D:/data science/Module-2/data/video.mp4')
while True:
    ret, frame = cap.read() # bgr
    if ret == False:
        break
    frame = pipeline_model(frame,color='bgr')
    cv2.imshow('Gender Detector',frame)
    if cv2.waitKey(10) == ord('s'): # press s to exit --#esc key (27),
        break
cv2.destroyAllWindows()
cap.release()
# -
| Machine Learning Model/Pipeline Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: ipykernel_py2
# ---
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt

# Daily adjusted prices for Procter & Gamble (PG) and Beiersdorf (BEI.DE)
sec_data = pd.read_csv('D:/Python/PG_BEI.DE_2007_2017.csv', index_col='Date')
sec_data.tail()

# Continuously compounded (log) daily returns
sec_returns = np.log(sec_data / sec_data.shift(1))
sec_returns

# ## PG
sec_returns['PG'].mean()
# Annualize the mean daily return (~250 trading days per year)
sec_returns['PG'].mean() * 250
sec_returns['PG'].std()
# Annualized volatility scales with the square root of time
sec_returns['PG'].std() * 250 ** 0.5

# ## Beiersdorf
sec_returns['BEI.DE'].mean()
sec_returns['BEI.DE'].mean() * 250
sec_returns['BEI.DE'].std()
sec_returns['BEI.DE'].std() * 250 ** 0.5

# ***
# Python 2 print statements — this notebook targets a Python 2 kernel
print sec_returns['PG'].mean() * 250
print sec_returns['BEI.DE'].mean() * 250

# NOTE(review): the next line raises a KeyError — ('PG', 'BEI.DE') is a
# tuple key; the double-bracket list form on the following line is the fix.
sec_returns['PG', 'BEI.DE'].mean() * 250
sec_returns[['PG', 'BEI.DE']].mean() * 250
sec_returns[['PG', 'BEI.DE']].std() * 250 ** 0.5
# ## Covariance and Correlation
#
# \begin{eqnarray*}
# Covariance Matrix: \ \
# \Sigma = \begin{bmatrix}
# \sigma_{1}^2 \ \sigma_{12} \ \dots \ \sigma_{1I} \\
# \sigma_{21} \ \sigma_{2}^2 \ \dots \ \sigma_{2I} \\
# \vdots \ \vdots \ \ddots \ \vdots \\
# \sigma_{I1} \ \sigma_{I2} \ \dots \ \sigma_{I}^2
# \end{bmatrix}
# \end{eqnarray*}
# Daily variances of each stock, then annualized (* 250 trading days)
PG_var = sec_returns['PG'].var()
PG_var
BEI_var = sec_returns['BEI.DE'].var()
BEI_var
PG_var_a = sec_returns['PG'].var() * 250
PG_var_a
BEI_var_a = sec_returns['BEI.DE'].var() * 250
BEI_var_a

# ***
# Covariance and correlation matrices of the daily returns
cov_matrix = sec_returns.cov()
cov_matrix
cov_matrix_a = sec_returns.cov() * 250
cov_matrix_a

# ***
corr_matrix = sec_returns.corr()
corr_matrix

# ## Calculating Portfolio Risk
# Equal weighting scheme:
weights = np.array([0.5, 0.5])

# Portfolio Variance: w' * Cov * w
pfolio_var = np.dot(weights.T, np.dot(sec_returns.cov() * 250, weights))
pfolio_var

# Portfolio Volatility: square root of the portfolio variance
pfolio_vol = (np.dot(weights.T, np.dot(sec_returns.cov() * 250, weights))) ** 0.5
pfolio_vol

print str(round(pfolio_vol, 5) * 100) + ' %'

# ## Calculating Diversifiable and Non-Diversifiable Risk of a Portfolio
weights = np.array([0.5, 0.5])
weights[0]
weights[1]

# ***
# Diversifiable Risk = portfolio variance minus the weighted individual
# variances (the part attributable to correlation between the stocks)
PG_var_a = sec_returns[['PG']].var() * 250
PG_var_a
BEI_var_a = sec_returns[['BEI.DE']].var() * 250
BEI_var_a
# NOTE: with the double-bracket (DataFrame) form above, var() yields a
# Series, so dr below is a Series rather than a scalar — the single
# bracket version further down produces the scalar result.
dr = pfolio_var - (weights[0] ** 2 * PG_var_a) - (weights[1] ** 2 * BEI_var_a)
dr
float(PG_var_a)
PG_var_a = sec_returns['PG'].var() * 250
PG_var_a
BEI_var_a = sec_returns['BEI.DE'].var() * 250
BEI_var_a
dr = pfolio_var - (weights[0] ** 2 * PG_var_a) - (weights[1] ** 2 * BEI_var_a)
dr
print str(round(dr*100, 3)) + ' %'

# Non-Diversifiable Risk: the remainder (systematic risk)
n_dr_1 = pfolio_var - dr
n_dr_1
n_dr_2 = (weights[0] ** 2 * PG_var_a) + (weights[1] ** 2 * BEI_var_a)
n_dr_2
n_dr_1 == n_dr_2
| Python for Finance - Code Files/79 Diversifiable and Non-Diversifiable Risk/CSV/Python 2 CSV/Diversifiable and Non-Diversifiable Risk of a Portfolio - Lecture_CSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from nlabpy.parse.seq import parse_fasta
# ## Download and extract `hg19` assembly
# ls -lah ../ref
# + language="bash"
#
# wget ftp://igenome:<EMAIL>/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz \
# --directory-prefix=../ref
# tar -xzvf ../ref/Homo_sapiens_UCSC_hg19.tar.gz -C ../ref
# rm ../ref/Homo_sapiens_UCSC_hg19.tar.gz
# -
# ## Build `GTF` dataframe from the `lncRNA` annotation file
def gtf_df(filename):
    """
    Build a DataFrame of exon records from a GTF annotation file.

    Each row carries the exon's chromosome, start and end coordinates
    plus the transcript_id and exon_number attributes (quotes stripped).
    Non-exon features are ignored.
    """
    records = []
    with open(filename, 'rt') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            if fields[2] != 'exon':
                continue
            rec = {}
            # The 9th column is a ';'-separated list of 'key "value"' attributes.
            for attribute in fields[8].strip().split(';'):
                if not attribute:
                    continue
                key, value = attribute.split()
                if key in ('transcript_id', 'exon_number'):
                    rec[key] = value.strip('"')
            rec['chr'] = fields[0]
            rec['start'] = int(fields[3])
            rec['end'] = int(fields[4])
            records.append(rec)
    return pd.DataFrame.from_records(records)
gtf = gtf_df('../ref/lncRNA.gtf')
gtf

# ## Extract the sequence of the locus annotated in `lncRNA.gtf` plus 500 bp on each side
# +
# Stream chr2 from the hg19 FASTA; parse_fasta yields (header, sequence) pairs
parser = parse_fasta('../ref/Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr2.fa')
_, chr2 = next(parser)

def get_seqs(rec):
    # Slice the chromosome sequence by the exon's coordinates
    return chr2[rec.start:rec.end]

gtf['sequence'] = gtf.apply(get_seqs, axis=1)
gtf['seq_length'] = gtf['end'] - gtf['start']
gtf

# +
# Write the whole locus (plus 500 bp flanks) as a single FASTA record
fa_tpl = '>{}'
with open('../ref/ref_locus.fa', 'wt') as fo:
    header = fa_tpl.format('lnrCXCR4')
    fo.write('{}\n{}\n'.format(header, chr2[gtf.start.min()-500:gtf.end.max()+500]))
# !head ../ref/ref_locus.fa
# ## Build `bowtie2` index for the locus reference
# + language="bash"
#
# bowtie2-build ../ref/ref_locus.fa ../ref/lncRNA_locus
# -
| notebooks/00 - Build reference.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia (8 threads) 1.7.3
# language: julia
# name: julia-(8-threads)-1.7
# ---
# # 2D Hydro simulation with Volume penalization method
# This notebook aims to show the workflow of setting up a Hydro simulation with the volume penalization method in cylindrical coordinates. ([Morales et al. 2012](https://www.sciencedirect.com/science/article/pii/S002199911400401X))
#
# We pick the classical Taylor Couette experiment in low Re $(Re\sim 1)$ with the comparsion between the analytical and numerical result.
using MHDFlows,PyPlot,CUDA
using LinearAlgebra: mul!, ldiv!

device()

# Numerical / physical parameters
N = 128;      # horizontal resolution
Nz= 4;        # vertical resolution (quasi-2D setup)
Lx = 2π;      # domain size
ν,η = 1,1;    # viscosity / resistivity (low-Re regime)
dt = 2e-4;    # time step

# Testing the problem
# Placeholder forcing function: no external driving
nothingfunction(args...) = nothing;
CPUprob = Problem(CPU();
  # Numerical parameters
  nx = N,
  Lx = 2π,
  ny = N,
  nz = Nz,
  # Drag and/or hyper-viscosity for velocity/B-field
  ν = ν,
  nν = 1,
  η = η,
  # VP method (volume penalization — solid regions masked via χ)
  VP_method = true,
  # Timestepper and equation options
  dt = dt,
  stepper = "RK4",
  # Force Driving parameters
  calcF = nothingfunction,
  # Float type and dealiasing
  T = Float32)
# Build the volume-penalization mask for a Taylor–Couette annulus.
# Returns a BitArray that is 0 inside the fluid annulus R₁ ≤ r ≤ R₂
# and 1 inside the solid (penalized) region.
function Taylor_Couette_Cylindrical_Mask_Function(grid; R₂ = 0.82π, R₁ = 0.32π)
  x, y = grid.x, grid.y
  S = BitArray(undef, grid.nx, grid.ny, grid.nz)
  for k ∈ 1:grid.nz, j ∈ 1:grid.ny, i ∈ 1:grid.nx
    r = √(x[i]^2 + y[j]^2)
    # S = 0 if inside fluid domain while S = 1 in the solid domain
    S[i, j, k] = !(R₁ <= r <= R₂)
  end
  return S
end
# Initialize the Taylor–Couette problem in place: zero fluid velocity,
# solid-body rotation Uθ = r/r₀ imposed inside the inner cylinder via the
# VP penalization fields (χ, U₀x/y/z), then refresh the spectral velocity
# representation from the real-space arrays.
function ProblemGeneratorTC!(prob;L0=2π,T=Float32)
  # Output Setting
  x = Array(prob.grid.x);
  y = Array(prob.grid.y);
  z = Array(prob.grid.z);
  nx,ny,nz = prob.grid.nx,prob.grid.ny,prob.grid.nz;
  ux,uy,uz = zeros(T,nx,ny,nz),zeros(T,nx,ny,nz),zeros(T,nx,ny,nz);
  Ux,Uy,Uz = zeros(T,nx,ny,nz),zeros(T,nx,ny,nz),zeros(T,nx,ny,nz);

  V₀ = 1;
  r₀ = 0.32π;
  # Setup: Uθ = 1 if r ∈ 0.32π
  # Uθ = r(dθ/dt) ê_θ
  # ̂e_θ = - sinθ ̂i + cosθ ̂j;
  # NOTE(review): the next statement is a no-op (value discarded)
  prob.vars
  χ = Taylor_Couette_Cylindrical_Mask_Function(prob.grid;R₂=0.82π,R₁=r₀)
  for k ∈ 1:nz,j ∈ 1:ny,i ∈ 1:nx
    r = sqrt(x[i]^2+y[j]^2);
    θ = atan(y[j],x[i]) ;
    θ = isnan(θ) ? π/2 : θ
    sinθ = sin(θ);
    cosθ = cos(θ);
    #sinθ = θ < 0 ? sin(-θ) : sin(θ)
    # Solid-body rotation imposed only inside the inner cylinder
    if r <= r₀
      Ux[i,j,k] = -sinθ*r/r₀
      Uy[i,j,k] = cosθ*r/r₀
    end
  end
  copyto!(prob.vars.ux, deepcopy(ux));
  copyto!(prob.vars.uy, deepcopy(uy));
  copyto!(prob.vars.uz, deepcopy(uz));
  copyto!(prob.params.χ, χ);
  copyto!(prob.params.U₀x, Ux);
  copyto!(prob.params.U₀y, Uy);
  copyto!(prob.params.U₀z, Uz);
  # Update V + B Fourier component (forward real FFT of the velocity fields)
  uxh = prob.sol[:, :, :, prob.params.ux_ind];
  uyh = prob.sol[:, :, :, prob.params.uy_ind];
  uzh = prob.sol[:, :, :, prob.params.uz_ind];
  mul!(uxh, prob.grid.rfftplan, prob.vars.ux);
  mul!(uyh, prob.grid.rfftplan, prob.vars.uy);
  mul!(uzh, prob.grid.rfftplan, prob.vars.uz);
  prob.sol[:, :, :, prob.params.ux_ind] .= uxh;
  prob.sol[:, :, :, prob.params.uy_ind] .= uyh;
  prob.sol[:, :, :, prob.params.uz_ind] .= uzh;
  return nothing
end
# Setting up the Initial condition for both domain
ProblemGeneratorTC!(CPUprob);
Ux,Uy = CPUprob.params.U₀x,CPUprob.params.U₀y;
Ur,Uθ = xy_to_polar(Ux,Uy);

# ## The Solid Domain and Initial condition illustration
# Mask the solid region with NaN so imshow leaves it blank
A = ones(size(Ux));
χ = CPUprob.params.χ;
A[χ.==1].=NaN;
figure(figsize=(12,6))
subplot(121);
imshow(χ[:,:,1]);
title(L"Domin\:function\:\chi");
subplot(122);
imshow((A.*Uθ)[:,:,1]);
title(L"U_\theta");

# Integrate the problem up to t = 5.0 (at most 50000 steps, no file output)
TimeIntegrator!(CPUprob,5.0,50000;
  usr_dt = dt,
  diags = [],
  loop_number = 500,
  save = false,
  save_loc = "",
  filename = "",
  dump_dt = 0)
# # Comparsion Between Numerical & Analytical Soultion
# Analytic azimuthal velocity Uθ(r) of steady Taylor–Couette flow,
# Uθ = A·r + B/r, with the classical coefficients fixed by the cylinder
# radii R₁, R₂ and angular velocities Ω₁, Ω₂, sampled on an N×N grid.
function TCFlowSolution(L, N; R₁ = 0.32*π, R₂ = 0.95π, Ω₁ = 1, Ω₂ = 0)
  grid = TwoDGrid(CPU(), N, L, N, L; T = Float32)
  denom = R₂^2 - R₁^2
  A = (Ω₂*R₂^2 - Ω₁*R₁^2)/denom
  B = ((Ω₁-Ω₂)*R₁^2*R₂^2)/denom
  Uθ = zeros(N, N)
  for j ∈ 1:N, i ∈ 1:N
    r = sqrt(grid.x[i]^2 + grid.y[j]^2)
    Uθ[i, j] = A*r + B/r
  end
  return Uθ
end
# +
# Convenience wrapper: build a TwoDGrid matching the array size and
# delegate to the grid-based method below.
function xy_to_polar_(ux,uy;Lx=2π,Ly=Lx,T=Float32)
  nx,ny,nz = size(ux);
  dev = CPU();
  grid = TwoDGrid(dev, nx, Lx, ny, Ly; T=T)
  Ur,Uθ = xy_to_polar_(ux,uy,grid;Lx=2π,Ly=Lx,T=Float32);
  return Ur,Uθ;
end

function xy_to_polar_(ux::Array,uy::Array,grid;Lx=2π,Ly=Lx,T=Float32)
  #=
    Function for converting x-y vector to r-θ vector, using linear transform
      [x'] = [cos(θ) -rsin(θ)][r']
      [y']   [sin(θ)  rcos(θ)][θ']
    So e_r = cosθ ̂i + sinθ ̂j
       e_θ = -sinθ ̂i + cosθ ̂j
  =#
  nx,ny,nz = size(ux);
  Ur = zeros(T,nx,ny,nz);
  Uθ = zeros(T,nx,ny,nz);
  for j ∈ 1:ny, i ∈ 1:nx
    r = sqrt(grid.x[i]^2+grid.y[j]^2);
    θ = atan(grid.y[j],grid.x[i]) ;
    θ = isnan(θ) ? π/2 : θ;
    sinθ = sin(θ);
    cosθ = cos(θ);
    # Project the cartesian components onto the polar unit vectors
    Uθ[i,j,:] .= @. -sinθ*ux[i,j,:] + cosθ*uy[i,j,:];
    Ur[i,j,:] .= @. cosθ*ux[i,j,:] + sinθ*uy[i,j,:];
  end
  return Ur,Uθ;
end
# +
figure(figsize=(21,6))
# Mask the solid region with NaN so it is not drawn
A = ones(size(Ux));
# NOTE(review): duplicate statement — A is assigned twice in a row
A = ones(size(Ux));
χ = CPUprob.params.χ;
A[χ.==1].=NaN;

subplot(131)
title(L"U_\theta\:\:Analytical\:\:Solution\:(v\:= 1)",size=16)
Lx,nx = 2π,128;
TA = TCFlowSolution(Lx,nx;R₁ = 0.32*π, R₂ = 0.82π, Ω₁ = 1, Ω₂ = 0)
TA = (A[:,:,1]).*TA;
imshow(TA,cmap="jet",vmin=0,vmax=1);colorbar()

subplot(132)
title(L"U_\theta\:\:Numerical\:\:Solution\:(v\:= 1)",size=16)
Ux,Uy = CPUprob.vars.ux,CPUprob.vars.uy;
Ur,Uθ = xy_to_polar_(Ux,Uy);
TN = (A.*Uθ)[:,:,1];
imshow(TN,cmap="jet",vmin=0,vmax=1);colorbar()

subplot(133)
AA = (A.*TA);
NN = (A.*TN);
title(L"U_\theta\:Radial\:profile",size=16)
# Radial cut through the mid-plane (row j = 64) of both solutions
plot(NN[:,64,1],"kx",label="Numerical Simulation")
plot(AA[:,64,1],"b-",label="Solution of v = 1")
xlabel("L (code Unit)",size=16)
ylabel(L"U_{\theta}",size=16)
legend()
| example/2D_VP_HDExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Manna model
import SOC.models

# Build a 50x50 Manna sandpile model and drive it for 20000 iterations
model = SOC.models.Manna(L=50)
model.run(20000)

model.animate_states(notebook=True)

# Avalanche-size histogram and the fitted power-law exponent
model.plot_histogram(num=100);
model.get_exponent(hist_num=100, smooth_width = 25, plot = True)['exponent']
model.get_exponent(hist_num=100, smooth_width = 25, plot = False)
| research/Manna_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CO
# ## Abstract:
# There are 2 different types of reinforcement Learning methods,
# - Value based
# - Policy based
#
# - Both of these have some drawbacks, where comes a new method, which is acctually a hybrid method called "Actor Critic Method". In this notebook we will try to go through this new Method and try to implement it in one of the Gym Environments.
# # Policy Based Methods:
# **Policy:** A policy is defined as the probability distribution of actions given a state
# $$P(A|S)$$
# In policy-based methods, there is no need to learn a value function. The agent selects an action without using one. In this method we directly try to optimize the policy $\pi$ .
# - $\pi$ is the probability distribution of the actions
# $$\pi_\theta(a|s) = P(a|s)$$
# ## There are 2 types of Policy:
# 1. **Deterministic:**
# - It maps a state to an action. A single action is returned by the policy to be taken.
# - They are used in deterministic environment. ex chess.
# 1. **Stochastic:**
# - In stochastic environment we have a probability distribution of the actions. There is a probability we will take a different action.
# - It is used when an environemtn is uncertain
# ## Advantages:
# - They have ``better convergence properties.`` Value based methods oscillate a lot. In policy based methods we follow the policy gradient to find the best parameters. Because we follow the gradient here, we are guaranteed to converge to a local maximum or the global maximum.
# - Policy based methods are ``better in high dimensional action spaces.`` When there is continuous action spaces, they work better. In DQN we try to assign a score to the definte action, at each time step, but when the action space is continuous, this becomes very complicated, ex driving a car, where the angle of the wheel 15,15.1, 15.2 etc are possibilities. Policy methods adjust the parameters directly.
# - Policy based methods can ``learn stochastic policy``. We dont need to implement the exploration/exploitation tradeoff, in this. In stochastic policy the agent explored the state space without always taking the same action. The output space here is a probability distribution over actions.
# ## Disadvantages:
# - They take ``alot of time to converge, often getting stuck on the local maximum rather than global optimum. `` They take slow step by step
# - ``Evaluating a policy is inefficient and has high variance``
# ## Check if the policy is Good or Not
# To measure how good a policy is, we use a function called the ``objective function``, which calculates the expected reward of the policy. In policy based methods we are trying to find the best parameters $\theta$. $J(\theta)$ will tell us how good the policy is, and policy gradient ascent will help us find the policy parameters that maximize the good actions
# $$J(\theta) = E_{\pi_\theta} \left[\sum_t \gamma^t r_t\right]$$
# - we want to check the quality of the policy $\pi$ with a score function $J(\theta)$
# - Use policy gradient ascent to find the best parameters $\theta$ that improves $\pi$
# ### Policy gradient Ascent
# Once we know how good our policy is, we want to maximize the parameters $\theta$ that maximizes the score function. Maximizing this score function means finding the optimal policy. Now, for maximizing this score function $J(\theta)$, we do gradient ascent on policy parameters. Gradient ascent is just the inverse of gradient descent. In gradient ascent we take the direction of the steepest ascent. We want to find the gradient to the current policy $\pi$ that updates the parameters in the direction of greatest increase, and then iterate.
# $$Policy : \pi_\theta$$
# $$Objective\ function : J(\theta)$$
# $$Gradient : \triangledown_\theta J(\theta)$$
# $$Update : \theta \leftarrow \theta + \alpha \triangledown_\theta J(\theta)$$
# we want to find the policy that maximizes the score:
# $$\theta^* = \arg\max_\theta J(\theta) = \arg\max_\theta E_{\pi_\theta} \left[\sum_t R(s_t, a_t)\right]$$
# which is the total summation of expected rewar given policy. So we want to differenciate the sore function $J(\theta)$
# - Example: [Cartpole](https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Policy%20Gradients/Cartpole/Cartpole%20REINFORCE%20Monte%20Carlo%20Policy%20Gradients.ipynb)
# <img src= "images/epolicy.png">
# - Value based: Here we learn a value function that maps a state to a action. It is useful when we have finite action space.
# - Policy Based: Here we directly try to learn the optimal policy using the value function. It is useful when we have a coninuous or stochastic actions.
# # Actor Critic Method
# A hybrid between value-based algorithms and policy based algorithms
# ### what is Actor and Critic
# 1. The **Critic** estimates the value function. Which could be either an action-value (Q-value) or a State-value(Value)
# $$ q\hat (s,a,w) $$
# 1. The **Actor** updates the policy distribution in the direction suggested by the critic, i.e. via the policy gradient
# $$\pi(s,a,\theta)$$
# As we can see here we have 2 neural networks here!!
# ## Imports
# +
import gym
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input
from keras.layers.merge import Add, Multiply
from keras.optimizers import Adam
import keras.backend as K
import tensorflow as tf
import random
from collections import deque
# -
# ### Creating the ActorCritic Class
# - Chain rule: find the gradient of chaging the actor network params in #
# - getting closest to the final value network predictions, i.e. de/dA #
# - Calculate de/dA as = de/dC * dC/dA, where e is error, C critic, A act
# +
class ActorCritic:
    """Actor-critic (DDPG-style) agent built on TF1 sessions and Keras.

    The *actor* maps a state to a continuous action; the *critic* scores
    (state, action) pairs.  Each network also keeps a "target" copy that is
    periodically synchronised from the online network to stabilise training.
    """
    def __init__(self, env, sess):
        self.env = env
        self.sess = sess
        self.learning_rate = 0.001
        self.epsilon = 1.0            # exploration rate, decayed on every act() call
        self.epsilon_decay = .995
        self.gamma = .95              # discount factor for future rewards
        self.tau = .125               # target-blend rate (unused: hard copies are used below)
        self.memory = deque(maxlen=2000)  # replay buffer of transitions
        # Actor network and its target copy.
        self.actor_state_input, self.actor_model = self.create_actor_model()
        _, self.target_actor_model = self.create_actor_model()
        # Placeholder for de/dC fed in from the critic during actor updates.
        self.actor_critic_grad = tf.placeholder(tf.float32,
            [None, self.env.action_space.shape[0]])
        actor_model_weights = self.actor_model.trainable_weights
        # dC/dA chained with -de/dC: ascend the critic's value estimate.
        self.actor_grads = tf.gradients(self.actor_model.output,
            actor_model_weights, -self.actor_critic_grad)
        grads = zip(self.actor_grads, actor_model_weights)
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(grads)
        # Critic network and its target copy; the critic evaluates the actor.
        self.critic_state_input, self.critic_action_input, \
            self.critic_model = self.create_critic_model()
        _, _, self.target_critic_model = self.create_critic_model()
        # de/dC: gradient of the critic's output w.r.t. its action input.
        self.critic_grads = tf.gradients(self.critic_model.output,
            self.critic_action_input)
        # Initialize variables for later gradient calculations (TF1-era initializer).
        self.sess.run(tf.initialize_all_variables())

    def create_actor_model(self):
        """Build the policy network: state -> action.

        Returns (state_input_tensor, keras_model).
        """
        state_input = Input(shape=self.env.observation_space.shape)
        h1 = Dense(24, activation='relu')(state_input)
        h2 = Dense(48, activation='relu')(h1)
        h3 = Dense(24, activation='relu')(h2)
        output = Dense(self.env.action_space.shape[0], activation='relu')(h3)
        model = Model(input=state_input, output=output)
        adam = Adam(lr=0.001)
        model.compile(loss="mse", optimizer=adam)
        return state_input, model

    def create_critic_model(self):
        """Build the value network: (state, action) -> Q value.

        Returns (state_input_tensor, action_input_tensor, keras_model).
        """
        state_input = Input(shape=self.env.observation_space.shape)
        state_h1 = Dense(24, activation='relu')(state_input)
        state_h2 = Dense(48)(state_h1)
        action_input = Input(shape=self.env.action_space.shape)
        action_h1 = Dense(48)(action_input)
        # Merge the state and action branches before the value head.
        merged = Add()([state_h2, action_h1])
        merged_h1 = Dense(24, activation='relu')(merged)
        output = Dense(1, activation='relu')(merged_h1)
        model = Model(input=[state_input, action_input], output=output)
        adam = Adam(lr=0.001)
        model.compile(loss="mse", optimizer=adam)
        return state_input, action_input, model

    def remember(self, cur_state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append([cur_state, action, reward, new_state, done])

    def _train_actor(self, samples):
        """Update the actor by following the critic's action gradient de/dA."""
        for sample in samples:
            cur_state, action, reward, new_state, _ = sample
            predicted_action = self.actor_model.predict(cur_state)
            grads = self.sess.run(self.critic_grads, feed_dict={
                self.critic_state_input: cur_state,
                self.critic_action_input: predicted_action
            })[0]
            self.sess.run(self.optimize, feed_dict={
                self.actor_state_input: cur_state,
                self.actor_critic_grad: grads
            })

    def _train_critic(self, samples):
        """Update the critic towards the one-step TD target."""
        for sample in samples:
            cur_state, action, reward, new_state, done = sample
            if not done:
                # Bootstrap from the *target* networks for stability.
                target_action = self.target_actor_model.predict(new_state)
                future_reward = self.target_critic_model.predict(
                    [new_state, target_action])[0][0]
                reward += self.gamma * future_reward
            self.critic_model.fit([cur_state, action], reward, verbose=0)

    def train(self):
        """Sample a minibatch from memory and update both networks."""
        batch_size = 32
        if len(self.memory) < batch_size:
            return
        samples = random.sample(self.memory, batch_size)
        self._train_critic(samples)
        self._train_actor(samples)

    def _update_actor_target(self):
        # BUG FIX: this previously copied the actor weights into the *critic*
        # target network; copy them into the actor's target network instead.
        self.target_actor_model.set_weights(self.actor_model.get_weights())

    def _update_critic_target(self):
        # BUG FIX: this previously referenced the nonexistent attribute
        # `self.critic_target_model` (AttributeError at runtime); the attribute
        # created in __init__ is `target_critic_model`.
        self.target_critic_model.set_weights(self.critic_model.get_weights())

    def update_target(self):
        """Hard-copy the online networks into their target copies."""
        self._update_actor_target()
        self._update_critic_target()

    def act(self, cur_state):
        """Epsilon-greedy action: random sample with probability epsilon, else policy output."""
        self.epsilon *= self.epsilon_decay
        if np.random.random() < self.epsilon:
            return self.env.action_space.sample()
        return self.actor_model.predict(cur_state)
# -
# ### Environment
# We will be using the OpenAI Gym "Pendulum-v0" environment.
# The inverted pendulum swingup problem is a classic problem in the control literature. In this version of the problem, the pendulum starts in a random position, and the goal is to swing it up so it stays upright.
# The Pendulum environment has a continuous action space: the number of possible actions is infinite.
# +
# determines how to assign values to each state, i.e. takes the state
# and action (two-input model) and determines the corresponding value
def main():
    """Train the actor-critic agent on the Pendulum-v0 environment for one episode."""
    sess = tf.Session()
    K.set_session(sess)
    env = gym.make("Pendulum-v0")
    agent = ActorCritic(env, sess)

    # Hyper parameters (kept for reference; the loop below runs a single episode).
    num_trials = 1000
    trial_len = 500

    state = env.reset()
    # Seed with a random action before the agent takes over.
    action = env.action_space.sample()
    while True:
        # env.render()
        state = state.reshape((1, env.observation_space.shape[0]))
        # The agent picks the next action (epsilon-greedy inside act()).
        action = agent.act(state)
        action = action.reshape((1, env.action_space.shape[0]))
        next_state, reward, done, _ = env.step(action)
        next_state = next_state.reshape((1, env.observation_space.shape[0]))
        print("Action: ", action, "Reward: ", reward)
        agent.remember(state, action, reward, next_state, done)
        agent.train()
        state = next_state
        if done:
            break
main()
# -
# ## Conclusion
# - A critic measures how good the action taken is (it is "value-based"), whereas an "Actor" controls how the agent behaves.
# ## Resources:
# 1. https://www.freecodecamp.org/news/an-introduction-to-policy-gradients-with-cartpole-and-doom-495b5ef2207f/
# 1. https://towardsdatascience.com/policy-gradients-in-a-nutshell-8b72f9743c5d
# 1. https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69
| Actor Critic/.ipynb_checkpoints/Actor Critic Method-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="93mZkuKENFjy"
# # Baseline for MNIST Handwritten Digit Classification using Pixel Similarity
#
# To create a baseline model for the MNIST handwritten digits classification problem, we expand the approach used in [chapter 4 of fastbook](https://github.com/fastai/fastbook/blob/master/04_mnist_basics.ipynb).
#
# In the book, the average pixel value for every pixel of two numbers - 3 and 7 is calculated. The pixel values of images in the test set are then compared to these averages (using the L1-norm and the RMSE values) and the digit is classified as the average to which the new image is "closer".
#
# We can use the same approach to calculate the average pixel value of every pixel for each of the 10 digits in the MNIST handwritten digits dataset. This gives us 10 "mean pixel images". And then for each image in the test set, we calculate the distance of its pixels from each of the 10 mean pixel images and classify the image as the one from which it is the shortest distance away.
# + id="u-ut4tV3d1vR"
from fastai.vision import *
# + [markdown] id="2BD__vGDQ8RM"
# First, we download and extract the entire MNIST handwritten digits dataset instead of the sample dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="cnkVzeJYeygD" outputId="ab6ad24f-e410-4285-ee79-3d639a9b7775"
path = untar_data(URLs.MNIST)
# + colab={"base_uri": "https://localhost:8080/"} id="ExG3QSX7flNj" outputId="297becab-0898-4975-921d-ede07eb6df16"
path.ls()
# + id="dyV1MlVigGQZ"
# One folder per digit class (0-9) in each of the training/testing splits.
training_paths = [(path/'training'/str(i)) for i in range(10)]
testing_paths = [(path/'testing'/str(i)) for i in range(10)]
# + colab={"base_uri": "https://localhost:8080/"} id="fVrx50o2grGQ" outputId="ec3101a9-38f0-4b8c-a30a-8159a2af98c3"
training_paths
# + [markdown] id="Vu1t26wbO8og"
# We then convert the images to tensors.
# + id="Mdf5uOZugshv"
# For each digit, stack every image in its folder into a single tensor.
# `.data[0]` keeps only the first channel of each image tensor.
training_tensors = [torch.stack([open_image(l).data[0] for l in p.ls()]) for p in training_paths]
testing_tensors = [torch.stack([open_image(l).data[0] for l in p.ls()]) for p in testing_paths]
# + colab={"base_uri": "https://localhost:8080/"} id="bsPX0ARDjunt" outputId="be1c4108-4a85-46f2-da65-973f87e541cb"
len(training_tensors), len(testing_tensors)
# + [markdown] id="tVKUyqZ0O19x"
# We now calculate the mean value of each pixel for each of the digits, using the images in the training set.
# + colab={"base_uri": "https://localhost:8080/", "height": 88} id="j5tOe20P-sW7" outputId="4e09a8f8-b6f7-4d70-a9c4-77df93fe1560"
# Per-pixel mean over all training images of each digit: ten "mean digit" tensors.
mean_tensors = [tr.mean(0) for tr in training_tensors]
# Invert intensities (1 - mean) and repeat to 3 channels purely for display.
mean_images = [Image(1 - mtr.repeat(3, 1, 1)) for mtr in mean_tensors]
show_all(mean_images)
# + colab={"base_uri": "https://localhost:8080/"} id="4oTxMkDj-IZu" outputId="6aed2071-cebd-4ccc-babc-9480049a2860"
testing_tensors[0].shape, mean_tensors[0].shape
# + [markdown] id="awnjFIV2PEet"
# We then iterate over every image in the test set and calculate their distance from each of the 10 images we generated above. We use RMSE to calculate the distance.
#
# We keep track of how many images are correctly classified using the `correct` list and the total number of images in the class using the `total` list.
# + colab={"base_uri": "https://localhost:8080/"} id="vID2OrcEKvQa" outputId="9c49ed51-5996-4cab-f0c0-9ba5961bfd7e"
correct = []
total = []
for i in range(10):
    total.append(testing_tensors[i].shape[0])
    # For every test image of digit `i`, compute the RMSE to each of the 10
    # mean-digit tensors and predict the digit with the smallest distance.
    preds = torch.Tensor([
        torch.stack(
            [
                F.mse_loss(testing_tensors[i][imgidx], mean_tensors[midx]).sqrt()
                for midx in range(10)
            ]
        ).argmin()
        for imgidx in range(testing_tensors[i].shape[0])
    ])
    # Count how many predictions match the true digit `i`.
    correct.append((preds == i).sum())
correct, total
# + [markdown] id="M4QTlrvcPi3k"
# We can then sum the count of correct predictions for each class and divide that by the total number of images in the test set to get the accuracy of this baseline model.
# + colab={"base_uri": "https://localhost:8080/"} id="_gctUnL9MVuL" outputId="2dd16135-7c09-4ab4-925c-258d39aa6423"
torch.Tensor(correct).sum(), torch.Tensor(total).sum()
# + colab={"base_uri": "https://localhost:8080/"} id="WFKUxLpRBL4h" outputId="b7aef383-6f22-4289-d08f-02a6bb7e5795"
print('Accuracy: ', torch.Tensor(correct).sum()/torch.Tensor(total).sum())
# + [markdown] id="6cxxm_cyQgYS"
# This baseline model gives us an accuracy of 82.03% on this dataset.
| content/notebooks/2021-07-05-MNIST_Baseline.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction #
#
# In Lessons 2 and 3, we treated forecasting as a simple regression problem with all of our features derived from a single input, the time index. We could easily create forecasts for any time in the future by just generating our desired trend and seasonal features.
#
# When we added lag features in Lesson 4, however, the nature of the problem changed. Lag features require that the lagged target value is known at the time being forecast. A lag 1 feature shifts the time series forward 1 step, which means you could forecast 1 step into the future but not 2 steps.
#
# In Lesson 4, we just assumed that we could always generate lags up to the period we wanted to forecast (every prediction was for just one step forward, in other words). Real-world forecasting typically demands more than this, so in this lesson we'll learn how to make forecasts for a variety of situations.
#
# # Defining the Forecasting Task #
#
# There are two things to establish before designing a forecasting model:
# - what information is available at the time a forecast is made (features), and,
# - the time period during which you require forecasted values (target).
#
# The **forecast origin** is the time at which you are making a forecast. Practically, you might consider the forecast origin to be the last time for which you have training data for the time being predicted. Everything up to the origin can be used to create features.
#
# The **forecast horizon** is the time for which you are making a forecast. We often describe a forecast by the number of time steps in its horizon: a "1-step" forecast or "5-step" forecast, say. The forecast horizon describes the target.
#
# <figure style="padding: 1em;">
# <img src="https://i.imgur.com/xwEgcOk.png" width=500, alt="">
# <figcaption style="textalign: center; font-style: italic"><center>A three-step forecast horizon with a two-step lead time, using four lag features. The figure represents what would be a single row of training data -- data for a single prediction, in other words.
# </center></figcaption>
# </figure>
#
# The time between the origin and the horizon is the **lead time** (or sometimes *latency*) of the forecast. A forecast's lead time is described by the number of steps from origin to horizon: a "1-step ahead" or "3-step ahead" forecast, say. In practice, it may be necessary for a forecast to begin multiple steps ahead of the origin because of delays in data acquisition or processing.
#
# # Preparing Data for Forecasting #
#
# In order to forecast time series with ML algorithms, we need to transform the series into a dataframe we can use with those algorithms. (Unless, of course, you are only using deterministic features like trend and seasonality.)
#
# We saw the first half of this process in Lesson 4 when we created a feature set out of lags. The second half is preparing the target. How we do this depends on the forecasting task.
#
# Each row in a dataframe represents a single forecast. The time index of the row is the first time in the forecast horizon, but we arrange values for the entire horizon in the same row. For multistep forecasts, this means we are requiring a model to produce multiple outputs, one for each step.
# +
#$HIDE_INPUT$
import numpy as np
import pandas as pd
N = 20
# Toy annual series 0..N-1 used to illustrate how lags and leads align.
ts = pd.Series(
    np.arange(N),
    index=pd.period_range(start='2010', freq='A', periods=N, name='Year'),
    dtype=pd.Int8Dtype,  # NOTE(review): the Int8Dtype *class* is passed here; pd.Int8Dtype() is the conventional spelling -- confirm both behave identically on the target pandas version
)
# Lag features
# shift(k) with k > 0 moves values forward in time: row t holds y(t - k).
X = pd.DataFrame({
    'y_lag_2': ts.shift(2),
    'y_lag_3': ts.shift(3),
    'y_lag_4': ts.shift(4),
    'y_lag_5': ts.shift(5),
    'y_lag_6': ts.shift(6),
})
# Multistep targets
# shift(-k) moves values backward in time: row t holds y(t + k).
y = pd.DataFrame({
    'y_step_3': ts.shift(-2),
    'y_step_2': ts.shift(-1),
    'y_step_1': ts,
})
# Two-level column index: ('Targets', ...) next to ('Features', ...).
data = pd.concat({'Targets': y, 'Features': X}, axis=1)
# Colour the two column groups differently in the rendered table.
data.head(10).style.set_properties(['Targets'], **{'background-color': 'LavenderBlush'}) \
    .set_properties(['Features'], **{'background-color': 'Lavender'})
# -
# The above illustrates how a dataset would be prepared similar to the *Defining a Forecast* figure: a three-step forecasting task with a two-step lead time using five lag features. The original time series is `y_step_1`. The missing values we could either fill-in or drop.
#
# # Multistep Forecasting Strategies #
#
# There are a number of strategies for producing the multiple target steps required for a forecast. We'll outline four common strategies, each with strengths and weaknesses.
#
# ### Multioutput model
#
# Use a model that produces multiple outputs naturally. Linear regression and neural networks can both produce multiple outputs. This strategy is simple and efficient, but not possible for every algorithm you might want to use. XGBoost can't do this, for instance.
#
# <figure style="padding: 1em;">
# <img src="https://i.imgur.com/uFsHiqr.png" width=300, alt="">
# <figcaption style="textalign: center; font-style: italic"><center>
# </center></figcaption>
# </figure>
#
# ### Direct strategy
#
# Train a separate model for each step in the horizon: one model forecasts 1-step ahead, another 2-steps ahead, and so on. Forecasting 1-step ahead is a different problem than 2-steps ahead (and so on), so it can help to have a different model make forecasts for each step. The downside is that training lots of models can be computationally expensive.
#
# <figure style="padding: 1em;">
# <img src="https://i.imgur.com/HkolNMV.png" width=900, alt="">
# <figcaption style="textalign: center; font-style: italic"><center>
# </center></figcaption>
# </figure>
#
# ### Recursive strategy
#
# Train a single one-step model and use its forecasts to update the lag features for the next step. With the recursive method, we feed a model's 1-step forecast back in to that same model to use as a lag feature for the next forecasting step. We only need to train one model, but since errors will propagate from step to step, forecasts can be inaccurate for long horizons.
#
# <figure style="padding: 1em;">
# <img src="https://i.imgur.com/sqkSFDn.png" width=300, alt="">
# <figcaption style="textalign: center; font-style: italic"><center>
# </center></figcaption>
# </figure>
#
# ### DirRec strategy
#
# A combination of the direct and recursive strategies: train a model for each step and use forecasts from previous steps as *new* lag features. Step by step, each model gets an additional lag input. Since each model always has an up-to-date set of lag features, the DirRec strategy can capture serial dependence better than Direct, but it can also suffer from error propagation like Recursive.
#
# <figure style="padding: 1em;">
# <img src="https://i.imgur.com/B7KAvAO.png" width=900, alt="">
# <figcaption style="textalign: center; font-style: italic"><center>
# </center></figcaption>
# </figure>
#
# # Example - Flu Trends #
#
# In this example we'll apply the MultiOutput and Direct strategies to the *Flu Trends* data from Lesson 4, this time making true forecasts for multiple weeks beyond the training period.
#
# We'll define our forecasting task to have an 8-week horizon with a 1-week lead time. In other words, we'll be forecasting eight weeks of flu cases starting with the following week.
#
# The hidden cell sets up the example and defines a helper function `plot_multistep`.
# +
#$HIDE_INPUT$
from pathlib import Path
from warnings import simplefilter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
# Silence library warnings so notebook output stays clean.
simplefilter("ignore")
# Set Matplotlib defaults
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True, figsize=(11, 4))
plt.rc(
    "axes",
    labelweight="bold",
    labelsize="large",
    titleweight="bold",
    titlesize=16,
    titlepad=10,
)
# Keyword arguments reused whenever the observed series is plotted.
plot_params = dict(
    color="0.75",
    style=".-",
    markeredgecolor="0.25",
    markerfacecolor="0.25",
)
# %config InlineBackend.figure_format = 'retina'
def plot_multistep(y, every=1, ax=None, palette_kwargs=None):
    """Plot each row of `y` as one multistep forecast, cycling through a palette.

    Every `every`-th row is drawn; each row's values are re-indexed to a
    period range starting at that row's date before plotting.
    """
    opts = dict(palette='husl', n_colors=16, desat=None)
    if palette_kwargs is not None:
        opts.update(palette_kwargs)
    colors = sns.color_palette(**opts)
    if ax is None:
        fig, ax = plt.subplots()
    ax.set_prop_cycle(plt.cycler('color', colors))
    for origin, forecast in y[::every].iterrows():
        forecast.index = pd.period_range(start=origin, periods=len(forecast))
        forecast.plot(ax=ax)
    return ax
data_dir = Path("../input/ts-course-data")
flu_trends = pd.read_csv(data_dir / "flu-trends.csv")
# Use the 'Week' column as a weekly PeriodIndex, then drop the original column.
flu_trends.set_index(
    pd.PeriodIndex(flu_trends.Week, freq="W"),
    inplace=True,
)
flu_trends.drop("Week", axis=1, inplace=True)
# -
# First we'll prepare our target series (weekly office visits for the flu) for multistep forecasting. Once this is done, training and prediction will be very straightforward.
# +
def make_lags(ts, lags, lead_time=1):
    """Return a DataFrame of lagged copies of `ts`.

    Columns are named ``y_lag_<i>`` for i = lead_time .. lead_time + lags - 1;
    column ``y_lag_i`` holds `ts` shifted forward by ``i`` steps.
    """
    shifted = {}
    for step in range(lead_time, lags + lead_time):
        shifted[f'y_lag_{step}'] = ts.shift(step)
    return pd.concat(shifted, axis=1)
# Four weeks of lag features
y = flu_trends.FluVisits.copy()
# Shifting leaves NaNs at the start; fill with 0.0 so early rows stay usable.
X = make_lags(y, lags=4).fillna(0.0)
def make_multistep_target(ts, steps):
    """Return a DataFrame whose column ``y_step_k`` is `ts` shifted back k-1 steps.

    Row t therefore holds the next `steps` values y(t), y(t+1), ..., y(t+steps-1).
    """
    horizon = {}
    for k in range(steps):
        horizon[f'y_step_{k + 1}'] = ts.shift(-k)
    return pd.concat(horizon, axis=1)
# Eight-week forecast
# dropna() removes the final rows whose full 8-step horizon is unavailable.
y = make_multistep_target(y, steps=8).dropna()
# Shifting has created indexes that don't match. Only keep times for
# which we have both targets and features.
y, X = y.align(X, join='inner', axis=0)
# -
# ### Multioutput model
#
# We'll use linear regression as a MultiOutput strategy. Once we have our data prepared for multiple outputs, training and prediction is the same as always.
# +
#$HIDE_INPUT$
# Create splits
# shuffle=False keeps the split chronological: the test period follows training.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=False)
model = LinearRegression()
model.fit(X_train, y_train)
# Wrap raw prediction arrays back into DataFrames aligned with the original index.
y_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns)
y_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns)
# -
# Remember that a multistep model will produce a complete forecast for each instance used as input. There are 269 weeks in the training set and 90 weeks in the test set, and we now have an 8-step forecast for each of these weeks.
# +
#$HIDE_INPUT$
# squared=False makes mean_squared_error return the RMSE directly.
train_rmse = mean_squared_error(y_train, y_fit, squared=False)
test_rmse = mean_squared_error(y_test, y_pred, squared=False)
print((f"Train RMSE: {train_rmse:.2f}\n" f"Test RMSE: {test_rmse:.2f}"))
palette = dict(palette='husl', n_colors=64)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 6))
ax1 = flu_trends.FluVisits[y_fit.index].plot(**plot_params, ax=ax1)
ax1 = plot_multistep(y_fit, ax=ax1, palette_kwargs=palette)
_ = ax1.legend(['FluVisits (train)', 'Forecast'])
ax2 = flu_trends.FluVisits[y_pred.index].plot(**plot_params, ax=ax2)
ax2 = plot_multistep(y_pred, ax=ax2, palette_kwargs=palette)
_ = ax2.legend(['FluVisits (test)', 'Forecast'])
# -
# ### Direct strategy
#
# XGBoost can't produce multiple outputs for regression tasks. But by applying the Direct reduction strategy, we can still use it to produce multi-step forecasts. This is as easy as wrapping it with scikit-learn's `MultiOutputRegressor`.
# +
from sklearn.multioutput import MultiOutputRegressor
# MultiOutputRegressor fits one XGBRegressor per forecast step (Direct strategy).
model = MultiOutputRegressor(XGBRegressor())
model.fit(X_train, y_train)
y_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns)
y_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns)
# -
# XGBoost here is clearly overfitting on the training set. But on the test set it seems it was able to capture some of the dynamics of the flu season better than the linear regression model. It would likely do even better with some hyperparameter tuning.
# +
#$HIDE_INPUT$
train_rmse = mean_squared_error(y_train, y_fit, squared=False)
test_rmse = mean_squared_error(y_test, y_pred, squared=False)
print((f"Train RMSE: {train_rmse:.2f}\n" f"Test RMSE: {test_rmse:.2f}"))
palette = dict(palette='husl', n_colors=64)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 6))
ax1 = flu_trends.FluVisits[y_fit.index].plot(**plot_params, ax=ax1)
ax1 = plot_multistep(y_fit, ax=ax1, palette_kwargs=palette)
_ = ax1.legend(['FluVisits (train)', 'Forecast'])
ax2 = flu_trends.FluVisits[y_pred.index].plot(**plot_params, ax=ax2)
ax2 = plot_multistep(y_pred, ax=ax2, palette_kwargs=palette)
_ = ax2.legend(['FluVisits (test)', 'Forecast'])
# -
# To use the DirRec strategy, you would only need to replace `MultiOutputRegressor` with another scikit-learn wrapper, `RegressorChain`. The Recursive strategy we would need to code ourselves.
#
# # Your Turn #
#
# [**Create a forecasting dataset**](#$NEXT_NOTEBOOK_URL$) for *Store Sales* and apply the DirRec strategy.
| notebooks/time_series/raw/tut6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
import ktrain
from ktrain import text
# # Building a Chinese-Language Sentiment Analyzer
#
# In this notebook, we will build a Chinese-language text classification model in 3 simple steps. More specifically, we will build a model that classifies Chinese hotel reviews as either positive or negative.
#
# The dataset can be downloaded from <NAME>'s GitHub repository [here](https://github.com/Tony607/Chinese_sentiment_analysis/tree/master/data/ChnSentiCorp_htl_ba_6000).
#
# (**Disclaimer:** I don't speak Chinese. Please forgive mistakes.)
# ## STEP 1: Load and Preprocess the Data
#
# First, we use the `texts_from_folder` function to load and preprocess the data. We assume that the data is in the following form:
# ```
# ├── datadir
# │ ├── train
# │ │ ├── class0 # folder containing documents of class 0
# │ │ ├── class1 # folder containing documents of class 1
# │ │ ├── class2 # folder containing documents of class 2
# │ │ └── classN # folder containing documents of class N
# ```
# We set `val_pct` as 0.1, which will automatically sample 10% of the data for validation. Since we will be using a pretrained BERT model for classification, we specifiy `preprocess_mode='bert'`. If you are using any other model (e.g., `fasttext`), you should either omit this parameter or use `preprocess_mode='standard'`).
#
# **Notice that there is nothing speical or extra we need to do here for non-English text.** *ktrain* automatically detects the language and character encoding and prepares the data and configures the model appropriately.
#
#
# Load train/validation sets with BERT-specific preprocessing; 10% of the
# training data is held out for validation (val_pct=0.1).
trn, val, preproc = text.texts_from_folder('/home/amaiya/data/ChnSentiCorp_htl_ba_6000',
                                           maxlen=75,
                                           max_features=30000,
                                           preprocess_mode='bert',
                                           train_test_names=['train'],
                                           val_pct=0.1,
                                           classes=['pos', 'neg'])
# ## STEP 2: Create a Model and Wrap in Learner Object
model = text.text_classifier('bert', trn, preproc=preproc)
learner = ktrain.get_learner(model,
                             train_data=trn,
                             val_data=val,
                             batch_size=32)
# ## STEP 3: Train the Model
#
# We will use the `fit_onecycle` method that employs a [1cycle learning rate policy](https://arxiv.org/pdf/1803.09820.pdf) for four epochs. We will save the weights from each epoch using the `checkpoint_folder` argument, so that we can go reload the weights from the best epoch in case we overfit.
learner.fit_onecycle(2e-5, 4, checkpoint_folder='/tmp/saved_weights')
# Although Epoch 03 had the lowest validation loss, the final validation accuracy at the end of the last epoch is still the highest (i.e., **93.24%**), so we will just leave the model weights as they are this time.
# ### Inspecting Misclassifications
# Show the single validation example with the highest loss.
learner.view_top_losses(n=1, preproc=preproc)
# Using Google Translate, the above roughly translates to:
# ```
#
# Hotel location is good, access to West Street is more convenient; viewing room is the view of Guanxi Street, although the night is noisy, but it will not affect sleep; the opposite side of the hotel is the Jiujiu car line booking bicycles on Ctrip, easy to pick up. By Ctrip
#
# ```
#
# Although there is a minor negative comment embedded in this review about noise, the review appears to be overall positive and was predicted as positive by our classifier. The ground-truth label, however, is negative, which may be a mistake and may explain the high loss.
#
# ### Making Predictions on New Data
# Bundle the trained model with its preprocessor for inference on raw text.
p = ktrain.get_predictor(learner.model, preproc)
# Predicting label for the text
# > "*The view and the service of this hotel were both quite terrible.*"
p.predict("这家酒店的风景和服务都非常糟糕")
# Predicting label for:
# > "*I like the service of this hotel.*"
p.predict('我喜欢这家酒店的服务')
# ### Save Predictor for Later Deployment
p.save('/tmp/mypred')
p = ktrain.load_predictor('/tmp/mypred')
# still works
p.predict('我喜欢这家酒店的服务')
| examples/text/ChineseHotelReviews-BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the spreadsheet into a DataFrame.
df=pd.read_excel("sample2.xlsx")
df
# dropna() returns a copy with NaN rows removed; df itself is unchanged here.
new_df=df.dropna()
new_df
df.dropna()
df
# With inplace=True the NaN rows are removed from df itself.
df.dropna(inplace = True)
df
df=pd.read_excel("sample.xlsx")
df
# Overwrite a single cell: row label 3, column 'Apple'.
df.loc[3, 'Apple'] = 40
df
# +
# Print 'Apple' values strictly between 30 and 80.
for x in df.index:
    if df.loc[x, "Apple"] > 30 and df.loc[x, "Apple"] < 80:
        print(df.loc[x, "Apple"])
# -
# Print the day when 'Apple' is high (> 80) while 'Bannana' is low (< 60).
for x in df.index:
    if df.loc[x, "Apple"] > 80 and df.loc[x, "Bannana"] < 60:
        print(df.loc[x, "Days"])
# Print the 'Days' value for rows where every tracked item is below 30.
# (Column name "Bannana" is kept as spelled to match the workbook's header.)
for x in df.index:
    # BUG FIX: the innermost line previously printed df.loc[y, "Days"], but
    # `y` is never defined in this script (NameError); the loop variable is
    # `x`.  The five nested ifs are collapsed into a single all() check.
    if all(df.loc[x, col] < 30 for col in ("Apple", "Bannana", "Rice", "Biscuits", "Chips")):
        print(df.loc[x, "Days"])
| day13 (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/karnwatcharasupat/latte/blob/main/examples/morphomnist/morphomnist-lightning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="0SEjpqcL0FHh"
# # Using Latte with Pytorch Lightning
# + [markdown] id="AEOGkFg4Wmff"
# This example notebook demonstrates the use of [Latte](https://github.com/karnwatcharasupat/latte) with [PyTorch Lightning](https://www.pytorchlightning.ai/).
#
# The code in this notebook is adapted from the [AR-VAE](https://github.com/ashispati/ar-vae) implementation:
# > <NAME> and <NAME>, Attribute-based regularization of latent spaces for variational auto-encoders. Neural Computing & Applications, 33, 4429–4444 (2021). https://doi.org/10.1007/s00521-020-05270-2
#
#
# For this notebook, we will be using the [Morpho-MNIST](https://github.com/dccastro/Morpho-MNIST) dataset which is a disentanglement dataset built on the usual MNIST dataset.
#
# **Before you begin, please turn on GPU accelerator at `Runtime > Change runtime type > Hardware accelerator > GPU`.**
# + id="9SVyQz3nO-Mk"
HOME = '/content'
# + [markdown] id="8v4buxl60Lfv"
# ## Installing Latte and Dependencies
# + id="iY1z4qYe0D5i"
# This command automatically install PyTorch and TorchMetrics.
# For users with existing pytorch>=1.3.1 and torchmetrics>=0.2.0 installation,
# use `pip install latte-metrics` with no extras
# !pip install latte-metrics[pytorch] --upgrade
# Pytorch Lightning is installed independently
# !pip install pytorch-lightning
# + [markdown] id="jbZfmlo18PXg"
# ## Preparing data
# + [markdown] id="VXx5z1SK1GCl"
# ### Downloading dataset
# + id="-nmis2eO1gIt" language="bash"
#
# export DSET_PATH="/content/dataset"
# mkdir -p $DSET_PATH
# gdown --id "1fFGJW0IHoBmLuD6CEKCB8jz3Y5LJ5Duk" -O $DSET_PATH/morphomnist.zip
# unzip -o "$DSET_PATH/morphomnist.zip" -d $DSET_PATH
# + [markdown] id="HCGLIL6K5Bkq"
# ### Cloning Morpho-MNIST code
# + id="GQSicjdF0b57"
# !git clone https://github.com/dccastro/Morpho-MNIST
# + id="CweifwUB8JF_"
import os, sys
# Make the cloned Morpho-MNIST repository importable as a package.
sys.path.append(os.path.join(HOME, 'Morpho-MNIST'))
# + [markdown] id="sGfmt5hR8JnF"
# ### Creating a LightningDataModule
# + id="_eiisEft71kO"
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from morphomnist import io, morpho
import pytorch_lightning as pl
class MorphoMnistDataset(pl.LightningDataModule):
    """Lightning data module for the Morpho-MNIST 'global' split.

    Each sample is an (image, attributes) pair: images are scaled to [0, 1]
    with a single channel axis, and the attribute vector is read from the
    split's accompanying morphometrics CSV.
    """
    def __init__(self, root_dir=os.path.join(HOME, 'dataset/global'), batch_size=32):
        super().__init__()
        # Extra DataLoader options are only useful when a GPU is available.
        self.kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
        self.root_dir = root_dir
        self.data_path_str = "-images-idx3-ubyte.gz"
        self.label_path_str = "-labels-idx1-ubyte.gz"
        self.morpho_path_str = "-morpho.csv"
        self.batch_size = batch_size
        self.train_dataset = self._create_dataset(dataset_type="train")
        self.val_dataset = self._create_dataset(dataset_type="t10k")

    def train_dataloader(self):
        """Shuffled loader over the training split."""
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            **self.kwargs
        )

    def val_dataloader(self):
        """Deterministic loader over the validation (t10k) split."""
        return DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,
            shuffle=False,
        )

    def _create_dataset(self, dataset_type):
        """Load one split ('train' or 't10k') as a TensorDataset."""
        image_file = os.path.join(self.root_dir, dataset_type + self.data_path_str)
        csv_file = os.path.join(self.root_dir, dataset_type + self.morpho_path_str)
        raw = io.load_idx(image_file)
        # Add a channel axis and normalise pixel values to [0, 1].
        raw = np.expand_dims(raw, axis=1).astype('float32') / 255.0
        # Keep CSV columns from index 3 onward as the attribute vector
        # (first three columns assumed to be bookkeeping -- confirm against CSV schema).
        attrs = pd.read_csv(csv_file).values.astype('float32')[:, 3:]
        return TensorDataset(
            torch.from_numpy(raw),
            torch.from_numpy(attrs)
        )
# + [markdown] id="FsHMwY__5TNa"
# ## Creating a simple AR-VAE
# + [markdown] id="8K237NXYRLUW"
# ### Defining the Loss Function
# + id="sI9EBkI-AqrT"
from torch.nn import functional as F
def ar_signed_loss(z, a, factor=10.0):
    """Attribute-regularisation loss (AR-VAE).

    Encourages the first ``a.shape[-1]`` latent dimensions of `z` to be
    ordered like the corresponding attributes in `a`: pairwise latent
    differences, squashed through tanh, are pulled (L1) towards the sign
    of the pairwise attribute differences.
    """
    num_attrs = a.shape[-1]
    batch = z.shape[0]
    # Pairwise differences across the batch for the regularised latent
    # dimensions and for the attributes.
    latent_diffs = z[:, None, :num_attrs] - z[None, :, :num_attrs]
    attr_diffs = a[:, None, ...] - a[None, :, :]
    squashed = torch.tanh(factor * latent_diffs)
    target = torch.sign(attr_diffs).float()
    # Normalise by the number of off-diagonal pairs: batch^2 - batch.
    return F.l1_loss(squashed, target, reduction='sum') / (batch ** 2 - batch)
def compute_loss(x, xhat, zd, z0, z, a, beta=1.0, gamma=1.0):
    """Total AR-VAE objective and its individual components.

    Args:
        x: input batch; xhat: decoder logits (sigmoid is applied here).
        zd: posterior distribution; z0: prior distribution.
        z: sampled latent codes; a: attribute targets.
        beta: KL weight; gamma: attribute-regularization weight.

    Returns:
        Dict with 'loss' (weighted total) plus 'recon_loss', 'kld_loss',
        'ar_loss'.
    """
    batch = z.shape[0]
    # Per-sample summed squared error on sigmoid-activated logits.
    recon = F.mse_loss(x, torch.sigmoid(xhat), reduction='sum') / batch
    # KL(q(z|x) || p(z)), summed over latent dims, averaged over the batch.
    kld = distributions.kl.kl_divergence(zd, z0).sum(-1).mean()
    # Sign-agreement attribute regularizer (defined above in this notebook).
    reg = ar_signed_loss(z, a)
    return {
        'loss': recon + beta * kld + gamma * reg,
        'recon_loss': recon,
        'kld_loss': kld,
        'ar_loss': reg,
    }
# + [markdown] id="RQCdJp1PaoRs"
# ### Defining base VAE class
# + id="alAggbUd5VBk"
from torch import nn, distributions
class ImageVAE(nn.Module):
    """Convolutional VAE for 28x28 single-channel images.

    Encoder: three 4x4 convolutions (stride 1, no padding) shrink the spatial
    size 28 -> 25 -> 22 -> 19, giving an 8 x 19 x 19 = 2888 feature map that
    is projected to a 16-dimensional diagonal-Gaussian latent. The decoder
    mirrors this path with transposed convolutions.

    NOTE: attribute names and layer ordering are part of the checkpoint
    format (state-dict keys) and must not change.
    """

    def __init__(self):
        super().__init__()
        self.input_size = 784   # 28 * 28 pixels
        self.z_dim = 16         # latent dimensionality
        self.inter_dim = 19     # spatial size after the three convolutions

        self.enc_conv = nn.Sequential(
            nn.Conv2d(1, 64, 4, 1), nn.SELU(), nn.Dropout(0.5),
            nn.Conv2d(64, 64, 4, 1), nn.SELU(), nn.Dropout(0.5),
            nn.Conv2d(64, 8, 4, 1), nn.SELU(), nn.Dropout(0.5),
        )
        self.enc_lin = nn.Sequential(
            nn.Linear(2888, 256), nn.SELU(),
        )
        self.enc_mean = nn.Linear(256, self.z_dim)
        self.enc_log_std = nn.Linear(256, self.z_dim)
        self.dec_lin = nn.Sequential(
            nn.Linear(self.z_dim, 256), nn.SELU(),
            nn.Linear(256, 2888), nn.SELU(),
        )
        self.dec_conv = nn.Sequential(
            nn.ConvTranspose2d(8, 64, 4, 1), nn.SELU(), nn.Dropout(0.5),
            nn.ConvTranspose2d(64, 64, 4, 1), nn.SELU(), nn.Dropout(0.5),
            nn.ConvTranspose2d(64, 1, 4, 1),
        )
        self.xavier_initialization()

    def xavier_initialization(self):
        """Xavier-initialize every weight tensor; biases keep their defaults."""
        for name, tensor in self.named_parameters():
            if 'weight' in name:
                nn.init.xavier_normal_(tensor)

    def encode(self, x):
        """Map a batch of images to a diagonal-Gaussian posterior."""
        feats = self.enc_conv(x)
        feats = self.enc_lin(feats.view(x.size(0), -1))
        mean = self.enc_mean(feats)
        log_std = self.enc_log_std(feats)
        # The tiny epsilon keeps the scale strictly positive.
        return distributions.Normal(loc=mean, scale=torch.exp(log_std) + 1e-16)

    def decode(self, z):
        """Map latent codes back to image-logit space."""
        flat = self.dec_lin(z)
        grid = flat.view(z.size(0), -1, self.inter_dim, self.inter_dim)
        return self.dec_conv(grid)

    def reparametrize(self, z_dist):
        """Draw a differentiable sample and build the matching N(0, I) prior."""
        z_tilde = z_dist.rsample()
        prior_dist = torch.distributions.Normal(
            loc=torch.zeros_like(z_dist.loc),
            scale=torch.ones_like(z_dist.scale),
        )
        return z_tilde, prior_dist

    def forward(self, x):
        """Return (reconstruction logits, posterior, prior, latent sample)."""
        z_dist = self.encode(x)
        z_tilde, prior_dist = self.reparametrize(z_dist)
        recon = self.decode(z_tilde).view(x.size())
        return recon, z_dist, prior_dist, z_tilde
# + [markdown] id="1dPtAOEXad54"
# ### Convert the module into a LightningModule with Latte Metrics
#
# See Latte documentation [here](https://latte.readthedocs.io/).
# + id="oUE2mDKEadOd"
from latte.metrics.torch.bundles import DependencyAwareMutualInformationBundle
class ImageVAEwithLatte(pl.LightningModule, ImageVAE):
    """ImageVAE wrapped as a LightningModule with Latte disentanglement metrics."""

    def __init__(self):
        super().__init__()
        # Dependency-aware MI metric bundle over the 4 regularized latent dims.
        self.dami = DependencyAwareMutualInformationBundle(reg_dim=range(4))

    def training_step(self, data, idx):
        """One optimization step; metrics are refreshed on every 8th batch."""
        inputs, attributes = data
        recon, z_dist, prior_dist, z_tilde = self(inputs)
        loss = compute_loss(inputs, recon, z_dist, prior_dist, z_tilde, attributes)
        self.log_dict({'train/' + key: val for key, val in loss.items()}, prog_bar=True)
        if idx % 8 == 0:
            # Update-compute-reset so each reading reflects only this batch.
            self.dami.update(z_tilde, attributes)
            metrics = self.dami.compute()
            self.dami.reset()
            # Only the mean over attributes goes on the progress bar (demo).
            self.log_dict({'train/' + key: val.mean() for key, val in metrics.items()}, prog_bar=True)
        return loss['loss']

    def validation_step(self, data, idx):
        """Accumulate metric state across the whole validation epoch."""
        inputs, attributes = data
        recon, z_dist, prior_dist, z_tilde = self(inputs)
        loss = compute_loss(inputs, recon, z_dist, prior_dist, z_tilde, attributes)
        # Metrics are only computed once, in validation_epoch_end.
        self.dami.update(z_tilde, attributes)
        self.log_dict({'val/' + key: val for key, val in loss.items()}, prog_bar=True)
        return loss['loss']

    def validation_epoch_end(self, outputs):
        """Report the epoch-level metrics accumulated by validation_step."""
        metrics = self.dami.compute()
        # Only the mean over attributes goes on the progress bar (demo).
        self.log_dict({'val/' + key: val.mean() for key, val in metrics.items()}, prog_bar=True)
        self.dami.reset()

    def configure_optimizers(self, lr=1e-4):
        """Adam over all parameters."""
        return torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        # Delegate to ImageVAE.forward via the MRO.
        return super().forward(x)

    def interpolate(self, x, dz):
        """Decode latent codes offset by ``dz``.

        A 2-D ``dz`` applies each column in turn and stacks the decoded
        batches on a new trailing axis; a 1-D ``dz`` yields a single batch.
        """
        z_dist = self.encode(x)
        z_tilde, prior_dist = self.reparametrize(z_dist)
        if dz.ndim > 1:
            frames = [
                self.decode(z_tilde + dz[None, :, col]).view(x.size())
                for col in range(dz.shape[-1])
            ]
            return torch.stack(frames, axis=-1)
        return self.decode(z_tilde + dz[None, :]).view(x.size())
# + [markdown] id="6803GEY-RQJE"
# ## Training the model
#
# For your convenience, we have prepared pretrained weights for the model. This notebook will only train for one more epoch as an example.
# + id="8GQV8uB1O-Mu"
# !wget https://github.com/karnwatcharasupat/latte/raw/main/examples/morphomnist/weights/morphomnist-lightning-weights.ckpt
# + id="8hvsM1wP6lqk"
import latte
# Seed Lightning's and Latte's RNGs for reproducibility.
pl.seed_everything(42)
latte.seed(42)
# there is no need for this
# this is just to demonstrate that you can manually set a seed
# Latte uses seed=42 by default anyway
model = ImageVAEwithLatte()
datamodule = MorphoMnistDataset()
# Resume from the downloaded pretrained checkpoint and fine-tune briefly:
# only 10% of the train/val batches per epoch, on all available GPUs.
trainer = pl.Trainer(
    resume_from_checkpoint=os.path.join(HOME, "morphomnist-lightning-weights.ckpt"),
    max_epochs=101,
    limit_train_batches=0.1,
    limit_val_batches=0.1,
    gpus=-1
)
trainer.fit(model, datamodule)
# + [markdown] id="wNNv493RHwzH"
# ## Visualizing the outputs
# + id="ICKfPFxNO-Mv"
# Maps each morphometric attribute name to its regularized latent-dimension
# index. NOTE(review): the name carries a triple-t "atttribute" typo, but it
# is referenced by that spelling elsewhere in this notebook, so it is kept.
atttribute_dict = {
    "thickness": 0,
    "slant": 1,
    "width": 2,
    "height": 3
}
# + id="6m1JcPDmO-Mw"
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
def interpolate_and_display(model, attribute):
    """Show 8 random validation digits (top row) and, below them, 10 decoded
    frames sweeping the given attribute's latent dimension from -2 to 2."""
    model.eval()
    model = model.cuda()
    fig = plt.figure(figsize=(16, 16))
    grid = ImageGrid(
        fig, 111,  # similar to subplot(111)
        nrows_ncols=(11, 8),  # 1 input row + 10 interpolation rows, 8 digits wide
        axes_pad=0.1,  # pad between axes in inch.
    )
    # Pick 8 random validation images.
    inputs, _ = datamodule.val_dataset[torch.randint(10000, (8,))]
    # Offset one latent dim over 10 evenly spaced steps; all others stay zero.
    dz = torch.zeros((16, 10))
    dz[atttribute_dict[attribute], :] = torch.linspace(-2.0, 2.0, 10)
    gen = torch.sigmoid(model.interpolate(inputs.cuda(), dz.cuda()))
    originals = inputs.detach().cpu()
    frames = gen.detach().cpu()
    for col in range(8):
        grid[col].imshow(originals[col, 0, :, :], cmap='summer')
        for row in range(10):
            grid[(row + 1) * 8 + col].imshow(
                frames[col, 0, :, :, row], cmap='gray', vmin=0, vmax=1
            )
    for cell in range(8 * 11):
        grid[cell].axis('off')
    plt.show()
# + [markdown] id="mhmaXqVbO-Mw"
# The top row is the original input images. The subsequent rows have the specified attribute changed in increasing order.
# + id="ELzcFOqQO-Mx"
# Sweep the latent dimension tied to each morphometric attribute in turn.
interpolate_and_display(model, 'thickness')
# + id="EZpkUrrJJJm-"
interpolate_and_display(model, 'slant')
# + id="d0aIYjFYO-My"
interpolate_and_display(model, 'width')
| examples/morphomnist/morphomnist-lightning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Keras debugging tips
#
# **Author:** [fchollet](https://twitter.com/fchollet)<br>
# **Date created:** 2020/05/16<br>
# **Last modified:** 2020/05/16<br>
# **Description:** Four simple tips to help you debug your Keras code.
# + [markdown] colab_type="text"
# ## Introduction
#
# It's generally possible to do almost anything in Keras *without writing code* per se:
# whether you're implementing a new type of GAN or the latest convnet architecture for
# image segmentation, you can usually stick to calling built-in methods. Because all
# built-in methods do extensive input validation checks, you will have little to no
# debugging to do. A Functional API model made entirely of built-in layers will work on
# first try -- if you can compile it, it will run.
#
# However, sometimes, you will need to dive deeper and write your own code. Here are some
# common examples:
#
# - Creating a new `Layer` subclass.
# - Creating a custom `Metric` subclass.
# - Implementing a custom `train_step` on a `Model`.
#
# This document provides a few simple tips to help you navigate debugging in these
# situations.
#
# + [markdown] colab_type="text"
# ## Tip 1: test each part before you test the whole
#
# If you've created any object that has a chance of not working as expected, don't just
# drop it in your end-to-end process and watch sparks fly. Rather, test your custom object
# in isolation first. This may seem obvious -- but you'd be surprised how often people
# don't start with this.
#
# - If you write a custom layer, don't call `fit()` on your entire model just yet. Call
# your layer on some test data first.
# - If you write a custom metric, start by printing its output for some reference inputs.
#
# Here's a simple example. Let's write a custom layer with a bug in it:
#
# + colab_type="code"
import tensorflow as tf
from tensorflow.keras import layers
class MyAntirectifier(layers.Layer):
    """Antirectifier layer: concatenates relu(x) and relu(-x), then projects
    back down to the input dimensionality.

    NOTE: this first version intentionally contains a bug (concatenation on
    the batch axis) that the tutorial text below debugs step by step.
    """

    def build(self, input_shape):
        output_dim = input_shape[-1]
        # Projection kernel: (2 * features) -> features.
        self.kernel = self.add_weight(
            shape=(output_dim * 2, output_dim),
            initializer="he_normal",
            name="kernel",
            trainable=True,
        )

    def call(self, inputs):
        # Take the positive part of the input
        pos = tf.nn.relu(inputs)
        # Take the negative part of the input
        neg = tf.nn.relu(-inputs)
        # Concatenate the positive and negative parts
        # BUG (deliberate, for the tutorial): axis=0 concatenates along the
        # batch axis; the feature axis (1) is intended.
        concatenated = tf.concat([pos, neg], axis=0)
        # Project the concatenation down to the same dimensionality as the input
        return tf.matmul(concatenated, self.kernel)
# + [markdown] colab_type="text"
# Now, rather than using it in an end-to-end model directly, let's try to call the layer on
# some test data:
#
# ```python
# x = tf.random.normal(shape=(2, 5))
# y = MyAntirectifier()(x)
# ```
#
# We get the following error:
#
# ```
# ...
# 1 x = tf.random.normal(shape=(2, 5))
# ----> 2 y = MyAntirectifier()(x)
# ...
# 17 neg = tf.nn.relu(-inputs)
# 18 concatenated = tf.concat([pos, neg], axis=0)
# ---> 19 return tf.matmul(concatenated, self.kernel)
# ...
# InvalidArgumentError: Matrix size-incompatible: In[0]: [4,5], In[1]: [10,5] [Op:MatMul]
# ```
#
# Looks like our input tensor in the `matmul` op may have an incorrect shape.
# Let's add a print statement to check the actual shapes:
#
# + colab_type="code"
class MyAntirectifier(layers.Layer):
    """Antirectifier with debug prints added; the axis=0 bug is still present
    (intentionally) — the prints below expose the shape mismatch."""

    def build(self, input_shape):
        output_dim = input_shape[-1]
        self.kernel = self.add_weight(
            shape=(output_dim * 2, output_dim),
            initializer="he_normal",
            name="kernel",
            trainable=True,
        )

    def call(self, inputs):
        pos = tf.nn.relu(inputs)
        neg = tf.nn.relu(-inputs)
        print("pos.shape:", pos.shape)
        print("neg.shape:", neg.shape)
        # Still concatenating on the batch axis — the deliberate bug.
        concatenated = tf.concat([pos, neg], axis=0)
        print("concatenated.shape:", concatenated.shape)
        print("kernel.shape:", self.kernel.shape)
        return tf.matmul(concatenated, self.kernel)
# + [markdown] colab_type="text"
# We get the following:
#
# ```
# pos.shape: (2, 5)
# neg.shape: (2, 5)
# concatenated.shape: (4, 5)
# kernel.shape: (10, 5)
# ```
#
# Turns out we had the wrong axis for the `concat` op! We should be concatenating `neg` and
# `pos` alongside the feature axis 1, not the batch axis 0. Here's the correct version:
#
# + colab_type="code"
class MyAntirectifier(layers.Layer):
    """Corrected antirectifier: relu(x) and relu(-x) are concatenated on the
    feature axis, then projected back to the input dimensionality."""

    def build(self, input_shape):
        output_dim = input_shape[-1]
        # (2 * features, features) projection matrix.
        self.kernel = self.add_weight(
            shape=(output_dim * 2, output_dim),
            initializer="he_normal",
            name="kernel",
            trainable=True,
        )

    def call(self, inputs):
        pos = tf.nn.relu(inputs)
        neg = tf.nn.relu(-inputs)
        print("pos.shape:", pos.shape)
        print("neg.shape:", neg.shape)
        # Fixed: concatenate along the feature axis (1), not the batch axis.
        concatenated = tf.concat([pos, neg], axis=1)
        print("concatenated.shape:", concatenated.shape)
        print("kernel.shape:", self.kernel.shape)
        return tf.matmul(concatenated, self.kernel)
# + [markdown] colab_type="text"
# Now our code works fine:
#
# + colab_type="code"
x = tf.random.normal(shape=(2, 5))  # batch of 2 samples, 5 features
y = MyAntirectifier()(x)  # runs cleanly now that the concat axis is fixed
# + [markdown] colab_type="text"
# ## Tip 2: use `model.summary()` and `plot_model()` to check layer output shapes
#
# If you're working with complex network topologies, you're going to need a way
# to visualize how your layers are connected and how they transform the data that passes
# through them.
#
# Here's an example. Consider this model with three inputs and two outputs (lifted from the
# [Functional API
# guide](https://keras.io/guides/functional_api/#manipulate-complex-graph-topologies)):
#
# + colab_type="code"
from tensorflow import keras

# Problem sizes for the three-input / two-output example model.
num_tags = 12  # Number of unique issue tags
num_words = 10000  # Size of vocabulary obtained when preprocessing text data
num_departments = 4  # Number of departments for predictions

title_input = keras.Input(
    shape=(None,), name="title"
)  # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body")  # Variable-length sequence of ints
tags_input = keras.Input(
    shape=(num_tags,), name="tags"
)  # Binary vectors of size `num_tags`

# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)

# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)

# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])

# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)

# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
    inputs=[title_input, body_input, tags_input],
    outputs=[priority_pred, department_pred],
)
# + [markdown] colab_type="text"
# Calling `summary()` can help you check the output shape of each layer:
#
# + colab_type="code"
model.summary()  # check each layer's output shape and parameter count
# + [markdown] colab_type="text"
# You can also visualize the entire network topology alongside output shapes using
# `plot_model`:
#
# + colab_type="code"
keras.utils.plot_model(model, show_shapes=True)  # render the topology with output shapes
# + [markdown] colab_type="text"
# With this plot, any connectivity-level error becomes immediately obvious.
#
# + [markdown] colab_type="text"
# ## Tip 3: to debug what happens during `fit()`, use `run_eagerly=True`
#
# The `fit()` method is fast: it runs a well-optimized, fully-compiled computation graph.
# That's great for performance, but it also means that the code you're executing isn't the
# Python code you've written. This can be problematic when debugging. As you may recall,
# Python is slow -- so we use it as a staging language, not as an execution language.
#
# Thankfully, there's an easy way to run your code in "debug mode", fully eagerly:
# pass `run_eagerly=True` to `compile()`. Your call to `fit()` will now get executed line
# by line, without any optimization. It's slower, but it makes it possible to print the
# value of intermediate tensors, or to use a Python debugger. Great for debugging.
#
# Here's a basic example: let's write a really simple model with a custom `train_step`. Our
# model just implements gradient descent, but instead of first-order gradients, it uses a
# combination of first-order and second-order gradients. Pretty trivial so far.
#
# Can you spot what we're doing wrong?
#
# + colab_type="code"
class MyModel(keras.Model):
    """Model whose custom `train_step` mixes first- and second-order gradients.

    NOTE: this is the tutorial's deliberately flawed first attempt — the raw
    first- and second-order gradients are averaged without any normalization,
    which the text below debugs step by step.
    """

    def train_step(self, data):
        inputs, targets = data
        trainable_vars = self.trainable_variables
        # Nested tapes: tape1 for first-order, tape2 for second-order gradients.
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
        # Combine first-order and second-order gradients
        grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
# + [markdown] colab_type="text"
# Let's train a one-layer model on MNIST with this custom training loop.
#
# We pick, somewhat at random, a batch size of 1024 and a learning rate of 0.1. The general
# idea being to use larger batches and a larger learning rate than usual, since our
# "improved" gradients should lead us to quicker convergence.
#
# + colab_type="code"
import numpy as np

# Construct an instance of MyModel
def get_model():
    """One-hidden-layer MLP classifier (784 -> 256 -> 10) built as MyModel."""
    inputs = keras.Input(shape=(784,))
    intermediate = layers.Dense(256, activation="relu")(inputs)
    outputs = layers.Dense(10, activation="softmax")(intermediate)
    model = MyModel(inputs, outputs)
    return model

# Prepare data: flatten MNIST images and scale pixel values to [0, 1].
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)) / 255

model = get_model()
# NOTE(review): the narrative above mentions a learning rate of 0.1, but the
# code uses 1e-2 — confirm which is intended.
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=1e-2),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, epochs=3, batch_size=1024, validation_split=0.1)
# + [markdown] colab_type="text"
# Oh no, it doesn't converge! Something is not working as planned.
#
# Time for some step-by-step printing of what's going on with our gradients.
#
# We add various `print` statements in the `train_step` method, and we make sure to pass
# `run_eagerly=True` to `compile()` to run our code step-by-step, eagerly.
#
# + colab_type="code"
class MyModel(keras.Model):
    """Same mixed-gradient model, instrumented with prints for eager debugging."""

    def train_step(self, data):
        print()
        # `step_counter` is attached to the instance after construction (below).
        print("----Start of step: %d" % (self.step_counter,))
        self.step_counter += 1

        inputs, targets = data
        trainable_vars = self.trainable_variables
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)

        # Inspect the range of both gradient kinds for the first variable.
        print("Max of dl_dw[0]: %.4f" % tf.reduce_max(dl_dw[0]))
        print("Min of dl_dw[0]: %.4f" % tf.reduce_min(dl_dw[0]))
        print("Mean of dl_dw[0]: %.4f" % tf.reduce_mean(dl_dw[0]))
        print("-")
        print("Max of d2l_dw2[0]: %.4f" % tf.reduce_max(d2l_dw2[0]))
        print("Min of d2l_dw2[0]: %.4f" % tf.reduce_min(d2l_dw2[0]))
        print("Mean of d2l_dw2[0]: %.4f" % tf.reduce_mean(d2l_dw2[0]))

        # Combine first-order and second-order gradients
        grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}


model = get_model()
# run_eagerly=True executes train_step as plain Python so the prints fire.
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=1e-2),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
    run_eagerly=True,
)
model.step_counter = 0
# We pass epochs=1 and steps_per_epoch=10 to only run 10 steps of training.
model.fit(x_train, y_train, epochs=1, batch_size=1024, verbose=0, steps_per_epoch=10)
# + [markdown] colab_type="text"
# What did we learn?
#
# - The first order and second order gradients can have values that differ by orders of
# magnitudes.
# - Sometimes, they may not even have the same sign.
# - Their values can vary greatly at each step.
#
# This leads us to an obvious idea: let's normalize the gradients before combining them.
#
# + colab_type="code"
class MyModel(keras.Model):
    """Mixed-gradient model, now L2-normalizing each gradient before combining."""

    def train_step(self, data):
        inputs, targets = data
        trainable_vars = self.trainable_variables
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)

        # Normalize so both gradient kinds contribute at a comparable scale.
        dl_dw = [tf.math.l2_normalize(w) for w in dl_dw]
        d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2]

        # Combine first-order and second-order gradients
        grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}


model = get_model()
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=1e-2),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, epochs=5, batch_size=1024, validation_split=0.1)
# + [markdown] colab_type="text"
# Now, training converges! It doesn't work well at all, but at least the model learns
# something.
#
# After spending a few minutes tuning parameters, we get to the following configuration
# that works somewhat well (achieves 97% validation accuracy and seems reasonably robust to
# overfitting):
#
# - Use `0.2 * w1 + 0.8 * w2` for combining gradients.
# - Use a learning rate that decays linearly over time.
#
# I'm not going to say that the idea works -- this isn't at all how you're supposed to do
# second-order optimization (pointers: see the Newton & Gauss-Newton methods, quasi-Newton
# methods, and BFGS). But hopefully this demonstration gave you an idea of how you can
# debug your way out of uncomfortable training situations.
#
# Remember: use `run_eagerly=True` for debugging what happens in `fit()`. And when your code
# is finally working as expected, make sure to remove this flag in order to get the best
# runtime performance!
#
# Here's our final training run:
#
# + colab_type="code"
class MyModel(keras.Model):
    """Final tuned variant: normalized gradients combined as
    0.2 * second-order + 0.8 * first-order, trained with a decaying lr."""

    def train_step(self, data):
        inputs, targets = data
        trainable_vars = self.trainable_variables
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
        # L2-normalize so both gradient kinds live on a comparable scale.
        dl_dw = [tf.math.l2_normalize(w) for w in dl_dw]
        d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2]
        # Combine first-order and second-order gradients
        grads = [0.2 * w1 + 0.8 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}


model = get_model()
# Learning rate shrinks over time via inverse-time decay.
lr = learning_rate = keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.1, decay_steps=25, decay_rate=0.1
)
model.compile(
    optimizer=keras.optimizers.SGD(lr),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, epochs=50, batch_size=2048, validation_split=0.1)
# + [markdown] colab_type="text"
# ## Tip 4: if your code is slow, run the TensorFlow profiler
#
# One last tip -- if your code seems slower than it should be, you're going to want to plot
# how much time is spent on each computation step. Look for any bottleneck that might be
# causing less than 100% device utilization.
#
# To learn more about TensorFlow profiling, see
# [this extensive guide](https://www.tensorflow.org/guide/profiler).
#
# You can quickly profile a Keras model via the TensorBoard callback:
#
# ```python
# # Profile from batches 10 to 15
# tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
# profile_batch=(10, 15))
# # Train the model and use the TensorBoard Keras callback to collect
# # performance profiling data
# model.fit(dataset,
# epochs=1,
# callbacks=[tb_callback])
# ```
#
# Then navigate to the TensorBoard app and check the "profile" tab.
#
| examples/keras_recipes/ipynb/debugging_tips.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as Math
import pylab as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
    """Return (H, P): the Shannon entropy H and the normalized Gaussian
    row P over distances D at precision (inverse bandwidth) beta."""
    unnormalized = Math.exp(-D.copy() * beta)
    total = sum(unnormalized)
    # Entropy of the normalized distribution, computed without forming it:
    # H = log(sum P~) + beta * <D> under P.
    H = Math.log(total) + beta * Math.sum(D * unnormalized) / total
    P = unnormalized / total
    return H, P
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
    """Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
    # Initialize some variables
    print "Computing pairwise distances..."
    (n, d) = X.shape;
    sum_X = Math.sum(Math.square(X), 1);
    # Squared Euclidean distances via ||a||^2 + ||b||^2 - 2 a.b
    D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
    P = Math.zeros((n, n));
    beta = Math.ones((n, 1));    # beta = 1 / (2 sigma^2), one per point
    logU = Math.log(perplexity); # target entropy
    # Loop over all datapoints
    for i in range(n):
        # Print progress
        if i % 500 == 0:
            print "Computing P-values for point ", i, " of ", n, "..."
        # Compute the Gaussian kernel and entropy for the current precision
        betamin = -Math.inf;
        betamax = Math.inf;
        # Distances from point i to all other points (self excluded).
        Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
        (H, thisP) = Hbeta(Di, beta[i]);
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - logU;
        tries = 0;
        # Binary-search beta (at most 50 halvings/doublings) until the
        # entropy matches log(perplexity) within tol.
        while Math.abs(Hdiff) > tol and tries < 50:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta[i].copy();
                if betamax == Math.inf or betamax == -Math.inf:
                    beta[i] = beta[i] * 2;
                else:
                    beta[i] = (beta[i] + betamax) / 2;
            else:
                betamax = beta[i].copy();
                if betamin == Math.inf or betamin == -Math.inf:
                    beta[i] = beta[i] / 2;
                else:
                    beta[i] = (beta[i] + betamin) / 2;
            # Recompute the values
            (H, thisP) = Hbeta(Di, beta[i]);
            Hdiff = H - logU;
            tries = tries + 1;
        # Set the final row of P
        P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
    # Return final P-matrix
    print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta));
    return P;
def pca(X = Math.array([]), no_dims = 50):
    """Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
    print "Preprocessing the data using PCA..."
    (n, d) = X.shape;
    # Center the data, then project onto eigenvectors of the covariance-like
    # matrix X^T X.
    X = X - Math.tile(Math.mean(X, 0), (n, 1));
    (l, M) = Math.linalg.eig(Math.dot(X.T, X));
    # NOTE(review): numpy.linalg.eig does not sort eigenvalues, so the first
    # no_dims columns are not guaranteed to be the leading components — confirm.
    Y = Math.dot(X, M[:,0:no_dims]);
    return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
    """Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
    # Check inputs
    if isinstance(no_dims, float):
        print "Error: array X should have type float.";
        return -1;
    if round(no_dims) != no_dims:
        print "Error: number of dimensions should be an integer.";
        return -1;
    # Initialize variables
    # PCA first reduces to initial_dims to speed up the pairwise computations.
    X = pca(X, initial_dims).real;
    (n, d) = X.shape;
    max_iter = 1000;
    initial_momentum = 0.5;
    final_momentum = 0.8;
    eta = 500;                      # learning rate
    min_gain = 0.01;                # floor for the adaptive per-parameter gains
    Y = Math.random.randn(n, no_dims);
    dY = Math.zeros((n, no_dims));  # gradient
    iY = Math.zeros((n, no_dims));  # momentum-accumulated update
    gains = Math.ones((n, no_dims));
    # Compute P-values
    P = x2p(X, 1e-5, perplexity);
    P = P + Math.transpose(P);      # symmetrize the conditional probabilities
    P = P / Math.sum(P);
    P = P * 4; # early exaggeration
    P = Math.maximum(P, 1e-12);     # numerical floor
    # Run iterations
    for iter in range(max_iter):
        # Compute pairwise affinities (Student-t kernel, zero diagonal)
        sum_Y = Math.sum(Math.square(Y), 1);
        num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
        num[range(n), range(n)] = 0;
        Q = num / Math.sum(num);
        Q = Math.maximum(Q, 1e-12);
        # Compute gradient of KL(P || Q)
        PQ = P - Q;
        for i in range(n):
            dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
        # Perform the update
        if iter < 20:
            momentum = initial_momentum
        else:
            momentum = final_momentum
        # Increase gain where the gradient sign flipped, decay it otherwise.
        gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
        gains[gains < min_gain] = min_gain;
        iY = momentum * iY - eta * (gains * dY);
        Y = Y + iY;
        Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));  # re-center the embedding
        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = Math.sum(P * Math.log(P / Q));
            print "Iteration ", (iter + 1), ": error is ", C
        # Stop lying about P-values
        if iter == 100:
            P = P / 4;
    # Return solution
    return Y;
# +
import numpy as np
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import sys
plt.rcParams['figure.figsize'] = 10, 10

# sklearn tsne
# Run sklearn's t-SNE 25 times and keep the embedding with the lowest final
# KL divergence (t-SNE is non-convex, so restarts help).
X = Math.loadtxt("input/x_rodrigo_teste.txt");
labels = Math.loadtxt("input/label_rodrigo_teste.txt");
last_divergence = sys.maxint
X_embedded = None
for i in range(0,25):
    my_tse = TSNE(n_components=2)
    cur_X_embedded = my_tse.fit_transform(X)
    if last_divergence >= my_tse.kl_divergence_:
        X_embedded = cur_X_embedded
        last_divergence = my_tse.kl_divergence_
print 'Lowest kl_divergence: ' + str(last_divergence)

# Tumor-type id -> legend text.
color_legends = {1: 'Extraesqueletal Ewing Sarcoma',
                 2: 'germ cell tumor',
                 3: 'nasopharyngeal carcinoma',
                 4: 'Neuroblastic tumor',
                 5: 'rhabdomyosarcoma',
                 6: '<NAME>'}
# One rainbow color per distinct class id.
raibow = cm.rainbow(np.linspace(0, 1, len(np.unique(labels))))
labels_array = []
legends_array = []
for i in range(0, len(labels)):
    labels_array.append(raibow[int(labels[i]) - 1])
    legends_array.append(color_legends[int(labels[i])])

scater_for_legend = []
last_color_id = labels[0]
# Plot one point at a time; attach a legend label only when the class id
# changes, so each class appears once in the legend.
for i in range(0, len(X_embedded)):
    if last_color_id != labels[i] or i==0:
        plt.scatter(X_embedded[i][0], X_embedded[i][1], color = labels_array[i], label = color_legends[int(labels[i])])
    else :
        plt.scatter(X_embedded[i][0], X_embedded[i][1], color = labels_array[i])
    last_color_id = labels[i]

plt.title('TSNE Result')
plt.legend(scatterpoints=1,
           loc='lower left',
           ncol=3,
           fontsize=8)
plt.show()
# -
# Demo: run the reference implementation above on 2,500 MNIST digits.
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
X = Math.loadtxt("input/mnist2500_X.txt");
labels = Math.loadtxt("input/mnist2500_labels.txt");
# initial_dims=5, perplexity=20 (cf. the defaults of 50 and 30).
Y = tsne(X, 2, 5, 20.0);
Plot.scatter(Y[:,0], Y[:,1], 20, labels);
Plot.show();
# +
# sklearn tsne
X = Math.loadtxt("input/mnist2500_X.txt");
labels = Math.loadtxt("input/mnist2500_labels.txt");
last_divergence = sys.maxint
X_embedded = None
#for i in range(0,25):
my_tse = TSNE(n_components=2)
cur_X_embedded = my_tse.fit_transform(X)
if last_divergence >= my_tse.kl_divergence_:
X_embedded = cur_X_embedded
last_divergence = my_tse.kl_divergence_
print 'Lowest kl_divergence: ' + str(last_divergence)
Plot.scatter(Y[:,0], Y[:,1], 20, labels);
Plot.show();
# +
import sklearn.datasets
X, color = datasets.samples_generator.make_blobs(n_samples=1000, centers=3, n_features=80, random_state=0)
my_tse = TSNE(n_components=2)
cur_X_embedded = my_tse.fit_transform(X)
if last_divergence >= my_tse.kl_divergence_:
X_embedded = cur_X_embedded
last_divergence = my_tse.kl_divergence_
print 'Lowest kl_divergence: ' + str(last_divergence)
Plot.scatter(cur_X_embedded[:,0], cur_X_embedded[:,1], label = color, cmap=plt.cm.Spectral);
Plot.show();
# +
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
X, color = data['data'], data['target']
last_divergence = sys.maxint
X_embedded = None
for i in range(0,25):
my_tse = TSNE(n_components=2)
cur_X_embedded = my_tse.fit_transform(X)
if last_divergence >= my_tse.kl_divergence_:
X_embedded = cur_X_embedded
last_divergence = my_tse.kl_divergence_
print 'Lowest kl_divergence: ' + str(last_divergence)
names = data['target_names']
colors_map = []
for c in color:
if c == 0:
colors_map.append('red')
else:
colors_map.append('blue')
Plot.scatter(cur_X_embedded[:,0], cur_X_embedded[:,1], label = color, color = colors_map);
Plot.show();
# -
| last_tsne/t-sne_wrong_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 3.1 创建标题部分、摘要及关键词
#
# 文档主体代码是指位于document环境的部分。在文档正文章节内容及目录之前,一般先创建标题部分(包括文档标题、作者和日期)、摘要、以及关键词信息,这也是文档主体代码中最开始的部分。下面分别介绍这部分的创建过程。
#
# ### 3.1.1 创建标题部分
#
# 这一部分与`2.5.1添加标题、日期、作者信息`类似
#
# - 使用`\title{}`命令设置文档标题
#
# 对于较长的文档标题,可以使用`\\`对标题内容进行分行。
#
# - 使用`\author{}`命令设置作者
#
# 如果有多个作者,作者之间可以使用`\and`进行分隔。
#
# - 使用`\date{}`命令设置日期信息
#
# 在实际使用时,有时需要省略日期信息,那么在{}中不写任何内容即可。如果想要使用默认值(当前日期),则应使用`\date`命令。
#
# - 使用`\maketitle`命令完成标题部分的创建
#
# 仅仅执行上述三行语句无法在文档编译时生成标题部分,还必须在之后加上`\maketitle`命令,表示对标题部分内容进行排版才能真正实现标题部分的创建,具体实例见例1。
#
#
#
# ### 3.1.2 创建摘要及关键词
#
# 在LaTeX中,使用`abstract`环境撰写文档摘要部分,并在其后使用`\textbf{}`命令设置文档关键词。
#
# 【**例1**】创建标题部分、摘要及关键词。
#
# ```tex
# \documentclass[fontsize=12pt]{article}
#
# \begin{document}
#
# \title{My title} % 设置文档标题
# \author{A, B and C} % 设置作者
# \date{August 2021} % 设置日期信息
# \maketitle %
#
# \begin{abstract} % 设置摘要
# This is the abstract. This is the abstract. This is the abstract. This is the abstract. This is the abstract. This is the abstract.
# \end{abstract}
# \textbf{Keywords: keyword1, keyword2, keyword3} % 设置关键词
#
# Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! Hello, LaTeX! % 文档内容
#
# \end{document}
# ```
#
# 编译后的效果如图3-1-1所示:
#
# <p align="center">
# <img align="middle" src="graphics/example3_1_1.png" width="500" />
# </p>
#
# <center><b>图3-1-1</b> 编译后效果</center>
#
#
#
#
# 【回放】[**导言**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-3/section0.ipynb)
#
# 【继续】[**3.2 创建章节**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-3/section2.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-3/section1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorFlow Tutorial #03
# # PrettyTensor
#
# These lessons are adapted from [tutorials](https://github.com/Hvass-Labs/TensorFlow-Tutorials)
# by [<NAME>](http://www.hvass-labs.org/) / [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)
# which are published under the [MIT License](https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/LICENSE) which allows very broad use for both academic and commercial purposes.
#
# ## Introduction
#
# The previous tutorial showed how to implement a Convolutional Neural Network in TensorFlow, which required low-level knowledge of how TensorFlow works. It was complicated and easy to make mistakes.
#
# This tutorial shows how to use the add-on package for TensorFlow called [PrettyTensor](https://github.com/google/prettytensor), which is also developed by Google. PrettyTensor provides much simpler ways of constructing Neural Networks in TensorFlow, thus allowing us to focus on the idea we wish to implement and not worry so much about low-level implementation details. This also makes the source-code much shorter and easier to read and modify.
#
# Most of the source-code in this tutorial is identical to Tutorial #02 except for the graph-construction which is now done using PrettyTensor, as well as some other minor changes.
#
# This tutorial builds directly on Tutorial #02 and it is recommended that you study that tutorial first if you are new to TensorFlow. You should also be familiar with basic linear algebra, Python and the Jupyter Notebook editor.
# ## Flowchart
# The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. See the previous tutorial for a more detailed description of convolution.
from IPython.display import Image
Image('images/02_network_flowchart.png')
# The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled so the image resolution is decreased from 28x28 to 14x14.
#
# These 16 smaller images are then processed in the second convolutional layer. We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are down-sampled again to 7x7 pixels.
#
# The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image.
#
# The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low.
#
# These particular filter-weights and intermediate images are the results of one optimization run and may look different if you re-run this Notebook.
#
# Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow.
# ## Imports
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math
# We also need PrettyTensor.
import prettytensor as pt # pip install prettytensor
# -
# This was developed using Python 3.5.2 (Anaconda) and TensorFlow version:
# Notebook echo: display the TensorFlow version this was developed against.
tf.__version__
# PrettyTensor version:
pt.__version__
# ## Load Data
# The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.
from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST on first run, then caches it under data/MNIST/.
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
# The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial.
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
# The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now.
# Attach the integer class labels (argmax over the one-hot rows) to the
# test-set object for convenient use in plotting and evaluation below.
data.test.cls = np.argmax(data.test.labels, axis=1)
# ## Data Dimensions
# The data dimensions are used in several places in the source-code below. They are defined once so we can use these variables instead of numbers throughout the source-code below.
# +
# Shared data-dimension constants used throughout the notebook.
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
# -
# ### Helper-function for plotting images
# Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.
def plot_images(images, cls_true, cls_pred=None):
    """Show 9 images in a 3x3 grid, labelling each with its true class
    and, when cls_pred is given, the predicted class as well."""
    assert len(images) == len(cls_true) == 9

    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for idx, ax in enumerate(axes.flat):
        # Each image is stored flat; restore the 2-d shape for display.
        ax.imshow(images[idx].reshape(img_shape), cmap='binary')

        # Compose the caption for this sub-plot.
        if cls_pred is None:
            caption = "True: {0}".format(cls_true[idx])
        else:
            caption = "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx])
        ax.set_xlabel(caption)

        # Axis ticks add nothing here.
        ax.set_xticks([])
        ax.set_yticks([])

    # Render everything at once so multiple plots per cell work correctly.
    plt.show()
# ### Plot a few images to see if data is correct
# +
# Quick sanity check that images and labels line up correctly.
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
# -
# ## TensorFlow Graph
#
# The entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.
#
# TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.
#
# TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.
#
# A TensorFlow graph consists of the following parts which will be detailed below:
#
# * Placeholder variables used for inputting data to the graph.
# * Variables that are going to be optimized so as to make the convolutional network perform better.
# * The mathematical formulas for the convolutional network.
# * A cost measure that can be used to guide the optimization of the variables.
# * An optimization method which updates the variables.
#
# In addition, the TensorFlow graph may also contain various debugging statements e.g. for logging data to be displayed using TensorBoard, which is not covered in this tutorial.
# ### Placeholder variables
# Placeholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.
#
# First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.
# Input images, flattened to vectors of length img_size_flat; None lets the
# batch size vary per run.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
# The convolutional layers expect `x` to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is:
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
# Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case.
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
# We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.
# Integer class numbers derived from the one-hot y_true (a lazy graph op).
y_true_cls = tf.argmax(y_true, dimension=1)
# ## TensorFlow Implementation
# This section shows the original source-code from Tutorial #02 which implements the Convolutional Neural Network directly in TensorFlow. The code is not actually used in this Notebook and is only meant for easy comparison to the PrettyTensor implementation below.
#
# The thing to note here is how many lines of code there are and the low-level details of how TensorFlow stores its data and performs the computation. It is easy to make mistakes even for fairly small Neural Networks.
# ### Helper-functions
# In the direct TensorFlow implementation, we first make some helper-functions which will be used several times in the graph construction.
#
# These two functions create new variables in the TensorFlow graph that will be initialized with random values.
def new_weights(shape):
    """Create a TF weight variable of `shape`, initialized from a
    truncated normal distribution with stddev 0.05."""
    initial = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(initial)
def new_biases(length):
    """Create a TF bias variable of the given length, every element 0.05."""
    initial = tf.constant(0.05, shape=[length])
    return tf.Variable(initial)
# The following helper-function creates a new convolutional network. The input and output are 4-dimensional tensors (aka. 4-rank tensors). Note the low-level details of the TensorFlow API, such as the shape of the weights-variable. It is easy to make a mistake somewhere which may result in strange error-messages that are difficult to debug.
def new_conv_layer(input,              # The previous layer.
                   num_input_channels, # Num. channels in prev. layer.
                   filter_size,        # Width and height of filters.
                   num_filters,        # Number of filters.
                   use_pooling=True):  # Use 2x2 max-pooling.
    """Build one convolutional layer (conv + bias, optional 2x2 max-pool,
    then ReLU).

    Returns (layer, weights) -- the weights are returned as well so they
    can be plotted later.
    """
    # The TF API wants filters as [height, width, in_channels, out_channels].
    filter_shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=filter_shape)
    biases = new_biases(length=num_filters)

    # Stride 1 in every dimension (first and last must always be 1: they
    # index image-number and channel). 'SAME' zero-padding keeps the
    # spatial size of the output equal to the input.
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')

    # One bias per output filter-channel.
    layer += biases

    if use_pooling:
        # 2x2 max-pooling with stride 2: keep the largest value of each
        # 2x2 window, halving the image resolution.
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

    # ReLU non-linearity: max(x, 0) element-wise. Normally applied before
    # pooling, but relu(max_pool(x)) == max_pool(relu(x)), so pooling
    # first saves 75% of the relu operations.
    layer = tf.nn.relu(layer)

    return layer, weights
# The following helper-function flattens a 4-dim tensor to 2-dim so we can add fully-connected layers after the convolutional layers.
def flatten_layer(layer):
    """Flatten a 4-dim [num_images, height, width, channels] tensor into a
    2-dim [num_images, num_features] tensor.

    Returns (layer_flat, num_features), with
    num_features == height * width * channels.
    """
    layer_shape = layer.get_shape()

    # Features per image: product of height, width and channel counts.
    num_features = layer_shape[1:4].num_elements()

    # -1 lets TF infer the image count, so the total size is unchanged.
    layer_flat = tf.reshape(layer, [-1, num_features])

    return layer_flat, num_features
# The following helper-function creates a fully-connected layer.
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Use Rectified Linear Unit (ReLU)?
    """Build a fully-connected layer: input @ weights + biases, with an
    optional ReLU non-linearity applied on top."""
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    # Affine transform of the input.
    layer = tf.matmul(input, weights) + biases

    return tf.nn.relu(layer) if use_relu else layer
# ### Graph Construction
#
# The Convolutional Neural Network will now be constructed using the helper-functions above. Without the helper-functions this would have been very long and confusing
#
# Note that the following code will not actually be executed. It is just shown here for easy comparison to the PrettyTensor code below.
#
# The previous tutorial used constants defined elsewhere so they could be changed easily. For example, instead of having `filter_size=5` as an argument to `new_conv_layer()` we had `filter_size=filter_size1` and then defined `filter_size1=5` elsewhere. This made it easier to change all the constants.
# Reference-only: the direct-TensorFlow graph from Tutorial #02, shown for
# comparison with the PrettyTensor version below. Guarded so it never runs.
if False: # Don't execute this! Just show it for easy comparison.
    # First convolutional layer.
    layer_conv1, weights_conv1 = \
        new_conv_layer(input=x_image,
                       num_input_channels=num_channels,
                       filter_size=5,
                       num_filters=16,
                       use_pooling=True)
    # Second convolutional layer.
    layer_conv2, weights_conv2 = \
        new_conv_layer(input=layer_conv1,
                       num_input_channels=16,
                       filter_size=5,
                       num_filters=36,
                       use_pooling=True)
    # Flatten layer.
    layer_flat, num_features = flatten_layer(layer_conv2)
    # First fully-connected layer.
    layer_fc1 = new_fc_layer(input=layer_flat,
                             num_inputs=num_features,
                             num_outputs=128,
                             use_relu=True)
    # Second fully-connected layer.
    layer_fc2 = new_fc_layer(input=layer_fc1,
                             num_inputs=128,
                             num_outputs=num_classes,
                             use_relu=False)
    # Predicted class-label.
    y_pred = tf.nn.softmax(layer_fc2)
    # Cross-entropy for the classification of each image.
    cross_entropy = \
        tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                labels=y_true)
    # Loss aka. cost-measure.
    # This is the scalar value that must be minimized.
    loss = tf.reduce_mean(cross_entropy)
# ## PrettyTensor Implementation
# This section shows how to make the exact same implementation of a Convolutional Neural Network using PrettyTensor.
#
# The basic idea is to wrap the input tensor `x_image` in a PrettyTensor object which has helper-functions for adding new computational layers so as to create an entire Neural Network. This is a bit similar to the helper-functions we implemented above, but it is even simpler because PrettyTensor also keeps track of each layer's input and output dimensionalities, etc.
# Wrap the 4-d image tensor so PrettyTensor's chainable layer API can be used.
x_pretty = pt.wrap(x_image)
# Now that we have wrapped the input image in a PrettyTensor object, we can add the convolutional and fully-connected layers in just a few lines of source-code.
#
# Note that `pt.defaults_scope(activation_fn=tf.nn.relu)` makes `activation_fn=tf.nn.relu` an argument for each of the layers constructed inside the `with`-block, so that Rectified Linear Units (ReLU) are used for each of these layers. The `defaults_scope` makes it easy to change arguments for all of the layers.
# defaults_scope applies activation_fn=tf.nn.relu to every layer created
# inside this with-block, so each conv/fc layer gets a ReLU by default.
with pt.defaults_scope(activation_fn=tf.nn.relu):
    # Chained construction: conv -> pool -> conv -> pool -> flatten -> fc
    # -> softmax classifier, which yields the predictions and the loss.
    y_pred, loss = x_pretty.\
        conv2d(kernel=5, depth=16, name='layer_conv1').\
        max_pool(kernel=2, stride=2).\
        conv2d(kernel=5, depth=36, name='layer_conv2').\
        max_pool(kernel=2, stride=2).\
        flatten().\
        fully_connected(size=128, name='layer_fc1').\
        softmax_classifier(num_classes=num_classes, labels=y_true)
# That's it! We have now created the exact same Convolutional Neural Network in a few simple lines of code that required many complex lines of code in the direct TensorFlow implementation.
#
# Using PrettyTensor instead of TensorFlow, we can clearly see the network structure and how the data flows through the network. This allows us to focus on the main ideas of the Neural Network rather than low-level implementation details. It is simple and pretty!
# ### Getting the Weights
# Unfortunately, not everything is pretty when using PrettyTensor.
#
# Further below, we want to plot the weights of the convolutional layers. In the TensorFlow implementation we had created the variables ourselves so we could just refer to them directly. But when the network is constructed using PrettyTensor, all the variables of the layers are created indirectly by PrettyTensor. We therefore have to retrieve the variables from TensorFlow.
#
# We used the names `layer_conv1` and `layer_conv2` for the two convolutional layers. These are also called variable scopes (not to be confused with `defaults_scope` as described above). PrettyTensor automatically gives names to the variables it creates for each layer, so we can retrieve the weights for a layer using the layer's scope-name and the variable-name.
#
# The implementation is somewhat awkward because we have to use the TensorFlow function `get_variable()` which was designed for another purpose; either creating a new variable or re-using an existing variable. The easiest thing is to make the following helper-function.
def get_weights_variable(layer_name):
    """Fetch the existing 'weights' variable that PrettyTensor created
    inside the variable scope named `layer_name`.

    Awkward but necessary: tf.get_variable() was designed for creating or
    re-using variables, and reuse=True makes it return the existing one.
    """
    with tf.variable_scope(layer_name, reuse=True):
        return tf.get_variable('weights')
# Using this helper-function we can retrieve the variables. These are TensorFlow objects. In order to get the contents of the variables, you must do something like: `contents = session.run(weights_conv1)` as demonstrated further below.
# TensorFlow variable handles (not values); fetch contents via session.run().
weights_conv1 = get_weights_variable(layer_name='layer_conv1')
weights_conv2 = get_weights_variable(layer_name='layer_conv2')
# ### Optimization Method
# PrettyTensor gave us the predicted class-label (`y_pred`) as well as a loss-measure that must be minimized, so as to improve the ability of the Neural Network to classify the input images.
#
# It is unclear from the documentation for PrettyTensor whether the loss-measure is cross-entropy or something else. But we now use the `AdamOptimizer` to minimize the loss.
#
# Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.
# Adam minimizes the PrettyTensor-provided loss. This only *builds* the
# training op -- nothing runs until session.run(optimizer, ...) below.
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
# ### Performance Measures
#
# We need a few more performance measures to display the progress to the user.
#
# First we calculate the predicted class number from the output of the Neural Network `y_pred`, which is a vector with 10 elements. The class number is the index of the largest element.
# Predicted class number: index of the largest of the 10 output elements.
y_pred_cls = tf.argmax(y_pred, dimension=1)
# Then we create a vector of booleans telling us whether the predicted class equals the true class of each image.
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
# The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# ## TensorFlow Run
# ### Create TensorFlow session
#
# Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.
# Create the session that will execute the graph built above.
session = tf.Session()
# ### Initialize variables
#
# The variables for `weights` and `biases` must be initialized before we start optimizing them.
# Initialize all weights and biases before any training or evaluation.
session.run(tf.global_variables_initializer())
# ### Helper-function to perform optimization iterations
# There are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.
#
# If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to perform more optimization iterations.
# Mini-batch size for each optimization step (lower this if RAM is tight).
train_batch_size = 64
# Function for performing a number of optimization iterations so as to gradually improve the variables of the network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations.
# +
# Counter for total number of iterations performed so far.
total_iterations = 0
def optimize(num_iterations):
    """Run `num_iterations` optimization steps on random training batches.

    Updates the module-level `total_iterations` counter, prints the
    training-batch accuracy every 100 iterations, and reports total time.
    """
    # Update the shared counter, not a local copy.
    global total_iterations

    start_time = time.time()

    for i in range(total_iterations, total_iterations + num_iterations):
        # Next mini-batch of images with their one-hot labels.
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)

        # Bind the batch to the graph's placeholder variables.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}

        # One gradient step on this batch.
        session.run(optimizer, feed_dict=feed_dict_train)

        # Periodic progress report on the current training batch.
        if i % 100 == 0:
            acc = session.run(accuracy, feed_dict=feed_dict_train)
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
            print(msg.format(i + 1, acc))

    total_iterations += num_iterations

    # Report elapsed wall-clock time, rounded to whole seconds.
    time_dif = time.time() - start_time
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# -
# ### Helper-function to plot example errors
# Function for plotting examples of images from the test-set that have been mis-classified.
def plot_example_errors(cls_pred, correct):
    """Plot the first 9 mis-classified test images.

    cls_pred: predicted class-number for every image in the test-set.
    correct:  boolean array, True where the prediction matched the truth.
    Called from print_test_accuracy() below.
    """
    # Select the test images that were classified incorrectly.
    incorrect = np.logical_not(correct)
    images = data.test.images[incorrect]
    cls_pred = cls_pred[incorrect]
    cls_true = data.test.cls[incorrect]

    # Show the first nine with their true and (wrong) predicted classes.
    plot_images(images=images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])
# ### Helper-function to plot confusion matrix
def plot_confusion_matrix(cls_pred):
    """Print and plot the test-set confusion matrix.

    cls_pred: predicted class-number for every image in the test-set.
    Called from print_test_accuracy() below.
    """
    cls_true = data.test.cls
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)

    # Text version first, then an image rendering of the same matrix.
    print(cm)
    plt.matshow(cm)

    # Cosmetics: colour scale and one tick per class on both axes.
    plt.colorbar()
    ticks = np.arange(num_classes)
    plt.xticks(ticks, range(num_classes))
    plt.yticks(ticks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')

    # Render now so multiple plots per cell display correctly.
    plt.show()
# ### Helper-function for showing the performance
# Function for printing the classification accuracy on the test-set.
#
# It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.
#
# Note that this function can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size.
# +
# Split the test-set into smaller batches of this size.
test_batch_size = 256

def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):
    """Print the classification accuracy on the test-set.

    Predictions are computed in batches of test_batch_size to bound memory
    use, then reused for the optional error-example plot and confusion
    matrix so nothing is recalculated.

    show_example_errors:   also plot 9 mis-classified images.
    show_confusion_matrix: also print/plot the confusion matrix.
    """
    num_test = len(data.test.images)

    # Predicted class for every test image, filled in batch by batch.
    # BUG FIX: the original used dtype=np.int, a deprecated alias removed
    # in NumPy 1.24; the builtin int is the supported spelling.
    cls_pred = np.zeros(shape=num_test, dtype=int)

    # i is the start index of the next batch.
    i = 0
    while i < num_test:
        # j is the (exclusive) end index of the next batch.
        j = min(i + test_batch_size, num_test)

        images = data.test.images[i:j, :]
        labels = data.test.labels[i:j, :]
        feed_dict = {x: images,
                     y_true: labels}

        # Predicted classes for this batch.
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)

        i = j

    cls_true = data.test.cls

    # Boolean per image: was it classified correctly?
    correct = (cls_true == cls_pred)
    # Summing booleans counts the True entries.
    correct_sum = correct.sum()
    acc = float(correct_sum) / num_test

    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))

    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)

    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)
# -
# ## Performance before any optimization
#
# The accuracy on the test-set is very low because the model variables have only been initialized and not optimized at all, so it just classifies the images randomly.
# Baseline: untrained network classifies essentially at random (~10%).
print_test_accuracy()
# ## Performance after 1 optimization iteration
#
# The classification accuracy does not improve much from just 1 optimization iteration, because the learning-rate for the optimizer is set very low.
optimize(num_iterations=1)
print_test_accuracy()
# ## Performance after 100 optimization iterations
#
# After 100 optimization iterations, the model has significantly improved its classification accuracy.
optimize(num_iterations=99) # We already performed 1 iteration above.
print_test_accuracy(show_example_errors=True)
# ## Performance after 1000 optimization iterations
#
# After 1000 optimization iterations, the model has greatly increased its accuracy on the test-set to more than 90%.
optimize(num_iterations=900) # We performed 100 iterations above.
print_test_accuracy(show_example_errors=True)
# ## Performance after 10,000 optimization iterations
#
# After 10,000 optimization iterations, the model has a classification accuracy on the test-set of about 99%.
# optimize() accumulates iterations across cells: total here reaches 10,000.
optimize(num_iterations=9000) # We performed 1000 iterations above.
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
# ## Visualization of Weights and Layers
#
# When the Convolutional Neural Network was implemented directly in TensorFlow, we could easily plot both the convolutional weights and the images that were output from the different layers. When using PrettyTensor instead, we can also retrieve the weights as shown above, but we cannot so easily retrieve the images that are output from the convolutional layers. So in the following we only plot the weights.
# ### Helper-function for plotting convolutional weights
def plot_conv_weights(weights, input_channel=0):
    """Plot the filter-weights of a convolutional layer in a square grid.

    :param weights:
        TensorFlow op for a 4-dim weights variable,
        e.g. weights_conv1 or weights_conv2.
    :param input_channel:
        Index of the input channel whose filter-weights are plotted.
    """
    # Retrieve the values of the weight-variables from TensorFlow.
    # A feed-dict is not necessary because nothing is calculated.
    w = session.run(weights)
    # Get the lowest and highest values for the weights.
    # This is used to correct the colour intensity across
    # the images so they can be compared with each other.
    w_min = np.min(w)
    w_max = np.max(w)
    # Number of filters used in the conv. layer.
    num_filters = w.shape[3]
    # Number of grids to plot.
    # Rounded-up, square-root of the number of filters.
    num_grids = math.ceil(math.sqrt(num_filters))
    # Create figure with a grid of sub-plots.
    # squeeze=False guarantees axes is always a 2-dim array, so
    # axes.flat works even when num_grids == 1 (a single filter);
    # without it, plt.subplots(1, 1) returns a bare Axes object.
    fig, axes = plt.subplots(num_grids, num_grids, squeeze=False)
    # Plot all the filter-weights.
    for i, ax in enumerate(axes.flat):
        # Only plot the valid filter-weights; the grid may have
        # more cells than there are filters.
        if i < num_filters:
            # Get the weights for the i'th filter of the input channel.
            # See new_conv_layer() for details on the format
            # of this 4-dim tensor.
            img = w[:, :, input_channel, i]
            # Plot image on a shared colour scale so filters are
            # directly comparable (positive red, negative blue).
            ax.imshow(img, vmin=w_min, vmax=w_max,
                      interpolation='nearest', cmap='seismic')
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
# ### Convolution Layer 1
# Now plot the filter-weights for the first convolutional layer.
#
# Note that positive weights are red and negative weights are blue.
plot_conv_weights(weights=weights_conv1)
# ### Convolution Layer 2
# Now plot the filter-weights for the second convolutional layer.
#
# There are 16 output channels from the first conv-layer, which means there are 16 input channels to the second conv-layer. The second conv-layer has a set of filter-weights for each of its input channels. We start by plotting the filter-weights for the first channel.
#
# Note again that positive weights are red and negative weights are blue.
plot_conv_weights(weights=weights_conv2, input_channel=0)
# There are 16 input channels to the second convolutional layer, so we can make another 15 plots of filter-weights like this. We just make one more with the filter-weights for the second channel.
plot_conv_weights(weights=weights_conv2, input_channel=1)
# ### Close TensorFlow Session
# We are now done using TensorFlow, so we close the session to release its resources.
# This has been commented out in case you want to modify and experiment
# with the Notebook without having to restart it.
session.close()
# ## Conclusion
#
# PrettyTensor allows you to implement Neural Networks using a much simpler syntax than a direct implementation in TensorFlow. This lets you focus on your ideas rather than low-level implementation details. It makes the code much shorter and easier to understand, and you will make fewer mistakes.
#
# However, there are some inconsistencies and awkward designs in PrettyTensor, and it can be difficult to learn because the documentation is short and confusing. Hopefully this gets better in the future (this was written in July 2016).
#
# There are alternatives to PrettyTensor including [TFLearn](https://github.com/tflearn/tflearn) and [Keras](https://github.com/fchollet/keras).
# ## Exercises
#
# These are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.
#
# You may want to backup this Notebook before making any changes.
#
# * Change the activation function to sigmoid for all the layers.
# * Use sigmoid in some layers and relu in others. Can you use `defaults_scope` for this?
# * Use l2loss in all layers. Then try it for only some of the layers.
# * Use PrettyTensor's reshape for `x_image` instead of TensorFlow's. Is one better than the other?
# * Add a dropout-layer after the fully-connected layer. If you want a different `keep_prob` during training and testing then you will need a placeholder variable and set it in the feed-dict.
# * Replace the 2x2 max-pooling layers with stride=2 in the convolutional layers. Is there a difference in classification accuracy? What if you optimize it again and again? The difference is random, so how would you measure if there really is a difference? What are the pros and cons of using max-pooling vs. stride in the conv-layer?
# * Change the parameters for the layers, e.g. the kernel, depth, size, etc. What is the difference in time usage and classification accuracy?
# * Add and remove some convolutional and fully-connected layers.
# * What is the simplest network you can design that still performs well?
# * Retrieve the bias-values for the convolutional layers and print them. See `get_weights_variable()` for inspiration.
# * Remake the program yourself without looking too much at this source-code.
# * Explain to a friend how the program works.
# ## License (MIT)
#
# Copyright (c) 2016 by [<NAME>](http://www.hvass-labs.org/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| Deep_Learning/TensorFlow-Hvass-Labs/03_PrettyTensor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas
# Read the first sheet of the workbook (the bare expression is the
# displayed cell output in the notebook).
pandas.read_excel(io='prueba.xlsx')
# Read the second sheet (sheet_name is 0-indexed).
pandas.read_excel(io='prueba.xlsx', sheet_name=1)
valor = pandas.read_excel(io='prueba.xlsx', sheet_name=1)  # assign a name to avoid retyping the whole call; here: valor
valor
# Two equivalent ways of selecting a column.
# NOTE(review): assumes the second sheet has 'ciudad' and 'direccion'
# columns — verify against prueba.xlsx.
valor.ciudad
valor['direccion']
pandas.read_excel(io='prueba.xlsx')
suma_edad = pandas.read_excel(io='prueba.xlsx')
# Two equivalent ways of summing the 'Edad' (age) column.
suma_edad.Edad.sum()
suma_edad['Edad'].sum()
import pandas as pd
df = pd.read_excel(io='prueba.xlsx')
df
df.Edad.sum()
# df.Edad.sum() returns a scalar, not a DataFrame, so re-applying
# .Edad / ['Edad'] to it (as the original cells did) would raise an
# AttributeError/TypeError. Just display the computed total instead.
suma_edad = df.Edad.sum()
suma_edad
# # PRACTICE
#
# ## Scatterplot with 2 variables
import seaborn as sns
# List the example datasets bundled with seaborn (fetched from the web).
sns.get_dataset_names()
sns.load_dataset(name='mpg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
df = sns.load_dataset(name='car_crashes')
df.head()
tabla_accidentes=sns.load_dataset(name='car_crashes')
# Histogram of the 'alcohol' column.
df.alcohol.hist()
df.alcohol.values
tabla_accidentes['alcohol']# TODO: ask how to print these horizontally
tabla_accidentes['total']
df=sns.load_dataset(name='car_crashes') # NOTE: this data frame was already defined above as tabla_accidentes
tabla_de_accidentes=sns.load_dataset(name='car_crashes')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df=sns.load_dataset(name='car_crashes')
tabla_de_accidentes=sns.load_dataset(name='car_crashes')
# Scatterplot: alcohol involvement vs. total crashes, one point per state.
sns.scatterplot(x="alcohol",
                y="total",
                data=tabla_de_accidentes)
# ## Scatterplot with 3 variables
tabla_de_accidentes=sns.load_dataset(name='car_crashes')
# Same scatterplot as before, with a third variable ('speeding')
# encoded as the point colour via hue=.
sns.scatterplot(x="alcohol",
                y="total",hue='speeding',
                data=tabla_de_accidentes)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
tabla_de_accidentes=sns.load_dataset(name='car_crashes')
# +
# Larger figure with axis labels, saved to a PNG file.
plt.figure(figsize=(10,8))
sns.scatterplot(x="alcohol",
                y="total",hue='speeding',
                data=tabla_de_accidentes)
plt.xlabel("alcohol")
plt.ylabel("total")
plt.savefig("Color_and_shape_by_variable_Seaborn_scatterplot.png",
            format='png',dpi=150)
# -
df
| #01. Data Tables & Basic Concepts of Programming/prueba.ipynb |