code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
# +
# This file reads void and phoneme features and combines them together to create input data for NNs
# +
# Paths. BUGFIX: the originals used backslash literals (".\data", "\p"), which
# are invalid escape sequences (SyntaxWarning on Python >= 3.12) and break on
# non-Windows systems; os.path.join is portable.
data_path = os.path.join(".", "data")
training_set_description_path = os.path.join(
    data_path, "protocol_V2", "ASVspoof2017_V2_train.trn.txt")
# load data
# each row is a sample
# label 1 for genuine data, 0 for recorded data
with open(os.path.join(data_path, 'training_features_for_all_samples_PCA.npy'), 'rb') as f:
    training_features = np.load(f)  # BUGFIX: was misspelled "training_featuures"
with open(os.path.join(data_path, 'training_labels.npy'), 'rb') as f:
    training_labels = np.load(f)
# Use a context manager so the description file is always closed.
with open(training_set_description_path, "r") as f:
    training_set_description = f.read().split('\n')
# -
# void data column 0:97 is void feature
# void data column 97 is label
with open(os.path.join(data_path, 'void_feature_label_train.npy'), 'rb') as f:
    void_data = np.load(f)
# check labels. They should be the same (the difference should sum to 0)
print(np.sum(void_data[:, 97].reshape(-1) - training_labels.reshape(-1)))
print(training_features.shape)
print(void_data.shape)
# Concatenate void features (first 97 columns) with phoneme features, per sample.
combined_training_features = np.concatenate((void_data[:, 0:97], training_features), axis=1)
print(combined_training_features.shape)
# save data
with open(os.path.join(data_path, 'combined_void_phoneme_training_features.npy'), 'wb') as f:
    np.save(f, combined_training_features)
| PhoDopLive + Radar/CombinePhoneticSegVoidFeaatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import the required packages for the assignment
# -
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Import the training dataset
df = pd.read_csv('../data/raw/train.csv')
# Show key statistics for the dataset
df.describe()
# Create dataset without the Id and oldID columns (identifiers carry no signal)
df_analysis = df.drop(columns=["Id_old", "Id"])
# show top 5 rows of new dataset
df_analysis.head()
# The target looks like it is all 1? Unbalanced dataset. Will this cause problems down the track?
df_analysis['TARGET_5Yrs'].value_counts().plot(kind='bar')
# Pop the target column into its own variable
target = df_analysis.pop('TARGET_5Yrs')
# +
# Grab the correlation matrix - lots of variables
df_analysis.corr()
# -
# Visualise the correlation matrix
f = plt.figure(figsize=(10, 10))
plt.matshow(df_analysis.corr(), fignum=f.number)
plt.xticks(range(df_analysis.shape[1]), df_analysis.columns, fontsize=14, rotation=45)
plt.yticks(range(df_analysis.shape[1]), df_analysis.columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16)
plt.show()
# Scale the variables so each feature contributes comparably to the penalty
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df_scaled = scaler.fit_transform(df_analysis)
# Split the training dataset into validation and training set
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(df_scaled, target, test_size=0.2, random_state=8)
# Baseline model with all the variables and no regularisation penalty.
# BUGFIX: penalty='none' (the string) was deprecated in scikit-learn 1.2 and
# removed in 1.4; penalty=None is the supported spelling.
from sklearn.linear_model import LogisticRegression
model_1 = LogisticRegression(penalty=None)
model_1.fit(X_train, y_train)
model_1.predict(X_valid)
# +
# get the AUC metric and plot the ROC curve
from sklearn import metrics
y_pred_proba = model_1.predict_proba(X_valid)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_valid, y_pred_proba)
auc = metrics.roc_auc_score(y_valid, y_pred_proba)
plt.plot(fpr, tpr, label="No reg model, auc="+str(auc))
plt.legend(loc=4)
plt.show()
# -
np.round(model_1.coef_, 1)
# There are 17 features with co-efficients > 0. Do the same model but with L1 penalty to select features
model_1_l1 = LogisticRegression(penalty='l1', solver='liblinear')
model_1_l1.fit(X_train, y_train)
model_1_l1.predict(X_valid)
y_pred_proba = model_1_l1.predict_proba(X_valid)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_valid, y_pred_proba)
auc = metrics.roc_auc_score(y_valid, y_pred_proba)
plt.plot(fpr, tpr, label="L1 model, auc="+str(auc))
plt.legend(loc=4)
plt.show()
# There are 14 features with coefficient > 0 when L1 regularisation is applied.
# The area under the ROC curve is also still ~ 0.7
np.round(model_1_l1.coef_, 1)
# Do the same model but with L2 penalty to select features
model_1_l2 = LogisticRegression(penalty='l2')
model_1_l2.fit(X_train, y_train)
model_1_l2.predict(X_valid)
y_pred_proba = model_1_l2.predict_proba(X_valid)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_valid, y_pred_proba)
auc = metrics.roc_auc_score(y_valid, y_pred_proba)
plt.plot(fpr, tpr, label="L2 model, auc="+str(auc))
plt.legend(loc=4)
plt.show()
np.round(model_1_l2.coef_, 1)
# +
# Applying the L2 regularisation also leads to 17 coefficients > 0 and AUC ~.70
# Compare all the model co-efficients and see if they make sense
# -
co_efficients = np.vstack((np.round(model_1.coef_, 1), np.round(model_1_l1.coef_, 1), np.round(model_1_l2.coef_, 1)))
pd.DataFrame(co_efficients, columns=df_analysis.columns)
| notebooks/Assignment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import numpy as np
# # Boolean arrays
# _(also called masks)_
a = np.arange(4)
a
# Fancy indexing: select by a list of positions (first and last element).
a[[0, -1]]
# Boolean mask indexing: keep the positions where the mask is True.
a[[True, False, False, True]]
# Elementwise comparison -> boolean array.
a >= 2
# NOTE(review): comparing an array to the *type object* `int` is not a dtype
# check; NumPy compares each element to `int` and yields all False (possibly
# with a FutureWarning) -- kept as in the original demo.
a == int
a[a >= 2]
a[a == int]      # selects nothing, for the reason noted above
a.mean()
a[a > a.mean()]
# ~ negates a boolean mask.
a[ ~ ( a > a.mean() )]
# OR BOOLEAN
a[(a == 0) | (a == 1)]
# AND BOOLEAN
a[(a <= 2) & (a % 2 == 0)]
a[(a == int) | (a == float)]   # likewise selects nothing (type objects)
A = np.random.randint(100, size=(3, 3))
# +
# A = np.random.randint(10, size=(3, 3))
# -
A
# A boolean mask over a 2-D array returns a flat 1-D array of the True positions.
A[np.array([
    [True, True, True],
    [True, True, True],
    [False, True, True]
])]
A > 30
| FreeCodeCamp/Data Analysis with Python/Numpy Boolean Arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): hard-coded absolute path -- only valid on the original
# author's machine; consider a relative or configurable path.
data_dir = '/Users/boyuliu/pyprojects/Joann/Joann-Thailand-Project/notebooks/datasets/new_dataset/'
# One CSV per wave of cases (wv_cases1..4), all from the 2021-01-20 extract.
wv1 = pd.read_csv(
    data_dir + 'regression_data_%s_20210120.csv' % 'wv_cases1')
wv2 = pd.read_csv(
    data_dir + 'regression_data_%s_20210120.csv' % 'wv_cases2')
wv3 = pd.read_csv(
    data_dir + 'regression_data_%s_20210120.csv' % 'wv_cases3')
wv4 = pd.read_csv(
    data_dir + 'regression_data_%s_20210120.csv' % 'wv_cases4')
wv1.head()
# -
# ## remove linear trend
from sklearn.linear_model import LinearRegression
# +
wv_data_file = 'wv_cases1'
df = wv1
# Detrend per province: fit demand ~ time with OLS, subtract the fitted
# trend, and scale by that province's demand standard deviation.
# (The column starts as object dtype because of the None placeholder; it is
# cast to float before plotting below.)
df['detrend_demand'] = None
for prov in df.province.unique():
    prov_row_idx = df[df.province==prov].index
    demand_data_points = df.loc[prov_row_idx, 'total_demand'].values
    time_x = np.arange(len(demand_data_points)).reshape(-1, 1)
    reg = LinearRegression().fit(time_x, demand_data_points)
    trend_demand = reg.predict(time_x)
    std_demand = np.std(demand_data_points)
    df.loc[prov_row_idx, 'detrend_demand'] = (demand_data_points - trend_demand)/std_demand
print(df.shape)
# create placeholder
# NOTE(review): offsets run 1..8 although the comment below says "up to 2
# months" -- confirm the intended lag horizon.
for offset in range(1, 9):
    df['detrend_demand_plus_%s' % offset] = None
# shift IV the other way by up to 2 months
for offset in range(1, 9):
    df['detrend_demand_minus_%s' % offset] = None
# Fill the lag/lead columns per province so shifts never cross province
# boundaries (leading/trailing entries become NaN within each province).
for prov in df.province.unique():
    prov_row_idx = df[df.province==prov].index
    for offset in range(1, 9):
        df.loc[prov_row_idx, 'detrend_demand_plus_%s' % offset] = df.loc[prov_row_idx, 'detrend_demand'].shift(offset)
    # shift IV the other way by up to 2 months
    for offset in range(1, 9):
        df.loc[prov_row_idx, 'detrend_demand_minus_%s' % offset] = df.loc[prov_row_idx, 'detrend_demand'].shift(-offset)
print(df.shape)
# +
wv_data_file = 'wv_cases1'
# Persist the detrended wave-1 data; the filename carries the 2021-02-04 stamp.
df.to_csv(
    data_dir + 'regression_data_%s_detrend_20210204.csv' % wv_data_file,
    index=False)
df.head()
# -
df.columns
len(df.province), len(df.detrend_demand)
# +
# Boxplot of raw total demand by province.
a4_dims = (24, 16)
fig, ax = plt.subplots(figsize=a4_dims)
df['total_demand'] = df['total_demand'].astype(float)
bxplot = sns.boxplot(data=df, x='province', y='total_demand')
for item in bxplot.get_xticklabels():
    item.set_rotation(45)
plt.show()
# +
# Same boxplot for the detrended demand.
a4_dims = (24, 16)
fig, ax = plt.subplots(figsize=a4_dims)
df['detrend_demand'] = df['detrend_demand'].astype(float)
bxplot = sns.boxplot(data=df, x='province', y='detrend_demand')
for item in bxplot.get_xticklabels():
    item.set_rotation(45)
plt.show()
# -
| notebooks/data_cleaning_w_2020/Detrend demand (linear).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pre-Process COVIDx Dataset
from fastai2.vision.all import *
import os.path
# Root of the COVIDx dataset on the notebook VM.
path = Path('/home/jupyter/covidx')
# Release any cached GPU memory left over from a previous session.
torch.cuda.empty_cache()
# fix result: seed all RNGs so runs are reproducible
def seed_everything(seed):
    """Seed every RNG in use (hash, Python, NumPy, PyTorch) for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels (trades some speed for repeatability).
    torch.backends.cudnn.deterministic = True
SEED = 42
seed_everything(SEED)
# ## 1. View COVIDx
# Count the image files in the train folder.
path = Path('/home/jupyter/covidx/train')
folder_train = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
folder_train
# Count the image files in the test folder.
path = Path('/home/jupyter/covidx/test')
folder_test = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
folder_test
path = Path('/home/jupyter/covidx')
df_train = pd.read_csv(path/'covidx_train.csv')
df_train
path = Path('/home/jupyter/covidx')
df_test = pd.read_csv(path/'covidx_test.csv')
df_test
# ## 2. Combine Images
# run this line in the terminal
# # mv /home/jupyter/covidx/test/* /home/jupyter/covidx/images
path = Path('/home/jupyter/covidx/images')
folder_images = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
folder_images
# ## 3. Join CSVs & Split COVIDx by is_valid Column
path = Path('/home/jupyter/covidx')
# add Headers (the raw CSVs have no header row)
df_train = pd.read_csv(path/"covidx_train.csv", names=['patientid', 'path', 'finding', 'source'])
df_test = pd.read_csv(path/"covidx_test.csv", names=['patientid', 'path', 'finding', 'source'])
# add is_valid col. BUGFIX: fastai's ColSplitter treats is_valid=True rows as
# the VALIDATION set; the flags were inverted (train=True, test=False), which
# would have trained on the test split and validated on the train split.
df_train['is_valid'] = False
df_test['is_valid'] = True
df_train.info()
# stack the data frames on top of each other
frames = [df_train, df_test]
df_covidx = pd.concat(frames)
df_covidx.to_csv(path/"covidx.csv", index=False, encoding='utf-8-sig')
df_covidx.info()
df_covidx['is_valid'].value_counts()
| Dataset/COVIDx-preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory;
# list everything under it.
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# +
iris = pd.read_csv('../input/iris/Iris.csv')
# +
iris.head()
# +
# Drop the synthetic row identifier; it carries no signal.
del iris['Id']
iris.head()
# +
iris.shape
# +
iris.columns
# +
# Class balance: 50 samples per species.
iris['Species'].value_counts()
# +
iris.plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm')
# +
sns.set_style("whitegrid")
sns.FacetGrid(iris, hue='Species', height=6).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
# +
sns.pairplot(iris, hue='Species', height=4)
# +
# Split by species for per-class univariate analysis.
iris_setosa = iris.loc[iris['Species'] == 'Iris-setosa']
iris_versicolor = iris.loc[iris['Species'] == 'Iris-versicolor']
iris_virginica = iris.loc[iris['Species'] == 'Iris-virginica']
# 1-D scatter of petal length per species (all points plotted at y=0).
plt.plot(iris_setosa['PetalLengthCm'], np.zeros_like(iris_setosa['PetalLengthCm']), 'o')
plt.plot(iris_versicolor['PetalLengthCm'], np.zeros_like(iris_versicolor['PetalLengthCm']), 'o')
plt.plot(iris_virginica['PetalLengthCm'], np.zeros_like(iris_virginica['PetalLengthCm']), 'o')
# +
# NOTE: sns.distplot is deprecated in seaborn >= 0.11 (use histplot/displot);
# kept here for output compatibility with the original run.
sns.FacetGrid(iris, hue='Species', height=5).map(sns.distplot, 'PetalLengthCm').add_legend()
# +
sns.FacetGrid(iris, hue='Species', height=5).map(sns.distplot, 'PetalWidthCm').add_legend()
# +
sns.FacetGrid(iris, hue='Species', height=5).map(sns.distplot, 'SepalLengthCm').add_legend()
# +
sns.FacetGrid(iris, hue='Species', height=5).map(sns.distplot, 'SepalWidthCm').add_legend()
# +
# Empirical PDF of setosa petal length from a 10-bin histogram.
counts, bin_edges = np.histogram(iris_setosa['PetalLengthCm'], bins=10, density=True)
pdf = counts/(sum(counts))
print("PDF : ", pdf)
print("Bin_edges : ", bin_edges)
# +
# Empirical CDF via cumulative sum of the PDF; plot both against the upper
# bin edges.
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:], pdf)
plt.plot(bin_edges[1:], cdf)
# +
counts, bin_edges = np.histogram(iris_versicolor['PetalLengthCm'], bins=10, density=True)
pdf = counts/ (sum(counts))
print("PDF : ", pdf)
print("Bin_Edges : ", bin_edges)
# +
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:], pdf)
plt.plot(bin_edges[1:], cdf)
# +
counts, bin_edges = np.histogram(iris_virginica['PetalLengthCm'], bins=10, density=True)
pdf = counts/ (sum(counts))
print("PDF : ", pdf)
print("Bin_Edges : ", bin_edges)
# +
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:], pdf)
plt.plot(bin_edges[1:], cdf)
# +
# Central tendency, spread, and quantiles of petal length per species.
print("Mean petal length of each :")
print("Setosa : ", np.mean(iris_setosa['PetalLengthCm']))
print("Versicolor : ", np.mean(iris_versicolor['PetalLengthCm']))
print("Virginica : ", np.mean(iris_virginica['PetalLengthCm']))
print("\nMedian petal length of each :")
print("Setosa : ", np.median(iris_setosa['PetalLengthCm']))
print("Versicolor : ", np.median(iris_versicolor['PetalLengthCm']))
print("Virginica : ", np.median(iris_virginica['PetalLengthCm']))
# BUGFIX: corrected the printed label typo "Qunatiles [...]for" -> "Quantiles [...] for".
print("\nQuantiles [0%, 25%, 50%, 75%, 100%] for petal length of each :")
print("Setosa : ", np.percentile(iris_setosa['PetalLengthCm'], np.arange(0, 101, 25)))
print("Versicolor : ", np.percentile(iris_versicolor['PetalLengthCm'], np.arange(0, 101, 25)))
print("Virginica : ", np.percentile(iris_virginica['PetalLengthCm'], np.arange(0, 101, 25)))
# +
# Robust spread: median absolute deviation.
from statsmodels import robust
print("\nMedian Absolute Deviation (MAD) for petal length of each :")
print("Setosa : ", robust.mad(iris_setosa['PetalLengthCm']))
print("Versicolor : ", robust.mad(iris_versicolor['PetalLengthCm']))
print("Virginica : ", robust.mad(iris_virginica['PetalLengthCm']))
# +
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
# +
sns.violinplot(x='Species', y='PetalLengthCm', data=iris)
# +
# Bivariate KDE of petal length vs width, one plot per species.
sns.jointplot(x='PetalLengthCm', y='PetalWidthCm', data=iris_setosa, kind='kde')
# +
sns.jointplot(x='PetalLengthCm', y='PetalWidthCm', data=iris_versicolor, kind='kde')
# +
sns.jointplot(x='PetalLengthCm', y='PetalWidthCm', data=iris_virginica, kind='kde')
# +
| eda-on-iris-data-set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sensor Position Sanity Check
#
# Extreme case test: 90 degree.
# <a id='step1'></a>
import bifacial_radiance
import numpy as np
import os  # operating system, for the relative-path testfolder in this example.
import pprint  # pretty-printing the scan dictionaries throughout to show their structure.
# +
testfolder = os.path.abspath(r'..\..\bifacial_radiance\TEMP')
albedo = "litesoil"  # this is one of the options on ground.rad
lat = 37.5
lon = -77.6
# Scene variables
nMods = 1
nRows = 1
hub_height = 2.3  # meters
gcr = 0.35
moduletype = 'SimpleModule'
x = 1
y = 2
sensorsy = 4  # to make it fast
timeindex = 4020
# -
# <a id='step3'></a>
# Vertical, south-facing module: build the scene, ray-trace, scan both faces.
simulationName = 'Vertical-SouthFacing'
demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat=lat, lon=lon)
metdata = demo.readWeatherFile(weatherFile=epwfile)
demo.gendaylit(metdata, timeindex)
mymodule = demo.makeModule(name=moduletype, x=x, y=y)
sceneDict = {'gcr': gcr, 'hub_height': hub_height, 'tilt': 90, 'azimuth': 180, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype, sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
# NOTE: raw strings (r'...') avoid invalid escape sequences such as '\i',
# which emit SyntaxWarning on Python >= 3.12; the path value is unchanged.
bifacial_radiance.load.read1Result(r'results\irr_Vertical-SouthFacing.csv')
pprint.pprint(frontscan)
pprint.pprint(backscan)
# Hack the scan start point so the sensors sit on the module face.
frontscan['ystart'] = -0.5
results = analysis.analysis(octfile, demo.basename+'_hack_ystart', frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Vertical-SouthFacing_hack_ystart.csv')
pprint.pprint(frontscan)
pprint.pprint(backscan)
# Hack the front-scan orientation vector to face the module.
frontscan['orient'] = '-0.000 1.000 -0.000'
results = analysis.analysis(octfile, demo.basename+'_hack_ystart_orient', frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Vertical-SouthFacing_hack_ystart_orient.csv')
# SUCCESS! Hitting the right Side now (front scan only)!
#
# ## Vertical East Facing
simulationName = 'Vertical-EastFacing'
demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat=lat, lon=lon)
metdata = demo.readWeatherFile(weatherFile=epwfile)
demo.gendaylit(metdata, timeindex)
mymodule = demo.makeModule(name=moduletype, x=x, y=y)
sceneDict = {'gcr': gcr, 'hub_height': hub_height, 'tilt': 90, 'azimuth': 90, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype, sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Vertical-EastFacing.csv')
pprint.pprint(frontscan)
pprint.pprint(backscan)
# Same hack, but along x for the east-facing module.
frontscan['xstart'] = 0.5
results = analysis.analysis(octfile, demo.basename+'_hack_xstart', frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Vertical-EastFacing_hack_xstart.csv')
pprint.pprint(frontscan)
pprint.pprint(backscan)
frontscan['orient'] = '-1.000 0.000 -0.000'
results = analysis.analysis(octfile, demo.basename+'_hack_xstart_orient', frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Vertical-EastFacing_hack_xstart_orient.csv')
# # Success! :D
#
# # 75 ?
simulationName = 'Vertical-EastFacing-75'
demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat=lat, lon=lon)
metdata = demo.readWeatherFile(weatherFile=epwfile)
demo.gendaylit(metdata, timeindex)
mymodule = demo.makeModule(name=moduletype, x=x, y=y)
sceneDict = {'gcr': gcr, 'hub_height': hub_height, 'tilt': 75, 'azimuth': 90, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype, sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
# BUGFIX: this section previously re-read the 90-degree result file
# ('irr_Vertical-EastFacing.csv'); read this simulation's own output instead.
bifacial_radiance.load.read1Result(r'results\irr_Vertical-EastFacing-75.csv')
# ## =(
simulationName = 'Flat-135'
demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat=lat, lon=lon)
metdata = demo.readWeatherFile(weatherFile=epwfile)
demo.gendaylit(metdata, timeindex)
mymodule = demo.makeModule(name=moduletype, x=0.001, y=y)  # near-zero x: effectively a thin strip
sceneDict = {'gcr': gcr, 'hub_height': hub_height, 'tilt': 45, 'azimuth': 135, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype, sceneDict)
octfile = demo.makeOct(demo.getfilelist())
analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
results = analysis.analysis(octfile, demo.basename, frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Flat-135.csv')
pprint.pprint(frontscan)
pprint.pprint(backscan)
# (removed a duplicated pprint.pprint(backscan) call here)
# Hack both scan orientations to the module normal for the 45-deg/135-az case.
frontscan['orient'] = '-0.500 0.500 -0.707'
backscan['orient'] = '0.500 -0.500 0.707'
results = analysis.analysis(octfile, demo.basename+'_hack_orient', frontscan, backscan)
bifacial_radiance.load.read1Result(r'results\irr_Flat-135_hack_orient.csv')
| docs/tutorials/(development) Sensor Position Sanity Check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Simulate the model and make Figure SI-1
# ## Imports
# First run all of the code in this section to import the necessary packages.
#
# First we load some magic commands:
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# Next load some standard modules. If you do not have one of these modules (such as [progressbar](https://pypi.python.org/pypi/progressbar2) or [joblib](https://pypi.python.org/pypi/joblib)), then run, for example, `!pip install progressbar` to install it using `pip`.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import seaborn as sns
import time, datetime
import progressbar
import pickle
import os
from itertools import product
from joblib import Parallel, delayed
from scipy import stats
import sys
sys.setrecursionlimit(10000) # to be able to pickle the results of simulations and avoid a RecursionError
# Set style parameters for matplotlib figures:
from matplotlib import rc
rc('font', **{'family': 'sans-serif','sans-serif': ['Helvetica']})
rc('text', usetex=True)
rc('axes', **{'titlesize': 10, 'labelsize': 8})
rc('legend', **{'fontsize': 9})
# Set the path for saving the figures:
figures_path = os.path.join(os.pardir, 'figures')
if not os.path.exists(figures_path):
os.mkdir(figures_path)
# Import the code for simulating the model:
import ABM
import EconomySimulator
# ## Panel (a): long-run fraction functional as a function of the initial condition
# ### Compute the data (takes about 1.5 hours to run)
# The code in the cell below creates a pandas DataFrame called `long_run_results`. It in `long_run_results` the dictionary returned by the function `EconomySimulator.simulate_economy_long_run`. This dictionary contains some measures of the state of the model economy after 1000 production attempts have been simulated.
#
# The function `run_long_run_sim` sets the parameters of the economy, and the for loop iterates over the initial condition `F0` (the initial fraction of functional agents), `r` in `[1, 2000]`, `xi` in `[0, 1]`, and a trial index `trial` in `range(1000)` (we run 1000 trials for each initial condition).
#
#
# **Warning**: _This code takes about 1.5 hours to run on a laptop computer_. To avoid having to re-run this, run the cell under the section heading **Load `long_run_results` from the hard drive** below.
# +
def run_long_run_sim(trial_number, F0, r, xi):
    """Run one long-run simulation of the model economy.

    Fixes the economy's parameters (200 agents, alpha = 0.15, beta = 0.4,
    exogenous failure rate 1e-4, 5 * n_agents production attempts) and
    delegates to EconomySimulator.simulate_economy_long_run, which returns a
    dict summarizing the economy's state after the run.

    Args:
        trial_number: index of this trial (recorded in the returned dict).
        F0: initial fraction of functional agents.
        r: link stickiness parameter.
        xi: preferential-attachment weight.
    """
    n_agents = 200
    sim_params = dict(
        n_agents=n_agents,
        init_fraction_functional=F0,
        alpha=0.15,
        beta=.4,
        r=r,
        L=1,
        xi=xi,
        exog_fail=0.0001,
        n_steps=5 * n_agents,
        trial=trial_number,
        tolerance_std=0.0,
        n_steps_detect_fixed_point=50)
    return EconomySimulator.simulate_economy_long_run(**sim_params)
# Accumulate results across repeated runs of this cell: on the first run
# `long_run_results` does not exist, so start from None (pd.concat silently
# drops None entries).
try:
    long_run_results
except NameError:
    long_run_results = None
start_time = time.time()
# Sweep: 1000 trials for each combination of initial condition F0,
# stickiness r, and preferential-attachment weight xi, run on 4 workers.
long_run_results = pd.concat([long_run_results, pd.DataFrame(
    Parallel(n_jobs=4)(
        delayed(run_long_run_sim)(trial, F0, r, xi)
        for trial in range(1000)
        for F0 in np.arange(.155, .205, .01)
        for r in [1., 2000.]
        for xi in [0, 1]
    )
)])
end_time = time.time()
# Report wall-clock duration of the sweep (~1.5 hours on a laptop).
print(datetime.timedelta(seconds=(end_time - start_time)))
# -
# This confirms that we have 1000 simulations for each quadruple `(r, xi, n_agents, init_F)`.
# Each group size should be exactly 1000 (one row per trial).
long_run_results.groupby(['r', 'xi', 'n_agents', 'init_F']).size()
# #### Save the data to the hard drive as a `CSV` file
long_run_results.to_csv(
    os.path.join(
        'simulated_data',
        'long_run_results_n200_alpha0p15_beta0p4_epsilon0p0001.csv'))
# #### Load `long_run_results` from the hard drive
# Run the code below to load the results from the CSV file in order to avoid having to re-run the simulations above (which takes about 90 minutes):
# index_col=0 restores the (unnamed) index column written by to_csv above.
long_run_results = pd.read_csv(
    os.path.join(
        'simulated_data',
        'long_run_results_n200_alpha0p15_beta0p4_epsilon0p0001.csv'),
    index_col=0)
# ## Panel (b): show two representative time-series
# #### Simulate the original model and the model with sticky links and preferential attachment
# Either
#
# * run the simulations below (which should take around 50 minutes to run), or
# * load the results of those simulations that were pickled (scroll down to the heading **Load the simulations from the `pickle` file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`**).
# ##### Simulate the original model
# Set up the simulation:
# Original model: N = 1000 agents, 70% initially functional, no sticky links
# (r = 1), no preferential attachment (xi = 0).
sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7 = EconomySimulator.AssortativitySimulator(
    ABM.Economy(1000, .7, alpha=.15, beta=.4, r=1, exog_fail=0.0001, xi=0))
# This takes about 22 minutes to run:
sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7.simulate(200000)
# Plot some time-series from the simulation:
sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7.combined_plot()
# ##### Simulate the model with sticky links and preferential attachment
# Set up the simulation:
# Same parameters, but with sticky links (r = 2000) and preferential
# attachment (xi = 1).
sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7 = EconomySimulator.AssortativitySimulator(
    ABM.Economy(1000, .7, alpha=.15, beta=.4, r=2000., exog_fail=0.0001, xi=1))
# This takes about 25 minutes to run:
sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7.simulate(200000)
# Plot some time-series from the simulation:
sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7.combined_plot()
# #### Save and load the two simulations above using `pickle`
# ###### Save (pickle) the simulations to file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`:
# +
with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r1_xi0.pkl'), 'wb') as f:
    pickle.dump(sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7, f)
with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r2000_xi1.pkl'), 'wb') as f:
    pickle.dump(sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7, f)
# -
# ###### Load the simulations from the `pickle` file `sim_N1000_alpha0p15_beta0p4eps0p0001_initF0p7.pkl`:
# Run the code below to avoid having to run the two simulations above:
# NOTE(review): unpickling requires the ABM / EconomySimulator class
# definitions to be importable and unchanged; only load pickles you created.
# +
with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r1_xi0.pkl'), 'rb') as f:
    sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7 = pickle.load(f)
with open(os.path.join('simulated_data', 'sim_N1000_alpha0p15_beta0p4_eps0p0001_initF0p7_r2000_xi1.pkl'), 'rb') as f:
    sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7 = pickle.load(f)
# -
# ## Make Figure SI-1
# The cell below makes Figure SI-1 and saves it to the folder `figures` as a PDF.
# +
# Figure SI-1. Panel (a): mean of F(1000) vs the initial condition F(0) for
# the original model (r, xi) = (1, 0) and the sticky-links +
# preferential-attachment model. Panel (b): two representative time-series.
data = long_run_results
data.init_F = np.round(data.init_F, 3)
# Keep only the two model variants being compared.
data = data[((data.r == 1) & (data.xi == 0)) | ((data.r > 1) & (data.xi > 0))]
grouped_by_r_xi = data.groupby(['r', 'xi'])
fig, ax = plt.subplots(ncols=2, figsize=(3.4 * 2 * .95, 3.4 / 5 * 3))
colors = ['#2ca02c', '#e377c2']
handles = []
labels = []
indx = 0
for r_xi, r_df in grouped_by_r_xi:
    color = colors[indx]
    indx += 1
    labels.append(r_xi)
    linestyle = {0: '-', 1: '--'}.get(r_xi[1])
    # Summary statistics of final F per initial condition. Named aggregation
    # replaces the dict-of-names form of SeriesGroupBy.agg, which was
    # deprecated in pandas 0.20 and removed in pandas 1.0. Names starting
    # with a digit are not valid Python identifiers, so they are passed
    # through a ** dict.
    data_final_F = (
        r_df.groupby('init_F')['final_F']
        .agg(
            mean_final_F='mean',
            std_final_F='std',
            num_trials='size',
            sem_final_F=lambda final_F: np.std(final_F) / len(final_F)**.5,
            **{'75_percentile_final_F': lambda final_F: np.percentile(final_F, 75.),
               '25_percentile_final_F': lambda final_F: np.percentile(final_F, 25.)}))
    handle, = ax[0].plot(data_final_F.index, data_final_F.mean_final_F, label=str(r_xi),
                         color=color, alpha=1, linewidth=1,
                         linestyle='-')
    # Error bars: +/- 2 standard errors of the mean.
    ax[0].errorbar(data_final_F.index, data_final_F.mean_final_F,
                   yerr=2 * data_final_F.sem_final_F,
                   label=str(r_xi),
                   color=color)
    handles.append(handle)
ax[0].set_xlabel(r'$F(0) \equiv$ initial fraction functional')
ax[0].set_ylabel(r'mean of $F(1000)$')
ax[0].set_ylim(0, 1)
xlim = (0.14 - .001, .201)
ax[0].set_xlim(*xlim)
height_trap_label = .01
label_size = 8
# Arrow + text marking the "trap" region of initial conditions.
ax[0].annotate(
    "",
    xy=(xlim[0], height_trap_label),
    xytext=(.15, height_trap_label),
    arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25))
ax[0].text(xlim[0] * .65 + .15 * .35, height_trap_label + .04, 'trap',
           color='k', size=label_size)
height_bimodal_label = height_trap_label
# Two annotate calls with swapped endpoints draw a double-headed arrow
# marking the "bimodal" region.
ax[0].annotate(
    "",
    xy=(.152, height_bimodal_label),
    xytext=(.185, height_bimodal_label),
    arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25))
ax[0].annotate(
    "",
    xytext=(.152, height_bimodal_label),
    xy=(.185, height_bimodal_label),
    arrowprops=dict(linewidth=1, headwidth=3, headlength=2, width=0.25))
ax[0].text(.152 * .65 + .185 * .35, height_bimodal_label + .04, 'bimodal', color='k', size=label_size)
ax[0].annotate(
    'original model'
    #'\n'
    #r'$(r, \xi) = (1, 0)$'
    ,
    size=label_size,
    xy=(.1725, .56),
    xytext=(.17, .30),
    xycoords='data',
    textcoords='data',
    arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2"))
# Typo fix in the rendered annotation: 'prefential' -> 'preferential'.
ax[0].annotate(
    'sticky links'
    #r' ($r = 2000$)'
    ' and'
    '\n'
    'preferential attachment'
    #r' ($\xi = 1$)'
    ,
    size=label_size,
    xy=(.1625, .5),
    xytext=(.145, .74),
    xycoords='data',
    textcoords='data',
    arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2"))
# Panel (b): the two representative time-series simulated above.
sims = [
    sim_N1000_alpha0p15_beta0p4_r1_xi0_eps0p0001_initF0p7,
    sim_N1000_alpha0p15_beta0p4_r2000_xi1_eps0p0001_initF0p7
]
for indx, sim in enumerate(sims):
    ax[1].plot(sim.fraction_functional_history,
               alpha=.8,
               color=colors[indx], linewidth=1)
ax[1].set_ylabel(r'$F(t)$')
ax[1].set_xlabel(r'time $t$ (number of production attempts)')
ax[1].set_xlim(0, sims[0].economy.n_production_attempts)
ax[1].set_ylim(0, 1)
# Tick labels are supplied by the FuncFormatter installed just below;
# passing a labels list positionally to Axes.set_xticks requires
# matplotlib >= 3.5, and the old hard-coded labels were overridden by the
# formatter anyway.
ax[1].set_xticks([0, 10**5, 2 * 10**5])
ax[1].tick_params(axis='both', labelsize=7, colors='.4')
ax[0].tick_params(axis='both', labelsize=7, colors='.4')
def format_label(value, pos):
    """Tick formatter for panel (b)'s x-axis.

    Maps the selected tick values to LaTeX labels; every other tick value
    (including 2.5e4, which is deliberately blank) gets an empty string.
    `pos` is required by matplotlib's FuncFormatter signature but unused.
    """
    tick_labels = {
        0: '0',
        2.5 * 10**4: '',
        5 * 10**4: r'$5\!\!\times\!\!10^4$',
        10**5: r'$10^5$',
        1.5 * 10**5: r'$1.5\!\!\times\!\!10^5$',
        2 * 10**5: r'$2\!\!\times\!\!10^5$',
    }
    return tick_labels.get(value, '')
# Install the custom tick formatter on panel (b)'s x-axis.
ax[1].xaxis.set_major_formatter(mpl.ticker.FuncFormatter(format_label))
# Panel letters (a)/(b) in the top-left corner of each panel.
fig.text(.001, .94, r'\textbf{(a)}', size=label_size)
fig.text(#.49,
         .50,
         .94, r'\textbf{(b)}', size=label_size)
fig.tight_layout(pad=0.15)
fig.subplots_adjust(wspace=.25)
# Save the finished figure as a PDF in the `figures` folder.
fig.savefig(os.path.join(figures_path, 'figure_SI_1.pdf'))
plt.show()
# -
# ### Check statistical significance of the difference in means in Figure SI-1(a)
# In the cell below, we find that the means of $F(1000)$ are statistically significantly different between the two models for $F(0) = 0.155, 0.16, 0.165, ..., 0.2$ according to the two-sided Mann-Whitney $U$ test ($p$-value $< 10^{-5}$):
# For each initial condition F(0), compare final F between the two model
# variants with a two-sided Mann-Whitney U test.
for init_F, df in long_run_results.groupby('init_F'):
    df_grouped_by_r_xi = df.groupby(['r', 'xi'])
    print('F(0) = {:>5}'.format(init_F), end='\n\t')
    # (r, xi) = (1, 0): original model; (2000, 1): sticky links + PA model.
    original_final_F = df_grouped_by_r_xi.get_group((1, 0))['final_F']
    sticky_PA_final_F = df_grouped_by_r_xi.get_group((2000, 1))['final_F']
    print('mean F(1000) for original model: {:>5.3f}'.format(original_final_F.mean()), end='\n\t')
    print('mean F(1000) for sticky/PA model: {:>5.3f}'.format(sticky_PA_final_F.mean()), end='\n\t')
    mann_whitney_test = stats.mannwhitneyu(sticky_PA_final_F, original_final_F, alternative='two-sided')
    print('Mann-Whitney U test:')
    print('\t\tp-value: ', mann_whitney_test.pvalue, end=' ')
    # Flag p-values below 10^-3 with three asterisks.
    if mann_whitney_test.pvalue < 10**(-3):
        print('*' * 3)
    else:
        print('')
    print('\t\tU = ', mann_whitney_test.statistic, end=' ')
    print('\n')
# ### Check the robustness of the difference in variance in the time-series in Figure SI-1(b)
# Below we run simulations with the same parameters and starting condition as in Figure SI-1(b) and record the mean and standard deviation of the time-series.
# #### Run 200 simulations as in Figure SI-1(b)
# Running the cell below takes about 21 hours to complete. Either run this cell or skip it to import the results in the section titled **Import the results of running 200 simulations**.
# +
# All (trial, (r, xi)) combinations: 200 trials for each of the two variants.
parameters = product(range(200), ((1, 0), (2000, 1)))
def simulate_long_run_variance(trial_number, r, xi):
    """Simulate one economy for 200 * n_agents steps and summarize F(t).

    Returns a dict (one DataFrame row) containing the model parameters, the
    initial and final best-response input counts, the final fraction of
    functional agents, and the mean/std/max/min of the F(t) time-series.
    """
    n_agents = 1000
    beta = .4
    n_steps = 200 * n_agents
    L = 1
    F0 = 0.7
    exog_fail = 0.0001
    alpha = 0.15
    econ = ABM.Economy(
        n_agents, F0, alpha=alpha, beta=beta, r=r, exog_fail=exog_fail, xi=xi)
    frac_functional_history = []
    init_best_response = econ.latest_best_response
    result = {
        'init_n_inputs_needed': init_best_response.n_inputs_needed,
        'init_n_inputs_attempted': init_best_response.n_inputs_attempted}
    # Record F(t) after every production attempt.
    for i in range(n_steps):
        econ.update_one_step()
        frac_functional_history.append(econ.fraction_functional_agents())
    final_best_response = econ.latest_best_response
    result.update({
        'final_n_inputs_needed': final_best_response.n_inputs_needed,
        'final_n_inputs_attempted': final_best_response.n_inputs_attempted,
        'final_F': econ.fraction_functional_agents(),
        'n_agents': n_agents, 'init_F': F0, 'alpha': alpha, 'beta': beta, 'xi': xi,
        'r': r, 'L': L, 'n_steps': n_steps,
        'mean_F': np.mean(frac_functional_history),
        'std_F': np.std(frac_functional_history),
        'max_F': np.max(frac_functional_history),
        'min_F': np.min(frac_functional_history)})
    # Buffer = extra inputs attempted beyond those strictly needed.
    buffers = {
        'init_buffer': (result['init_n_inputs_attempted'] -
                        result['init_n_inputs_needed']),
        'final_buffer': (result['final_n_inputs_attempted'] -
                         result['final_n_inputs_needed'])}
    result.update(buffers)
    return result
# Accumulate results if this cell is re-run; on the first run start from
# None, which pd.concat silently drops.
try:
    long_run_variance_simulations
except NameError:
    long_run_variance_simulations = None
if __name__ == '__main__':
    # progressbar tracks progress over the 400 parameter combinations.
    bar = progressbar.ProgressBar()
    long_run_variance_simulations = pd.concat([long_run_variance_simulations, pd.DataFrame(
        Parallel(n_jobs=4)(
            delayed(simulate_long_run_variance)(trial, r, xi)
            for trial, (r, xi) in bar(list(parameters))
        )
    )])
# -
# ##### Save the results to a `CSV` file:
long_run_variance_simulations.to_csv(
    os.path.join(
        'simulated_data',
        'long_run_variance_simulations_n1000_alpha0p15_beta0p4_eps0p0001_initF0p7.csv'))
# ##### Import the results of running 200 simulations
# index_col=0 restores the (unnamed) index column written by to_csv above.
long_run_variance_simulations = pd.read_csv(
    os.path.join(
        'simulated_data',
        'long_run_variance_simulations_n1000_alpha0p15_beta0p4_eps0p0001_initF0p7.csv'),
    index_col=0)
# ### Analyze the results
# First we plot histograms of the standard deviation of the time-series $F(t)$ for the two models. This figure is saved as `compare_std_dev_F.pdf` in the `figures` folder.
# +
# Histogram of the per-run standard deviation of F(t), one color per model
# variant: (1, 0) is the original model, (2000, 1) is sticky links + PA.
colors = {(1, 0): '#2ca02c', (2000, 1): '#e377c2'}
fig, ax = plt.subplots(figsize=(3.4, 3.4 / 5 * 3))
grouped_std_F = long_run_variance_simulations.groupby(['r', 'xi'])['std_F']
for r_xi, df in grouped_std_F:
    # density=False keeps raw counts on the y-axis. The `normed` keyword was
    # deprecated in matplotlib 2.1 and removed in 3.1; `density` is its
    # replacement.
    ax.hist(df, bins=30, density=False, color=colors[r_xi])
ax.set_xlabel('standard deviation of $F(t)$', size=12)
ax.set_ylabel('count', size=12)
ax.annotate(
    'original model\n'
    r'$(r, \xi) = (1, 0)$',
    xy=(.02, 5), xytext=(.05, 5), xycoords='data', textcoords='data',
    arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2"))
ax.annotate(
    'sticky links \& preferential \nattachment\n'
    r'$(r, \xi) = (2000, 1)$',
    xy=(.14, 8), xytext=(.06, 12), xycoords='data', textcoords='data',
    arrowprops=dict(arrowstyle="-|>", linewidth=1, connectionstyle="arc3,rad=.2"))
fig.tight_layout(pad=.15)
# Save as compare_std_dev_F.pdf in the `figures` folder.
fig.savefig(os.path.join(figures_path, 'compare_std_dev_F.pdf'))
plt.show()
# -
# Next we group by `(r, xi)` and then compute the mean and standard deviation of the mean of the time-series.
# Group by model variant and summarize the per-run standard deviation of
# F(t). Named aggregation replaces the dict-of-names form of Series.agg,
# which was deprecated in pandas 0.20 and removed in pandas 1.0.
compare_std_F = long_run_variance_simulations.groupby(['r', 'xi']).std_F.agg(
    mean_std_F='mean', std_std_F='std', count='size')
compare_std_F
# The sticky links + preferential attachment model has a variance that is 8.6 times larger:
compare_std_F.loc[(2000, 1)].mean_std_F / compare_std_F.loc[(1, 0)].mean_std_F
# This 8.6-fold difference amounts to a difference in 14.6 standard deviations:
((compare_std_F.loc[(2000, 1)].mean_std_F - compare_std_F.loc[(1, 0)].mean_std_F) /
compare_std_F.loc[(2000, 1)].std_std_F)
# In a two-sided t-test (using [scipy's `ttest_ind`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.stats.mstats.ttest_ind.html)) that allows for unequal variances in the two populations (because, as found below, the variances are found to be statistically significantly different), we obtain a p-value of `5.3e-251`:
# +
std_F_sticky_PA = long_run_variance_simulations.groupby(['r', 'xi']).get_group((2000, 1)).std_F
std_F_original_model = long_run_variance_simulations.groupby(['r', 'xi']).get_group((1, 0)).std_F
print('two-sided t-test: ', stats.ttest_ind(std_F_sticky_PA, std_F_original_model, equal_var = False))
# -
# We also find that a two-sided [Mann-Whitney U test](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html) has a very small p-value (`1e-67`):
stats.mannwhitneyu(std_F_sticky_PA, std_F_original_model, alternative='two-sided')
# ##### Check normality and different variances
# Below we find that the standard deviations of the time-series $F(t)$ (plotted as a histogram above) are normal with p-values `0.06` and `2.6e-5`.
# +
print('standard deviation of the time-series F(t) in the sticky links + preferential attachment model (r, xi) = (2000, 1)')
print('-' * 114)
print(' variance: ', np.var(std_F_sticky_PA))
print(' normality test: ', stats.normaltest(std_F_sticky_PA), end='\n' * 3)
print('standard deviation of the time-series F(t) in the original model (r, xi) = (1, 0)')
print('-' * 81)
print(' variance: ', np.var(std_F_original_model))
print(' normality test: ', stats.normaltest(std_F_original_model))
# -
# According to the [Bartlett test](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.bartlett.html#scipy.stats.bartlett), their variances are different (p-value `2.6e-74`), so we reject the null hypothesis that they are drawn from populations with the same variance.
#
# In case the sticky/preferential attachment model's standard deviation of $F(t)$ is not normally distributed, we also use the [Levene test](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.levene.html) with the parameter `center` set to the `'mean'` and to `'median'` (to check both).
#
# In all three cases, we get a very small p-value (`1e-74`, `1e-44`, `1e-42`, respectively), so we reject the null hypothesis that the variances are the same, and hence in the two-sided t-test above we set the keyword argument `equal_var` to `False`.
print('Bartlett test (null hypothesis: equal variance; used for normal data):\n\t',
stats.bartlett(std_F_sticky_PA, std_F_original_model), end='\n\n')
print('Levene test with center=mean (null hypothesis: equal variance; used for potentially non-normal data)\n\t',
stats.levene(std_F_sticky_PA, std_F_original_model, center='mean'), end='\n\n')
print('Levene test with center=mean (null hypothesis: equal variance; used for potentially non-normal data)\n\t',
stats.levene(std_F_sticky_PA, std_F_original_model, center='median'))
# ## Dependencies
# (sys is already imported near the top of the notebook; re-importing is a
# harmless no-op here.)
import sys
sys.version
import joblib
# Record the versions of the key packages used in this notebook:
for pkg in [mpl, pd, sns, np, progressbar, joblib]:
    print(pkg.__name__, pkg.__version__)
| scripts/Figure_SI_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Follow [the gcloud sdk installation guide](https://cloud.google.com/sdk/docs/quickstart-linux) to install the gcloud SDK. Clean up older executables when prompted to do so.
#
# Perform the following to install the gcloud, kubectl and docker
# ```
# # Cloud SDK (gcloud and friends)
# export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)"
# # echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
# curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
# sudo apt-get update && sudo apt-get install -y google-cloud-sdk
# sudo apt-get install -y kubectl
#
# # docker
# sudo apt-get install apt-transport-https ca-certificates curl gnupg2 software-properties-common
# curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
# sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
# sudo apt-get update
# sudo apt-get install -y docker-ce
#
# # Not sure whether this is really needed. Change to your service account.
# # Use gcloud auth list to identify your service account
# SERVICE_ACCOUNT=serviceAccount:1094881674505-compute@developer.g<EMAIL>.com
# gsutil iam ch $SERVICE_ACCOUNT:admin gs://going-tfx
#
# # docker credential helper provides access to the image repo
# VERSION=1.5.0
# OS=linux
# ARCH=amd64
# curl -fsSL "https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v${VERSION}/docker-credential-gcr_${OS}_${ARCH}-${VERSION}.tar.gz" | tar xz
# sudo mv docker-credential-gcr /usr/bin/docker-credential-gcr
# sudo chmod +x /usr/bin/docker-credential-gcr
# docker-credential-gcr
# ```
# The last command should indicate that your active account is a service account, created for you on the fly to access the GCP resources.
#
# Now the following command should succeed and create a small 2-node cluster for you
| microblog/Installations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas_datareader as dr
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime
# +
#create start + end date 1st Nov 2019 to 31st oct 20
# -
# Analysis window: 1 Nov 2019 through 31 Oct 2020.
start = datetime(2019, 11, 1)
end = datetime(2020, 10, 31)
# Get the necessary API key from environment variables.
my_api_key = os.environ.get("TIINGO_API_KEY")
# Use the Tiingo API to fetch Take-Two (TTWO) daily price data with my api_key.
stock_df = dr.get_data_tiingo('TTWO', start=start, end=end, api_key= my_api_key)
print(stock_df.head(5))
# Export to CSV ...
stock_df.to_csv("taketwo.csv")
# ... then read it back as a flat (non-multi-indexed) DataFrame.
df = pd.read_csv("taketwo.csv")
# KPI: overall low and high across the whole period.
period_high = df.high.max()
period_low = df.low.min()
print("The period range was from "+ str(period_low)+" to " + str(period_high)+".")
# +
# Candlestick chart of the daily OHLC prices.
fig = go.Figure(data=[go.Candlestick(x=df['date'],
                open=df['open'],
                high=df['high'],
                low=df['low'],
                close=df['close'])])
fig.show()
| Current_v2_incl_kpi_period_high_low.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 13/09/2021
# ## Familiarisation with equipment, theory and experimental procedure
# ___
# ### Plan
#
# * Study experimental setup and relate to circuit diagrams in manual
# * Create diagrams of the above
#
# Experimental Setup
# <img src="../images/Day1/setup.jpg" alt="Experimental Setup" style="height: 400px"/>
# Closeups of apparatus in quartz test tube, circuit diagram of 4-wire diode thermometer
# <img src="../images/Day1/closeup.jpg" alt="Experimental Setup" style="height: 400px"/>
#
| Experiment_1/notebooks/Day1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="MfeU3To_RWmK"
# # Transformando Códigos em Python para Executáveis
#
# ### Objetivo:
#
# Os arquivos do jupyter que temos até aqui no curso são scripts que podemos usar para rodar códigos e fazer diversas tarefas.
#
# Mas, algumas vezes, não seremos nós que iremos rodar os códigos e também não necessariamente o computador que vai executar o código não necessariamente tem python instalado.
#
# Por isso, podemos transformar esses códigos em arquivos .exe (executáveis que funcionam em qualquer computador).
#
# ### Cuidados
#
# Para códigos simples, basta fazermos a conversão de python para executável, mas em muitos códigos, temos que pensar se precisamos fazer alguma adaptação.
#
# Ex: Se o nosso código abre algum arquivo do nosso computador, temos que tornar essa ação de abrir o arquivo algo que funcione em qualquer computador.
#
# Sempre precisamos olhar o código e pensar: ele funcionaria em qualquer computador? Tem alguma coisa aqui nele que impede de funcionar no computador de outro pessoa? Se necessário, fazemos as adaptações. Vamos aprender como.
#
# ### Funcionamento:
#
# - Passo 1 - Seu código deve estar funcionando sem erros no jupyter
#
# - Passo 2 - Transformar o código jupyter em scripts python padrão (extensão .py). Seu código deve estar funcionando nesse formato também: Jupyter > File > Download as > Python
#
# - Passo 3 - Vá no terminal do anaconda, vá no diretório onde seu arquivo se encontra e digite 'python' + o nome da pasta que você baixou: python pasta
#
# Dica: se o arquivo tem espaços coloque-o entre aspas duplas
#
# - Passo 4 - Digite no terminal: pip install pyinstaller
#
# - Passo 5 - Digite no terminal: pyinstaller -w nomedoprograma.py
#
# - Passo 6 - Vá na pasta dist criada, compacte essa pasta e pode distribuí-la.
| exe/Python para exe 01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# A new notebook to test the waters of the Spotify API
# TODO: Gain access to the Spotify API
# TODO: Be able to access my liked music with the Spotify API.
# +
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth
# SECURITY: these credentials are hard-coded and committed to source
# control -- they should be revoked and loaded from environment variables
# (e.g. os.environ['SPOTIPY_CLIENT_ID']) instead of being embedded in code.
SPOTIPY_CLIENT_ID = '305996eeec9c42cb807aebcd48a82b29'
SPOTIPY_SECRET_ID = '3699864be2834ad695827d8092e91812'
SPOTIPY_REDIRECT_URI = 'http://example.com'
# +
# Authorization-code flow: request read access to the user's saved tracks.
# Fix: the scope must be exactly 'user-library-read' -- the previous value
# 'user -libary-read' (stray space and misspelling) is not a valid Spotify
# OAuth scope, so the saved-tracks request would be rejected.
scope = "user-library-read"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope,
                                               client_secret=SPOTIPY_SECRET_ID,
                                               client_id=SPOTIPY_CLIENT_ID,
                                               redirect_uri=SPOTIPY_REDIRECT_URI))
# Print "index artist - title" for each track saved in the user's library.
results = sp.current_user_saved_tracks()
for idx, item in enumerate(results['items']):
    track = item['track']
    print(idx, track['artists'][0]['name'], " - ", track['name'])
# -
| Song-Predict-Update/Notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from src.seq2seq import *
from src.attention import *
from src.utils import *
from src.layers import MaskedCrossEntropyLoss
import torch
import torch.optim as optim
import random
# # Setup
# +
# OPTIONS:
# ENGLISH - en,
# GERMAN - de,
# FRENCH - fr,
# CZECH - cs
lang1 = 'en'
lang2 = 'en'
# train_sentences, test_sentences = load_data(lang1, lang2)
# train_sentences = (train_sentences[0][:500], train_sentences[1][:500])
train_sentences = load_summary()
# +
TEST_SIZE=0.2
BATCH_SIZE=64
VALID_BATCH_SIZE=128
MAX_VOCAB=20000
src_vocab, tgt_vocab, train_loader, valid_loader = make_dataset(train_sentences, train_sentences, BATCH_SIZE, VALID_BATCH_SIZE, MAX_VOCAB)
# -
print(f"Number of training examples: {len(train_loader.dataset)}")
print(f"Number of validation examples: {len(valid_loader.dataset)}")
print(f"Training Batches {len(train_loader)}\tValidation Batches {len(valid_loader)}")
print(f"Unique tokens in source ({lang1}) vocabulary: {len(src_vocab)}")
print(f"Unique tokens in target ({lang2}) vocabulary: {len(tgt_vocab)}")
# # Make the Model
# +
# ENCODER ARGS
ENC_UNITS = 128
ENC_EMBEDDING = 256
SRC_VOCAB_SIZE = len(src_vocab)
ENC_NUM_LAYERS = 1
# ATTENTION DECODER ARGS
DEC_UNITS = ENC_UNITS
DEC_EMBEDDING = ENC_EMBEDDING
TGT_VOCAB_SIZE = len(tgt_vocab)
DEC_NUM_LAYERS = ENC_NUM_LAYERS
'''
Choices = [
ConcatAttention, GeneralAttention,
DotAttention, MeanAttention, LastInSeqAttention
]
'''
ATTN_LAYER = DotAttention
ATTN_HIDDEN_SIZE = 128
# SEQ2SEQ ARGS
TEACHER_FORCING = 0.8
MAX_LENGTH = train_loader.dataset.tensors[1].size(-1) + 1
SOS_TOKEN = tgt_vocab.SOS_token
# +
encoder = Encoder(ENC_UNITS, ENC_EMBEDDING, SRC_VOCAB_SIZE, ENC_NUM_LAYERS)
decoder = AttentionDecoder(DEC_UNITS, DEC_EMBEDDING, TGT_VOCAB_SIZE, DEC_NUM_LAYERS, ATTN_LAYER, ATTN_HIDDEN_SIZE)
seq2seq = Seq2Seq(encoder, decoder, TEACHER_FORCING, MAX_LENGTH, SOS_TOKEN)
print(f'The model has {count_parameters(seq2seq):,} trainable parameters')
# -
print(seq2seq)
criterion = MaskedCrossEntropyLoss(pad_tok=tgt_vocab.PAD_token)
optimizer = optim.Adam(seq2seq.parameters())
# # Train
valid_loss = evaluate(seq2seq, valid_loader, criterion)
valid_loss
# +
# NOTE(review): this overrides TEACHER_FORCING = 0.8 set above, so training
# below uses no teacher forcing at all -- confirm this is intentional.
seq2seq.teacher_forcing = 0.
N_EPOCHS = 100
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    print(f'Epoch: {epoch+1:02}')
    train_loss = train(seq2seq, train_loader, optimizer, criterion, CLIP, src_vocab.PAD_token)
    # valid_loss = evaluate(seq2seq, valid_loader, criterion)
    # NOTE(review): the "validation" loss is computed on the *training*
    # loader (the valid_loader call is commented out), so the checkpointing
    # below tracks training fit, not generalization -- confirm intentional.
    valid_loss = evaluate(seq2seq, train_loader, criterion)
    if valid_loss < best_valid_loss:
        # Keep only the best checkpoint seen so far.
        best_valid_loss = valid_loss
        torch.save(seq2seq.state_dict(), 'models/seq2seq_attention.pt')
    print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
# +
# Pick one validation example and decode the tensors back to strings.
idx = 666
src_sentence = valid_loader.dataset.tensors[0][idx:idx+1]
tgt_sentence = valid_loader.dataset.tensors[1][idx:idx+1]
print(src_sentence.size(), tgt_sentence.size())
src_sentence = src_vocab.to_string(src_sentence, remove_special=True)[0]
tgt_sentence = tgt_vocab.to_string(tgt_sentence, remove_special=True)[0]
# -
# Translate the source sentence and visualize the attention weights.
translation, attention = translate(src_sentence, seq2seq, src_vocab, tgt_vocab, src_vocab.PAD_token)
# Convention: "> " source, "= " reference, "< " model output.
print(f"> {src_sentence}")
print(f"= {tgt_sentence}")
print(f"< {translation}")
plot_attention(attention, src_sentence, translation)
| Seq2Seq-Attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time variant filtering
#
# In contrast to the (linear) time invariant filter, (linear) time variant filter assumes that the filter coefficients are changing across time.
#
# Consider the most general case where there is one filter for each time step $n\in\{0, \cdots, N-1\}$. Given an input sequence $x_{0:N-1}=\{x_0, \cdots, x_{N-1}\}$ and a sequence of filters $\{\{h_{0,1}, h_{1,1}, \cdots, h_{K-1,1}\}, \{h_{0,2}, h_{1,2}, \cdots, h_{K-1,2}\}, \cdots, \{h_{0,N-1}, h_{1,N-1}, \cdots, h_{K-1,N-1}\}\}$, the output $y_n$ at the $n$-th time step can be computed as
#
# $y_n = \sum_{k=0}^{K-1} h_{k,\color{red}{n}}x_{n-k}= h_{0,\color{red}{n}}x_{n} + h_{1,\color{red}{n}}x_{n-1} + \cdots + h_{K-1,\color{red}{n}}x_{n-({K-1})}$
#
# Figure below shows an example of time-variant filtering on input signal $\{x_0, x_1, x_2, x_3\}$ with filters $\{\{h_{0,1}, h_{1,1}, h_{2,1}\}, \{h_{0,2}, h_{1,2}, h_{2,2}\}, \{h_{0,3}, h_{1,3}, h_{2,3}\}, \{h_{0,4}, h_{1,4}, h_{2,4}\}\}$. Notice how the filters of different time steps are applied on the signals.
#
# 
#
# The implemented time variant filtering is similar to the example above. Before explaining the details, let's first explain the behavior of the wrapper:
# 1. Input tensor has shape (batchsize, length, dim)
# 2. Input filter coefficients is in shape (batchsize, length, filter_order)
# 3. Output tensor has shape (batchsize, length, dim)
#
# Note that the filter coefficients tensor has a leading dimension that correspond to batch size.
# Suppose input tensor is in shape (batchsize=B, length, dim=N) and filter tensor is in shape (B, length, order=K), the wrapper conducts filter for each batch independently.
#
# The N dimensions are treated as N 1-D signals, and they are independently processed to produce N output signals.
# ### 1. Module definition
#
# Here is a simple implementation, which corresponds to the intuitive example above.
#
# 
#
# The operations include element-wise multiplication $\odot$, circular shift, and summation.
#
# It conduct filtering of all time steps in parallel. However, it uses a *for loop* to do the summation.
# +
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sys
class TimeVariantFiltering(torch_nn.Module):
    """Causal FIR filtering with coefficients that change at every time step.

    Initialization: TimeVariantFiltering()
    Forward: output = forward(self, signal, filter_coef)

    Shapes:
        filter_coef: (batchsize, signal_length, filter_order K);
            filter_coef[n, m, :] holds the K coefficients applied at time
            step m of the n-th item in the batch.
        signal: (batchsize, signal_length, dim); each of the `dim` channels
            is treated as an independent 1-D signal, and all channels share
            the same time-variant filter.
        output: (batchsize, signal_length, dim)

    At every step m (zero initial conditions, i.e. x_j = 0 for j < 0):

        y_m = sum_{k=0}^{K-1} a_{k,m} * x_{m-k}
    """

    def __init__(self):
        super().__init__()

    def forward(self, signal, f_coef):
        """Filter `signal` with the per-step coefficients in `f_coef`."""
        num_steps = signal.shape[1]
        filter_order = f_coef.shape[-1]
        # Append filter_order - 1 zeros along the time axis so that the
        # circular shift below brings zeros (not wrapped-around samples)
        # to the front of the sequence.
        tail_padded = torch_nn_func.pad(signal, (0, 0, 0, filter_order - 1))
        # Accumulate the tap-th delayed copy of the signal, weighted by the
        # tap-th coefficient of every time step.
        acc = torch.zeros_like(signal)
        for tap in range(filter_order):
            delayed = torch.roll(tail_padded, tap, dims=1)[:, 0:num_steps, :]
            acc = acc + delayed * f_coef[:, :, tap:tap + 1]
        return acc
# -
# ### 2. Simple example
#
# Here is the example in the two figures above.
# +
# input signal
# make it a batch (batchsize=1, length=4, dim=1): x = [1, 2, 3, 4]
data = torch.arange(1.0, 5.0, 1).unsqueeze(0).unsqueeze(2)
# filter coefficients
# make it a batch (batchsize=1, length=4, order=3); row m holds the filter
# applied at time step m
coef = [[1, -0.5, -0.5], [1, -0.9, 0], [1, 0, -0.5], [0.3, 0.3, 0.3]]
coef = torch.tensor(coef).unsqueeze(0)
# signal length
signal_l = data.shape[1]
# order of the time varint filter
order_k = coef.shape[-1]
# do filter
l_tv_filter = TimeVariantFiltering()
output = l_tv_filter(data, coef)
# +
import plot_lib
plot_lib.plot_tensor(data.permute(0, 2, 1), color_on_value=True, title="Input signal (permuted)")
# for plotting we flip the filter coefficients (display-only; the filtering
# itself is unaffected)
for time_idx in range(coef.shape[1]):
    plot_lib.plot_tensor(torch.flip(coef[:, time_idx:time_idx+1, ], dims=[2]),
                         color_on_value=False, colorgrad_x=False, colorgrad_y=False,
                         colormap="Greys", alpha=1.0, title = 'filter %d ' % (time_idx))
plot_lib.plot_tensor(output.permute(0, 2, 1), color_on_value=True, title="Output signal (permuted)")
# -
# ### 3. Plot the operation step by step
# Here we plot the operation in the Module step by step
# +
# Step-by-step replay of TimeVariantFiltering.forward on the example above
# (the plot_lib calls are kept commented out for optional visualization).
# signal length
signal_l = data.shape[1]
# order of the time variant filter
order_k = coef.shape[-1]
signal = data
# step1. padding:
# pad to signal (batchsize, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
#plot_lib.plot_tensor(signal, color_on_value=True, title="Input signal")
#plot_lib.plot_tensor(padded_signal, color_on_value=True, title="Padded")
#plot_lib.plot_tensor(coef, color_on_value=False, colorgrad_x=False, colorgrad_y=False, title="Filter coefs")
# step2. circular shift and weighted sum
output = torch.zeros_like(signal)
for k in range(order_k):
    output += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] * coef[:, :, k:k+1]
    #plot_lib.plot_tensor(torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :], color_on_value=True)
    #plot_lib.plot_tensor(coef[:, :, k:k+1], color_on_value=False, colorgrad_x=False, colorgrad_y=False)
#plot_lib.plot_tensor(output, color_on_value=True, title="output")
# -
# ### 4. Example for signal processing
#
# This is the example of using time-variant filtering to process random signals.
#
# First, we create a tensor to store the time variant filter coefficients. Notice that we use different filter coefficients *lp_v*, *hp_v*, and *hp_u*.
# ```
# tv_filter_coefs[:, signal_length//4*0:signal_length//4*1, 0:len(lp_v)] = lp_v
# tv_filter_coefs[:, signal_length//4*1:signal_length//4*3, 0:len(hp_v)] = hp_v
# tv_filter_coefs[:, signal_length//4*3:, 0:len(hp_u)] = hp_u
# ```
# +
from scipy import signal as sci_signal
import scipy.fft
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = (10, 5)
# Prepare input signal
signal_length = 1000
signal_dim = 2
batch_size = 2
signal_input = torch.randn(batch_size, signal_length, signal_dim, requires_grad = False)
# Prepare a time-variant filter based on the following filter coefficients
# The filter coefficients should be in shape [batch_size, signal_length, filter_order]
# In this example, different batches use the same filter coefs
# NOTE(review): lp_* / hp_* appear to be low-/high-pass FIR prototypes
# (alternating tap signs in hp_*) -- confirm against the original filter design.
lp_v = torch.tensor([0.08538414199291068, 0.04920229475534168, -0.1470178606967731, 0.24737764593887432, 0.7103067853166558, 0.24737764593887432, -0.1470178606967731, 0.04920229475534168, 0.08538414199291068])
lp_u = torch.tensor([0.00936455546502, 0.0416254862901, 0.0878313219556, 0.146086321198, 0.192602581136, 0.211221591449, 0.192602581136, 0.146086321198, 0.0878313219556, 0.0416254862901, 0.00936455546502])
hp_v = torch.tensor([-0.00936455546502148, 0.04162548629009957, -0.08783132195564508, 0.1460863211980122, -0.19260258113649556, 0.21122159144894015, -0.19260258113649556, 0.1460863211980122, -0.08783132195564508, 0.04162548629009957, -0.00936455546502148])
hp_u = torch.tensor([0.0853841419929, -0.0492022947553, -0.147017860697, -0.247377645939, 0.710306785317, -0.247377645939, -0.147017860697, -0.0492022947553, 0.0853841419929])
# initialize buffer for tv-filter coefficients (short filters are zero-padded)
tv_filter_coefs = torch.zeros(batch_size, signal_length, max([len(lp_v), len(lp_u), len(hp_v), len(hp_u)]))
# fill in the values: quarters of the timeline use lp_v, hp_v (middle half), hp_u
tv_filter_coefs[:, signal_length//4*0:signal_length//4*1, 0:len(lp_v)] = lp_v
tv_filter_coefs[:, signal_length//4*1:signal_length//4*3, 0:len(hp_v)] = hp_v
tv_filter_coefs[:, signal_length//4*3:, 0:len(hp_u)] = hp_u
# +
# plot the filter coefficients (only (0, :, :))
plot_batch = 0
fig = plt.figure()
# FIX: Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and
# removed in 3.6; add_subplot(projection='3d') is the supported equivalent.
ax = fig.add_subplot(projection='3d')
import importlib
importlib.reload(plot_lib)
plot_lib.plot_surface(tv_filter_coefs[plot_batch].numpy().T, fig, ax,
                      ylabel='time step', xlabel='filter order', zlabel='Coef value',
                      angleX=40, angleY=70)
ax.set_title("Fig1. Filter coefficients")
ax.set_xlim(tv_filter_coefs.shape[2], 0)
# plot the frequency response of filters for each time step (this may take some time)
# for simplicity, just plot the three sets of coefficients
def get_amp_(data):
    """Convert a complex frequency response to amplitude in dB (eps avoids log(0))."""
    return 20*np.log10(np.abs(data) + np.finfo(np.float32).eps)
nfft=4096
# CONSISTENCY: use the sci_signal alias imported above instead of relying on
# scipy.signal being reachable as an attribute of the bare scipy package.
w, h_lp_v = sci_signal.freqz(lp_v, [1], worN=nfft, whole=True)
w, h_hp_v = sci_signal.freqz(hp_v, [1], worN=nfft, whole=True)
w, h_hp_u = sci_signal.freqz(hp_u, [1], worN=nfft, whole=True)
# One row of dB amplitudes per time step, matching the filter schedule above.
filter_res = np.zeros([tv_filter_coefs.shape[1], nfft//2+1])
filter_res[signal_length//4*0:signal_length//4*1, :] = get_amp_(h_lp_v[0:nfft//2+1])
filter_res[signal_length//4*1:signal_length//4*3, :] = get_amp_(h_hp_v[0:nfft//2+1])
filter_res[signal_length//4*3:, :] = get_amp_(h_hp_u[0:nfft//2+1])
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
plot_lib.plot_surface(filter_res.T, fig, ax,
                      ylabel='time step', xlabel='frequency bins', zlabel='Amplitude (dB)',
                      angleX=70, angleY=70)
ax.set_title("Fig2. Frequency response of filters at each time step")
ax.set_xlim(nfft//2+1, 0)
# -
# As Fig.2 shows, the frequency response of the filter changes from one time step to the next.
#
# We will use this time variant filter to process the random signals.
# Apply the time-variant filter defined above to the random input batch.
tv_layer = TimeVariantFiltering()
signal_output = tv_layer(signal_input, tv_filter_coefs)
# Now, let's plot the spectral of the input and output signals.
# +
# Plot
# let's try to plot the spectrogram
# let's analyze only one signal
# tool_lib is a project-local DSP helper (short-time spectral analysis).
import tool_lib
signal_in_examine = signal_input[0, :, 0]
signal_out_examine = signal_output[0, :, 0]
nfft=4096
spec_in_examine = tool_lib.spec_amplitude(signal_in_examine, frame_length=200, frame_shift=20, fft_bins=nfft)
spec_out_examine = tool_lib.spec_amplitude(signal_out_examine, frame_length=200, frame_shift=20, fft_bins=nfft)
fig = plt.figure()
# FIX: Figure.gca(projection='3d') was removed in matplotlib 3.6;
# add_subplot(projection='3d') is the supported equivalent.
ax = fig.add_subplot(projection='3d')
plot_lib.plot_surface(spec_in_examine, fig, ax,
                      ylabel='frame idx', xlabel='frequency bins', zlabel='Amplitude (dB)',
                      angleX=70, angleY=70)
ax.set_title("Fig3. Short-time spectral amplitude of random signal")
ax.set_xlim(nfft//2+1, 0)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
plot_lib.plot_surface(spec_out_examine, fig, ax,
                      ylabel='frame idx', xlabel='frequency bins', zlabel='Amplitude (dB)',
                      angleX=70, angleY=70)
ax.set_title("Fig4. Short-time spectral amplitude of filtered random signal")
ax.set_xlim(nfft//2+1, 0)
# -
# From the above figure, we can see how the spectral of the random noise is shaped by the time-variant filters.
#
# The spectral shape in Fig4 is decided by the spectral shape of filters in Fig2.
# # 5. Summary
#
# This notebook defines the time variant filtering wrapper:
# 1. Input tensor has shape (batchsize, length, dim)
# 2. Input filter coefficients is in shape (batchsize, length, filter_order)
# 3. Output tensor has shape (batchsize, length, dim)
#
# This time variant filtering wrapper will be used in the next notebook for time variant windowed sinc filters.
| tutorials/c07_time_variant_filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bayesian-modelling-tutorial
# language: python
# name: bayesian-modelling-tutorial
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# ## Introduction
#
# In this notebook, we are going to walk through modelling situations
# that involve some form of "cluster identification",
# where the number of clusters isn't exactly known beforehand.
# ## Chinese Restaurant Process
#
# First customer always chooses the first table.
#
# The $n$th customer afterwards occupies the first _unoccupied_ table
# with probability $\frac{\alpha}{n-1+\alpha}$,
# and occupies an _already occupied_ table
# with probability $\frac{c}{n-1+\alpha}$.
#
# Here:
#
# - $n$ is the index of the customers after the first.
# - $c$ is the number of people already sitting at that table.
# - $\alpha$ is a parameter of the Chinese Restaurant Process.
#
# ## Let's simulate this!
# +
import jax.numpy as np
from jax import jit
from jax.ops import index_update, index
def create_alpha_vector(alpha, table_assignments, n, current_open_table):
    """Probability vector for the n-th customer opening a new table.

    Places mass alpha / (n - 1 + alpha) on `current_open_table` (the first
    empty table) and zero everywhere else; shape matches `table_assignments`.
    """
    v = np.zeros_like(table_assignments)
    # MODERNIZED: jax.ops.index_update was removed from JAX; .at[].set() is
    # the supported functional-update syntax and is behaviorally identical.
    v = v.at[current_open_table].set(alpha)
    return v / (n - 1 + alpha)
# +
# First customer sits at table 0; then ask for the new-table probabilities
# for customer n=2 with alpha=5.
table_assignments = np.zeros(shape=(10,))
# MODERNIZED: .at[].set() replaces the removed jax.ops.index_update.
table_assignments = table_assignments.at[0].set(1)
create_alpha_vector(5, table_assignments, 2, 1)
# +
def create_occupied_vector(alpha, table_assignments, n, current_open_table):
    """Per-table probability of the n-th customer joining an occupied table.

    Each entry is (customers already seated at that table) / (n - 1 + alpha).
    `current_open_table` is unused; it is accepted only so the signature
    mirrors create_alpha_vector.
    """
    denom = n - 1 + alpha
    return table_assignments / denom
# -
# Joining probabilities for customer n=2, alpha=5: counts / (n - 1 + alpha).
create_occupied_vector(5, table_assignments, 2, 1)
# +
# Re-seat the first customer, then locate the first still-empty table.
table_assignments = np.zeros(shape=(10,))
# MODERNIZED: .at[].set() replaces the removed jax.ops.index_update.
table_assignments = table_assignments.at[0].set(1)
np.min(np.where(table_assignments == 0)[0])
# +
from jax.scipy.special import logit
from jax import vmap, lax, jit
from jax.random import categorical, PRNGKey, split
# BUG FIX: this cell used the PRNG key `k` before any cell had defined it
# (NameError when run top-to-bottom); create the key here.  The next cell
# re-creates the same key, so downstream results are unchanged.
k = PRNGKey(42)
p = np.array([0.1, 0.8, 0.1])
# Hand-rolled logit: log(p / (1 - p)).
logit_p = np.log(p / (1 - p))
categorical(k, logit_p)
# +
# Master PRNG key for the sampling cells below; split before each reuse.
k = PRNGKey(42)
def one_draw(k, p, zeros):
    """Draw one categorical sample from probabilities `p`.

    Returns a one-hot vector shaped like `zeros` with a single 1 at the
    sampled index.
    """
    logits = logit(p)
    idx = categorical(k, logits)
    # MODERNIZED: .at[].set() replaces the removed jax.ops.index_update.
    draw = zeros.at[idx].set(1)
    return draw
def f(carry, x):
    """lax.scan step: draw one one-hot sample, then refresh the PRNG key.

    `carry` is (key, probabilities); `x` is a zero vector reused as the
    one-hot template.  Returns the updated carry and the drawn sample.
    """
    key, probs = carry
    sample = one_draw(key, probs, x)
    key, _ = split(key)
    return (key, probs), sample
def multinomial(k, n, p):
    """Draw `n` categorical samples with probabilities `p`; return per-category counts.

    BUG FIX: the original ignored `n` and always scanned a hard-coded
    10000 draws; the requested sample size is now honored.
    """
    zeros = np.zeros(shape=(n, len(p)))
    (k, p), draws = lax.scan(f, (k, p), zeros)
    return np.sum(draws, axis=0)
# Sanity check: per-category counts should be roughly proportional to p.
multinomial(k, n=1000, p=np.array([0.3, 0.3, 0.4]))
# -
draws = multinomial(k, n=1000, p=np.array([0.3, 0.3, 0.4]))
draws
# +
import numpy as onp
from tqdm import tqdm
# Chinese Restaurant Process simulation: seat n_customers one at a time.
alpha = 3
# Generous table buffer; the expected number of occupied tables grows slowly.
table_assignments = onp.zeros(shape=(alpha * 10,))
table_assignments = np.asarray(table_assignments)
# MODERNIZED: .at[].set() replaces the removed jax.ops.index_update.
table_assignments = table_assignments.at[0].set(1)
current_open_table = np.min(np.where(table_assignments == 0)[0])
n_customers = 1000
for n in tqdm(range(2, n_customers+1)):
    # Seating distribution = mass for opening the next empty table plus
    # mass for joining each already-occupied table.
    prob_vect = create_alpha_vector(alpha, table_assignments, n, current_open_table) + create_occupied_vector(alpha, table_assignments, n, current_open_table)
    assignment_vect = one_draw(k, prob_vect, np.zeros_like(prob_vect))
    table_assignments = np.squeeze(table_assignments + assignment_vect)
    current_open_table = np.min(np.where(table_assignments == 0)[0])
    # Advance the PRNG key so the next draw is independent.
    k, _ = split(k)
prob_vect
# -
# ## Stick-breaking process
# +
import jax
import numpy as onp
# Taken from https://stats.stackexchange.com/questions/396315/coding-a-simple-stick-breaking-process-in-python
def stick_breaking(k, num_weights, alpha):
    """Draw `num_weights` stick-breaking (truncated GEM(alpha)) weights.

    NOTE: despite accepting a JAX PRNG key `k`, this implementation samples
    from NumPy's *global* RNG, so `k` does not control reproducibility
    (seed with onp.random.seed instead).  The original also split `k`
    without ever using the result; that dead statement was removed.
    """
    betas = onp.random.beta(1, alpha, size=(num_weights,))
    # Each weight is beta_i times the stick length left after earlier breaks.
    betas[1:] *= onp.cumprod(1 - betas[:-1])
    return betas
# NOTE(review): `max_num_classes` is first defined two cells below, so this
# cell raises NameError when the notebook runs top-to-bottom -- confirm the
# intended cell order.
stick_breaking(k, num_weights=max_num_classes, alpha=3).sum()
# -
# NOTE(review): `betas` is a local variable inside stick_breaking; this line
# raises NameError at top level and appears to be a leftover scratch cell.
betas.sum()
# +
import matplotlib.pyplot as plt
k, _ = split(k)
# Truncation level for the stick-breaking approximation of the DP.
max_num_classes = 30
def stick_breaking_jax(k, num_weights, alpha):
    """Draw `num_weights` stick-breaking weights using JAX's RNG.

    Splits the incoming key, samples Beta(1, alpha) break proportions, and
    scales each break by the stick length remaining after the earlier ones.
    """
    k, _ = split(k)
    betas = jax.random.beta(k, a=1, b=alpha, shape=(num_weights,))
    products = np.cumprod(1 - betas[:-1])
    # MODERNIZED: .at[].set() replaces the removed jax.ops.index_update.
    betas = betas.at[1:].set(products * betas[1:])
    return betas
# Sampled mixture weights; with small alpha most mass sits in the first few sticks.
weights = stick_breaking_jax(k, num_weights=max_num_classes, alpha=2)
plt.plot(weights)
# -
# The $\alpha$ parameter is proportional to the number of components that we end up using.
# ## Generate mixture gaussian from weights
# +
from jax.random import categorical
from jax.scipy.special import logit
k, _ = split(k)
n_observations = 300
# Component assignment for each observation, drawn from the mixture weights.
indices = categorical(k, logit(weights), shape=(n_observations,))
indices
# -
# $X \sim N(\mu, \sigma)$ is equivalent to:
#
# $$ \hat{X} \sim N(0, 1) $$
# $$ X = \sigma\hat{X} + \mu$$
# +
from jax.random import normal
# Evenly spaced component means; every component shares sigma = 2.
mus = np.linspace(0, 350, num=max_num_classes)
sigmas = np.ones(shape=(max_num_classes)) * 2
# Reparameterized sampling: x = mu + sigma * standard_normal.
mus[indices] + sigmas[indices] * normal(k, shape=(n_observations,))
# +
def dp_mixture_gaussian(k, alpha, max_num_classes, num_observations, mus, sigmas):
    """Sample `num_observations` points from a truncated DP mixture of 1-D Gaussians.

    BUG FIX: the original ignored `num_observations` and silently used the
    module-level `n_observations`; the parameter is now honored.
    """
    weights = stick_breaking_jax(k, num_weights=max_num_classes, alpha=alpha)
    indices = categorical(k, logit(weights), shape=(num_observations,))
    # Reparameterized draw per observation from its assigned component.
    return mus[indices] + sigmas[indices] * normal(k, shape=(num_observations,))
mus = np.linspace(0, 350, num=max_num_classes)
sigmas = np.ones(shape=(max_num_classes))
# NOTE(review): max_num_classes is 30 here, but the call requests 45
# components, so `indices` can exceed the bounds of mus/sigmas; JAX clamps
# out-of-range indices silently -- confirm this mismatch is intended.
draws = dp_mixture_gaussian(k, alpha=0.7, max_num_classes=45, num_observations=100, mus=mus, sigmas=sigmas)
# -
plt.hist(draws)
# +
def ecdf_scatter(data):
    """Scatter-plot the empirical CDF of `data` and show the figure."""
    xs = np.sort(data)
    ys = np.arange(1, len(data) + 1) / len(data)
    plt.scatter(xs, ys)
    plt.show()
# Visualize the ECDF of the DP-mixture draws.
ecdf_scatter(draws)
# -
# ## Generate Multiple MvNormals
# +
from jax import random as npr
k, _ = split(k)
# 30 samples from one correlated 2-D Gaussian (sanity check of the API).
draws = npr.multivariate_normal(k, mean=np.array([1, 3]), cov=np.array([[1, 0.8], [0.8, 1]]), shape=(30,))
draws.shape
# -
from sklearn.datasets import make_spd_matrix
# One random SPD covariance per component; means spread along a line in 2-D.
max_num_classes = 45
num_states = 2
means = np.linspace(0, 1000, max_num_classes * num_states).reshape(max_num_classes, num_states)
cov = np.stack([make_spd_matrix(num_states) for i in range(max_num_classes)])
alpha = 1
n_observations = 300
k, _ = split(k)
# Component weights and per-observation assignments for the MV mixture.
weights = stick_breaking_jax(k, num_weights=max_num_classes, alpha=alpha)
indices = categorical(k, logit(weights), shape=(n_observations,))
indices
# +
from functools import partial
def generate_mvnorm_func(means, covs):
    """Return mvnorm(key, idx) sampling one point from the idx-th Gaussian component.

    BUG FIX: the inner closure previously read the *global* `cov` instead of
    the `covs` argument, so the factory silently ignored its second parameter.
    """
    def mvnorm(key, idx):
        k, _ = split(key)
        return npr.multivariate_normal(k, mean=means[idx], cov=covs[idx])
    return mvnorm
k = PRNGKey(42)
# Build one PRNG key per observation so vmap can draw them independently.
ks = []
for i in range(n_observations):
    k, _ = split(k)
    ks.append(k)
ks = np.vstack(ks)
print(ks.shape)
mvnorm = generate_mvnorm_func(means, cov)
# Vectorize the sampler over (key, component index) pairs.
draws = vmap(mvnorm)(ks, indices)
# -
# Generate some data now.
# +
from jax.random import multivariate_normal
# Three well-separated 2-D components; 50 observations each.
mus = np.array(
    [
        [-10, 3],
        [5, 5],
        [10, 1],
    ]
)
# plt.scatter(mus[:, 0], mus[:, 1])
# NOTE(review): make_spd_matrix's `n_dim` keyword was renamed `n_features`
# in newer scikit-learn releases -- confirm the pinned sklearn version.
covs = np.stack([make_spd_matrix(n_dim=2) for i in range(3)])
k, _ = split(k)
ks = []
for i in range(150):
    k, _ = split(k)
    ks.append(k)
ks = np.vstack(ks)
# Known ground-truth component labels: 50 draws per component.
indices = np.array([0] * 50 + [1] * 50 + [2] * 50)
mvnorm = generate_mvnorm_func(mus, covs)
draws = vmap(mvnorm)(ks, indices)
draws.shape
# -
plt.scatter(draws[:, 0], draws[:, 1])
import pymc3 as pm
with pm.Model() as model:
    # Mus should be Gaussian priors of shape (max_num_components, 2)
    # NOTE(review): `max_num_components` is never defined in this notebook
    # (elsewhere the truncation level is called `max_num_classes`) -- confirm.
    mus = pm.Normal("mus", mu=0, sigma=5, shape=(max_num_components, 2))
    # NOTE(review): pm.LKJCholeskyCov requires n, eta and sd_dist; this call
    # is incomplete and will raise -- the cell appears to be work in progress.
    covs = pm.LKJCholeskyCov("covs", )
    # MvNormal component distributions that we can index into.
    comp_dists = pm.MvNormal.dist(mu=mus, cov=covs, shape=(max_num_components, 2))
# +
import numpy as onp
# Ground-truth 3-D covariance; draw a small synthetic dataset to fit.
mu = onp.zeros(3)
true_cov = onp.array([[1.0, 0.5, 0.1],
                      [0.5, 2.0, 0.2],
                      [0.1, 0.2, 1.0]])
data = onp.random.multivariate_normal(mu, true_cov, 10)
with pm.Model() as model:
    # Prior on per-dimension scales feeding the LKJ Cholesky prior.
    sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
    chol_packed = pm.LKJCholeskyCov('chol_packed',
                                    n=3, eta=2, sd_dist=sd_dist)
    chol = pm.expand_packed_triangular(3, chol_packed)
    # BUG FIX: pm.MvNormal.dist(...) does not accept `observed`; an observed
    # variable must be registered with the model via the named constructor.
    vals = pm.MvNormal('vals', mu=mu, chol=chol, observed=data)
# -
# Draw posterior samples (NUTS by default) and inspect the trace.
with model:
    trace = pm.sample(2000)
# +
import arviz as az
az.plot_trace(trace)
# -
# BUG FIX: a stray, incomplete `with model` statement (no colon, no body)
# stood here and made this cell a SyntaxError; it has been removed.
def stick_breaking(beta):
    """Theano/PyMC3 stick-breaking transform: mixture weights from Beta draws.

    NOTE(review): relies on a Theano alias `tt` (theano.tensor) that is not
    imported anywhere in this notebook -- confirm the intended import.
    """
    portion_remaining = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]])
    return beta * portion_remaining
| docs/cookbook/dirichlet-process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
#import modules
import pandas as pd
import numpy as np
import Bio
from Bio import Seq
from Bio import SeqIO
import torch
import matplotlib.pyplot as plt
import sys
from torch.utils.data import TensorDataset, DataLoader
#define the fasta_to_classified_df function; which inputs fasta seqs and classifies them in a df
def fasta_to_classified_df(fasta_path, protein_class='', sample=False):
    """Read a FASTA file into a DataFrame of (protein, sequence, class) rows.

    fasta_path: path to the FASTA file.
    protein_class: label written into the 'class' column for every row.
    sample: if True, keep a random 20% subsample (used e.g. for the
        over-represented thermophile set).

    Note: duplicate record ids collapse to one row because records are keyed
    by id in a dict (last one wins), matching the original behavior.
    """
    seq_dict = {}
    # FIX: the original opened the file and then ignored the handle, letting
    # SeqIO.parse() re-open the path; parse the already-open handle instead.
    # The unused parallel id/sequence lists were also removed.
    with open(fasta_path) as fasta_file:
        for seq_record in SeqIO.parse(fasta_file, 'fasta'):
            seq_dict[str(seq_record.id)] = str(seq_record.seq)
    df_seqs = pd.DataFrame(list(seq_dict.items()), columns=['protein', 'sequence'])
    df_seqs['class'] = protein_class
    if sample:
        df_seqs = df_seqs.sample(frac=0.20)
    print(len(df_seqs.index))
    return df_seqs
# define the combine_dfs function; which concatenates the three dataframes
def combine_dfs(list_of_dfs):
    """Stack the given DataFrames vertically and renumber the rows from zero."""
    stacked = pd.concat(list_of_dfs)
    return stacked.reset_index(drop=True)
#define the filter_seqs function
def filter_seqs(df_seqs):
    """Keep rows whose sequence starts with Met ('M') and is longer than 75 aa.

    BUG FIX: the original filtered the module-level `df_combine` instead of
    the `df_seqs` argument, so the parameter was silently ignored.  The
    unused `bad_list` accumulator was also removed.
    """
    good_list = []
    for seq in df_seqs['sequence'].tolist():
        # ORF-like sequence: initiator methionine and length above 75 residues.
        if seq.startswith('M') and len(seq) > 75:
            good_list.append(seq)
    boolean_series = df_seqs.sequence.isin(good_list)
    df_filter = df_seqs[boolean_series]
    return df_filter
# define the seq1hot function
def seq1hot(seq_list):
    """One-hot encode protein sequences into an (N, 500, 25) float tensor.

    Sequences longer than 500 residues are cropped; shorter ones are
    zero-padded at the end.  Column order follows `amino_acids`.

    PERFORMANCE FIX: the original concatenated onto a growing tensor inside
    the loop (quadratic in N); tensors are now collected in a list and
    concatenated once.  Unknown residues now raise KeyError instead of the
    original's confusing downstream failure via aa2num.get returning None.
    """
    amino_acids = "ARNDCQEGHILKMFPSTWYVUX_?-"  # one-hot column order
    aa2num = {x: i for i, x in enumerate(amino_acids)}  # residue -> column index
    eye = np.eye(len(amino_acids))
    per_seq_tensors = []
    for i, seq in enumerate(seq_list):
        seq = seq[:500]  # crop sequences longer than 500 aas
        protein1hot = eye[np.array([aa2num[res] for res in seq])]
        tensor = torch.tensor(protein1hot)
        # pad short sequences with all-zero rows up to length 500
        tensor = torch.nn.functional.pad(tensor, (0, 0, 0, 500 - len(seq)))
        per_seq_tensors.append(tensor[None])
        if i % 1000 == 0:  # progress report; encoding is expensive
            print(f'Looped through {int(i)} sequences...')
    X_data = torch.cat(per_seq_tensors, axis=0) if per_seq_tensors else torch.tensor([])
    print(X_data.shape)
    print(type(X_data))
    return X_data
#define the class1hot function
def class1hot(class_list):
    """One-hot encode thermal-stability class labels into an (N, 3) tensor.

    Column order: Thermophillic, Mesophillic, Psychrophillic.  Unknown
    labels raise KeyError.
    """
    classes = ['Thermophillic', 'Mesophillic', 'Psychrophillic']
    class2num = {label: i for i, label in enumerate(classes)}
    y_data = torch.tensor([])
    print('Just made new tensor y_data')
    label_indices = [class2num[s] for s in class_list]
    y_data = torch.nn.functional.one_hot(torch.tensor(label_indices), 3)
    print(type(y_data))
    print(y_data.shape)
    return y_data
#define the save_tensor function
def save_tensor(tensor, file_path_name):
    """Serialize `tensor` to disk at `file_path_name` via torch.save."""
    torch.save(tensor, file_path_name)
| data_loader/jupyter_files/functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
# %matplotlib inline
# Matplotlib default-cycle colors, named for readability in the plots below.
morange = u'#ff7f0e'
mblue = u'#1f77b4'
mgreen = u'#2ca02c'
mred = u'#d62728'
mpurple = u'#9467bd'
import h5py
import os
from astropy.table import Table
# +
# Local catalog locations (external volume; the cells below fail without it).
fof_dirname = "/Volumes/simulation_data_2018/alphaq_halo_catalogs_hdf5"
v4p9_dirname = "/Volumes/simulation_data_2018/protoDC2_v4p9"
mock_prefix = "logsm_gt_sm9p0_recolored_umachine_color_mock_"
# Snapshot numbers; per the variable names below they map to z = 0, 0.25, 0.5, 0.75, 1.
available_snapnums = (499, 392, 331, 272, 247)
fof_basename_z0p0 = "m000-{0}.fofproperties.hdf5".format(499)
v4p9_basename_z0p0 = mock_prefix + "v4.9_m000-{0}.hdf5".format(499)
fof_basename_z0p25 = "m000-{0}.fofproperties.hdf5".format(392)
v4p9_basename_z0p25 = mock_prefix + "v4.9_m000-{0}.hdf5".format(392)
fof_basename_z0p5 = "m000-{0}.fofproperties.hdf5".format(331)
v4p9_basename_z0p5 = mock_prefix + "v4.9_m000-{0}.hdf5".format(331)
fof_basename_z0p75 = "m000-{0}.fofproperties.hdf5".format(272)
v4p9_basename_z0p75 = mock_prefix + "v4.9_m000-{0}.hdf5".format(272)
fof_basename_z1p0 = "m000-{0}.fofproperties.hdf5".format(247)
v4p9_basename_z1p0 = mock_prefix + "v4.9_m000-{0}.hdf5".format(247)
# Load the galaxy mock and the matching FoF halo catalog at each redshift.
v4p9_mock_z0p0 = Table.read(os.path.join(v4p9_dirname, v4p9_basename_z0p0), path='data')
halos_z0p0 = Table.read(os.path.join(fof_dirname, fof_basename_z0p0), path='data')
v4p9_mock_z0p25 = Table.read(os.path.join(v4p9_dirname, v4p9_basename_z0p25), path='data')
halos_z0p25 = Table.read(os.path.join(fof_dirname, fof_basename_z0p25), path='data')
v4p9_mock_z0p5 = Table.read(os.path.join(v4p9_dirname, v4p9_basename_z0p5), path='data')
halos_z0p5 = Table.read(os.path.join(fof_dirname, fof_basename_z0p5), path='data')
v4p9_mock_z0p75 = Table.read(os.path.join(v4p9_dirname, v4p9_basename_z0p75), path='data')
halos_z0p75 = Table.read(os.path.join(fof_dirname, fof_basename_z0p75), path='data')
v4p9_mock_z1p0 = Table.read(os.path.join(v4p9_dirname, v4p9_basename_z1p0), path='data')
halos_z1p0 = Table.read(os.path.join(fof_dirname, fof_basename_z1p0), path='data')
print(v4p9_mock_z0p0.keys())
print("\n")
# +
from halotools.utils import compute_richness
# Red-sequence membership flags per mock galaxy, at each redshift.
redmask_z0p0 = v4p9_mock_z0p0['is_on_red_sequence_gr'] == True
redmask_z0p25 = v4p9_mock_z0p25['is_on_red_sequence_gr'] == True
redmask_z0p5 = v4p9_mock_z0p5['is_on_red_sequence_gr'] == True
redmask_z0p75 = v4p9_mock_z0p75['is_on_red_sequence_gr'] == True
redmask_z1p0 = v4p9_mock_z1p0['is_on_red_sequence_gr'] == True
# Luminosity cut: keep galaxies brighter than Mr = -19.
magr_cut = -19
magr_mask_z0p0 = v4p9_mock_z0p0['restframe_extincted_sdss_abs_magr'] < magr_cut
magr_mask_z0p25 = v4p9_mock_z0p25['restframe_extincted_sdss_abs_magr'] < magr_cut
magr_mask_z0p5 = v4p9_mock_z0p5['restframe_extincted_sdss_abs_magr'] < magr_cut
magr_mask_z0p75 = v4p9_mock_z0p75['restframe_extincted_sdss_abs_magr'] < magr_cut
magr_mask_z1p0 = v4p9_mock_z1p0['restframe_extincted_sdss_abs_magr'] < magr_cut
# Per-halo count of red galaxies passing the luminosity cut ("red richness").
halos_z0p0['red_richness'] = compute_richness(
    halos_z0p0['fof_halo_tag'], v4p9_mock_z0p0['target_halo_id'][redmask_z0p0 & magr_mask_z0p0])
halos_z0p25['red_richness'] = compute_richness(
    halos_z0p25['fof_halo_tag'], v4p9_mock_z0p25['target_halo_id'][redmask_z0p25 & magr_mask_z0p25])
halos_z0p5['red_richness'] = compute_richness(
    halos_z0p5['fof_halo_tag'], v4p9_mock_z0p5['target_halo_id'][redmask_z0p5 & magr_mask_z0p5])
halos_z0p75['red_richness'] = compute_richness(
    halos_z0p75['fof_halo_tag'], v4p9_mock_z0p75['target_halo_id'][redmask_z0p75 & magr_mask_z0p75])
halos_z1p0['red_richness'] = compute_richness(
    halos_z1p0['fof_halo_tag'], v4p9_mock_z1p0['target_halo_id'][redmask_z1p0 & magr_mask_z1p0])
# -
# Per-halo count of *all* galaxies passing the luminosity cut.
halos_z0p0['richness'] = compute_richness(
    halos_z0p0['fof_halo_tag'], v4p9_mock_z0p0['target_halo_id'][ magr_mask_z0p0])
halos_z0p25['richness'] = compute_richness(
    halos_z0p25['fof_halo_tag'], v4p9_mock_z0p25['target_halo_id'][magr_mask_z0p25])
halos_z0p5['richness'] = compute_richness(
    halos_z0p5['fof_halo_tag'], v4p9_mock_z0p5['target_halo_id'][magr_mask_z0p5])
halos_z0p75['richness'] = compute_richness(
    halos_z0p75['fof_halo_tag'], v4p9_mock_z0p75['target_halo_id'][magr_mask_z0p75])
halos_z1p0['richness'] = compute_richness(
    halos_z1p0['fof_halo_tag'], v4p9_mock_z1p0['target_halo_id'][magr_mask_z1p0])
# +
from scipy.stats import binned_statistic
# NOTE(review): `np` and `plt` are used below but no numpy/matplotlib import
# is visible in this notebook -- presumably imported in an earlier hidden cell.
logmass_bins = np.linspace(11.25, 14.2, 20)
logmass_mids = 0.5*(logmass_bins[:-1] + logmass_bins[1:])
mass_mids = 10**logmass_mids
mass_bins = 10**logmass_bins
# Mean red richness in each halo-mass bin, per redshift.
red_richness_z0p0, __, __ = binned_statistic(
    halos_z0p0['fof_halo_mass'], halos_z0p0['red_richness'], bins=mass_bins, statistic='mean')
red_richness_z0p25, __, __ = binned_statistic(
    halos_z0p25['fof_halo_mass'], halos_z0p25['red_richness'], bins=mass_bins, statistic='mean')
red_richness_z0p5, __, __ = binned_statistic(
    halos_z0p5['fof_halo_mass'], halos_z0p5['red_richness'], bins=mass_bins, statistic='mean')
red_richness_z0p75, __, __ = binned_statistic(
    halos_z0p75['fof_halo_mass'], halos_z0p75['red_richness'], bins=mass_bins, statistic='mean')
red_richness_z1p0, __, __ = binned_statistic(
    halos_z1p0['fof_halo_mass'], halos_z1p0['red_richness'], bins=mass_bins, statistic='mean')
# Mean total richness in each halo-mass bin, per redshift.
richness_z0p0, __, __ = binned_statistic(
    halos_z0p0['fof_halo_mass'], halos_z0p0['richness'], bins=mass_bins, statistic='mean')
richness_z0p25, __, __ = binned_statistic(
    halos_z0p25['fof_halo_mass'], halos_z0p25['richness'], bins=mass_bins, statistic='mean')
richness_z0p5, __, __ = binned_statistic(
    halos_z0p5['fof_halo_mass'], halos_z0p5['richness'], bins=mass_bins, statistic='mean')
richness_z0p75, __, __ = binned_statistic(
    halos_z0p75['fof_halo_mass'], halos_z0p75['richness'], bins=mass_bins, statistic='mean')
richness_z1p0, __, __ = binned_statistic(
    halos_z1p0['fof_halo_mass'], halos_z1p0['richness'], bins=mass_bins, statistic='mean')
# Mass--richness relation of red galaxies at each redshift.
fig, ax = plt.subplots(1, 1)
__=ax.loglog()
__=ax.plot(mass_mids, red_richness_z0p0, color=mpurple, label=r'${\rm z=0}$')
__=ax.plot(mass_mids, red_richness_z0p25, color=mblue, label=r'${\rm z=0.25}$')
__=ax.plot(mass_mids, red_richness_z0p5, color=mgreen, label=r'${\rm z=0.5}$')
__=ax.plot(mass_mids, red_richness_z0p75, color=morange, label=r'${\rm z=0.75}$')
__=ax.plot(mass_mids, red_richness_z1p0, color=mred, label=r'${\rm z=1}$')
legend = ax.legend()
xlabel = ax.set_xlabel(r'${\rm M_{halo}}$')
ylabel = ax.set_ylabel(r'${\rm \langle N_{red}\vert M_{halo}\rangle}$')
title = ax.set_title(r'${\rm M_{r} < -19}$')
# -
# +
# Two-panel figure: red-galaxy mass--richness relation (left) and its ratio
# to the z=0 relation (right).
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True)
__=ax1.loglog()
__=ax1.plot(mass_mids, red_richness_z0p0, color=mpurple, label=r'${\rm z=0}$')
__=ax1.plot(mass_mids, red_richness_z0p25, color=mblue, label=r'${\rm z=0.25}$')
__=ax1.plot(mass_mids, red_richness_z0p5, color=mgreen, label=r'${\rm z=0.5}$')
__=ax1.plot(mass_mids, red_richness_z0p75, color=morange, label=r'${\rm z=0.75}$')
__=ax1.plot(mass_mids, red_richness_z1p0, color=mred, label=r'${\rm z=1}$')
ax2.yaxis.set_label_position("right")  # y-labels on the right
ax2.yaxis.tick_right()  # y-ticks on the right
ylim2 = ax2.set_ylim(0.1, 2)
xlim = ax1.set_xlim(5e11, 1e14)
xscale2 = ax2.set_xscale('log')
__=ax2.plot(mass_mids, red_richness_z0p25/red_richness_z0p0, color=mblue, label=r'${\rm z=0.25}$')
__=ax2.plot(mass_mids, red_richness_z0p5/red_richness_z0p0, color=mgreen, label=r'${\rm z=0.5}$')
__=ax2.plot(mass_mids, red_richness_z0p75/red_richness_z0p0, color=morange, label=r'${\rm z=0.75}$')
__=ax2.plot(mass_mids, red_richness_z1p0/red_richness_z0p0, color=mred, label=r'${\rm z=1}$')
# Unity reference line for the ratio panel.
__=ax2.plot(np.logspace(10, 20, 100), np.ones(100), ':', color='k')
legend1 = ax1.legend()
xlabel1 = ax1.set_xlabel(r'${\rm M_{halo}}$')
xlabel2 = ax2.set_xlabel(r'${\rm M_{halo}}$')
ylabel1 = ax1.set_ylabel(r'${\rm \langle N_{red}\vert M_{halo}\rangle}$')
ylabel2 = ax2.set_ylabel(
    r'${\rm \langle N_{red}\vert M_{halo}\rangle}/{\rm \langle N_{red}\vert M_{halo}\rangle}_{\rm z=0}$',
    rotation=-90, labelpad=20)
title1 = ax1.set_title(r'${\rm M_{r} < -19}$')
title1 = ax2.set_title(r'${\rm M_{r} < -19}$')
leg2 = ax2.legend()
figname = 'mass_richness_vs_z0.png'
fig.savefig(figname, bbox_extra_artists=[xlabel1, ylabel1], bbox_inches='tight')
# +
# Red fraction = red richness / total richness, per mass bin and redshift.
fig, ax = plt.subplots(1, 1)
fred_z0p0 = red_richness_z0p0/richness_z0p0
fred_z0p25 = red_richness_z0p25/richness_z0p25
fred_z0p5 = red_richness_z0p5/richness_z0p5
fred_z0p75 = red_richness_z0p75/richness_z0p75
fred_z1p0 = red_richness_z1p0/richness_z1p0
__=ax.plot(mass_mids, fred_z0p0, color=mpurple, label=r'${\rm z=0}$')
__=ax.plot(mass_mids, fred_z0p25, color=mblue, label=r'${\rm z=0.25}$')
__=ax.plot(mass_mids, fred_z0p5, color=mgreen, label=r'${\rm z=0.5}$')
__=ax.plot(mass_mids, fred_z0p75, color=morange, label=r'${\rm z=0.75}$')
__=ax.plot(mass_mids, fred_z1p0, color=mred, label=r'${\rm z=1}$')
ylabel = ax.set_ylabel(r'${\rm red\ fraction}$')
xlabel1 = ax.set_xlabel(r'${\rm M_{halo}}$')
xscale = ax.set_xscale('log')
xlim = ax.set_xlim(5e11, 2e14)
ylim = ax.set_ylim(0, 1.)
legend = ax.legend()
figname = 'cluster_red_fraction_redshift_evolution.png'
fig.savefig(figname, bbox_extra_artists=[xlabel1, ylabel], bbox_inches='tight')
# +
# Same two-panel figure as above, but for *all* galaxies above the cut
# (total richness), not just red-sequence members.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True)
__=ax1.loglog()
__=ax1.plot(mass_mids, richness_z0p0, color=mpurple, label=r'${\rm z=0}$')
__=ax1.plot(mass_mids, richness_z0p25, color=mblue, label=r'${\rm z=0.25}$')
__=ax1.plot(mass_mids, richness_z0p5, color=mgreen, label=r'${\rm z=0.5}$')
__=ax1.plot(mass_mids, richness_z0p75, color=morange, label=r'${\rm z=0.75}$')
__=ax1.plot(mass_mids, richness_z1p0, color=mred, label=r'${\rm z=1}$')
ax2.yaxis.set_label_position("right")  # y-labels on the right
ax2.yaxis.tick_right()  # y-ticks on the right
ylim2 = ax2.set_ylim(0.1, 2)
xlim = ax1.set_xlim(5e11, 1e14)
xscale2 = ax2.set_xscale('log')
__=ax2.plot(mass_mids, richness_z0p25/richness_z0p0, color=mblue, label=r'${\rm z=0.25}$')
__=ax2.plot(mass_mids, richness_z0p5/richness_z0p0, color=mgreen, label=r'${\rm z=0.5}$')
__=ax2.plot(mass_mids, richness_z0p75/richness_z0p0, color=morange, label=r'${\rm z=0.75}$')
__=ax2.plot(mass_mids, richness_z1p0/richness_z0p0, color=mred, label=r'${\rm z=1}$')
# Unity reference line for the ratio panel.
__=ax2.plot(np.logspace(10, 20, 100), np.ones(100), ':', color='k')
legend1 = ax1.legend()
xlabel1 = ax1.set_xlabel(r'${\rm M_{halo}}$')
xlabel2 = ax2.set_xlabel(r'${\rm M_{halo}}$')
ylabel1 = ax1.set_ylabel(r'${\rm \langle N_{gal}\vert M_{halo}\rangle}$')
ylabel2 = ax2.set_ylabel(
    r'${\rm \langle N_{gal}\vert M_{halo}\rangle}/{\rm \langle N_{gal}\vert M_{halo}\rangle}_{\rm z=0}$',
    rotation=-90, labelpad=20)
title1 = ax1.set_title(r'${\rm M_{r} < -19}$')
title1 = ax2.set_title(r'${\rm M_{r} < -19}$')
leg2 = ax2.legend()
figname = 'mass_richness_vs_z0_allgal.png'
fig.savefig(figname, bbox_extra_artists=[xlabel1, ylabel1], bbox_inches='tight')
# -
| notebooks/study_mass_richness_v4p12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# # Qiskit Tutorials
#
# ***
#
#
# Welcome Qiskitters.
#
# The easiest way to get started is to use [the Binder image](https://mybinder.org/v2/gh/qiskit/qiskit-tutorials/master?filepath=index.ipynb), which lets you use the notebooks via the web. This means that you don't need to download or install anything, but it also means that you should not insert any private information into the notebooks (such as your API key). We recommend that you regenerate your token after you are done using mybinder.
#
# The tutorials can be downloaded by clicking [here](https://github.com/Qiskit/qiskit-tutorials/archive/master.zip) and to set them up follow the installation instructions [here](https://github.com/Qiskit/qiskit-tutorial/blob/master/INSTALL.md).
#
# ***
#
# ## Contents
# We have organized the tutorials into two sections:
#
#
# ### 1. Qiskit
#
# These tutorials aim to explain how to use Qiskit. We assume you have installed Qiskit if not please look at [qiskit.org](http://www.qiskit.org) or the install [documentation](https://github.com/qiskit/qiskit-tutorial/blob/master/INSTALL.md).
#
#
# We've collected a core reference set of notebooks in this section outlining the features of Qiskit. We will be keeping them up to date with the latest Qiskit version, currently 0.7. The focus of this section will be how to use Qiskit and not so much on teaching you about quantum computing. For those interested in learning about quantum computing we recommend the awesome notebooks in the community section.
#
#
# Qiskit is made up of four elements: Terra, Aer, Ignis, and Aqua with each element having its own goal and together they make the full Qiskit framework.
#
# #### 1.1 Getting started with Qiskit
#
# A central goal of Qiskit is to build a software stack that makes it easy for anyone to use quantum computers. To get developers and researchers going we have a set of tutorials on the basics.
#
# * [Getting started with Qiskit](qiskit/basics/getting_started_with_qiskit.ipynb) - how to use Qiskit
# * [The IBM Q provider](qiskit/basics/the_ibmq_provider.ipynb) - working with the IBM Q devices
# * [Plotting data in Qiskit](qiskit/basics/plotting_data_in_qiskit.ipynb) - illustrates the different ways of plotting data in Qiskit
#
# #### 1.2 Qiskit Terra
#
# Terra, the ‘earth’ element, is the foundation on which the rest of the software lies. Terra provides a bedrock for composing quantum programs at the level of circuits and pulses, to optimize them for the constraints of a particular device, and to manage the execution of batches of experiments on remote-access devices. Terra defines the interfaces for a desirable end-user experience, as well as the efficient handling of layers of optimization, pulse scheduling and backend communication.
# * [Quantum circuits](qiskit/terra/quantum_circuits.ipynb) - gives a summary of the `QuantumCircuit` object
# * [Visualizing a quantum circuit](qiskit/terra/visualizing_a_quantum_circuit.ipynb) - details on drawing your quantum circuits
# * [Summary of quantum operations](qiskit/terra/summary_of_quantum_operations.ipynb) - list of quantum operations (gates, reset, measurements) in Qiskit Terra
# * [Monitoring jobs and backends](qiskit/terra/backend_monitoring_tools.ipynb) - tools for monitoring jobs and backends
# * [Parallel tools](qiskit/terra/terra_parallel_tools.ipynb) - executing tasks in parallel using `parallel_map` and tracking progress
# * [Creating a new provider](qiskit/terra/creating_a_provider.ipynb) - a guide to integration of a new provider with Qiskit structures and interfaces
#
# #### 1.3 Qiskit Interactive Plotting and Jupyter Tools
#
# To improve the Qiskit user experience we have made many of the visualizations interactive and developed some very cool new job monitoring tools in Jupyter.
#
# * [Jupyter tools for Monitoring jobs and backends](qiskit/jupyter/jupyter_backend_tools.ipynb) - Jupyter tools for monitoring jobs and backends
#
# #### 1.4 Qiskit Aer
#
# Aer, the ‘air’ element, permeates all Qiskit elements. To really speed up development of quantum computers we need better simulators with the ability to model realistic noise processes that occur during computation on actual devices. Aer provides a high-performance simulator framework for studying quantum computing algorithms and applications in the noisy intermediate scale quantum regime.
# * [Aer provider](qiskit/aer/aer_provider.ipynb) - gives a summary of the Qiskit Aer provider containing the Qasm, statevector, and unitary simulator
# * [Device noise simulation](qiskit/aer/device_noise_simulation.ipynb) - shows how to use the Qiskit Aer noise module to automatically generate a basic noise model for simulating hardware backends
#
# #### 1.5 Qiskit Ignis
# Ignis, the ‘fire’ element, is dedicated to fighting noise and errors and to forging a new path. This includes better characterization of errors, improving gates, and computing in the presence of noise. Ignis is meant for those who want to design quantum error correction codes, or who wish to study ways to characterize errors through methods such as tomography, or even to find a better way for using gates by exploring dynamical decoupling and optimal control. While we have already released parts of this element as part of libraries in Terra, an official stand-alone release will come soon. For now we have some tutorials for you to explore.
# * [Relaxation and decoherence](qiskit/ignis/relaxation_and_decoherence.ipynb) - how to measure coherence times on the real quantum hardware
# * [Quantum state tomography](qiskit/ignis/state_tomography.ipynb) - how to identify a quantum state using state tomography, in which the state is prepared repeatedly and measured in different bases
# * [Quantum process tomography](qiskit/ignis/process_tomography.ipynb) - using quantum process tomography to reconstruct the behavior of a quantum process and measure its fidelity, i.e., how closely it matches the ideal version
#
# #### 1.6 Qiskit Aqua
# Aqua, the ‘water’ element, is the element of life. To make quantum computing live up to its expectations, we need to find real-world applications. Aqua is where algorithms for NISQ computers are built. These algorithms can be used to build applications for quantum computing. Aqua is accessible to domain experts in chemistry, optimization, AI or finance, who want to explore the benefits of using quantum computers as accelerators for specific computational tasks, without needing to worry about how to translate the problem into the language of quantum machines.
# * [Chemistry](qiskit/aqua/chemistry/index.ipynb) - using variational quantum eigensolver to experiment with molecular ground-state energy on a quantum computer
# * [Optimization](qiskit/aqua/optimization/index.ipynb) - using variational quantum eigensolver to experiment with optimization problems (maxcut and traveling salesman problem) on a quantum computer
# * [Artificial Intelligence](qiskit/aqua/artificial_intelligence/index.ipynb) - using quantum-enhanced support vector machine to experiment with classification problems on a quantum computer
# * [Finance](qiskit/aqua/finance/index.ipynb) - using variational quantum eigensolver to optimize portfolio on a quantum computer
#
# ### 2. Community Notebooks
#
# Teaching quantum and qiskit has so many different paths of learning. We love our community and we love the contributions so keep them coming. Because Qiskit is changing so much we can't keep this updated (we will try our best) but there are some great notebooks in here.
#
# #### 2.1 [Hello, Quantum World with Qiskit](community/hello_world/)
# Learn from the community how to write your first quantum program.
#
# #### 2.2 [Quantum Games with Qiskit](community/games/)
# Learn quantum computing by having fun. How is there a better way!
#
# #### 2.3 [Quantum Information Science with Qiskit Terra](community/terra/index.ipynb)
# Learn about and how to program quantum circuits using Qiskit Terra.
#
# #### 2.4 [Textbook Quantum Algorithms with Qiskit Terra](community/algorithms/index.ipynb)
# Learn about textbook quantum algorithms, like Deutsch-Jozsa, Grover, and Shor using Qiskit Terra.
#
# #### 2.5 [Developing Quantum Applications with Qiskit Aqua](community/aqua/index.ipynb)
# Learn how to develop and the fundamentals of quantum applications using Qiskit Aqua
#
# #### 2.6 Awards
# Learn from the great contributions to the [IBM Q Awards](https://qe-awards.mybluemix.net/)
# * [Teach Me Qiskit 2018](community/awards/teach_me_qiskit_2018/index.ipynb)
# * [Teach Me Quantum 2018](community/awards/teach_me_quantum_2018/index.ipynb)
#
#
#
from IPython.display import display, Markdown

# Render the repository's index.md inline in the notebook.
with open('index.md', 'r') as readme:
    content = readme.read()
display(Markdown(content))
# ***
#
# ## License
# This project is licensed under the Apache License 2.0 - see the [LICENSE](https://github.com/Qiskit/qiskit-tutorials/blob/master/LICENSE) file for details.
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="d6uetxR3Bker" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="7b013a15-df1d-4f6f-e6e6-abae78e50f3d"
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# Ground-truth category labels for the BBC Sport documents.
# (Variable name keeps the original "trueLables" spelling because the cells
# below reference it by that name.)
trueLables = pd.read_csv('bbcsport_classes.csv',delimiter=",", header=None).values
print(trueLables.shape)
# Vocabulary terms of the corpus, one term per row.
terms = pd.read_csv('bbcsport_terms.csv',delimiter=",", header=None).values
print(terms.shape)
# Feature matrix to cluster — assumes one row per document and one column
# per term; confirm against the CSV layout.
X = pd.read_csv('bbcsport_mtx.csv',delimiter=",", header=None).values
print(X.shape)
# K-means with 5 clusters (the dataset has 5 sport categories).
kmeans = KMeans(n_clusters=5)
kmeans.fit(X)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
print(centroids.shape)
print(labels.shape)
# + id="4F5l2HFv9y3V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1822} outputId="b47c220b-75fa-4488-887d-c4b6cf48b48e"
# Average two clustering-quality scores over 50 independent k-means runs to
# smooth out the randomness of centroid initialisation.
from sklearn import metrics  # fix: `metrics` was used below but never imported

totalrandom=0
totalmutual=0
for k in range(50):
    kmeans = KMeans(n_clusters=5, n_init=10)
    kmeans.fit(X)
    centroids = kmeans.cluster_centers_
    labels = kmeans.labels_
    # Flatten (n, 1) -> (n,) so the score functions get 1-D label arrays.
    trueLables = np.ravel(trueLables)
    # Both scores are symmetric and invariant to label permutation, so the
    # argument order does not matter.
    totalrandom = totalrandom + metrics.adjusted_rand_score(labels, trueLables)
    totalmutual = totalmutual + metrics.adjusted_mutual_info_score(labels, trueLables)
print("rand index over 50 averages iterations ",totalrandom/50)
print("mutual information",totalmutual/50)
# + id="pTcGpmmG-m_v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 321} outputId="0e953da2-2b9f-48be-9419-210641ade5f2"
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import numpy as np
import pandas as pd
# This cell is self-contained: it reloads the data and refits k-means.
trueLables = pd.read_csv('bbcsport_classes.csv',delimiter=",", header=None).values
print(trueLables.shape)
terms = pd.read_csv('bbcsport_terms.csv',delimiter=",", header=None).values
print(terms.shape)
X = pd.read_csv('bbcsport_mtx.csv',delimiter=",", header=None).values
print(X.shape)
kmeans = KMeans(n_clusters=5)
kmeans.fit(X)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
# Collapse the 5 centroid rows into a single vector by element-wise summation,
# so each term's weight becomes its total weight across all cluster centres.
centroids = sum(map(np.array, centroids))
Y = pd.read_csv('bbcsport_terms.csv',delimiter=",", header=None).values
Y = Y.reshape(len(Y),)
# Map each vocabulary term to its summed centroid weight for the word cloud.
Z = dict(zip(Y.T,centroids))
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white', width=1200, height=1000).generate_from_frequencies(Z)
fig = plt.figure()
plt.imshow(wordcloud)
plt.show()
# + id="sNij03NB_X2I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1106} outputId="b4a7d027-ded3-4ee3-c3a3-855946811b87"
X = pd.read_csv('bbcsport_mtx.csv',delimiter=",", header=None).values
print(X.shape)
# Standardise each column to zero mean / unit variance.
mu = X.mean(axis=0)
sigma = X.std(axis=0)
# NOTE(review): any zero-variance column (a term absent everywhere) makes
# sigma 0 and produces NaN/inf here — confirm the data has no constant columns.
Xnorm = (X - mu)/sigma
print (Xnorm[:,:])
m = len(Xnorm)
# Sample covariance matrix of the standardised data.
covmat = np.dot(Xnorm.T, Xnorm)/m
print(covmat)
# NOTE(review): np.linalg.eigh would be preferable for this symmetric matrix —
# eig may return unsorted eigenvalues with tiny imaginary parts.
S,U = np.linalg.eig(covmat)
print('Eigen values: {}'.format(S))
print('Eigen vectors:')
print(U)
# Project the data onto the eigenvector basis (PCA scores).
Z = np.dot(Xnorm,U)
plt.plot(Z, '.', markersize=10)
plt.title('Data after PCA')
# + id="pnWH1J2uCRuI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="d60f6852-f1e7-4abe-e86e-65b87b0f31fb"
# Keep only the top principal component, then map the projected scores back
# into the original (normalised) feature space.
k = 1
Ured = U[:, :k]
Zred = Xnorm @ Ured
Xrec = Zred @ Ured.T
plt.plot(Xrec, '.', markersize=10)
plt.title('Reconstructed Normalized Data')
# + id="mP9sdAn8DBFU" colab_type="code" colab={}
| BBC_Sport_Articles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python3
# name: python3
# ---
# # Images
#
# Collection of tests for **.. image::** and **.. figure::** directives
# ## Image
#
# [Docutils Reference](http://docutils.sourceforge.net/docs/ref/rst/directives.html#images)
#
# Most basic image directive
#
# <img src="_static/hood.jpg" style="">
#
# A scaled down version with 25 % width
#
# <img src="_static/hood.jpg" style="width:25%;">
#
# A height of 50px
#
# <img src="_static/hood.jpg" style="height:50px;">
#
# Including *alt*
#
# <img src="_static/hood.jpg" alt="A Mountain View" style="">
#
# An image with a *right* alignment
#
# <img src="_static/hood.jpg" style="width:75%;height:75%" align="right">
#
# An image with a *left* alignment
#
# <img src="_static/hood.jpg" style="width:50%;height:50%" align="left">
# ## Figure
#
# [Docutils Reference](http://docutils.sourceforge.net/docs/ref/rst/directives.html#figure)
#
# Testing the **.. figure::** directive
#
# <img src="_static/hood.jpg" style="width:50%;height:50%">
| tests/base/ipynb/images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# metadata:
# interpreter:
# hash: 916e78022459d02449eb0e919933c0dffe7558a101efc3cf53aea168c8e1e9f5
# name: 'Python 3.8.3 64-bit (''base'': conda)'
# ---
# +
# # !pip38 install camelot-py[cv]
# -
# file path
# NOTE(review): raw Windows-style relative path — confirm it matches the repo layout.
pdf_path = r'\src\foo.pdf'
# root = !cd
# `root` is produced by the `!cd` notebook magic above (current directory).
path = str(root[0]) + pdf_path
# +
import camelot
# Parse tables from the PDF using the "stream" flavor (whitespace-based,
# for tables without ruling lines).
# NOTE(review): camelot page numbers are 1-based strings — confirm "0" is intended.
output_camelot = camelot.read_pdf(
    filepath=path, pages=str(0), flavor="stream"
)
print(output_camelot)
# First detected table.
table = output_camelot[0]
print(table)
print(table.parsing_report)
# -
print(table)
print(type(table))
table.df
# Work with the table as a pandas DataFrame from here on.
df = table.df
df.columns
# +
# cleanse the table by dropping specific rows
# check of rows with null data
# df.isnull()
# -
# drop the initial set of rows (header/preamble rows extracted by camelot)
df1 = df.drop(axis=0, index=[0,1,2,3,4,5,6])
df1
# Assign the real column names that camelot could not recover.
columns = ['Cycle Name', 'KI (1/km)', 'Distance (mi)', 'Improved Speed', 'Decreased Accel', 'Eliminate Stops', 'Decreased Idle']
df1.columns = columns
df1
| category_machine_learning/pdf_parsing/01_camelot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/agemagician/ProtTrans/blob/master/Embedding/PyTorch/Advanced/ProtT5-XL-UniRef50.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="sRHqbHqAQw-M"
# ## Important Notes:
# 1. ProtT5-XL-UniRef50 has both encoder and decoder, for feature extraction we only load and use the encoder part.
# 2. Loading only the encoder part reduces the inference time and the GPU memory requirements by half.
# 3. In order to use the ProtT5-XL-UniRef50 encoder, you must install the latest huggingface transformers version from their GitHub repo.
# 4. If you are interested in both the encoder and decoder, you should use T5Model rather than T5EncoderModel.
# + [markdown] id="C5hW5wfM5i0Y"
# <h3>Extracting protein sequences' features using ProtT5-XL-UniRef50 pretrained-model</h3>
# + [markdown] id="9wCEAM9F5wTA"
# **1. Load necessary libraries including huggingface transformers**
# + id="GXAKFATm-mbs" colab={"base_uri": "https://localhost:8080/"} outputId="b18f9a93-811a-4d23-f7d2-a2763fb63248"
# !pip install -q SentencePiece transformers
# + id="jd3YQUd1-h2I"
import torch
from transformers import T5EncoderModel, T5Tokenizer
import re
import numpy as np
import gc
# + [markdown] id="hAKCMu_2-h2V"
# <b>2. Load the vocabulary and ProtT5-XL-UniRef50 Model<b>
# + id="HS8i5sOJ-h2W" colab={"base_uri": "https://localhost:8080/", "height": 164, "referenced_widgets": ["b261ba83900142beaa71e46259291408", "1afbeca8c28e49189382730ccf23959c", "<KEY>", "<KEY>", "8e9ca7b67b584d87aa21ddccd214baf1", "<KEY>", "<KEY>", "<KEY>", "f2843951b4ee48dfad7799d34dbc3710", "<KEY>", "<KEY>", "3e23b6d4eaae4850b26affb8a0c3f838", "3fc0a097c55b4ae9b70a292f8031dd0b", "c988a0848ba94fc7ae26a1a9502ccb51", "8a42964d76884641abaded96c4be7210", "5125469967144b478a04b7a4dbeed3ab", "<KEY>", "f009ecfa5ea2469f92f478296e47031c", "<KEY>", "<KEY>", "bd744af794a14c5cabfedc40f190f661", "7f4c9a3430c6499cafc35c1c30856e6a", "<KEY>", "<KEY>"]} outputId="cb538668-113f-4d80-a792-a042b5104194"
# Load the ProtT5 tokenizer; do_lower_case=False because amino-acid letters
# are case-significant tokens.
tokenizer = T5Tokenizer.from_pretrained("Rostlab/prot_t5_xl_uniref50", do_lower_case=False )
# +
# Load only the encoder half of the seq2seq model — sufficient for embeddings
# and roughly halves the memory footprint.
model = T5EncoderModel.from_pretrained("Rostlab/prot_t5_xl_uniref50")
# +
# Free any garbage left over from model loading before moving to the device.
gc.collect()
# + [markdown]
# <b>3. Load the model into the GPU if available and switch to inference mode<b>
# +
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# +
model = model.to(device)
# eval() disables dropout and other training-only behaviour.
model = model.eval()
# + [markdown] id="ZkqAotTcdZnW"
# <b>4. Create or load sequences and map rarely occurring amino acids (U,Z,O,B) to (X)</b>
# + id="a0zwKinIdZnX"
# Example protein sequences; residues are space-separated so each amino acid
# becomes its own token.
sequences_Example = ["A E T C Z A O","S K T Z P"]
# +
# Map the rare/ambiguous amino acids U, Z, O, B to the unknown token X.
sequences_Example = [re.sub(r"[UZOB]", "X", sequence) for sequence in sequences_Example]
# + [markdown]
# <b>5. Tokenize, encode sequences and load it into the GPU if possible<b>
# +
# Tokenize with padding so both sequences fit one batch tensor.
ids = tokenizer.batch_encode_plus(sequences_Example, add_special_tokens=True, padding=True)
# +
input_ids = torch.tensor(ids['input_ids']).to(device)
attention_mask = torch.tensor(ids['attention_mask']).to(device)
# + [markdown] id="Zylf1HyBdZnl"
# <b>6. Extracting sequences' features and load it into the CPU if needed<b>
# + id="i8CVGPRFdZnm"
# Forward pass without gradient tracking (inference only).
with torch.no_grad():
    embedding = model(input_ids=input_ids,attention_mask=attention_mask)
# +
# Per-token embeddings as a numpy array on the CPU:
# shape (batch, max_seq_len, hidden_dim).
embedding = embedding.last_hidden_state.cpu().numpy()
# + [markdown]
# <b>7. Remove padding (\<pad\>) and special tokens (\</s\>) that is added by ProtT5-XL-UniRef50 model<b>
# +
features = []
for seq_num in range(len(embedding)):
    # Number of real (non-pad) tokens for this sequence.
    seq_len = (attention_mask[seq_num] == 1).sum()
    # Drop the trailing </s> token (the last non-pad position).
    seq_emd = embedding[seq_num][:seq_len-1]
    features.append(seq_emd)
# +
print(features)
| Embedding/PyTorch/Advanced/ProtT5-XL-UniRef50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
# Use the FiveThirtyEight plot style for all figures below.
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# SQLite database of Hawaii weather observations.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# +
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
# NOTE(review): prepare(engine, reflect=True) is deprecated in SQLAlchemy 1.4+
# in favour of Base.prepare(autoload_with=engine) — confirm the pinned version.
Base.prepare(engine, reflect=True)
# -
# We can view all of the classes that automap found
Base.classes.keys()
inspector = inspect(engine)
# Print column names for station table
columns = inspector.get_columns('station')
for c in columns:
    print(c['name'])
# Print column names for measurement table
columns = inspector.get_columns('measurement')
for c in columns:
    print(c['name'])
# Save references to each table
Meas = Base.classes.measurement
Stations = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# # Precipitation Analysis
# ### Design a query to retrieve the last 12 months of precipitation data and plot the results
# Find the most recent date in the data set
# Find the most recent date in the data set
session.query(Meas.date).order_by(Meas.date.desc()).first()
# Calculate the date 1 year ago from the last data point in the database
# (2017-08-23 is the most recent date returned by the query above)
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
print(year_ago)
# Perform a query to retrieve the data and precipitation scores
annual_prcp = session.query(Meas.date,Meas.prcp).filter(Meas.date >= year_ago).all()
annual_prcp
# +
# Save the query results as a Pandas DataFrame and set the index to the date column
df = pd.DataFrame(annual_prcp)
df.set_index('date', inplace=True)
# df.head(10)
# Sort the dataframe by date
# fix: sort_values returns a new frame — the result was previously discarded
df = df.sort_values('date')
# +
# Use Pandas Plotting with Matplotlib to plot the data
# fix: pass figsize to df.plot() — it creates its own figure, so the previous
# separate plt.figure(figsize=...) call had no effect
df.plot(figsize=(12, 10))
plt.xticks(rotation = 'vertical')
plt.show()
# -
# Use Pandas to calculate the summary statistics for the precipitation data
df.describe()
# # Station Analysis
# Design a query to show how many stations are available in this dataset?
session.query(func.count(Stations.station)).all()
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
session.query(Meas.station, func.count(Meas.station)).group_by(Meas.station).order_by(func.count(Meas.date).desc()).all()
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
# ('USC00519281' is the most active station returned by the previous query.)
session.query(func.min(Meas.tobs), func.max(Meas.tobs), func.avg(Meas.tobs)).filter_by(station = 'USC00519281').all()
# +
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
last_year_tobs = session.query(Meas.date, Meas.tobs).filter(Meas.station == 'USC00519281').filter(Meas.date >= year_ago).all()
WAIHEE_tobs_df = pd.DataFrame(last_year_tobs)
WAIHEE_tobs_df.plot.hist(by=None, bins=12)
| climate_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="v8T-foUHzpRx" colab_type="code" outputId="44db6945-7737-483c-d0da-581b4db59c2f" executionInfo={"status": "ok", "timestamp": 1581685770250, "user_tz": -60, "elapsed": 7371, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 261}
# !pip install eli5
# + id="bSFcGT9x0WgJ" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
from ast import literal_eval
from tqdm import tqdm_notebook
# + id="FvbrehAQ027Q" colab_type="code" outputId="ceb12667-6d39-45ba-db83-856e697ab897" executionInfo={"status": "ok", "timestamp": 1581685974383, "user_tz": -60, "elapsed": 719, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="xA1eHAa41AOB" colab_type="code" outputId="c94b5b46-1b37-4ee9-b6e5-87e351a28d42" executionInfo={"status": "ok", "timestamp": 1581685977931, "user_tz": -60, "elapsed": 2285, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# ls
# + id="LUvyxBAQ1AyI" colab_type="code" outputId="621ffd65-371a-4a93-f6fc-0794575e9d66" executionInfo={"status": "ok", "timestamp": 1581685996873, "user_tz": -60, "elapsed": 2468, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# ls data
# + id="R0ExpMxp1PL4" colab_type="code" colab={}
# Load the men's shoes dataset; low_memory=False parses the file in one pass
# so pandas infers a single dtype per column.
df = pd.read_csv('data/men_shoes.csv', low_memory = False)
# +
df.columns
# + id="EM7TzN1A1wzv" colab_type="code" colab={}
def run_model(feats, model=None):
    """Cross-validate a regressor predicting `prices_amountmin` from `feats`.

    Reads the module-level DataFrame `df`. `feats` is a list of column names
    forming the feature matrix. `model` optionally supplies the estimator;
    by default a DecisionTreeRegressor(max_depth=5) is built, matching the
    original behaviour. (Previously the function always built its own tree
    and silently ignored any externally configured estimator.)

    Returns (mean, std) of the negative-MAE cross-validation scores.
    """
    X = df[feats].values
    y = df['prices_amountmin'].values
    if model is None:
        model = DecisionTreeRegressor(max_depth=5)
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)
# + id="qwzL_Bmc2PKS" colab_type="code" outputId="85b3aead-bb59-4378-92f5-e618aa00c749" executionInfo={"status": "ok", "timestamp": 1581687098315, "user_tz": -60, "elapsed": 741, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# Baseline: integer-encode the (lower-cased) brand and score it alone.
df['brand_cat'] = df['brand'].map(lambda x:str(x).lower()).factorize()[0]
run_model(['brand_cat'])
# +
# NOTE(review): this `model` is never used — run_model builds its own
# DecisionTreeRegressor internally, so the score below is unchanged.
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'])
# +
# Raw (unparsed) features column.
df.features
# +
# Quick dict-access sanity check.
test = {'key':'value'}
test['key']
str(test)
# + id="xsj_rJRc57nD" colab_type="code" colab={}
def parse_features(x):
    """Parse one raw `features` cell into a flat {key: value} dict.

    `x` is either NaN or a string holding a Python-literal list of
    {'key': ..., 'value': [...]} dicts (with escaped quotes). Keys and the
    first value of each entry are lower-cased and stripped. Returns an empty
    dict for NaN input; malformed entries (missing 'key' or an empty 'value'
    list) are skipped instead of raising KeyError/IndexError.
    """
    output_dict = {}
    if str(x) == 'nan': return output_dict
    features = literal_eval(x.replace('\\"','"'))
    for item in features:
        # Robustness fix: skip entries without a key or with no values.
        if 'key' not in item or not item.get('value'):
            continue
        key = item['key'].lower().strip()
        value = item['value'][0].lower().strip()
        output_dict[key] = value
    return output_dict
# Parse every row's features string into a dict.
df['features_parsed'] = df['features'].map(parse_features)
# +
# Peek at a few parsed feature dicts.
df['features_parsed'].head()
# +
# Collect the union of all feature keys across rows.
# (.map is used here purely for its side effect of updating `keys`.)
keys = set()
df['features_parsed'].map ( lambda x: keys.update(x.keys()))
len(keys)
# + id="YqW1RLk--GYB" colab_type="code" outputId="6d88c3fd-6a0d-4bec-999c-263ee5e8e7d5" executionInfo={"status": "ok", "timestamp": 1581688755584, "user_tz": -60, "elapsed": 4694, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": ["a38087e3af9e47669a1bf78aac3e720d", "009733d9a9c54168b92c5063e80d0101", "4df7039f5f1148e6a5893ee33b517590", "<KEY>", "ae66ad4a12f8444c82c58bde973928d1", "<KEY>", "<KEY>", "<KEY>"]}
def get_name_feat(key):
    """Return the DataFrame column name used for a parsed feature *key*."""
    return f'feat_{key}'
# Materialise one DataFrame column per discovered feature key (NaN where absent).
for key in tqdm_notebook(keys):
    df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)
# +
# Percentage of rows with a non-null value for each feature column.
keys_stat = {}
for key in keys:
    keys_stat [key] = df [False == df[get_name_feat(key)].isnull()].shape[0]/ df.shape[0]*100
# +
# Show only the features present in more than 30% of the rows.
{k:v for k,v in keys_stat.items() if v> 30}
# + id="9nuo0KqmcVDL" colab_type="code" colab={}
# Explicit integer encodings ('_cat' suffix) for a handful of feature columns.
# NOTE(review): the loop below also encodes every feature column with a
# '_cate' suffix, which makes these seven assignments largely redundant.
df['feat_brand_cat'] = df['feat_brand'].factorize()[0]
df['feat_color_cat'] = df['feat_color'].factorize()[0]
df['feat_gender_cat'] = df['feat_gender'].factorize()[0]
df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0]
df['feat_material_cat'] = df['feat_material'].factorize()[0]
df['feat_sport_cat'] = df['feat_sport'].factorize()[0]
df['feat_style_cat'] = df['feat_style'].factorize()[0]
# Integer-encode every parsed feature column ('_cate' suffix).
for key in keys:
    df[get_name_feat(key) + '_cate'] = df[get_name_feat(key)].factorize()[0]
# + id="NUHLBlBLdUVw" colab_type="code" colab={}
# NOTE(review): dead assignment — `feats` is immediately overwritten below.
feats = ['']
# +
# Feature set: brand plus selected parsed-feature categorical encodings.
feats = ['brand_cat','feat_brand_cat','feat_gender_cat','feat_material_cat','feat_style_cat']
feats += ['feat_sizearm_cate',
'feat_number of compartments_cate',
'feat_charger included_cate',
'feat_capacity_cate',
'feat_target audience_cate',
'feat_bridge width_cate',
'feat_age gender group_cate',
'feat_case tone_cate',
'feat_pattern_cate',
'feat_special features_cate',
'feat_hood_cate',
'feat_cushioning_cate',
'feat_carats_cate',
'feat_power type_cate',
'feat_profession_cate',
'feat_shirt size_cate',
'feat_manufacturer number_cate',
'feat_general warranty_cate',
'feat_diamond clarity_cate']
# NOTE(review): `model` is unused — run_model builds its own
# DecisionTreeRegressor internally, so this forest never gets scored.
model = RandomForestRegressor(max_depth=5, n_estimators=100)
result = run_model(feats)
# + id="wMwwbouVnK-G" colab_type="code" outputId="ea50dc21-7e15-4821-e9d3-ccb5f67c2391" executionInfo={"status": "ok", "timestamp": 1581699129327, "user_tz": -60, "elapsed": 767, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# List every column whose name contains 'cat' (the categorical encodings).
feats_cat = [x for x in df.columns if 'cat' in x]
feats_cat
# +
# Fit a random forest on the selected features, then rank them with
# permutation importance (how much shuffling each column hurts the score).
X= df[feats].values
y = df['prices_amountmin'].values
m = RandomForestRegressor(max_depth=5, n_estimators=100,random_state=0)
m.fit(X, y)
print (result)
perm = PermutationImportance(m, random_state=1).fit(X,y);
eli5.show_weights(perm,feature_names = feats)
# +
# Sample a few parsed feature dicts for Nike products.
df[df['brand']== 'nike'].features_parsed.sample(5).values
# + id="RXYfpA_ziVq2" colab_type="code" outputId="67d48e87-1589-43e4-8563-bff1408685fb" executionInfo={"status": "ok", "timestamp": 1581699971528, "user_tz": -60, "elapsed": 4009, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# !git add matrix_one/Matrix_day5.ipynb
# + id="trlBSAl4pQSb" colab_type="code" outputId="45812f4d-bb9b-44e5-b031-0e53ad7ccf06" executionInfo={"status": "ok", "timestamp": 1581699981684, "user_tz": -60, "elapsed": 2093, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# ls
# + id="tsHUPDzhqJ5n" colab_type="code" colab={}
# !git add matrix_one/Matrix_day5.ipynb
# + id="9ZCK0Kd6qVQq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 212} outputId="13e25512-1fac-4cc2-f9d4-248d0f1f3dd7" executionInfo={"status": "ok", "timestamp": 1581700037115, "user_tz": -60, "elapsed": 10810, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}}
# !git commit -m "day 5 matrix"
# + id="_U6Z5_hGqw7z" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Marta"
# + id="RJwjas0Yq25w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="46d61457-c1c6-42ff-a328-95a7c82ff3bf" executionInfo={"status": "ok", "timestamp": 1581700351869, "user_tz": -60, "elapsed": 7039, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10155564932329731088"}}
# !git push -u origin master
# + id="75CJZsk3q_sf" colab_type="code" colab={}
| matrix_one/Matrix_day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Вероятный семинар 13: Еее рок!!!
#
# 
# Тут мы импортируем все вкусности:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# from scipy import stats
# import statsmodels.formula.api as smf
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc, precision_recall_curve, confusion_matrix
from sklearn.metrics import plot_precision_recall_curve, plot_roc_curve
# ### Задачина 1
#
# 
#
# 1. Загрузи данные по покемонам, `pokemon.csv`.
#
# 2. Подели данные на обучающую и тестовую выборки в пропорции 80 на 20. Не забудь зафиксировать зерно генератора случайных чисел! Кстати, зачем фиксируют зерно генератора?
#
# 3. Оцени параметры логистической регрессии:
#
# $$P(legendary_i=1|attack_i, defense_i, speed_i)=\Lambda(\beta_1 + \beta_2 attack_i +\beta_3 defense_i +\beta_4 speed_i)$$.
#
# 4. Найди для каждого покемона прогноз вероятности быть легендарным (на обеих выборках).
#
# 5. Построй на одном графике две наложенные плотности предсказанных вероятностей (для легендарных и обычных покемонов) на обучающей выборке.
#
# 6. Пара вопросов по графику. Правда ли, что для легендарных покемонов спрогнозированные вероятности в среднем выше? Существует ли возможность выбрать порог, при котором легендарные идеально отличались бы от обычных покемонов?
#
# Легендарных покемонов мало, поэтому разумно строить именно плотности, чтобы нормировка площади у легендарных и нелегендарных на единицу выходила своя :)
#
# 7. И построй аналогичный график для тестовой выборки.
# ### Задачилла 2.
#
# 1. Оцени параметры обычной регрессии регрессии:
#
# $$P(legendary_i=1|attack_i, defense_i, speed_i)= \beta_1 + \beta_2 attack_i +\beta_3 defense_i +\beta_4 speed_i + u_i$$.
#
# 2. Найди для каждого покемона прогноз вероятности быть легендарным (на обеих выборках).
#
# 3. Построй на одном графике две наложенные плотности предсказанных вероятностей (для легендарных и обычных покемонов) на обучающей выборке.
#
# 4. Пара вопросов по графику. Правда ли, что для легендарных покемонов спрогнозированные вероятности в среднем выше? Существует ли возможность выбрать порог, при котором легендарные идеально отличались бы от обычных покемонов?
#
# Легендарных покемонов мало, поэтому разумно строить именно плотности, чтобы нормировка площади у легендарных и нелегендарных на единицу выходила своя :)
#
# 5. И построй аналогичный график для тестовой выборки.
#
# ### Задачадища 3
# Рассмотри модель логистической регрессии.
#
# 1. Прочти про тпр, фпр и пр. и пр. [https://en.wikipedia.org/wiki/Receiver_operating_characteristic](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)
#
# 2. Составь таблицу сопряженности по тестовым данным для порога 0.5, а потом для порога 0.8.
#
# 3. Что происходит с TPR, FPR, TNR, FNR, чувствительностью, специфичностью, точностью и полнотой при росте порога?
#
# 4. Построй ROC-кривую и кривую точность-полнота.
#
#
#
#
# ### Задачудище 4
#
# 1. Найди таблички сопряженности для порога 0.5 и 0.8 для прогнозов на тестовой выборке с помощью обычной регрессии.
#
# 2. Построй ROC-кривую и кривую точность-полнота для обычной регрессии.
#
# 2. Сравни качество обычной регрессии и логит-регрессии.
# ### Задачища n+4
#
# 1. Посмотри няшную визуализацию про ROC-кривую от Насти Чирковой, [https://kawaiiuroboros.github.io/roc-auc/](https://kawaiiuroboros.github.io/roc-auc/)
#
# 2. Что отложено по горизонтали и вертикали на левом и правом графике?
#
# 3. При каких условиях в визуализации AUC меньше половины?
#
# 4. Почему на практике редко складывается ситуация, когда AUC меньше половины?
| sem12_13_logisticRegression/13_logit_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### ECE 572 Course Project - Python Network Analyzer for Wireshark Captures
# Project Description:
# Python-based program designed to extract and visualize network traffic information from a Wireshark generated .csv file.
#
# Team Members:
#
# <NAME> - V00931210 - <EMAIL>
#
# <NAME> - V00818626 - <EMAIL>
#
# <NAME> - V00938179 - <EMAIL>
#
# Imports and essentials
import numpy as np
import pandas as pd
from pandas import plotting
import matplotlib.pyplot as plt
import datetime
import re
import seaborn as sns
sns.set(color_codes=True)
# %matplotlib inline
# # 1) ECE572Benign
# Import data from csv files - ECE572Benign.csv, ECE572Test.csv, & ECE572DoS.csv
# Load the Wireshark CSV export; latin-1 avoids decode errors on raw payload bytes.
benign_df = pd.read_csv('ECE572Benign_n.csv',
                    delimiter = ',', encoding='latin-1', header=0) # Header labels all in row = 0
# ## Cleanup
# +
# Replace missing fields with 0 and drop Wireshark's packet counter column,
# which duplicates the dataframe index.
benign_df = benign_df.fillna(0)
benign_df = benign_df.drop(columns="No.")
#print(benign_df.head())
print(benign_df[["Source", "Source Port",
                 "Destination", "Dest Port", "Protocol"]].head(10)) #print only required data
print("\nDataset dimensions", benign_df.shape)
# -
# ## Network Graph
# One node per host, one edge for every observed Source->Destination pair.
#https://towardsdatascience.com/from-dataframe-to-network-graph-bbb35c8ab675
import networkx as nx
network_graph = nx.from_pandas_edgelist(benign_df, 'Source', 'Destination')
from matplotlib.pyplot import figure
figure(figsize=(12, 10))
nx.draw_shell(network_graph, with_labels=True, font_size=10)
# ## Statistics
def df_to_num(unique_df=None):
    """Filter an iterable of strings down to the purely numeric entries.

    Keeps every value containing no alphabetic character (e.g. IPv4
    addresses or port numbers) and drops names such as 'Broadcast'.

    Parameters
    ----------
    unique_df : iterable of str or None
        Values to filter. None is treated as an empty input (the old
        implementation crashed on the default).

    Returns
    -------
    pd.Series
        The retained values, re-indexed from 0.
    """
    if unique_df is None:
        unique_df = []
    # Build a plain list first: appending via .loc inside a loop is O(n^2)
    # and pd.Series([]) without a dtype triggers a deprecation warning.
    numeric_values = [val for val in unique_df
                      if not re.search('[a-zA-Z]', val)]
    return pd.Series(numeric_values)
# ### Top level view of the Benign PCap
# +
# Source Address with the highest amount of data transfer.
# For object columns, describe()['top'] is the most frequent value.
print("# Source Address with the highest amount of data transfer")
print(benign_df['Source'].describe(),'\n\n')
# Destination Address with the highest amount of data transfer
print("# Destination with the highest amount of data transfer")
print(benign_df['Destination'].describe(),'\n\n')
frequent_address = benign_df['Source'].describe()['top']
# Destination address to which most of the connections/data transfers are made
print("# Destination address receiving most amount of data")
print(benign_df[benign_df['Source'] == frequent_address]['Destination'].unique(),"\n\n")
# Most frequently used destination port by top source address (dst ports)
print("# Most frequently used Destination ports by top source address (Destination Ports)")
print((benign_df[benign_df['Source'] == frequent_address]['Dest Port'].unique()),"\n\n")
# Most frequently used source port by top source address(src ports)
print("# Most frequently used source ports by top source address (Source ports)")
print(benign_df[benign_df['Source'] == frequent_address]['Source Port'].unique(),"\n\n")
# -
# ### Deep analysis of Source Stats
# Unique Source Addresses (IPv4 ones isolated via df_to_num)
print("Unique Source Addresses")
src_unique_df = benign_df['Source'].unique()
print(src_unique_df)
print('\nUnique IPv4 Source Addresses\n'+str(df_to_num(src_unique_df)))
# +
# Group by Source Address and Payload Length Sum
source_addresses = benign_df.groupby('Source')['Length'].sum()
print("Total data transmitted by Source:\n"+str(source_addresses))
print(type(source_addresses))
source_addresses.plot.barh(title="Addresses Sending Payloads",figsize=(17,13),
                           logx=True, color=["y","c","m", "b", "g", "r", "k"])
# -
# ### Deep analysis of Destination Stats
# Unique Destination Addresses
print("\nUnique Destination Addresses")
dest_unique_df = benign_df['Destination'].unique()
print(dest_unique_df)
print('\nUnique IPv4 Destination Addresses\n'+str(df_to_num(dest_unique_df)))
# +
# Group by Destination Address and Payload Length Sum
dest_addresses = benign_df.groupby('Destination')['Length'].sum()
print("Total data transmitted by Destination:\n"+str(dest_addresses))
print(type(dest_addresses))
dest_addresses.plot.barh(title="Addresses Receiving Payloads",figsize=(17,13),
                         logx=True, color=["y","c","m", "b", "g", "r", "k"])
# -
# ### Deep analysis of Source port
# Group by Source Port and Packet Length Sum.
# Rows whose port is 0 were fillna(0)-padded (no port info), so drop them.
src_pkt_length_df = benign_df[benign_df['Source Port'] != int(0.0)]
src_pkt_length_df = src_pkt_length_df.groupby('Source Port')['Length'].sum().sort_values(ascending=False)
src_pkt_length_df = src_pkt_length_df.head(20)
print("Total data transmitted by Source Port (Top 20):\n"+str(src_pkt_length_df))
src_pkt_length_df.plot.barh(title="Source Ports (Bytes Sent)",figsize=(50,45),
                            logx=True, color=["y","c","m", "b", "g", "r", "k"], fontsize=40)
# Group by Destination Port and Packet Length Sum
dest_pkt_length_df = benign_df[benign_df['Dest Port'] != int(0.0)]
dest_pkt_length_df = dest_pkt_length_df.groupby('Dest Port')['Length'].sum().sort_values(ascending=False)
dest_pkt_length_df = dest_pkt_length_df.head(20)
print("Total data transmitted by Destination Port (Top 20):\n"+str(dest_pkt_length_df))
dest_pkt_length_df.plot.barh(title="Destination Ports (Bytes Sent)",figsize=(50,45),
                             logx=True, color=["y","c","m", "b", "g", "r", "k"], fontsize=40)
# +
# Parallel-coordinates view of the suspected scanner host (.105) only.
from pandas.plotting import parallel_coordinates
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
nmap_check_df = benign_df[(benign_df['Source'] == "192.168.56.105")]
print(nmap_check_df[['Source', 'Source Port', 'Destination', 'Dest Port', 'Length']])
figure(figsize=(20,15))
plotting.parallel_coordinates(nmap_check_df, "Destination", cols=['Source Port', 'Dest Port'],
                              color=('r', 'b', 'y', 'c', 'm', 'g', 'k'))
plt.title('Nmap check visualization for the packets coming from 105\'s Source Port to Dest Port')
plt.show()
# +
# Same plot over the whole capture for comparison.
from pandas.plotting import parallel_coordinates
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
figure(figsize=(20,15))
plotting.parallel_coordinates(benign_df, "Destination", cols=['Source Port', 'Dest Port'],
                              color=('r', 'b', 'y', 'c', 'm', 'g', 'k'))
plt.title('Nmap Visualization from Source Port to Dest Port')
plt.show()
# -
# #### Timeline analysis for 192.168.56.104
# ##### Note: There wasn't any data transfer between 101/102 and 105
# +
#groupby("time")['payload'].sum().plot(kind='barh',title="Destination Ports (Bytes Received)",figsize=(8,5))
time_slice_df = benign_df[(benign_df['Source'] == "192.168.56.105") & (benign_df['Destination'] == "192.168.56.104")]
time_slice_df = time_slice_df[["Time", "Length"]]
print(time_slice_df)
# Capture window bounds -- presumably epoch timestamps of this pcap; verify
# against the capture file if reused.  Bucketed into 30-second bins below.
start_time = float(1595011927.678)
end_time = float(1595012441.255)
bins = np.arange(start_time, end_time+1, 30)
time_slice_df = time_slice_df.groupby(pd.cut(time_slice_df['Time'], bins=bins)).Length.sum()
# NOTE(review): message says .101 but the data filtered above is for .104.
print("Data received by 192.168.56.101 every 30 seconds:\n"+str(time_slice_df))
time_slice_df.plot.barh(title="Data received by 192.168.56.104 every 30 seconds",
                        figsize=(20,18), color=["m", "b", "y"], fontsize=16)
| ECE572ProjectCode_benign.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import random as rd
D = pd.read_json (r'disease_db.json')
# put the json into the same directory where this file lies
D
# D is the Pandas Dataframe holding one row per disease.
# Export a disease list (without the raw symptom strings, columns reversed
# so the id comes first).
DD = D.drop(['symptoms'], axis=1)
DD = DD[DD.columns[::-1]]
DD.to_csv("Diseases/Diseases.csv", index=False, encoding='utf8')
# Function that parses one Input str into an array
# gets Symptom row -> return array has form [id1,acc1,id2,acc2...]
def get_att_for_line(d):
    """Parse one raw symptom string into a 1x100 numeric row.

    Each comma-separated token is expected to look like "<key>:<value>"
    (optionally ending in '}'); the numeric part after the colon is stored
    at the token's position.  Unused slots stay 0.  The fixed width of 100
    caps the number of symptom slots per disease -- raise it if more are
    ever needed.
    """
    parsed = np.zeros((1, 100))
    tokens = d.split(',')
    for slot in range(len(tokens)):
        # Keep only the text between ':' and an optional closing '}'.
        raw_value = tokens[slot].partition(':')[2].partition('}')[0]
        parsed[0, slot] = float(raw_value)
    return parsed
# returns a attribute matrix for Symptoms:
# returns a matrix like :[[id1(disease1),acc1(disease1),id2(disease1)...],
# [id1(disease2),acc1(diesease2),id2(disease1)..],
# ...]
def get_att_matrix():
    """Build the disease-by-attribute matrix (one row per disease).

    Row i holds the parsed [id, accuracy, id, accuracy, ...] values of
    disease i's 'symptoms' string from the global dataframe D.
    """
    attributes = np.zeros((D.shape[0], 100))
    for idx, disease in D.iterrows():
        parsed_row = get_att_for_line(str(disease['symptoms']))
        # Copy the full 1x100 parsed row into this disease's slot.
        attributes[idx, :] = parsed_row[0, :]
    return attributes
mat = get_att_matrix()
# The attribute matrix interleaves ids and accuracies, so even columns are
# symptom ids and odd columns are their probabilities.
# every row == 1 disease every column one Symptom ID
# IDS is a matrix looking like this: [[id1(disease1), id2(disease1), id3(disease1),...],
#                                     [id1(disease2), id2(disease2), id3(disease1)..],
#                                     ...]
IDS = mat[::,0::2]
# every row == 1 disease every column one Acc %
# SYM is a matrix looking like this: [[acc1(disease1), acc2(disease1), acc3(disease1),...],
#                                     [acc1(disease2), acc2(disease2), acc3(disease2),...],
#                                     ,..,]
SYM = mat[::,1::2]
def get_symptoms_and_acc_cut(ids, sym, row):
    """Return the trimmed (symptom-id, accuracy) arrays for one disease row.

    The row is truncated to as many leading entries as there are non-zero
    ids in it (valid ids are assumed to sit at the front of the row), and
    entries whose id is negative are dropped from both arrays.
    """
    # Number of valid (non-zero) symptom ids in this row.
    valid_count = len(ids[row].nonzero()[0])
    # Keep only the leading valid_count slots of both parallel arrays.
    trimmed_ids = ids[row, :valid_count]
    trimmed_acc = sym[row, :valid_count]
    # Drop positions whose id is negative (placeholder entries).
    negative_positions = np.argwhere(trimmed_ids < 0).flatten()
    trimmed_ids = np.delete(trimmed_ids, negative_positions)
    trimmed_acc = np.delete(trimmed_acc, negative_positions)
    return trimmed_ids, trimmed_acc
# +
''' SYMPTOMS APPLICATION'''
def get_symptoms_used(IDS, SYM):
    """Collect the sorted unique symptom ids used across all diseases.

    Walks every disease row (row count taken from the global `mat`),
    concatenates their valid symptom ids, and de-duplicates the result
    (np.unique also sorts it).
    """
    all_ids, _ = get_symptoms_and_acc_cut(IDS, SYM, 0)
    for row in range(1, mat.shape[0]):
        row_ids, _ = get_symptoms_and_acc_cut(IDS, SYM, row)
        all_ids = np.concatenate((all_ids, row_ids), axis=0)
    return np.unique(all_ids, axis=0)
# -
symliste = get_symptoms_used(IDS,SYM)
# Convert the float symptom ids into plain ints for the isin() filter below.
flist = []
for sss in symliste:
    flist.append(int(sss))
# Filter the symptom catalogue down to the symptoms actually referenced by
# any disease and overwrite Symptoms.csv with that subset.
df=pd.read_json('Symptoms/Symptoms.json')
df.to_csv('Symptoms/Symptoms.csv')
# NOTE(review): patientData_reviews is never used afterwards.
patientData_reviews = pd.read_csv('Symptoms/Symptoms.csv')
df=pd.read_csv('Symptoms/Symptoms.csv', index_col=[0])
boolean_series = df.ID.isin(flist)
filtered_df = df[boolean_series]
filtered_df.to_csv('Symptoms/Symptoms.csv',index=None)
# Column order of the generated patient matrix.
symidvec = np.asarray(flist)
def generate_patients(Label,IDST,SYMT,count):
    """Sample `count` synthetic patients for one disease.

    Label is the disease id stored in the first column of every patient
    row; IDST/SYMT are the disease's symptom ids and occurrence
    probabilities (parallel arrays); count is the number of patients.
    Returns an array of shape (count, 1 + len(symidvec)): column 0 is the
    label, the remaining columns are 0/1 symptom indicators ordered like
    the global `symidvec`.
    """
    # Start with the label row; one symptom row is stacked on per id.
    vec = np.full(count,int(Label))
    for n,i in enumerate(symidvec):
        argdes = np.argwhere(IDST == i).flatten()
        if len(argdes )== 0:
            # Disease does not list this symptom: always absent.
            vec = np.vstack((vec, np.zeros(count,)))
        else:
            ii = argdes[0]
            if SYMT[ii] < 0:
                # Unknown probability (negative placeholder): draw a random
                # occurrence rate in [0, 0.5) instead.
                rz = rd.uniform(0, 0.5)
                vec = np.vstack((vec,rd.choices([1,0], weights=[rz,1-rz],k=count)))
            else:
                # Bernoulli draw with the recorded symptom probability.
                vec = np.vstack((vec,rd.choices([1,0], weights=[SYMT[ii],1-SYMT[ii]],k=count)))
    return vec.transpose()
def generate_database_mat(nr):
    """Generate `nr` synthetic patients for every disease and stack them.

    Uses the module-level IDS/SYM tables, disease ids from D, and the
    disease count from the global `mat`; returns a matrix with
    nr * number_of_diseases rows.
    """
    IDST,SYMT = get_symptoms_and_acc_cut(IDS,SYM,0)
    Patients = generate_patients(D['id'][0],IDST,SYMT,nr)
    print(Patients.shape)
    for x in range(1,mat.shape[0]):
        IDST,SYMT = get_symptoms_and_acc_cut(IDS,SYM,x)
        Patients = np.vstack((Patients, generate_patients(D['id'][x],IDST,SYMT,nr)))
        #ListPatients.append(generate_patients(D['id'][x],IDST,SYMT,nr) )
    return Patients
def get_frames(nr):
    """Generate `nr` synthetic patients per disease as a DataFrame.

    Column 0 holds the disease label (header 0) and the remaining columns
    are the symptom ids from the global `symidvec`; each cell is a 0/1
    symptom indicator produced by `generate_database_mat`.

    Parameters
    ----------
    nr : int
        Number of patients to generate per disease.

    Returns
    -------
    pd.DataFrame
        One row per generated patient.
    """
    # Fix: the header was computed here and then ignored while an identical
    # expression was recomputed inline; compute it once and use it.
    header = np.append([0], symidvec).astype(int)
    patients = generate_database_mat(nr)
    # vstack promotes the int header to the patient matrix's float dtype,
    # so the resulting column labels are floats (same as before).
    stacked = np.vstack((header, patients))
    print(stacked.shape)
    return pd.DataFrame(data=stacked[1:, :], columns=stacked[0, :])
# +
kk = get_frames(100)
kk.to_csv("./Patients/patients{}.csv".format("Patients"), index=False, encoding='utf8')
#data.to_csv("output_excel_file.xlsx", sheet_name="Sheet 1", index=False)
# +
# Shuffle by swapping 10000 random row pairs.  NOTE(review): npKK aliases kk
# (no copy), so this mutates kk too; the 1999 bound assumes exactly 2000 rows
# (20 diseases x 100 patients) -- confirm if the disease count changes.
# `num1` in the chained assignments below is a leftover and unused.
npKK = kk
for i in range(10000):
    x = num1 = rd.randint(0, 1999)
    y = num1 = rd.randint(0, 1999)
    w = npKK.iloc[x]
    npKK.iloc[x] = npKK.iloc[y]
    npKK.iloc[y] = w
npKK.to_csv("./Patients/patients{}.csv".format("PatientsMIXED"), index=False, encoding='utf8')
# -
npKK
| PatientGeneratorBigMatrix/RoboDocPatientGeneratorPlusMixer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Cleaning
# The purpose of this code document is to take the raw data from two subreddits (bodyweight fitness and powerlifting) and prepare it for the EDA and modeling process. Cleaning steps include handling null values, removing useless words, combining two text columns into one column, and applying Natural Language Processing Toolkit (NLTK). The result will be a document-term matrix ready for the EDA process.
# +
#Imported Libraries
import re
import string
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
# -
# To expand Jupyter Notebook's output to more rows and columns
pd.set_option('display.max_rows', 999)
pd.set_option('display.max_columns', 999)
# Move CSV files to dataframes
body = pd.read_csv('../datasets/bodyweight_fitness', low_memory=False)
power = pd.read_csv('../datasets/powerlifting', low_memory=False)
# Note: passed low_memory parameter per error message
# ### Combining the Two Subreddits into One Dataframe and Limiting the Data to Three Columns
# The scope of this project is to analyze the text in the subreddits. The only useful columns include the following:
# - title
# - selftext
# - subreddit
# +
# Stack both subreddits into one dataframe with a fresh 0..n-1 index.
dataframe_columns = ['title', 'selftext', 'subreddit']
body = body[dataframe_columns]
power = power[dataframe_columns]
df = pd.concat([body, power]).reset_index().drop(columns='index')
df.head()
# -
# ### Cleaning Null Values
# According to the code below, the selftext column contains 2,796 null values while the title column has 0 null values. In order to retain the rows with text in the 'title' but not in the 'selftext', I replace the null values with an empty string. In a subsequent step I will combine the two columns so all the text is in one column.
print(df.isnull().sum())
# Fill NA with an empty string
df.selftext.fillna('', inplace=True)
# Confirmed that the null values were replaced with an empty string
print(df.isnull().sum())
# After evaluating the value counts, I noticed that "selftext" has two values that are equivalent to null values. These values were also replaced with an empty string. The values and number of instances were the following:
# - deleted: 100
# - removed: 6,049
# +
removing_values = ['[removed]',
                   '[deleted]',
                   ]
df.replace(to_replace = removing_values,
               value ="",
               inplace=True)
# -
# The evaluation into the value counts also revealed a handful of repeated posts. Subreddit writers will often publish the same thread on a periodic basis (weekly, daily, etc.). My concern is that the repeated posts will influence the model in a way that reduces its predictability; however, I decided to leave the posts in the data to more accurately reflect the Reddit environment. See below for the top three most frequent examples.
# NOTE(review): the [1:4] slice skips the single most frequent value
# (presumably the empty string) -- confirm that is intended.
print(df.selftext.value_counts()[1:4])
# ### Creating an All Text Column
df['all_text'] = df.title + ' ' + df.selftext
# ### Changing Target Variable to 0/1
# bodyweight fitness = 0
# powerlifting = 1
df.subreddit = df.subreddit.map({'bodyweightfitness':0,
                                 'powerlifting':1})
# ### Natural Language Processing
# To prepare the data for the model the equations below will accomplish the following:
# - Change all words to lowercase
# - Remove numbers
# - Remove the reddit names
# - Remove website url
# - Remove English stop words
# +
nltk.download('stopwords')
corpus = []
# NOTE(review): `words` is built but never used afterwards.
words = set(nltk.corpus.words.words())
for i in range(0, 20_000):
    # Strip everything except letters, then lowercase.
    text = re.sub('[^a-zA-Z]', ' ', df['all_text'][i])
    text = text.lower()
    # NOTE(review): this runs AFTER non-letters were replaced by spaces, so
    # it only removes the leading 'http(s)' token of a URL, not the rest.
    pattern = r"http\S+"
    text = re.sub(pattern, "", text)
    # Remove the subreddit names themselves so they cannot leak the label.
    text = re.sub('(bodyweight)|(powerlifting)', "", text)
    text = text.split()
    ps = PorterStemmer()
    # Stem every token and drop English stop words.  NOTE(review): the
    # stopword set is rebuilt for every token of every post -- hoisting it
    # out of the loop would speed this up considerably.
    text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))]
    text = ' '.join(text)
    if i % 2000 == 0:
        print(f'Post {i} of 20000')
    corpus.append(text)
# Code Reference:
# remove words -- https://www.kite.com/python/answers/how-to-use-re.sub()-in-python
# remove punctuation --> https://stackoverflow.com/questions/18429143/strip-punctuation-with-regex-python/50985687
# stop words and list comprehension --> https://towardsdatascience.com/build-and-compare-3-models-nlp-sentiment-prediction-67320979de61
# remove youtube url websites - https://www.youtube.com/watch?v=O2onA4r5UaY
# porter stemmer guidance --> https://pythonprogramming.net/stemming-nltk-tutorial/
# list comprehension --> https://www.learnpython.dev/03-intermediate-python/20-advanced-looping/30-other-comprehensions/
# join python list --> https://www.w3schools.com/python/ref_string_join.asp
# -
# Save the scrubbed list the 'all_text' column
text = pd.DataFrame(corpus)
df['all_text'] = text
# ### Vectorize the Data
# I vectorized the data in two way: Tf-idf Vectorizer and CountVectorizer. Both methods have a min_df=3 and use stop words.
# +
# Tf-idf document-term matrix (terms must appear in at least 3 posts).
tvec = TfidfVectorizer(min_df=3,stop_words='english')
tvec_title = tvec.fit(df['all_text'])
data_df_tvec = pd.DataFrame(tvec.transform(df['all_text']).todense(),
                            columns=tvec.get_feature_names())
data_df_tvec['subreddit'] = df['subreddit']
data_df_tvec.head()
# +
# Raw-count document-term matrix with the same settings, for comparison.
cvec = CountVectorizer(min_df=3, stop_words='english')
cvec_title = cvec.fit_transform(df['all_text'])
data_df = pd.DataFrame(cvec_title.toarray(), columns=cvec.get_feature_names())
data_df['subreddit'] = df['subreddit']
data_df.head()
# -
# ### Save DataFrames as CSV Files
# data_df_tvec.to_csv('../datasets/model_ready_data_tvec.csv', index=False)
data_df.to_csv('../datasets/model_ready_data.csv', index=False)
# df.to_csv('../datasets/cleaned_data.csv', index=False)
| code/Data_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sudarshansb143/Data-Pipelines/blob/master/titanic_using_logistic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="g8PFtjcSmLRw" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# + id="dQVE-WI-nIMl" colab_type="code" colab={}
df =pd.read_csv("titanic_data.csv")
# + id="AETg_6dmnSh8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="8b072168-660a-4a0d-8ca6-a4fa37f21d47"
df.head()
# + id="7HjDZ4PZnTck" colab_type="code" colab={}
df.head()
# Target variable: 1 = survived, 0 = did not survive.
y = df.Survived
# + id="X8Rj6fxHni6e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69b920f3-a21e-491e-e77f-d19c84d5e763"
df.shape
# + id="w9R1E0s_nnZU" colab_type="code" colab={}
# Drop free-text / high-cardinality columns not used as features.
df.drop(columns= ["Name", "Ticket", "Cabin"], inplace = True )
# + id="25kYjciunvob" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="8101697a-b19e-4056-b890-3f25a752d57a"
df.head()
# + id="b3TGSkYhnwhb" colab_type="code" colab={}
df.drop("PassengerId", axis = 1, inplace = True)
# + id="CZ_OeAJBn6F2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="748b4253-92e3-4863-b3ba-77d3e5d86e56"
df.Parch.value_counts()
# + id="sdllzSnFn75d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="f084e50d-7632-4d49-d7f2-9b62798f3800"
df.SibSp.value_counts()
# + id="bN9bZpMPoHjd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="7f919e01-2748-4044-d1b9-ae5ebaa9c591"
df.isna().sum()
# + id="rIl8EyHioKs1" colab_type="code" colab={}
# Encode Sex numerically (male=1, female=0).
df.Sex.replace({"male": 1, "female" : 0}, inplace = True)
# + id="RlPKmD21oco5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="b8703964-e9c8-43d4-db71-622217853c14"
df.head()
# + id="z8JeH2i-od_R" colab_type="code" colab={}
# Impute missing ages with the median (robust to the fare/age outliers).
df.Age = df.Age.fillna(df.Age.median())
# + id="CF8TQ8FqolKU" colab_type="code" colab={}
# Impute missing embarkation ports with "S" -- presumably the mode; verify.
df.Embarked.fillna("S", inplace = True)
# + id="QIixudxvqNw2" colab_type="code" colab={}
df.isna().sum()
# One-hot encode remaining categoricals, dropping the first level of each.
df = pd.get_dummies(df, drop_first = True)
# + id="2OPNhe3IqT0d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 658} outputId="b809d3e1-29cd-4ce9-ff70-7036e017b348"
import seaborn as sns
sns.set()
plt.figure(figsize=(10,10))
sns.heatmap(df.corr(), annot = True, linewidths = 2)
plt.show()
# + id="Gf7-czh6qgR9" colab_type="code" colab={}
# Feature selection based on the correlation heatmap above.
df = df[["Pclass", "Sex", "Fare", "Embarked_Q"]]
# + id="WYv5OanBrE4u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="0856da4c-5dc1-4f11-e32b-0a9b5042dc2e"
df.head()
# + id="jv349OdQrIgd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="07df977a-74b4-4d25-dd27-2cdf4dec2f47"
df.head()
# + id="pBIOyeHRrhrV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="c0c8bb7c-f95e-491e-cb9a-007805407e6a"
y
# + id="4SsI93kArjEk" colab_type="code" colab={}
# Stratified 80/20 split so both sets keep the survival class balance.
X_train, X_test, y_train, y_test = train_test_split (df, y, test_size = 0.2, random_state = 42, stratify = y)
# + id="8mbOr04brsQt" colab_type="code" colab={}
# Scaling the data (fit on train only to avoid leakage into the test set).
scaler = StandardScaler()
X_train_scale = scaler.fit_transform(X_train)
X_test_scale = scaler.transform(X_test)
# + id="GdWTBjksr5-l" colab_type="code" colab={}
# Fitting the default Logistic Regression
lr = LogisticRegression()
# + id="2Uaz1yIGsAJE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="e0a6eb69-2b37-44cd-aec2-35fae6e7bf83"
lr.fit(X_train_scale, y_train)
# + id="AUXEUvH3sEUF" colab_type="code" colab={}
y_pred = lr.predict(X_test_scale)
# + id="cwRWPZ8ksKLk" colab_type="code" colab={}
from sklearn.metrics import classification_report, confusion_matrix
# + id="Sndj8wqFsO50" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="59402375-4581-4617-ccaa-a8c079d3a2d9"
print (classification_report(y_test, y_pred))
# + id="eaG3dHMmsRFE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="bcdbc1ec-953f-4040-8d7b-5c7aa1e35a31"
print(confusion_matrix(y_test, y_pred))
# + id="Fh1Io7L1sZYc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="850ec62f-b26c-4481-c879-2d20580151b8"
# cross validation
from sklearn.model_selection import cross_val_score
score = cross_val_score(lr, X_train_scale, y_train, cv=5)
print(score)
# + id="YKLKtwsas5W8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 243} outputId="09ba1a9a-707b-4e86-98b2-f97fa50c9c84"
# Tuning model: grid-search the inverse regularization strength C.
param = {
    'C': [0.001, 0.01, 0.1, 1, 10, 100]
}
from sklearn.model_selection import GridSearchCV
gsc = GridSearchCV(lr, param, cv=5)
gsc.fit(X_train_scale, y_train)
# + id="kvWabXEttXkX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3198acd2-5da5-4b3b-8275-84bc9215f23a"
gsc.best_params_
# + id="BL_9VYbata1c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6c0512ed-41fe-4f8e-9a6f-b8a8ab163899"
round(gsc.best_score_ * 100, 2)
# + id="SG-6PIZttdV0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6dced464-a770-4f80-d6bf-aa364cf7c841"
print("Training Score", lr.score(X_train_scale, y_train))
# + id="tprG7_agt8Z-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2f78668f-e568-4582-e81e-89e53a344c50"
print("Testing Score", lr.score(X_test_scale, y_test))
# + id="cDAtQWvNuEzt" colab_type="code" colab={}
# Reviewing data
# + id="G45le-45vthl" colab_type="code" colab={}
# NOTE(review): num_col is immediately reassigned in the next cell.
num_col = df.drop("Embarked_Q", axis=1)
# + id="FQDmbE-fvz8d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 553} outputId="da51356b-22f5-4582-ee98-81b0886c0876"
num_col = df[["Fare", "Pclass"]]
for i in num_col.columns.to_list():
    sns.set()
    sns.distplot(num_col[i])
    plt.show()
# + id="MFhMEmOav4xM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="bb575982-540d-405c-bcf8-5af295902225"
sns.distplot(df.Fare)
# + id="mjFmp6dPxzob" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="98b6b847-4170-441c-ab65-8b1c05b093cb"
# log1p compresses the long right tail of the fare distribution.
temp = np.log1p(df.Fare)
sns.distplot(temp)
# + id="YYw77P6hx8NH" colab_type="code" colab={}
| titanic_using_logistic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Explore ABCD medication dataframe
# +
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
from definitions import RAW_DATA_DIR
# -
# Load the ABCD medication table.  The nrows=1 read presumably grabs the
# human-readable variable-description row NDA files carry as their second
# line; skiprows=(1, 1) drops that same row from the data load -- verify
# against the raw file.
medication_df_info = pd.read_csv(
    RAW_DATA_DIR / 'medsy01.txt', sep='\t', nrows=1
)
medication_df = pd.read_csv(
    RAW_DATA_DIR / 'medsy01.txt', sep='\t', skiprows=(1, 1)
)
# Keep only the baseline visit.
medication_df = medication_df[
    medication_df['eventname'] == 'baseline_year_1_arm_1'
]
print(f'Shape: {medication_df.shape}')
print('Columns:')
for col in medication_df.columns:
    print(f'\t{col}')
medication_df_info
# `med{i}_rxnorm_p` refers to the medication name. From the NIMH Data Dictionary:
#
# ```
# Medication Name (please choose the formulation that most closely matches the medication taken by your child; no need to include dosage)
# ```
#
# ```
# This variable may include all variants available in the Bioportal RxNorm database (https://bioportal.bioontology.org). Please refer to this database for additional information on trade/generic names and formulations.
# ```
#
# Example values for the first medication:
medication_df['med1_rxnorm_p'].unique()[:20]
# Over the counter medications are encoded in the same way by `med_otc_1_rxnorm_p`. Here, we want to exclude all subjects who take psychiatric medicines.
#
# First, for each subject, create a list of all medicines taken.
# Select the subject id plus all 15 prescription and 15 over-the-counter
# medication-name columns.
df_temp = medication_df[
    ['src_subject_id'] + \
    [f'med{i+1}_rxnorm_p' for i in range(15)] + \
    [f'med_otc_{i+1}_rxnorm_p' for i in range(15)]
]
df_temp.index = df_temp['src_subject_id']
df_temp = df_temp.drop(columns=['src_subject_id'])
df_temp.head()
med_dict = df_temp.to_dict('index')
for subject_id in med_dict.keys():
    med_list = []
    for _, med_name in med_dict[subject_id].items():
        # Empty cells come back as float NaN, so the str check keeps only
        # real medication names.  (isinstance would be the idiomatic test.)
        if type(med_name) == str:
            med_list.append(med_name)
    med_dict[subject_id] = med_list
med_dict
# Now we will create a list of all psychiatric medicines included in the ABCD study to later exclude all subjects taking any of these medicines.
# First, create list of all medicines in the ABCD study.
# NOTE(review): sum(..., []) flattens the list-of-lists but is quadratic;
# itertools.chain.from_iterable would scale better.
unique_meds = np.unique(sum(list(med_dict.values()), []))
for m in unique_meds:
    print(m)
| notebooks/exploratory/0.1-rg-explore_medsy01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Map Training Analysis
# *written by <NAME>*
# #### Import Dependencies
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy
import scipy.io as spio
from scipy.spatial import distance
import ezodf
from matplotlib.patches import Arrow, Circle
from PIL import Image
import itertools
import ptitprince as pt
from __future__ import print_function
from statsmodels.compat import lzip
from statsmodels.stats.anova import AnovaRM
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
from scipy import stats
# #### Define Paths
# These two fields need to be set for each user individually. Set mapPath to your path leading to your map training data and taskPath to lead to your alignment task results.
# Per-user data locations -- uncomment/edit the pair matching your machine.
#mapPath = "/Users/ingen/Dropbox/VR alignment/bachelor_master_Arbeiten/Laura/scripts/viewed_data/"
#taskPath = "/Users/ingen/Dropbox/VR alignment/bachelor_master_Arbeiten/Laura/scripts/over_all_subjects/trials_mat/"
#mapPath = "/Users/ingen/Dropbox/Project Seahaven/Tracking/MapResults/"
#taskPath = "/Users/ingen/Dropbox/Project Seahaven/Tracking/TaskPerformance/"
mapPath = "C:/Users/vivia/Dropbox/Project Seahaven/Tracking/MapResults/"
taskPath = "C:/Users/vivia/Dropbox/Project Seahaven/Tracking/TaskPerformance/"
# #### File Dependencies:
#
# For this script you need the following files:
# * #.ods from Map training (location specified by mapPath)
# * AlignmentVR_SubjNo_#.mat from Task (location specified by taskPath)
#
# In the same folder as this script should be:
#
# * complete_list_houses.txt (for house angles)
# * HouseList.txt (for house coordinates)
# * Seahaven alingment project.csv -> download from google spreadsheet with list of all recordings & discard marks
# ## Map Training
#
# #### Function to Check Usability of a Subject (Discarded? Right Condition?)
def checkUsability(SNum,Rep=False):
    """Check whether subject SNum may be included in the analysis.

    Looks the subject up in the project overview sheet. With Rep=False a
    plain bool is returned (repeated measurements are excluded); with
    Rep=True a (bool, code) pair is returned, where code is the label from
    the 'Repeated' column identifying measurement number and subject.
    """
    overview = pd.read_csv("./Seahaven alingment project.csv")
    record = overview.loc[overview['Subject#']==SNum]
    # Unknown subject or one explicitly marked as discarded -> not usable.
    if record.empty or (record['Discarded']=='yes').bool():
        return (False," ") if Rep == True else False
    if Rep==False:
        # Single-measurement analysis: drop anything beyond measurement 1.
        if (record['Measurement#']>1).bool():
            return False
    if Rep==True:
        # Repeated-measure analysis: usable only if a repetition code exists.
        if (record['Repeated'].isnull()).bool():
            return False," "
        return True,(record['Repeated']).values[0]
    return True
# #### Load All Map Training Data Into a DataFrame - For .osc Files
# Only run once! If you already have ClickStatsAll.csv saved just load this file.
# +
# Build AllDf: one row per house, one column per usable subject, values =
# number of map clicks that subject made on that house.
allFiles = os.listdir(mapPath)
g = open("./complete_list_houses.txt","r")
allHouses = []
for i in g:
    allHouses.append(str(int(i.split('_',1)[0])))
AllDf = pd.DataFrame(allHouses,columns=['House'])
for e in allFiles:
    # Parser state: start = row index of the 'Mouse Click Stats:' marker;
    # lastI/lastV remember the previous cell so that (house, count) pairs in
    # one spreadsheet row can be emitted once the count cell is reached.
    start = 0
    lastI = 0
    lastV = 0
    if e.endswith(".ods") and checkUsability(int(e.split('.',1)[0])):
        doc = ezodf.opendoc(mapPath+e)
        sheet = doc.sheets[0]
        for i, row in enumerate(sheet.rows()):
            for cell in row:
                if cell.value=='Mouse Click Stats:':#only get mouse click stats, not hovering
                    start = i
                    Subjectdf = pd.DataFrame(columns=['House',str(int(e.split('.',1)[0]))])
                # Only rows strictly below the marker (skipping its header row)
                # contain click data.
                if start>0 and start<i-1 and cell.value!=None:
                    if lastI==i:
                        #print(str(int(lastV.split('_',1)[0])))#.split('\t',1)[1].split('\n',1)[0])
                        Subjectdf = Subjectdf.append({'House': str(int(lastV.split('_',1)[0])),str(e.split('.',1)[0]):int(cell.value)}, ignore_index=True)
                    lastI = i
                    lastV = cell.value
        # NOTE(review): Subjectdf only exists once the marker row was seen; an
        # .ods without 'Mouse Click Stats:' would raise NameError here — TODO
        # confirm all exported files contain the marker.
        AllDf = AllDf.merge(Subjectdf,on='House',sort=True,how='outer')
AllDf = AllDf.fillna(int(0))
AllDf = AllDf.set_index('House')
AllDf = AllDf[~AllDf.index.duplicated(keep='first')]
# -
# #### Save Table as CSV File
AllDf.to_csv('Results/ClickStatsAll.csv')
# ### Take a Look at Map Training Data
# #### Load Table
AllDf = pd.read_csv('Results/ClickStatsAll.csv').set_index('House')
len(AllDf.columns)# following analysis is of 64 subjects
# #### Excerpt from Data Table
AllDf.head()
# #### Overall Statistics
AllDf.describe()
# #### Data Distributions
# Distribution over subjects of the mean click count per house.
plt.figure(figsize=(15,5))
sns.distplot(AllDf.mean(axis=1),norm_hist=False,kde=False,color='royalblue')# if you don't want pdf, set norm_hist=False,kde=False
plt.plot([np.mean(AllDf.mean(axis=1)), np.mean(AllDf.mean(axis=1))], [0, 41], linewidth=2)
plt.legend(['mean: '+str(np.mean(AllDf.mean(axis=1)))[:4],'distribution'],fontsize=15)
plt.title("Distribution of Mean Number of Clicks on one House over Subjects",fontsize=20)
plt.xlabel('Mean Number of Clicks on one House',fontsize=15)
plt.ylabel('Subject Count',fontsize=15)
plt.show()
#plt.savefig('Results/MeanClickDistNoTitle.png')
# Distribution over houses of the total click count summed across subjects.
plt.figure(figsize=(15,5))
sns.distplot(AllDf.sum(axis=1),color='royalblue')
plt.plot([np.mean(AllDf.sum(axis=1)), np.mean(AllDf.sum(axis=1))], [0, 0.0037], linewidth=2)
plt.legend(['mean: '+str(np.mean(AllDf.sum(axis=1)))[:4],'distribution'],fontsize=15)
plt.title("Distribution of Overall Number of Clicks on one House over Subjects",fontsize=20)
plt.xlabel('Mean Number of Clicks on one House',fontsize=15)
plt.ylabel('Probability Density',fontsize=15)
plt.show()
#plt.savefig('Results/HouseClickDistNoTitle.png')
# This means an average amount of 271/64 = 4.23 on each house
# Distribution of how many distinct houses each subject clicked at least once.
plt.figure(figsize=(15,5))
sns.distplot((AllDf > 0).astype(int).sum(axis=0),norm_hist=False,kde=False,color='royalblue')
plt.plot([np.mean((AllDf > 0).astype(int).sum(axis=0)), np.mean((AllDf > 0).astype(int).sum(axis=0))], [0, 17], linewidth=2)
plt.yticks(np.arange(0, 21, step=5))
plt.legend(['mean: '+str(np.mean((AllDf > 0).astype(int).sum(axis=0)))[:4],'distribution'],fontsize=15)
plt.title("Distribution of Number of Houses That Were Looked at",fontsize=20)
plt.xlabel('Number of Houses That Were Looked at by a Subject',fontsize=15)
plt.ylabel('Subject Count',fontsize=15)
plt.show()
#plt.savefig('Results/ClickedDistAbsCountNoTitle.png')
# #### Plot Whole Click Distribution -> Any (Ir)regularities?
plt.figure(figsize=(15,35))
sns.heatmap(AllDf)
plt.title('Number of Clicks on Each House by Each Subject',fontsize=20)
plt.ylabel('House Number',fontsize=15)
plt.xlabel('Subject Number',fontsize=15)
#plt.show()
plt.savefig('Results/ClickHeatmap.png')
# ## Task Performance
#
# #### Load Data of Task Performance (.mat Files) into DataFrame
def mat_to_py(AlignmentPath,number):
    '''
    converts mat struct with task results into (numpy) array
    also adds extra column with information whether trial was correct or wrong
    conditions = ["Absolute - 3s ","Absolute - inf","Relative - 3s ","Relative - inf","Pointing 3s ","Pointing - inf"]
    '''
    mat_contents = spio.loadmat(AlignmentPath+"/AlignmentVR_SubjNo_"+number+".mat")
    type_array = []
    for cond_1 in ["Absolute", "Relative","Pointing"]:
        for cond_2 in ["Trial_3s", "Trial_Inf"]:
            # Unwrap the nested MATLAB struct down to the per-condition trial table.
            trial_block = mat_contents['Output'][0][0][cond_1][cond_2][0][0]
            trials_array = []
            for trial_row in trial_block:
                # Each cell is doubly wrapped by loadmat; [0][0] yields the scalar.
                value_array = [cell[0][0] for cell in trial_row[0]]
                # Append correctness flag: given answer (last) vs correct answer (third-last).
                value_array.append(value_array[-1] == value_array[-3])
                trials_array.append(value_array)
            type_array.append(trials_array)
    return np.array(type_array)
conditions = ["Absolute - 3s ","Absolute - inf","Relative - 3s ","Relative - inf","Pointing 3s ","Pointing - inf"]
vp_nums = list(AllDf)
AllResults = np.zeros((6,len(vp_nums),36))#AllResults[condition][subjectNum][Trial]
AllHouses = np.zeros((6,len(vp_nums),36))
LeastClickHouse = np.zeros((6,len(vp_nums),36))
# Load each subject's task results and record, per condition and trial, the
# correctness, the (first) stimulus house, and the less-clicked house.
for i,e in enumerate(vp_nums):
    try:
        m = mat_to_py(taskPath,e)
        for c in range(6):
            condperf = []
            house = []
            lchouse = []
            for t in range(36):
                condperf.append(int(m[c][t][-1]))
                #print(m[c][t][0])
                house.append(str(m[c][t][0]))
                if c<2:#absolute condition -> only one house, take this one
                    lchouse.append(str(m[c][t][0]))
                else:#relative or pointing condition -> look if prime or target had more clicks, pick house with least clicks
                    if AllDf.loc[int(m[c][t][0])][e]<AllDf.loc[int(m[c][t][1])][e]:
                        lchouse.append(str(m[c][t][0]))
                    else:
                        lchouse.append(str(m[c][t][1]))
            AllResults[c][i] = condperf
            AllHouses[c][i] = house
            LeastClickHouse[c][i] = lchouse
    # NOTE(review): the bare except treats ANY failure (missing key, bad house
    # ID, ...) as a missing file — consider catching FileNotFoundError only.
    except:
        print(str(e)+" Not in folder")
# ### Create Performance Matrix and Save as .cvs File
# Mean performance per condition and subject (fraction of correct trials).
performances = np.zeros((6,len(AllDf.columns)))#pd.DataFrame()
vpN = pd.DataFrame(vp_nums,columns=['vp_number'])
for cond in range(6):
    performances[cond] = np.mean(AllResults[cond],axis=1)
p = pd.DataFrame(np.transpose(performances))
p.columns = conditions
p = vpN.join(p).set_index('vp_number')
#p.to_csv('Results/MapPerformances.csv')#comment in to save file
p.describe()
# ### Put Data into DataFrame
# Long-format table: one row per (subject, task, time-condition) with its performance.
TaskList = ['Absolute','Absolute','Relative','Relative','Pointing','Pointing']
CondList = ['3s','inf','3s','inf','3s','inf']
AllPerformances = pd.DataFrame(columns=['Task','Condition','Performance','Subject'])
for sj in list(p.index):
    for i,c in enumerate(conditions):
        AllPerformances = AllPerformances.append({'Task':TaskList[i],'Condition':CondList[i],'Performance':p.loc[sj][c],'Subject':sj}, ignore_index=True)
# ### Visualize Overall Statistics
#group tasks
#color by time condition
# Box plot of task performance per task/time condition; dotted line = chance (50%).
fig,ax = plt.subplots(figsize=(10,7))
plt.plot([-5,10],[0.5,0.5],':',color='black', linewidth=5)
sns.boxplot(data=AllPerformances,hue='Condition',x='Task',y='Performance', palette=["red", "royalblue"],linewidth=2.5)
ax.set_xticklabels(['Absolute','Relative','Pointing'],fontsize=15)
ax.set_ylim((0,1))
plt.legend(fontsize=20,loc=4)
#plt.title('Performance of Subjects in the Tasks',fontsize=25)
plt.ylabel('Performance (%)',fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5,dtype=int),fontsize=15)
plt.xlabel("Task",fontsize=20)
plt.show()
#plt.savefig('Results/TaskPerformancesGrouped.png', bbox_inches='tight')
# +
#Plotting adapted from https://peerj.com/preprints/27137v1/
# Raincloud plot (distribution + scatter + box) of the same data.
ax = pt.RainCloud(data=AllPerformances,hue='Condition',x='Task',y='Performance', palette=["red", "royalblue"],bw = 0.2,
                 width_viol = .5, figsize = (10,7),pointplot = False, alpha = .85, dodge = True, move = 0.2)
ax.set_xticklabels(['Absolute','Relative','Pointing'],fontsize=15)
#ax.legend(['3s','inf'],fontsize=20,loc=1)
plt.title('Performance of Subjects in the Tasks',fontsize=25)
plt.ylabel('Performance (%)',fontsize=20)
plt.xlabel("Task",fontsize=20)
plt.yticks(np.linspace(0.25,0.75,3),np.linspace(25,75,3),fontsize=15)
plt.show()
#plt.savefig('Results/TaskPerformancesRainCloud.png', bbox_inches='tight')
# +
# Same raincloud restricted to the infinite-time condition.
ax = pt.RainCloud(data=AllPerformances[AllPerformances['Condition']=='inf'],x='Task',y='Performance', palette=["royalblue"],bw = 0.2,
                 width_viol = .5, figsize = (10,7),pointplot = False, alpha = .85, dodge = True, move = 0.2)
plt.plot([-5,10],[0.5,0.5],':',color='black', linewidth=3)
ax.set_xticklabels(['Absolute','Relative','Pointing'],fontsize=15)
#ax.set_ylim((0,1))
#ax.legend(['3s','inf'],fontsize=20)
#plt.title('Performance of Subjects in the Tasks - Infinite',fontsize=25)
plt.ylabel('Performance (%)',fontsize=20)
plt.yticks(np.linspace(0.25,0.75,3),np.linspace(25,75,3,dtype=int),fontsize=15)
plt.xlabel("Task",fontsize=20)
plt.show()
#plt.savefig('Results/TaskPerformancesRainCloud_Infinite_NoTitle.png', bbox_inches='tight')
# -
# ## Repeated Measure ANOVA for Tasks and Conditions
# Two-way repeated-measures ANOVA: Task x Condition within subject.
anovarm = AnovaRM(AllPerformances,'Performance','Subject',within=['Task','Condition'])
fit = anovarm.fit()
fit.summary()
# ## Factorial ANOVA (One Way Repeated Measure) on Infinite Conditions
infPerformances = AllPerformances[AllPerformances['Condition']=='inf']
anovarm = AnovaRM(infPerformances,'Performance','Subject',within=['Task'])
fit = anovarm.fit()
fit.summary()
# ## Post-Hoc Paired T-Test on Infinite Conditions
# Pairwise comparisons between the three tasks (infinite condition only).
print("Absolute - Relative: "+str(stats.ttest_rel(infPerformances[infPerformances['Task']=='Absolute']['Performance'],infPerformances[infPerformances['Task']=='Relative']['Performance'])))
print("Absolute - Pointing: "+str(stats.ttest_rel(infPerformances[infPerformances['Task']=='Absolute']['Performance'],infPerformances[infPerformances['Task']=='Pointing']['Performance'])))
print("Relative - Pointing: "+str(stats.ttest_rel(infPerformances[infPerformances['Task']=='Relative']['Performance'],infPerformances[infPerformances['Task']=='Pointing']['Performance'])))
# #### Performance Different from chance?
# BUG FIX: these cells referenced `newDF`, which is never defined anywhere in
# this notebook (NameError). The per-trial performance table built above is
# `AllPerformances`, whose columns ('Performance', 'Condition', 'Task') match
# the usage here exactly, so that is what is used now.
stats.ttest_1samp(AllPerformances['Performance'], 0.5)
# #### Difference in Performance Between Inf and 3 Sec Condition?
stats.ttest_ind(AllPerformances['Performance'][AllPerformances['Condition']=='inf'], AllPerformances['Performance'][AllPerformances['Condition']=='3s'])
# #### Fit Linear Regression Model
perf_model = ols("Performance ~ Condition + Task", data=AllPerformances).fit()
print(perf_model.summary())
# # Performance in Relation to Clicks
# ### Create DataFrame
# One row per trial of the infinite conditions (1=Abs-inf, 3=Rel-inf, 5=Poi-inf):
# click count of the least-clicked stimulus house vs. trial correctness.
AllClickPerf2 = pd.DataFrame(columns = {'numClicks','Performance','Subject'})
conds = [1,3,5]
for c in conds:#range(6):
    for i,s in enumerate(vp_nums):
        for t in range(36):
            house = LeastClickHouse[c][i][t]#AllHouses[c][i][t]
            #print(int(house))
            numviews = AllDf.loc[int(house)][s]
            AllClickPerf2 = AllClickPerf2.append({'numClicks':numviews,'Performance':AllResults [c][i][t],'Subject':float(s)}, ignore_index=True)
# ### One Point for Each Subject-NumClick Combination Averaged Over Tasks and Trials
# Same procedure as explained in Lauras Bachelors Thesis.
grouped2 = AllClickPerf2.groupby(['Subject','numClicks'], as_index=False)['Performance'].mean()
grouped2.to_csv('Results/SubjectClickPerfSorted.csv')
grouped2 = pd.read_csv('Results/SubjectClickPerfSorted.csv')
# Regression of average performance on number of clicks.
plot = sns.lmplot(x='numClicks',y='Performance',data = grouped2,height=7,aspect=2,scatter_kws={"s": 30},x_jitter=.03,order=1,x_estimator=np.mean,fit_reg=True)
plt.title('Performance in Relation to Number of Clicks - Infinite - Averaged over Tasks and Trials for Each Number of Clicks',fontsize=20)
plt.xlabel('Number of Clicks',fontsize=15)
plt.ylabel('Average Performance in %',fontsize=15)
plt.subplots_adjust(top=0.9)
plt.show()
#plt.savefig('Results/ClickPerfInf_NumCAvg.png', bbox_inches='tight')
#plt.savefig('Results/ClickPerfInf_NumCAvg.png', bbox_inches='tight')
# ### Plot Using Log(Clicks)
grouped2['numClicks'] = np.log(grouped2['numClicks']+1)
plt.figure(figsize=(15,10))
sns.lmplot(x='numClicks',y='Performance',data = grouped2[grouped2['numClicks']<100], height=7,aspect=2,palette=["royalblue"],x_jitter=.09,lowess=False)
#plt.title('Performance in Relation to Number of Clicks - Infinite - Averaged over Tasks and Trials for Each Number of Clicks',fontsize=20)
plt.xlabel('Log(Number of Clicks)',fontsize=25)
plt.ylabel('Performance (%)',fontsize=25)
#plt.xticks(np.linspace(0,60,7),fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5),fontsize=20)
plt.subplots_adjust(top=0.9)
plt.show()
#plt.savefig('Results/ClickPerf_TTAvg_NoTitle_All.png', bbox_inches='tight')
# ### Linear Regression Model Based on Performance ~ Number of Clicks
clickperf_model = ols("Performance ~ numClicks", data=grouped2).fit()
print(clickperf_model.summary())
# #### Pearson Correlation:
# (correlation coefficient, p-value)
scipy.stats.pearsonr(grouped2['Performance'], grouped2['numClicks'])
# ### Weighted Linear Regression
# Weighted by number of trials in one data point
groupedWeighted = AllClickPerf2.groupby(['Subject','numClicks'], as_index=False).agg(['mean', 'count'])
groupedWeighted.reset_index(inplace=True)
weighted2 = pd.DataFrame(groupedWeighted.to_records())
weighted2.columns = ['Ix','Subject','numClicks','Performance','Count']
weighted2.head()
from statsmodels.formula.api import wls
WLS = wls("Performance ~ numClicks", data=weighted2,weights=np.array(1./weighted2['Count'])).fit()
WLS.summary()
# ### Plot it:
grouped2 = pd.read_csv('Results/SubjectClickPerfSorted.csv')
grouped2['numClicks'] = np.log(grouped2['numClicks']+1)
sns.lmplot(x='numClicks',y='Performance',data = grouped2[grouped2['numClicks']<100], height=7,aspect=1.4,palette=["royalblue"],x_jitter=.03,lowess=False)
#plt.title('Performance in Relation to Number of Clicks - Infinite - Averaged over Tasks and Trials for Each Number of Clicks',fontsize=20)
plt.plot([0, 4], [0.4958, 0.4958+0.018], linewidth=3,color='orange',linestyle='-')
plt.xlabel('Log(Number of Clicks)',fontsize=25)
plt.ylabel('Performance (%)',fontsize=25)
plt.xticks(np.linspace(0,4,5),fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5),fontsize=20)
plt.legend(['Linear Regression','Weighted Linear Regression'],fontsize=15)
plt.xlim([0,4])
plt.subplots_adjust(top=0.9)
plt.show()
#plt.savefig('Results/ClickPerf_TTAvg_NoTitle_All.png', bbox_inches='tight')
# ## Spatial Coverage of Seahaven
# Load the overview map image and the house coordinate list. The original cell
# contained the identical map/coordinate loading twice (copy-paste duplicate);
# it is collapsed here without changing any result, and the coordinate file is
# now closed via a context manager instead of being leaked.
SeahavenMap = Image.open('map5.png')
coords = pd.DataFrame(columns={'House','x','y'})
with open("HouseList.txt","r") as coordinates:
    for co in coordinates:
        # Each line has the shape "<house>:<x>;<y>".
        x = float(co.split(':',1)[1].split(';',1)[0])
        y = float(co.split(';',1)[1])
        house = str(co.split(':',1)[0])
        coords = coords.append({'House':house,'x':x,'y':y},ignore_index=True)
coords = coords.set_index('House').sort_index()
# Total clicks per house, summed over subjects.
overallClicks = np.sum(AllDf,axis=1)
# Number of subjects who clicked each house at least once.
SJNumClicks = np.sum(AllDf>0,axis=1)
# #### Color Houses by Amount of Clicks (Green-Few, Red-Many, Black-Not Included)
# (Current implementation uses a single 'Greens' colormap; black marks houses
# missing from the click table.)
# +
fig = plt.figure(figsize=(15,15))
SeahavenMap = SeahavenMap.resize((450,500))
ax = plt.subplot2grid((10, 10), (0, 0), colspan=9,rowspan=10)
plt.imshow(SeahavenMap,aspect = 'equal')
cmap = plt.cm.get_cmap('Greens')
a=np.outer(np.arange(0,1,0.01),np.ones(3))
# Draw one dot per house, shaded by its min-max-normalized total click count.
for i in list(coords.index.values):
    try:
        clicks = overallClicks.loc[int(i)]
        rgba = cmap((clicks-min(overallClicks))/(max(overallClicks)-min(overallClicks)))
        ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=(rgba)))
        #ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=((clicks-min(overallClicks))/(max(overallClicks)-min(overallClicks)),1-(clicks-min(overallClicks))/(max(overallClicks)-min(overallClicks)),0)))
    # House not present in the click table -> paint it black.
    except:
        ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=(0,0,0)))
#plt.title('Overall Number of Clicks During Map Training',fontsize=20)
# Manual colorbar in the rightmost grid column.
ax2 = plt.subplot2grid((10, 10), (0, 9),rowspan=10)
plt.imshow(a,aspect='auto',cmap='Greens',origin="lower")
ax2.get_xaxis().set_ticks([])
ax2.get_yaxis().set_ticks(np.linspace(0,99,10))
ax2.get_yaxis().set_ticklabels(np.around(np.linspace(min(overallClicks)/len(AllDf.columns),max(overallClicks)/len(AllDf.columns),10),2))
ax2.yaxis.tick_right()
ax2.set_ylabel("Average Number of Clicks on House",rotation=270, fontsize=15, labelpad=20)
ax2.yaxis.set_label_position("right")
plt.show()
#plt.savefig('Results/MapClicks.png', bbox_inches='tight')
# +
import math
# Same map plot, but shaded by how many subjects saw each house at least once.
fig = plt.figure(figsize=(15,15))
SeahavenMap = SeahavenMap.resize((450,500))
ax = plt.subplot2grid((10, 10), (0, 0), colspan=9,rowspan=10)
plt.imshow(SeahavenMap,aspect = 'equal')
cmap = plt.cm.get_cmap('Greens')
a=np.outer(np.arange(0,1,0.01),np.ones(3))
for i in list(coords.index.values):
    try:
        clicks = SJNumClicks.loc[int(i)]
        rgba = cmap((clicks-min(SJNumClicks))/(max(SJNumClicks)-min(SJNumClicks)))
        ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=(rgba)))
        #ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=((clicks-min(overallClicks))/(max(overallClicks)-min(overallClicks)),1-(clicks-min(overallClicks))/(max(overallClicks)-min(overallClicks)),0)))
    except:
        continue
        #ax.add_patch(Circle((coords['y'].loc[i]-535,coords['x'].loc[i]-180), radius=5, color=(0,0,0)))
#plt.title('Overall Number of Subjects Looking at Respective House During Map Training',fontsize=20)
ax2 = plt.subplot2grid((10, 10), (0, 9),rowspan=10)
plt.imshow(a,aspect='auto',cmap='Greens',origin="lower")
ax2.get_xaxis().set_ticks([])
ax2.get_yaxis().set_ticks(np.linspace(0,99,10))
ax2.get_yaxis().set_ticklabels(np.linspace((min(SJNumClicks)/len(AllDf.columns))*100,(max(SJNumClicks)/len(AllDf.columns))*100,10,dtype=int))
ax2.yaxis.tick_right()
ax2.set_ylabel("Percentage of Subjects That Have Seen This House",rotation=270, fontsize=15, labelpad=20)
ax2.yaxis.set_label_position("right")
plt.show()
#plt.savefig('Results/MapSujClicks.png', bbox_inches='tight')
# -
# ## Angular Differences
# Parse per-house facing angles from the house list; lines look like
# "<house>_<angle>..." — TODO confirm the exact file format against the data.
f2 = open("complete_list_houses.txt","r")
degreeDF = pd.DataFrame(columns={'Subject','Condition','AngularDiff','Performance'})
angles = {}
for line in f2:
    house = int(line.split('_',1)[0].split('n',1)[0])
    angle = int(line.split('_',1)[1].split('n',1)[0])
    angles[house] = angle
vp_nums = list(AllDf)
# Per-condition counters: [total trials, correct trials] for each angular bin.
degree_30 = np.zeros((6,2))
degree_60 = np.zeros((6,2))
degree_90 = np.zeros((6,2))
degree_120 = np.zeros((6,2))
degree_150 = np.zeros((6,2))
degree_180 = np.zeros((6,2))
degrees = []
# Bin every trial by the angular difference between the two stimuli; the
# symmetric ranges (e.g. <=30 or >=330) fold reflex angles onto 0-180.
for i,e in enumerate(vp_nums):
    m = mat_to_py(taskPath,e)
    for cond in range(6):
        for trial in range(36):
            degree = 0
            if cond < 2 or cond >3: # abs und poi
                degree = abs(int(m[cond][trial][-5])-int(m[cond][trial][-6])) # save angular diff in var
            else: # rel
                # assumes m[cond][trial][-5]/[-6] hold house IDs matching the
                # integer keys of `angles` — TODO confirm dtype after np.array()
                degree = abs(angles[m[cond][trial][-5]]-angles[m[cond][trial][-6]])
            degrees.append(degree)
            if degree <= 30 or degree >= 330:
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':30,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                degree_30[cond][0] += 1 # increment counter for overall trial with 30 degree diff
                if m[cond][trial][-1]:
                    degree_30[cond][1] += 1 # increment counter for correct trial with 30 degree diff
            elif degree <= 60 or degree >= 300:
                degree_60[cond][0] += 1
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':60,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                if m[cond][trial][-1]:
                    degree_60[cond][1] += 1
            elif degree <= 90 or degree >= 270:
                degree_90[cond][0] += 1
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':90,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                if m[cond][trial][-1]:
                    degree_90[cond][1] += 1
            elif degree <= 120 or degree >= 240:
                degree_120[cond][0] += 1
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':120,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                if m[cond][trial][-1]:
                    degree_120[cond][1] += 1
            elif degree <= 150 or degree >= 210:
                degree_150[cond][0] += 1
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':150,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                if m[cond][trial][-1]:
                    degree_150[cond][1] += 1
            else:
                degree_180[cond][0] += 1
                degreeDF = degreeDF.append({'Subject':e,'Condition':cond,'AngularDiff':180,'Performance':float(m[cond][trial][-1])},ignore_index=True)
                if m[cond][trial][-1]:
                    degree_180[cond][1] += 1
allDegs = [degree_30,degree_60,degree_90,degree_120,degree_150,degree_180]
# ### Plot as Distribution:
# One dot = average performance of one participant over all trials with this orientation
#
# Plot like num click above
# NOTE(review): `groupeddegree` is not defined anywhere in this script — it was
# presumably produced by a (missing) groupby over `degreeDF`; restore that cell
# or replace `groupeddegree` with the appropriate aggregate before running.
groupeddegreeInf = groupeddegree[(groupeddegree['Condition']==1)|(groupeddegree['Condition']==3)|(groupeddegree['Condition']==5)]
groupeddegreeAllInf = groupeddegreeInf.groupby(['Subject','AngularDiff'], as_index=False)['Performance'].mean()
groupeddegreeAllInf.head()
# NOTE(review): saved to the working directory but re-loaded from 'Results/'
# below — the paths differ; confirm which location is intended.
groupeddegreeAllInf.to_csv('DegreePerformanceInf.csv')#Average performance for each subject - angular difference combination
#over infinite task conditions
groupeddegreeAllInf = pd.read_csv('Results/DegreePerformanceInf.csv')
# ### Now Plot:
# Quadratic regression of performance on angular difference.
#plt.figure(figsize=(10,7))
sns.lmplot(x='AngularDiff',y='Performance',data = groupeddegreeAllInf, height=7,aspect=1.4,palette=["royalblue"],x_jitter=3,order=2)
#plt.title('Performance in Relation to Angular Difference - Infinite \n Averaged over Tasks and Trials with x Angular Difference for Each Subject',fontsize=20)
plt.xlabel('Angular Difference',fontsize=25)
plt.ylabel('Performance (%)',fontsize=25)
plt.xticks(np.linspace(0,180,7),fontsize=20)
plt.xlim(20,190)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5,dtype=int),fontsize=20)
plt.subplots_adjust(top=0.9)
plt.show()
#plt.savefig('Results/AngDiffPerfPoly.png', bbox_inches='tight')
# ### Plot as Box Plot
# +
ax = pt.RainCloud(data=groupeddegreeAllInf,x='AngularDiff',y='Performance', palette=["royalblue"],bw = 0.0,
                 width_viol = .0, figsize = (10,7),pointplot=True,alpha = 1, dodge = True, move = 0.0)
ax.set_xticklabels(np.linspace(30,180,6,dtype=int),fontsize=15)
#plt.title('Average Performance of Subjects Dependent on Angular Difference of Houses',fontsize=25)
plt.ylabel('Performance (%)',fontsize=20)
plt.xlabel("Angular Difference",fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5,dtype=int),fontsize=15)
#plt.plot([-0.5, 9.5], [0.5291, 0.5291], linewidth=3,color='black',linestyle=':')
plt.plot([-0.5, 9.5], [0.5, 0.5], linewidth=3,color='black',linestyle=':')
# NOTE(review): `poly_2` is defined only two cells BELOW — this cell relies on
# out-of-order notebook execution; run the poly_2 cell first.
plt.scatter(groupeddegreeAllInf['AngularDiff'],poly_2.predict(groupeddegreeAllInf['AngularDiff']), linewidth=3)
plt.show()
#plt.savefig('Results/AngDiffPerfRainCloud_NoTitle.png', bbox_inches='tight')
# -
# Black line = median, Red line = Mean
# ### One Way ANOVA
anovarm = AnovaRM(groupeddegreeAllInf,'Performance','Subject',within=['AngularDiff'])
fit = anovarm.fit()
fit.summary()
# Quadratic (2nd-order polynomial) fit of performance on angular difference.
poly_2 = smf.ols(formula='Performance ~ 1 + AngularDiff + I(AngularDiff **2)', data=groupeddegreeAllInf).fit()
poly_2.summary()
plt.scatter(groupeddegreeAllInf['AngularDiff'],poly_2.predict(groupeddegreeAllInf['AngularDiff']), linewidth=3)
plt.show()
# ### Linear Regression Model for Angular Differences
angdiffperf_model = ols("Performance ~ AngularDiff", data=groupeddegreeAllInf).fit()
print(angdiffperf_model.summary())
# ## Performance in Relation to Distance Between Houses
# ### No Binning, Average Over Subjects for Each House Combination
# Only run next 3 cells once, then just load the .csv file.
# Re-load house coordinates ("<house>:<x>;<y>" per line) keyed by house ID.
coordinates = open("HouseList.txt","r")
coords = pd.DataFrame(columns={'House','x','y'})
for co in coordinates:
    x = float(co.split(':',1)[1].split(';',1)[0])
    y = float(co.split(';',1)[1])
    house = str(int(co.split(':',1)[0]))
    coords = coords.append({'House':house,'x':x,'y':y},ignore_index=True)
coords = coords.set_index('House').sort_index()
vp_nums = list(AllDf)
# Use one subject's trial order to assign a stable ID to each house combination
# (assumes all subjects saw the same stimulus pairs — TODO confirm).
m1 = mat_to_py(taskPath,vp_nums[0])
houseOrder = []
for c in range(6):
    if c>1:
        allHouseNum = [x[1] for x in np.array(m1[c])]
        sort = np.sort(allHouseNum)
    else:
        allHouseNum = [x[0] for x in np.array(m1[c])]
        sort = np.sort(allHouseNum)
    houseOrder.append(list(sort))
# +
conditions = ["Absolute - 3s ","Absolute - inf","Relative - 3s ","Relative - inf","Pointing 3s ","Pointing - inf"]
tasks = ["Relative","Relative","Pointing","Pointing"]
Conds = ["3s","inf","3s","inf"]
# One row per trial of the two-house conditions (Relative/Pointing): Euclidean
# distance between the two stimulus houses vs. correctness.
DistPerfDF = pd.DataFrame(columns={'Subject','Task','Condition','Distance','Performance','HouseCombination'})
for i,e in enumerate(vp_nums):
    try:
        m = mat_to_py(taskPath,e)
        for c in range(4):
            for t in range(36):
                h1 = (coords['x'].loc[str(m[c+2][t][0])],coords['y'].loc[str(m[c+2][t][0])])
                h2 = (coords['x'].loc[str(m[c+2][t][1])],coords['y'].loc[str(m[c+2][t][1])])
                dist = distance.euclidean(h1, h2)
                hC = houseOrder[c+2].index(m[c+2][t][1])
                DistPerfDF = DistPerfDF.append({'Subject':e,'Task':tasks[c],'Condition':Conds[c],
                                                'Distance':dist,'Performance':float(m[c+2][t][-1]),'HouseCombination':hC},ignore_index=True)
    # NOTE(review): bare except also hides lookup/indexing errors, not just
    # missing files — consider catching FileNotFoundError only.
    except:
        print(str(e)+" Not in folder")
# -
DistPerfDF.to_csv("Results/DistancePerformanceAll.csv")
DistPerfDF = pd.read_csv("Results/DistancePerformanceAll.csv")
grouped = DistPerfDF.groupby(['HouseCombination','Task','Condition'], as_index=False)['Performance','Distance'].mean()
group = DistPerfDF.groupby(['HouseCombination','Task','Condition'], as_index=False)['Performance','Distance'].mean()
group.head()
group.to_csv("Results/DistPerfGroupedMean.csv")
group = pd.read_csv("Results/DistPerfGroupedMean.csv")
# ### Plotting:
# Linear regression of mean performance on inter-house distance (inf condition).
plt.figure(figsize=(10,7))
sns.regplot(x="Distance", y="Performance", data=group[group['Condition']=="inf"],color='royalblue',ci=95)
#plt.title("Task Performance - Distance Between Houses in Seahaven \n One Point = Task,Condition,House Combination Averaged Over Subjects",fontsize=20)
plt.xlabel("Distance (Unity Units)",fontsize=20)
plt.ylabel("Performance (%)",fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5,dtype=int),fontsize=15)
plt.xticks(np.linspace(0,400,9),fontsize=15)
plt.ylim(0.2,0.8)
plt.xlim(0,380)
#plt.savefig('Results/DistPerfRegression_NoTitle.png', bbox_inches='tight')
plt.show()
distperf_model = ols("Performance ~ Distance", data=group[group['Condition']=="inf"]).fit()
print(distperf_model.summary())
# ## Repeated Measurements
# Collect per-trial performance for subjects with repeated measurements; the
# 'Repeated' code from checkUsability encodes measurement number (first char)
# and subject letter (second char).
allFiles = os.listdir(mapPath)
conditions = ["Absolute - 3s ","Absolute - inf","Relative - 3s ","Relative - inf","Pointing 3s ","Pointing - inf"]
g = open("./complete_list_houses.txt","r")
allHouses = []
for i in g:
    allHouses.append(str(int(i.split('_',1)[0])))
performances = pd.DataFrame(columns=['Subject','Measurement','Condition','Performance'])
for e in allFiles:
    if e.endswith(".ods"):
        usable,code = checkUsability(int(e.split('.',1)[0]),Rep=True)
        if usable:
            #print(ord(str(code)[1])-97)
            m = mat_to_py(taskPath,(e.split('.',1)[0]))
            for c in range(6):
                for t in range(36):
                    # ord(letter)-97 maps 'a','b',... to 0,1,...; measurement digit is 1-based.
                    performances = performances.append({'Subject': ord(str(code)[1])-97,'Measurement':int(str(code)[0])-1,
                                                     'Condition':c,'Performance':int(m[c][t][-1])}, ignore_index=True)
np.unique(performances['Subject'])# List of subjects
performances.to_csv("Results/RepeatedMPerformances.csv")
performances = pd.read_csv("Results/RepeatedMPerformances.csv")
# #### Plot Performances Averaged over 14 Repeated Measure Subjects
performances['Performance'] = performances['Performance'].astype(float)
# Bar plot: mean performance per condition, one bar per measurement.
ax = sns.factorplot(x="Condition", y="Performance", hue="Measurement",data=performances,
                   size=5, kind="bar", palette="Blues",aspect=2, legend_out = False)
ax.set_xticklabels(conditions,fontsize=12)
ax.set_yticklabels(fontsize=12)
ax.set_xlabels('Condition',fontsize=15)
ax.set_ylabels('Performance',fontsize=15)
l = plt.legend(title="Measurement",fontsize=15)
l.get_texts()[0].set_text('1')
l.get_texts()[1].set_text('2')
l.get_texts()[2].set_text('3')
plt.setp(l.get_title(),fontsize=15)
ax.fig.suptitle('Average Performance in Each Task for Three Measurements',fontsize=15)
plt.show()
#plt.savefig('Results/RepMeasPerf.png', bbox_inches='tight')
# Per-subject mean performance for each measurement x condition cell.
repgroup = performances.groupby(['Measurement','Subject','Condition'], as_index=False)['Performance'].mean()
repgroup.head()
repgroup.to_csv("Results/RepeatedMPerformanceGrouped.csv")
repgroup = pd.read_csv("Results/RepeatedMPerformanceGrouped.csv")
conditions = ["Absolute \n 3s ","Absolute \n inf","Relative \n 3s ","Relative \n inf","Pointing \n 3s ","Pointing \n inf"]
# Box plot of the same grouped data; dotted line = chance level.
plt.figure(figsize=(10,7))
ax = sns.boxplot(x="Condition", y="Performance", hue="Measurement",data=repgroup,
                palette=sns.xkcd_palette(['lightblue','blue','denim blue']))
ax.set_xticklabels(conditions,fontsize=15,rotation=0)
l = plt.legend(title="Measurement",fontsize=15,loc=4)
l.get_texts()[0].set_text('1')
l.get_texts()[1].set_text('2')
l.get_texts()[2].set_text('3')
plt.setp(l.get_title(),fontsize=15)
plt.plot([-0.5, 9.5], [0.5, 0.5], linewidth=3,color='black',linestyle=':')
plt.xlabel("Condition",fontsize=20)
plt.ylabel("Performance (%)",fontsize=20)
plt.yticks(np.linspace(0,1,5),np.linspace(0,100,5,dtype=int),fontsize=15)
#plt.title('Average Performance in Each Task for Three Measurements',fontsize=25)
#ax.fig.suptitle('Average Performance in Each Task for Three Measurements',fontsize=15)
plt.show()
#plt.savefig('Results/RepMeasPerfBox_NoTitle.png', bbox_inches='tight')
# Reshape the grouped repeated-measure table into one row per
# (subject, measurement, task, time-condition) combination.
TaskTimeDF = pd.DataFrame(columns={'Subject','Measurement','Task','Time','Performance'})
tasks = ['Absolute','Absolute','Relative','Relative','Pointing','Pointing']
times = ['3s','inf','3s','inf','3s','inf']
# FIX: iterate over the actual table length instead of the hard-coded 252
# (14 subjects x 3 measurements x 6 conditions), so this cell keeps working
# if the number of repeated-measure subjects ever changes.
for i in range(len(repgroup)):
    TaskTimeDF = TaskTimeDF.append({'Subject':repgroup['Subject'][i],'Measurement':repgroup['Measurement'][i],'Task':tasks[repgroup['Condition'][i]],'Time':times[repgroup['Condition'][i]],'Performance':repgroup['Performance'][i]},ignore_index=True)
TaskTimeDF.head()
TaskTimeDF.to_csv("Results/RepeatedTaskTinePerformance.csv")
# ### Repeated Measure ANOVA Within Task, Time and Measurement
# Three-way repeated-measures ANOVA on the reshaped repeated-measure data.
anovarm = AnovaRM(TaskTimeDF,'Performance','Subject',within=['Task','Time','Measurement'])
fit = anovarm.fit()
fit.summary()
# ## FRS Results
# Spatial-ability questionnaire (FRS) scores vs. task performance.
frs = pd.read_excel('FRS_MAP_64_final_sk copy.xlsx')
frs.head()
frsDF = pd.DataFrame(columns={'Performance','Scale','Task'})
# NOTE(review): loops over 65 rows although 64 subjects are analysed elsewhere
# (the Pearson correlations below slice [:64]) — confirm row count of the sheet.
# frsDF itself is not used by the plots/models below, which read `frs` directly.
for i in range(65):
    frsDF = frsDF.append({'Performance':frs['AbsInf'][i],'Scale':frs['ScaleMean'][i],'Task':'Absolute Inf'},ignore_index=True)
    frsDF = frsDF.append({'Performance':frs['RelInf'][i],'Scale':frs['ScaleMean'][i],'Task':'Relative Inf'},ignore_index=True)
    frsDF = frsDF.append({'Performance':frs['PointInf'][i],'Scale':frs['ScaleMean'][i],'Task':'Pointing Inf'},ignore_index=True)
# One regression line per task: performance as a function of FRS scale score.
fig, ax = plt.subplots(figsize=(10,7))
xlim = [1,7]
ax.set_xlim(xlim)
sns.regplot(x='ScaleMean', y='AbsInf', data=frs, ci=None, ax=ax,color='royalblue')
sns.regplot(x='ScaleMean', y='RelInf', data=frs, ci=None, ax=ax,color='blue')
sns.regplot(x='ScaleMean', y='PointInf', data=frs, ci=None, ax=ax,color='darkblue')
plt.xlabel("Spatial Ability Score",fontsize=20)
plt.ylabel("Performance (%)",fontsize=20)
plt.yticks(np.linspace(20,80,4),np.linspace(20,80,4,dtype=int),fontsize=15)
plt.xticks(fontsize=15)
plt.legend(['Absolute','Relative','Pointing'],fontsize=15,loc=4)
ax.set_ylim([20,80])
plt.show()
#plt.savefig('Results/FRSPoiInfRegression_New.png', bbox_inches='tight')
# ### Linear Regression - Pointing Infinite
frsPoiperf_model = ols("PointInf ~ ScaleMean", data=frs).fit()
print(frsPoiperf_model.summary())
#Pearson Correlation:
scipy.stats.pearsonr(frs['ScaleMean'][:64],frs['PointInf'][:64])
# ### Linear Regression - Absolute Infinite
frsPoiperf_model = ols("AbsInf ~ ScaleMean", data=frs).fit()
print(frsPoiperf_model.summary())
#Pearson Correlation:
scipy.stats.pearsonr(frs['ScaleMean'][:64],frs['AbsInf'][:64])
# ### Linear Regression - Relative Infinite
frsPoiperf_model = ols("RelInf ~ ScaleMean", data=frs).fit()
print(frsPoiperf_model.summary())
#Pearson Correlation:
scipy.stats.pearsonr(frs['ScaleMean'][:64],frs['RelInf'][:64])
| Analysis/MapAnalysis/MapTrainingAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # So, show me how to align two vector spaces for myself!
# No problem. We're going to run through the example given in the README again, and show you how to learn your own transformation to align the French vector space to the Russian vector space.
#
# First, let's define a few simple functions...
# +
import numpy as np
from fasttext import FastVector
# from https://stackoverflow.com/questions/21030391/how-to-normalize-array-numpy
def normalized(a, axis=-1, order=2):
    """Return *a* rescaled so each vector along *axis* has unit norm.

    Vectors whose norm is zero are left unchanged instead of producing
    NaNs from a division by zero.
    """
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    safe_norms = np.where(norms == 0, 1, norms)
    return a / np.expand_dims(safe_norms, axis)
def make_training_matrices(source_dictionary, target_dictionary, bilingual_dictionary):
    """
    Build paired training matrices from a bilingual dictionary.

    source_dictionary / target_dictionary are FastVector objects (any
    mapping supporting ``in`` and ``[]`` works); bilingual_dictionary is a
    list of (source_word, target_word) tuples.  Pairs where either word is
    missing from its vocabulary are skipped.  Returns two numpy arrays of
    shape (n_pairs, embedding_dimension).
    """
    pairs = [
        (source_dictionary[src], target_dictionary[tgt])
        for src, tgt in bilingual_dictionary
        if src in source_dictionary and tgt in target_dictionary
    ]
    source_rows = [vectors[0] for vectors in pairs]
    target_rows = [vectors[1] for vectors in pairs]
    return np.array(source_rows), np.array(target_rows)
def learn_transformation(source_matrix, target_matrix, normalize_vectors=True):
    """
    Learn the orthogonal map aligning the source space to the target space.

    Both matrices have shape (dictionary_length, embedding_dimension) and
    hold paired word vectors from the bilingual dictionary.  This is the
    orthogonal Procrustes solution: with U S Vt = svd(X^T Y), the optimal
    orthogonal transform is U @ Vt (note numpy's svd already returns V
    transposed).
    """
    # Optionally project every training vector onto the unit sphere first.
    if normalize_vectors:
        source_matrix = normalized(source_matrix)
        target_matrix = normalized(target_matrix)
    cross_covariance = np.matmul(source_matrix.T, target_matrix)
    U, _, Vt = np.linalg.svd(cross_covariance)
    return np.matmul(U, Vt)
# -
# Now we load the two word-vector files — note that although the variable names below keep the French/Russian names from the original fastText tutorial, the code actually loads the Indonesian (`wiki.id.vec`) and Malay (`wiki.ms.vec`) vectors — and evaluate the similarity of "keretaapi" and "melatih":
# seem to only work for Python 2
fr_dictionary = FastVector(vector_file='wiki.id.vec')
ru_dictionary = FastVector(vector_file='wiki.ms.vec')
fr_vector = fr_dictionary["keretaapi"]
ru_vector = ru_dictionary["melatih"]
print(FastVector.cosine_similarity(fr_vector, ru_vector))
fr_vector = fr_dictionary["china"]
ru_vector = ru_dictionary["china"]
print(FastVector.cosine_similarity(fr_vector, ru_vector))
# "chat" and "кот" both mean "cat", so they should be highly similar; clearly the two word vector spaces are not yet aligned. To align them, we need a bilingual dictionary of French and Russian translation pairs. As it happens, this is a great opportunity to show you something truly amazing...
#
# Many words appear in the vocabularies of more than one language; words like "alberto", "london" and "presse". These words usually mean similar things in each language. Therefore we can form a bilingual dictionary, by simply extracting every word that appears in both the French and Russian vocabularies.
ru_words = set(ru_dictionary.word2id.keys())
fr_words = set(fr_dictionary.word2id.keys())
overlap = list(ru_words & fr_words)
bilingual_dictionary = [(entry, entry) for entry in overlap]
# Let's align the French vectors to the Russian vectors, using only this "free" dictionary that we acquired without any bilingual expert knowledge.
# +
# form the training matrices
source_matrix, target_matrix = make_training_matrices(
fr_dictionary, ru_dictionary, bilingual_dictionary)
# learn and apply the transformation
transform = learn_transformation(source_matrix, target_matrix)
fr_dictionary.apply_transform(transform)
# -
# Finally, we re-evaluate the similarity of "chat" and "кот":
fr_vector = fr_dictionary["keretaapi"]
ru_vector = ru_dictionary["melatih"]
print(FastVector.cosine_similarity(fr_vector, ru_vector))
fr_vector = fr_dictionary["china"]
ru_vector = ru_dictionary["china"]
print(FastVector.cosine_similarity(fr_vector, ru_vector))
# "chat" and "кот" are pretty similar after all :)
#
# Use this simple "identical strings" trick to align other language pairs for yourself, or prepare your own expert bilingual dictionaries for optimal performance.
| align_your_own.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy.spatial.distance import cdist
import commoncode as commoncode
import numpy as np
import matplotlib.pyplot as plt
import SkewGP as SkewGP
import GPy as GPy
from matplotlib import pyplot as plt
from scipy.stats import bernoulli
from scipy.stats import norm
import pymc3 as pm
# ## We generate some data
# +
def fun(x, noise_std=0.1):
    """Noisy target function: a smooth sinusoid plus Gaussian noise.

    *x* is a 1-D array of inputs; *noise_std* scales i.i.d. standard-normal
    noise added element-wise (pass 0 for the noise-free curve).
    """
    clean = (4 * np.sin(x / 2 * np.pi) - 0.5 * np.sin(2 * x * np.pi)) / 6
    return clean + np.random.randn(len(x)) * noise_std
σ=0.05
np.random.seed(42)
x=np.random.rand(60)*5
indd=np.where((x<2)|(x>4))[0]
y=fun(x,noise_std=σ)
plt.scatter(x[indd],y[indd])
#We generate the matrices for preferences
w=-np.ones(len(x))
w[indd]=1
W=np.diag(w)
Z=np.zeros((W.shape[0],1))
c=np.zeros((len(indd),len(x)))
for i in range(len(indd)):
c[i,indd[i]]=1
C=c
X=x.reshape(-1,1)
Y=y[indd].reshape(-1,1)
xx = np.linspace(0,5,60)
plt.plot(xx,fun(xx,noise_std=σ))
plt.axvspan(2, 4, color='gray', alpha=0.2)
plt.scatter(x,w,color='C23')
# -
# The above plot shows the true function we used to generate the data (in blue), the numeric data (blue points) and binary data (red points where $y_1=1$ means valid and $y_i=-1$ means non-valid)
# We define the Kernel and an empty skewness function for SkewGP. We are going to use a zero dimension SkewGP, which is a GP prior and so we do not need Delta.
# +
def Kernel(X1, X2, params, diag_=False):
    """Squared-exponential (RBF) kernel between the rows of X1 and X2.

    *params* supplies 'lengthscale' and 'variance' entries, each a dict
    holding a 'value' array.  With diag_=False the full cross-kernel matrix
    is returned; with diag_=True only element-wise (paired-row) kernel
    values are computed, so X1 and X2 must have the same number of rows.
    """
    ls = params['lengthscale']['value']
    amplitude = params['variance']['value']
    A = np.atleast_2d(X1) / ls
    B = np.atleast_2d(X2) / ls
    if diag_:
        delta = A - B
        sq_dist = np.sum(delta * delta, axis=1)
    else:
        sq_dist = cdist(A, B, metric='sqeuclidean')
    return amplitude * np.exp(-0.5 * sq_dist)
def Delta(X, params):
    """Skewness-function placeholder.

    A zero-latent-dimension SkewGP reduces to a plain GP prior, so no
    skewness directions are needed and an empty list is returned.
    """
    return []
noise_variance = σ**2
logexp=commoncode.logexp()
latent_dim=0
params0={'lengthscale': {'value':np.array([1.0]),
'range':np.vstack([[0.00001, 50.0]]),
'transform': logexp},
'variance': {'value':np.array([1.0]),
'range':np.vstack([[0.001, 100.0]]),
'transform': logexp},
'noise_variance': {'value':np.array([noise_variance]),
'range':np.vstack([[0.000001, 50.001]]),
'transform': logexp}
}
model = SkewGP.SkewGP(X,Kernel, Delta, params0, W=W, C=C, Y=Y, Z=Z, latent_dim =0, type_y='mixed', jitter=1e-4)
#we optimize the hyperparameters
ml=model.optimize(max_iters=300)
print("Marginal Likelihood",ml)
# -
model.params
Xpred=np.linspace(0,5.5,200)[:,None]
print(Xpred.shape)
predictions=model.predict(Xpred,nsamples=10000);
# +
plt.figure(figsize=(16,4))
plt.scatter(x[indd],y[indd])
plt.plot(xx,fun(xx,noise_std=σ))
plt.axvspan(2, 4, color='gray', alpha=0.2)
plt.scatter(x,w,color='C23')
#we compute the credible intervals
credib_int = pm.stats.hpd(predictions.T)
#we plot the latent function mean and credible interval
plt.plot(Xpred[:,0],credib_int[:,1],color='C2', linestyle=':')
plt.plot(Xpred[:,0],credib_int[:,0],color='C2', linestyle=':')
plt.plot(Xpred[:,0],np.mean(predictions,axis=1), label='mean',color='C2')
plt.xlabel("x",fontsize=16)
plt.ylabel("f(x)",fontsize=16);
# -
| notebooks/Mixed_regression_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LeamvpPVXuS_" colab_type="text"
# # Random Forest Regression
# + [markdown] id="O2wvZ7SKXzVC" colab_type="text"
# ## Importing the libraries
# + id="PVmESEFZX4Ig" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="zgbK_F8-X7em" colab_type="text"
# ## Importing the dataset
# + id="adBE4tjQX_Bh" colab_type="code" colab={}
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# + [markdown] id="ptNjcy7bOBlD" colab_type="text"
# ## Splitting the dataset into the Training set and Test set
# + id="C7NdofoCOFQF" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# + [markdown] id="v4S2fyIBYDcu" colab_type="text"
# ## Training the Random Forest Regression model on the whole dataset
# + id="o8dOCoJ1YKMc" colab_type="code" colab={}
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(X_train, y_train)
# + [markdown] id="DM_jh0frOPKE" colab_type="text"
# ## Predicting the Test set results
# + id="JGa9ZfM4OTNw" colab_type="code" colab={}
y_pred = regressor.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# + [markdown] id="Cd8VZM5POWXN" colab_type="text"
# ## Evaluating the Model Performance
# + id="a7y1rXlfOZJo" colab_type="code" colab={}
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
| Regression/Regression/random_forest_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import urllib2
from bs4 import BeautifulSoup
import sys
url = "http://www.hprc.org.cn/wxzl/wxysl/lczf/"
content = urllib2.urlopen(url).read()
soup = BeautifulSoup(content, 'html.parser')
links = soup.find_all('td', {'class', 'bl'}) #links??
links = soup.find_all('td', class_='bl') #??
print len(links)
hyperlinks = [url + i.a['href'].split('./')[1] for i in links]
hyperlinks
def crawler(url_i):
    """Fetch one government work-report page and return (year, full_text).

    Follows the site's pagination: page 1 lives at ``url_i`` and pages
    2..countPage-1 at the same URL with an ``_<i>`` suffix before ``.html``.
    Python 2 code (urllib2).
    """
    # The pages are encoded in GB18030, not UTF-8.
    content = urllib2.urlopen(url_i).read().decode('gb18030')
    soup = BeautifulSoup(content, 'html.parser')
    # The report year is the first four characters of the 'huang16c' span.
    year = soup.find('span', {'class', 'huang16c'}).text[:4]
    year = int(year)
    # Concatenate the text of every <p> tag to get the page body.
    report = ''.join(s.text for s in soup('p'))
    # The total page count is embedded as "countPage = N//..." in the
    # second inline <script> of the page.
    scripts = soup.find_all('script')
    countPage = int(''.join(scripts[1]).split('countPage = ')[1].split('//')[0])
    if countPage == 1:
        pass
    else:
        # Fetch and append the remaining pages (pattern: <base>_<i>.html).
        for i in range(1, countPage):
            url_child = url_i.split('.html')[0] +'_'+str(i)+'.html'
            content = urllib2.urlopen(url_child).read().decode('gb18030')
            soup = BeautifulSoup(content)
            report_child = ''.join(s.text for s in soup('p'))
            report = report + report_child
    return year, report
reports = {}
for link in hyperlinks:
year, report = crawler(link)
print year
reports[year] = report
url2016 = 'http://news.xinhuanet.com/fortune/2016-03/05/c_128775704.htm'
content = urllib2.urlopen(url2016).read()
soup = BeautifulSoup(content, 'html.parser')
report2016 = ''.join(s.text for s in soup('p'))
with open('D:/GitHub/computational-communication-2016/shenliting/gov_reports1954-2016.txt', 'wb') as f:
for r in reports:
line = str(r)+'\t'+reports[r].replace('\n', '\t') +'\n'
f.write(line.encode('utf-8'))
| shenliting/homework5/Untitled8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training/Optimizing a basic model with a Built Algorithm
#
#
# ## Summary:
#
# This exercise is about executing all the steps of the Machine Learning development pipeline, using some features SageMaker offers. We'll use here a public dataset called iris. The dataset and the model aren't the focus of this exercise. The idea here is to see how SageMaker can accelerate your work and avoid wasting your time with tasks that aren't related to your business. So, we'll do the following:
# <a id='contents' />
#
# ## Table of contents
#
# 1. [Train/deploy/test a multiclass model using XGBoost](#part1)
# 2. [Optimize the model](#part2)
# 3. [Run batch predictions](#part3)
# 4. [Check the monitoring results, created in **Part 1**](#part4)
# <a id='part1' />
#
# # 1. Train deploy and test
# [(back to top)](#contents)
# ## Let's start by importing the dataset and visualize it
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import datasets
sns.set(color_codes=True)
iris = datasets.load_iris()
X=iris.data
y=iris.target
dataset = np.insert(iris.data, 0, iris.target,axis=1)
df = pd.DataFrame(data=dataset, columns=['iris_id'] + iris.feature_names)
## We'll also save the dataset, with header, give we'll need to create a baseline for the monitoring
df.to_csv('full_dataset.csv', sep=',', index=None)
df['species'] = df['iris_id'].map(lambda x: 'setosa' if x == 0 else 'versicolor' if x == 1 else 'virginica')
df.head()
# -
df.describe()
# ## Checking the class distribution
ax = df.groupby(df['species'])['species'].count().plot(kind='bar')
x_offset = -0.05
y_offset = 0
for p in ax.patches:
b = p.get_bbox()
val = "{}".format(int(b.y1 + b.y0))
ax.annotate(val, ((b.x0 + b.x1)/2 + x_offset, b.y1 + y_offset))
# ### Correlation Matrix
# +
corr = df.corr()
f, ax = plt.subplots(figsize=(15, 8))
sns.heatmap(corr, annot=True, fmt="f",
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
ax=ax);
# -
# ### Pairplots & histograms
sns.pairplot(df.drop(['iris_id'], axis=1), hue='species', size=2.5,diag_kind="kde");
# ### Now with linear regression
sns.pairplot(df.drop(['iris_id'], axis=1), kind="reg", hue='species', size=2.5,diag_kind="kde");
# ### Fit a plot a kernel density estimate.
# We can see in this dimension an overlapping between **versicolor** and **virginica**. This is a better representation of what we identified above.
# +
tmp_df = df[(df.iris_id==0.0)]
sns.kdeplot(tmp_df['petal width (cm)'], tmp_df['petal length (cm)'], bw='silverman', cmap="Blues", shade=False, shade_lowest=False)
tmp_df = df[(df.iris_id==1.0)]
sns.kdeplot(tmp_df['petal width (cm)'], tmp_df['petal length (cm)'], bw='silverman', cmap="Greens", shade=False, shade_lowest=False)
tmp_df = df[(df.iris_id==2.0)]
sns.kdeplot(tmp_df['petal width (cm)'], tmp_df['petal length (cm)'], bw='silverman', cmap="Reds", shade=False, shade_lowest=False)
plt.xlabel('species')
# -
# Ok. Petal length and petal width have the highest linear correlation with our label. Also, sepal width seems to be useless, considering the linear correlation with our label.
#
# Since versicolor and virginica cannot be split linearly, we need a more versatile algorithm to create a better classifier. In this case, we'll use XGBoost, a tree ensemble that can give us a good model for predicting the flower.
# ## Ok, now let's split the dataset into training and test
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42, stratify=y)
# -
# #### First we build the datasets with *X* and *y* for train and test:
# Build train/test DataFrames with the feature columns followed by the label.
# X_*/y_* are numpy arrays from train_test_split, so they are wrapped in
# pandas objects first (pd.concat rejects raw ndarrays).
iris_train = pd.concat([pd.DataFrame(X_train), pd.Series(y_train)], axis = 1, ignore_index = True)
# Bug fix: the test set must pair X_test with y_test — the original used
# y_train, which both mislabels the rows and has a mismatched length.
iris_test = pd.concat([pd.DataFrame(X_test), pd.Series(y_test)], axis = 1, ignore_index = True)
# #### Then we save the datasets:
iris_train.to_csv('iris_train.csv', index = False)
iris_test.to_csv('iris_test.csv', index = False)
# ## Now it's time to train our model with the builtin algorithm XGBoost
# +
import sagemaker
import boto3
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
from sklearn.model_selection import train_test_split
role = get_execution_role()
prefix='mlops/iris'
# Retrieve the default bucket
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
# -
# #### We will launch an async job to create the baseline for the monitoring process
# A baseline is a what the monitoring will consider **normal**.
#
# The training dataset with which you trained the model is usually a good baseline dataset. Note that the training dataset data schema and the inference dataset schema should exactly match (i.e. the number and order of the features).
#
# From the training dataset you can ask Amazon SageMaker to suggest a set of baseline constraints and generate descriptive statistics to explore the data. For this example, upload the training dataset that was used to train the pre-trained model included in this example. If you already have it in Amazon S3, you can directly point to it.
# +
from sagemaker.model_monitor import DefaultModelMonitor
from sagemaker.model_monitor.dataset_format import DatasetFormat
endpoint_monitor = DefaultModelMonitor(
role=role,
instance_count=1,
instance_type='ml.m5.xlarge',
volume_size_in_gb=20,
max_runtime_in_seconds=3600,
)
endpoint_monitor.suggest_baseline(
baseline_dataset='full_dataset.csv',
dataset_format=DatasetFormat.csv(header=True),
output_s3_uri='s3://{}/{}/monitoring/baseline'.format(bucket, prefix),
wait=False,
logs=False
)
# -
# #### Ok. Let's continue, upload the dataset and train the model
# Upload the dataset to an S3 bucket
input_train = sagemaker_session.upload_data(path='iris_train.csv', key_prefix='%s/data' % prefix)
input_test = sagemaker_session.upload_data(path='iris_test.csv', key_prefix='%s/data' % prefix)
train_data = sagemaker.session.s3_input(s3_data=input_train,content_type="csv")
test_data = sagemaker.session.s3_input(s3_data=input_test,content_type="csv")
# **Note:** If there are other packages you want to use with your script, you can include a requirements.txt file in the same directory as your training script to install other dependencies at runtime. Both requirements.txt and your training script should be put in the same folder. You must specify this folder in `source_dir` argument when creating the estimator.
# +
# get the URI for new container
container_uri = get_image_uri(boto3.Session().region_name, 'xgboost', repo_version='0.90-2');
# Create the estimator
xgb = sagemaker.estimator.Estimator(container_uri,
role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sagemaker_session)
# Set the hyperparameters
xgb.set_hyperparameters(eta=0.1,
max_depth=10,
gamma=4,
num_class=len(np.unique(y)),
alpha=10,
min_child_weight=6,
silent=0,
objective='multi:softmax',
num_round=30)
# -
# ### Train the model
# %%time
# takes around 3min 11s
xgb.fit({'train': train_data, 'validation': test_data, })
# ### Deploy the model and create an endpoint for it
# The following action will:
# * get the assets from the job we just ran and then create an input in the Models Catalog
# * create a endpoint configuration (a metadata for our final endpoint)
# * create an enpoint, which is our model wrapped in a format of a WebService
#
# After that we'll be able to call our deployed endpoint for doing predictions
# %%time
# Enable log capturing in the endpoint
data_capture_configuration = sagemaker.model_monitor.data_capture_config.DataCaptureConfig(
enable_capture=True,
sampling_percentage=100,
destination_s3_uri='s3://{}/{}/monitoring'.format(bucket, prefix),
sagemaker_session=sagemaker_session
)
xgb_predictor = xgb.deploy(
initial_instance_count=1,
instance_type='ml.m4.xlarge',
data_capture_config=data_capture_configuration
)
# ### Alright, now that we have deployed the endpoint, with data capturing enabled, it's time to setup the monitor
# Let's start by configuring our predictor
# +
from sagemaker.predictor import csv_serializer
from sklearn.metrics import f1_score
endpoint_name = xgb_predictor.endpoint
model_name = boto3.client('sagemaker').describe_endpoint_config(
EndpointConfigName=endpoint_name
)['ProductionVariants'][0]['ModelName']
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
xgb_predictor.deserializer = None
# -
# ### *Monitoring*:
# - We will generate a **baseline** for the monitoring. Yes, we can monitor a deployed model by collecting logs from the payload and the model output. SageMaker can suggest some statistics and constraints that can be used to compare with the collected data. Then we can see some **metrics** related to the **model performance**.
# - We'll also create a monitoring scheduler. With this scheduler, SageMaker will parse the logs from time to time to compute the metrics we need. Given it takes some time to get the results, we'll check these metrics at the end of the exercice, in **Part 4**.
# And then, we need to create a **Monitoring Schedule** for our endpoint. The command below will create a cron scheduler that will process the log each hour, then we can see how well our model is going.
# +
from sagemaker.model_monitor import CronExpressionGenerator
from time import gmtime, strftime
endpoint_monitor.create_monitoring_schedule(
endpoint_input=endpoint_name,
output_s3_uri='s3://{}/{}/monitoring/reports'.format(bucket, prefix),
statistics=endpoint_monitor.baseline_statistics(),
constraints=endpoint_monitor.suggested_constraints(),
schedule_cron_expression=CronExpressionGenerator.hourly(),
enable_cloudwatch_metrics=True,
)
# -
# ### Just take a look on the baseline created/sugested by SageMaker for your dataset
# This set of statistics and constraints will be used by the Monitoring Scheduler to compare the incoming data with what is considered **normal**. Each invalid payload sent to the endpoint will be considered a violation.
baseline_job = endpoint_monitor.latest_baselining_job
schema_df = pd.io.json.json_normalize(baseline_job.baseline_statistics().body_dict["features"])
constraints_df = pd.io.json.json_normalize(baseline_job.suggested_constraints().body_dict["features"])
report_df = schema_df.merge(constraints_df)
report_df.drop([
'numerical_statistics.distribution.kll.buckets',
'numerical_statistics.distribution.kll.sketch.data',
'numerical_statistics.distribution.kll.sketch.parameters.c'
], axis=1).head(10)
# ### Start generating some artificial traffic
# The cell below starts a thread to send some traffic to the endpoint. Note that you need to stop the kernel to terminate this thread. If there is no traffic, the monitoring jobs are marked as `Failed` since there is no data to process.
# +
import random
import time
from threading import Thread
traffic_generator_running=True
def invoke_endpoint_forever():
    """Send deliberately invalid traffic to the deployed endpoint until the
    module-level flag ``traffic_generator_running`` is cleared (done in the
    cleanup cell at the end of the notebook)."""
    print('Invoking endpoint forever!')
    while traffic_generator_running:
        ## This will create an invalid set of features
        ## The idea is to violate two monitoring constraints: not_null and data_drift
        # One of the four features is nulled out; the others are drawn from
        # 5.0-20.0, far outside the iris feature range seen in the baseline.
        null_idx = random.randint(0,3)
        sample = [random.randint(500,2000) / 100.0 for i in range(4)]
        sample[null_idx] = None
        xgb_predictor.predict(sample)
        # Throttle to roughly two requests per second.
        time.sleep(0.5)
    print('Endpoint invoker has stopped')
Thread(target = invoke_endpoint_forever).start()
# -
# ## Now, let's do a basic test with the deployed endpoint
# In this test, we'll use a helper object called predictor. This object is always returned from a **Deploy** call. The predictor is just for testing purposes and we'll not use it inside our real application.
# +
predictions_test = [ float(xgb_predictor.predict(x).decode('utf-8')) for x in X_test]
score = f1_score(y_test,predictions_test,labels=[0.0,1.0,2.0],average='micro')
print('F1 Score(micro): %.1f' % (score * 100.0))
# -
# ## Then, let's test the API for our trained model
# This is how your application will call the endpoint. Using boto3 for getting a sagemaker runtime client and then we'll call invoke_endpoint
# +
from sagemaker.predictor import csv_serializer
sm = boto3.client('sagemaker-runtime')
resp = sm.invoke_endpoint(
EndpointName=endpoint_name,
ContentType='text/csv',
Body=csv_serializer(X_test[0])
)
prediction = float(resp['Body'].read().decode('utf-8'))
print('Predicted class: %.1f for [%s]' % (prediction, csv_serializer(X_test[0])) )
# -
# <a id='part2' />
#
# # 2. Model optimization with Hyperparameter Tuning
# [(back to top)](#contents)
# ## Hyperparameter Tuning Jobs
# #### A.K.A. Hyperparameter Optimization
#
# ## Let's tune our model before using it for our batch prediction
# We know that the iris dataset is an easy challenge. We can achieve a better score with XGBoost. However, we don't want to waste time testing all the possible variations of the hyperparameters in order to optimize the training process.
#
# Instead, we'll use the Sagemaker's tuning feature. For that, we'll use the same estimator, but let's create a Tuner and ask it for optimize the model for us.
# +
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
hyperparameter_ranges = {'eta': ContinuousParameter(0, 1),
'min_child_weight': ContinuousParameter(1, 10),
'alpha': ContinuousParameter(0, 2),
'gamma': ContinuousParameter(0, 10),
'max_depth': IntegerParameter(1, 10)}
objective_metric_name = 'validation:merror'
tuner = HyperparameterTuner(xgb,
objective_metric_name,
hyperparameter_ranges,
max_jobs=20,
max_parallel_jobs=4,
objective_type='Minimize')
tuner.fit({'train': train_data, 'validation': test_data, })
# -
tuner.wait()
job_name = tuner.latest_tuning_job.name
attached_tuner = HyperparameterTuner.attach(job_name)
xgb_predictor2 = attached_tuner.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
first_endpoint_name = endpoint_name
endpoint_name = xgb_predictor2.endpoint
model_name = boto3.client('sagemaker').describe_endpoint_config(
EndpointConfigName=endpoint_name
)['ProductionVariants'][0]['ModelName']
# ## A simple test before we move on
# +
from sagemaker.predictor import csv_serializer
from sklearn.metrics import f1_score
xgb_predictor2.content_type = 'text/csv'
xgb_predictor2.serializer = csv_serializer
xgb_predictor2.deserializer = None
# +
predictions_test = [ float(xgb_predictor2.predict(x).decode('utf-8')) for x in X_test]
score = f1_score(y_test,predictions_test,labels=[0.0,1.0,2.0],average='micro')
print('F1 Score(micro): %.1f' % (score * 100.0))
# -
# <a id='part3' />
#
# # 3. Batch prediction
# [(back to top)](#contents)
#
# ## Batch transform job
# If you have a file with the samples you want to predict, just upload that file to an S3 bucket and start a Batch Transform job. For this task, you don't need to deploy an endpoint. Sagemaker will create all the resources needed to do this batch prediction, save the results into an S3 bucket and then it will destroy the resources automatically for you
batch_dataset_filename='batch_dataset.csv'
with open(batch_dataset_filename, 'w') as csv:
for x_ in X:
line = ",".join( list(map(str, x_)) )
csv.write( line + "\n" )
csv.flush()
csv.close()
input_batch = sagemaker_session.upload_data(path=batch_dataset_filename, key_prefix='%s/data' % prefix)
# +
import sagemaker
# Initialize the transformer object
transformer=sagemaker.transformer.Transformer(
base_transform_job_name='mlops-iris',
model_name=model_name,
instance_count=1,
instance_type='ml.c4.xlarge',
output_path='s3://{}/{}/batch_output'.format(bucket, prefix),
)
# To start a transform job:
transformer.transform(input_batch, content_type='text/csv', split_type='Line')
# Then wait until transform job is completed
transformer.wait()
# +
import boto3
predictions_filename='iris_predictions.csv'
s3 = boto3.client('s3')
s3.download_file(bucket, '{}/batch_output/{}.out'.format(prefix, batch_dataset_filename), predictions_filename)
df2 = pd.read_csv(predictions_filename, sep=',', encoding='utf-8',header=None, names=[ 'predicted_iris_id'])
df3 = df.copy()
df3['predicted_iris_id'] = df2['predicted_iris_id']
df3.head()
# +
from sklearn.metrics import f1_score
score = f1_score(df3['iris_id'], df3['predicted_iris_id'],labels=[0.0,1.0,2.0],average='micro')
print('F1 Score(micro): %.1f' % (score * 100.0))
# +
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
cnf_matrix = confusion_matrix(df3['iris_id'], df3['predicted_iris_id'])
f, ax = plt.subplots(figsize=(15, 8))
sns.heatmap(cnf_matrix, annot=True, fmt="f", mask=np.zeros_like(cnf_matrix, dtype=np.bool),
cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
# -
# <a id='part4' />
#
# # 4. Check the monitoring results
# [(back to top)](#contents)
#
# The HPO took something like 20 minutes to run. The batch prediction, 3-5 more. It is probably enough time to have at least one execution of the monitor schedule. Since we created a thread for generating **invalid** features, we must have some data drift detected in our monitoring. Let's check
# +
mon_executions = endpoint_monitor.list_executions()
print("We created a hourly schedule above and it will kick off executions ON the hour (plus 0 - 20 min buffer.\nWe will have to wait till we hit the hour...")
while len(mon_executions) == 0:
print("Waiting for the 1st execution to happen...")
time.sleep(60)
mon_executions = endpoint_monitor.list_executions()
print('OK. we have %d execution(s) now' % len(mon_executions))
# +
import time
import pandas as pd
from IPython.display import display, HTML
def print_constraint_violations():
    """Fetch the latest monitoring constraint violations and render the
    first 10 as an HTML table.

    Relies on the module-level ``endpoint_monitor`` created earlier in the
    notebook.
    """
    violations = endpoint_monitor.latest_monitoring_constraint_violations()
    # None (not -1) is the supported way to disable column truncation;
    # -1 was deprecated in pandas 1.0.
    pd.set_option('display.max_colwidth', None)
    # pd.io.json.json_normalize was deprecated in pandas 1.0 in favour of
    # the top-level pd.json_normalize.
    constraints_df = pd.json_normalize(violations.body_dict["violations"])
    display(HTML(constraints_df.head(10).to_html()))
# Poll the most recent monitoring execution until it leaves the
# 'InProgress' state, then report the outcome.
while True:
    resp = mon_executions[-1].describe()
    status = resp['ProcessingJobStatus']
    msg = resp['ExitMessage']
    if status == 'InProgress':
        # Still running -- check again in 30 seconds.
        time.sleep(30)
    elif status == 'Completed':
        # Finished normally; summarise any detected data-drift violations.
        print("Finished: %s" % msg)
        print_constraint_violations()
        break
    else:
        # Failed or stopped; surface the exit message and give up.
        print("Error: %s" % msg)
        break
# -
# You can also check these metrics on CloudWatch. Just open the CloudWatch console, click on **Metrics**, then select:
# All -> aws/sagemaker/Endpoints/data-metrics -> Endpoint, MonitoringSchedule
#
# Use the *endpoint_monitor* name to filter the metrics
# ## Cleaning up
traffic_generator_running=False
time.sleep(3)
endpoint_monitor.delete_monitoring_schedule()
time.sleep(10) # wait for 10 seconds before trying to delete the endpoint
xgb_predictor.delete_endpoint()
xgb_predictor2.delete_endpoint()
# # The end
| lab/00_Warmup/01_Basic model with a Builtin Algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 気象学における重要な数式: Important mathematical formulas in meteorology
# 数式の記号の意味
#
# $
# P ...圧力(気圧)\qquad V ...体積 \qquad T ...絶対温度 \qquad \rho ...空気の密度 \\
# g ...重力加速度 \qquad \theta ...温位 \qquad \theta_{e} ...相当温位
# $
# ## 大気の熱力学に関する数式
#
#
# * 気体の状態方程式
#
# \begin{equation}
# PV = mRT \tag{3.1}
# \end{equation}
#
# (mは質量 kg, Rは気体定数 (gas constant))
# 気体定数Rは気体の種類によって異なる。乾燥空気では、$287 m^2s^{-2}K^{-1} $である
#
#
# * 気体の密度
#
# \begin{equation}
# \rho= \frac{m}{V}\\
# \end{equation}
#
# * 比容
#
# \begin{equation}
# \alpha = \frac{1}{\rho}\\
# \end{equation}
#
# 従って気体の状態方程式は、
# \begin{equation}
# P = \rho R T \tag{3.2}
# \end{equation}
#
# \begin{equation}
# P\alpha = RT \tag{3.3}
# \end{equation}
#
# となる。
#
# \begin{equation}
# \frac{pV}{T} = mR = MR = R^* \tag{3.5}
# \end{equation}
#
# $R^*$ は一般気体定数
# \begin{equation}
# R^* = 8.3143 × 10^3\ JK^{-1}kmol^{-1}
# \end{equation}
#
# $\ 1kg\ $の乾燥空気に対する気体定数: $\ R_d \ $
#
# 乾燥空気の分子量: $\ M_d\ $
#
# \begin{eqnarray}
# R_d &=& \frac{R^*}{M_d} \nonumber \\
# &=& \frac{8.3143 * 10^3}{28.9} \nonumber \\
# &≒& 287\ JK^{-1}kg^{-1} \nonumber \\
# \end{eqnarray}
# ## 静力学平衡の式
#
# \begin{equation}
# \Delta p = - g \rho \Delta Z \tag{3.12}
# \end{equation}
#
# 乾燥空気の状態方程式より、
#
# \begin{equation}
# \frac{\Delta p}{\Delta z} = - \frac{p g}{R_d T}
# \end{equation}
# ## 熱力学第一法則の式
#
# * Qは熱量、Wは仕事量、Uは内部エネルギー
#
# \begin{equation}
# Q = W + \Delta U
# \end{equation}
# ## 温位と相当温位の関係
#
# * $\theta$ は温位、$\theta_e$ は相当温位、$w$ は混合比
#
# \begin{equation}
# \theta_e = \theta + 2.8w
# \end{equation}
#
#
# ## 比熱
#
# * 1gの物質の温度を1K上昇させるのに必要な熱量のことを**比熱**といい、Cとする。単位は $\ JK^{-1}g^{-1}\ $
#
#
# \begin{equation}
# Q = C m \Delta T
# \end{equation}
# ## 定容比熱と定圧比熱
#
# * 定積変化の比熱を**定容比熱(定積比熱)**といい、$\ C_v\ $という。単位は $\ JK^{-1}kg^{-1}\ $
# * 定圧変化の比熱を**定圧比熱**といい、$\ C_p\ $という。単位は $\ JK^{-1}kg^{-1}\ $
# * 乾燥空気の定容比熱$\ C_v\ = 717\ JK^{-1}kg^{-1}\ $
# * 乾燥空気の定圧比熱$\ C_p\ = 1004\ JK^{-1}kg^{-1}\ $
#
# \begin{equation}
# C_p - C_v = R \qquad (Rは気体定数)
# \end{equation}
# ## 水蒸気量の表現に関する数式
#
# * 混合比と水蒸気圧の関係
#
# $W$は混合比、$e$は水蒸気圧
#
# \begin{equation}
# W \approx 0.622 \frac{e}{p}
# \end{equation}
#
# * 相対湿度の式 $I$
#
# \begin{equation}
# (相対湿度) = \frac{空気塊内の水蒸気量}{そのときの温度の飽和水蒸気量} * 100
# \end{equation}
#
# * 相対湿度の式 $II$
#
# \begin{equation}
# (相対湿度) = \frac{空気塊内の水蒸気圧}{そのときの温度の飽和水蒸気圧} * 100
# \end{equation}
#
# * 混合比
#
# \begin{equation}
# (混合比) = \frac{水蒸気の質量}{乾燥空気の質量}
# \end{equation}
#
# * 比湿
#
# \begin{equation}
# (比湿) = \frac{水蒸気の質量}{湿潤空気全体の質量}
# \end{equation}
| 90_Important mathematical formulas in meteorology.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: S2S Challenge
# language: python
# name: s2s
# ---
# # Learn Gamma
#
# The objective is to fit one gamma distribution per lat-lon to model the precipitation distribution of a tile.
# First, we study the Gamma distribution object from pytorch to learn how tu use it.
# %load_ext autoreload
# %autoreload 2
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
import torch
import scipy
import seaborn as sns
import xarray as xr
from crims2s.util import fix_dataset_dims
# -
d = torch.distributions.Gamma(torch.Tensor([5.0]), torch.Tensor([1]))
sample = d.sample((1000,))
df = pd.DataFrame({'value': sample.numpy()[:,0]})
sns.displot(data=df)
# Method-of-moments estimates for a Gamma(concentration, rate):
# mean = a/b and var = a/b**2, hence a = mean**2/var and b = mean/var.
# torch.distributions.Gamma is parameterised by *rate* (inverse scale), so
# the second estimate must be mean/var; var/mean would be the scale.
# (The grid-wide estimates later in this notebook already use mean/var.)
a_hat = sample.mean() ** 2 / sample.var()
b_hat = sample.mean() / sample.var()
a_hat
b_hat
# +
a = torch.full((1,), a_hat, requires_grad=True)
b = torch.full((1,), b_hat, requires_grad=True)
#a = torch.rand((1,), requires_grad=True)
#b = torch.rand((1,), requires_grad=True)
optimizer = torch.optim.SGD([a,b], lr=1e-2, momentum=0)
losses = []
a_list = []
b_list = []
mean_lls = []
regs = []
lambd = 1e-10
for _ in range(1000):
estimated_gamma = torch.distributions.Gamma(torch.clamp(a, min=1e-6) , torch.clamp(b, min=1e-6))
mean_log_likelihood = (1.0 - lambd) * estimated_gamma.log_prob(sample).mean()
regularization = lambd * torch.square(a+b)
mean_lls.append(mean_log_likelihood.detach().item())
regs.append(regularization.detach().item())
loss = -1.0 * mean_log_likelihood + regularization
a_list.append(a.detach().item())
b_list.append(b.detach().item())
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.append(loss.detach().item())
print(a.detach().item())
print(b.detach().item())
# -
plt.plot(losses)
plt.plot(a_list)
plt.plot(b_list)
plt.plot(mean_lls)
plt.plot(regs)
# ## Do it for our real data
# +
OBS_DIR = '***BASEDIR***training-output-reference/'
obs_path = pathlib.Path(OBS_DIR)
obs_files = sorted([f for f in obs_path.iterdir() if 'tp' in f.stem])
# + tags=[]
tp = xr.open_mfdataset(obs_files, preprocess=fix_dataset_dims)
# -
tp
tp_w34 = (tp.sel(lead_time='28D') - tp.sel(lead_time='14D')).sel(latitude=slice(50.0, 30.0), forecast_dayofyear=slice(60, 220), forecast_year=slice(2007, None))
tp_w34
tp_w34.isnull().sum(dim=['latitude', 'longitude']).tp.compute().plot()
tp_w34 = tp_w34.stack(station=('latitude', 'longitude'))
tp_w34.dims
station_ids = xr.DataArray(np.arange(tp_w34.dims['station']), dims='station_coords')
tp_w34 = tp_w34.rename(station='station_coords').assign_coords(station=station_ids).swap_dims(station_coords='station')
# +
#tp_w34 = tp_w34.drop('station_coords')
# -
tp_w34
station_mask = (tp_w34.isnull().sum(dim=['forecast_year', 'forecast_dayofyear']) == 0).compute()
station_mask
tp_w34_only_land = tp_w34.where(station_mask, drop=True)
tp_w34_only_land
tp_train = tp_w34_only_land.isel(forecast_year=slice(None, -3))
tp_val = tp_w34_only_land.isel(forecast_year=slice(-3, None))
tp_train
tp_val
a_hat_xarray = tp_train.mean(dim='forecast_year') ** 2 / (tp_train.var(dim='forecast_year') + 1e-6)
b_hat_xarray = (tp_train.mean(dim='forecast_year') + 1e-6) / (tp_train.var(dim='forecast_year') + 1e-6)
a_hat_xarray.isnull().compute().sum()
train_pytorch = torch.tensor(tp_train.tp.data.compute())
train_pytorch.shape
val_pytorch = torch.tensor(tp_val.tp.data.compute())
val_pytorch.shape
train_pytorch.min()
train_pytorch.shape
# +
a_hat = torch.tensor(a_hat_xarray.tp.data.compute(), requires_grad=True, device='cuda')
b_hat = torch.tensor(b_hat_xarray.tp.data.compute(), requires_grad=True, device='cuda')
#a_hat = torch.rand(*train_pytorch.shape[1:], requires_grad=True)
#b_hat = torch.rand(*train_pytorch.shape[1:], requires_grad=True)
optimizer = torch.optim.SGD([a_hat,b_hat], lr=1e-2, momentum=0.0)
losses = []
a_list = []
b_list = []
mean_lls = []
regs = []
vals = []
true_train = []
true_val = []
train_pytorch = torch.tensor(tp_train.tp.data.compute()).cuda()
val_pytorch = torch.tensor(tp_val.tp.data.compute()).cuda()
# -
lambd = 0.01
optimizer = torch.optim.SGD([a_hat,b_hat], lr=5.0, momentum=0.0)
for i in range(2000):
estimated_gamma = torch.distributions.Gamma(torch.clamp(a_hat, min=1e-6) , torch.clamp(b_hat, min=1e-6))
mean_log_likelihood = (1.0 - lambd) * estimated_gamma.log_prob(train_pytorch + 1e-6).mean()
regularization = lambd * (torch.square(a_hat) + torch.square(b_hat)).mean()
mean_lls.append(-mean_log_likelihood.detach().item())
regs.append(regularization.detach().item())
loss = -1.0 * mean_log_likelihood + regularization
loss.backward()
optimizer.step()
optimizer.zero_grad()
val_mean_log_likelihood = (1.0 - lambd) * estimated_gamma.log_prob(val_pytorch + 1e-6).mean()
losses.append(loss.detach().item())
vals.append(-val_mean_log_likelihood.detach().item())
if i % 10 == 0:
a_list.append(a_hat.mean().detach().item())
b_list.append(b_hat.mean().detach().item())
true_train.append(estimated_gamma.log_prob(train_pytorch + 1e-6).mean().detach().item())
true_val.append(estimated_gamma.log_prob(val_pytorch + 1e-6).mean().detach().item())
estimated_gamma.log_prob(train_pytorch + 1e-6)[:, 0, 0]
fig, ax = plt.subplots()
plt.plot(true_train)
plt.plot(true_val)
# +
begin = 0
end = -1
fig, ax = plt.subplots()
ax.plot(mean_lls[begin:end], label='train')
ax.plot(vals[begin:end], label='val')
ax.plot(regs[begin:end], label='reg')
ax.legend()
plt.show()
# -
plt.plot(a_list)
plt.plot(b_list)
plt.plot(regs)
(a_hat < 0.0).sum()
a_hat.mean()
b_hat.mean()
a_hat.max()
val_pytorch.shape
train_pytorch[:, 0, 0]
train_pytorch.mean()
a_hat.max()
b_hat.max()
b_hat[0,0]
a_hat[0,0]
g = torch.distributions.Gamma(a_hat[0,0], b_hat[0,0])
g.log_prob(train_pytorch[:,0,0])
pdf = torch.exp(g.log_prob(torch.arange(1e-6, 50)))
plt.plot(pdf.detach().cpu().numpy())
a_hat[0,0]
scipy_g = scipy.stats.gamma(a=0.4462, scale=1 / 0.0194)
scipy_g
pdfs
fix, ax = plt.subplots()
ax.plot(pdfs)
a, loc, scale = scipy.stats.gamma.fit(train_pytorch[:, 0, 0].detach().cpu().numpy())
scipy_g = scipy.stats.gamma(a=1.5, scale=0.0681)
pdfs = scipy_g.pdf(np.arange(0.1, 50))
plt.plot(pdfs)
train_pytorch[:, 0, 0]
scipy_g.pdf(train_pytorch[:, 0, 0].detach().cpu().numpy())
a_hat[0,0]
# ## Do it on only one station
sample = tp_train.isel(station=0, forecast_dayofyear=0).compute()
sample
# Moment estimates for the single-station sample, with a small epsilon to
# guard against zero variance.
a_hat_xarray = (sample.mean(dim='forecast_year') ** 2 / (sample.var(dim='forecast_year') + 1e-6)).compute().tp.data
# The epsilon must sit inside the denominator: mean / (var + eps).  The
# original parenthesisation computed (mean / var) + eps, which no longer
# protects against division by zero (cf. the grid-wide estimates above).
b_hat_xarray = (sample.mean(dim='forecast_year') / (sample.var(dim='forecast_year') + 1e-6)).compute().tp.data
a_hat_xarray
b_hat_xarray
a_hat = torch.tensor(a_hat_xarray, requires_grad=True)
b_hat = torch.tensor(b_hat_xarray, requires_grad=True)
g = torch.distributions.Gamma(a_hat, b_hat)
g.log_prob(sample.tp.data)
| notebooks/learn-gamma.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
# Regular 50x50x120 grid with 0.5 spacing along each axis, anchored at the
# origin.  The coordinate arrays are built vectorised instead of with an
# explicit triple loop; the resulting values are identical.
origin = np.array((0, 0, 0))
counts = np.array((50, 50, 120))
spacing = np.array((0.5, 0.5, 0.5))

grid = {}
grid['x'], grid['y'], grid['z'] = np.meshgrid(
    np.arange(counts[0]) * spacing[0],
    np.arange(counts[1]) * spacing[1],
    np.arange(counts[2]) * spacing[2],
    indexing='ij')

# Lennard-Jones repulsive term: zero everywhere except outside a cylinder
# of squared radius 9 around the xy midpoint, within the slab 28 < z < 32.
grid['LJr'] = np.zeros(shape=tuple(counts), dtype=float)
mid_xy = (origin[0] + counts[0]*spacing[0]/2, origin[1] + counts[1]*spacing[1]/2)
outside_cylinder = ((grid['x'] - mid_xy[0])**2 + (grid['y'] - mid_xy[1])**2) > 9
inside_slab = (grid['z'] > 28) & (grid['z'] < 32)
grid['LJr'][outside_cylinder & inside_slab] = 10.
import AlGDock.IO
IO_Grid = AlGDock.IO.Grid()
data = {'origin':origin, 'spacing':spacing, 'counts':counts, 'vals':grid['LJr'].flatten()}
IO_Grid.write('LJr.dx', data)
IO_Grid.write('LJr.nc', data)
# -
| Nanopore_Example/grids/.ipynb_checkpoints/createLJrNanopore-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from datascience import *
import matplotlib
# The `warn` keyword of matplotlib.use() was deprecated in Matplotlib 3.1
# and removed in 3.3; selecting the backend no longer takes it.
matplotlib.use('Agg')
# %matplotlib inline
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# +
#difference notebook (e.g. jupyter, google colab, ms azure) and programming language (e.g. python, R, Java, C++, C# etc)
# python -> import a file ; clean the data <- ! possible
# 3rd party libraries -> pandas, tidyverse, datascience [numpy, scipy, sci-kit learn, nltk,]
# +
#variables:
# nominal/categorical: terriers, retrievers, spaniels | powerlifter, strongman, crossfit, olympic
# ordinal: small, medium, large
# count: 1,2,3,4 ...
# continuous: height, weight, age
# special: text, signal (audio/video/brainwaves)
# -
a = 0
b = 1
c = a + b
print(c)
#importing anaconda library package math
import math, scipy
import numpy as np
#using sqrt function from math
math.sqrt(9)
math.factorial(4)
math.factorial(50)
math.factorial(1000)
2/3
5.42/100000
0.0000542
#text = string
'This is a string'
'This is also a string'
#concatenation
'break' + 'fast' + '1'
"na " * 5
#array is a list of variables
a = 5
print(a)
b = (145,140,127,55,100,24,20,12)
b
len(b)
#more array confusion
array1 = np.arange(2,7,2)
array2 = array1 - 1
array3 = array1 + 1
array1
array2
array3
#sep 29 class
#the data frame or data table
Table.read_table('data/flowers.csv')
flowers = Table.read_table('data/flowers.csv')
flowers
#manipulating tables
flowers.select('Petals', 'Name')
flowers.select(2)
flowers1 = flowers.drop('Color')
flowers1
print(flowers) #tsv = tab separated value
movies = Table.read_table('data/top_movies_by_title.csv')
movies
#sorting rows and columns
movies.sort('Gross', descending=True)
movies.num_rows
sorted_by_gross = movies.sort('Gross', descending=True)
sorted_by_gross
sorted_by_gross.sort('Studio', distinct=True)
top_per_studio = sorted_by_gross.sort('Studio', distinct=True)
top_per_studio.barh('Studio','Gross')
top_studios = top_per_studio.sort('Gross',descending=True)
top_studios.barh('Studio','Gross')
just_revenues = top_studios.select('Studio', 'Gross','Gross (Adjusted)')
just_revenues
just_revenues.barh('Studio')
#tidy data: what is it?
#tidy dataset has 3 properties:
# - each predictor/attribute/feature/variable forms a column and has well defined values
# - each observation/exemplar/instance forms a row
# - each observational unit forms a table/matrix/data frame
# +
#opposite of tidy dataset = untidy/messy data
#how do we prepare tidy data?
#there are 5 principles of tidy data
#Problem: Column names are values not variable names
#Principle: Column names need to be informative, variable names and not values
# -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pew = pd.read_csv("data/pew-raw.csv")
pew.head(10)
tidy_pew = pew.melt(id_vars = "religion", var_name = "income", value_name = "count")
tidy_pew.head(40)
# +
#problem: there are multiple variables stored in 1 column
#principle: each column needs to consist of one and only one variable
df_tb = pd.read_csv('data/tb-raw.csv')
df_tb.columns
# -
df_tb.head()
df_tb.tail()
df_tb = df_tb.melt(id_vars=["country","year"], var_name=["demographic"],value_name="cases") #implicit typing
df_tb.head(10)
# +
#python lambda
#x = lambda a : (a - 10) * 2
#print(x(4))
# +
#df_tb = (df_tb.assign(gender = lambda x: x.demographic.str[0].astype(str),
#age = lambda x: x.demographic.str[1:].astype(str))
# .drop("demographic",axis=1))
df_tb=(df_tb.assign(gender = lambda x: x.demographic.str[0].astype(str), age = lambda x: x.demographic.str[1:].astype(str)).drop("demographic",axis=1))
df_tb.head(5)
#pd.update
# -
# Styling the dataset
df_tb.update(pd.DataFrame({"age":[age[:2]+'-'+age[2:] if len(age) == 4 else (age) for age in df_tb["age"]]}))
df_tb=(df_tb.replace(to_replace =["m","f","014","65","u"],value =["Male","Female","0-14","65+","unknown"])
.dropna())
df_tb.sample(10)
#problem: variables are stored in both rows and columns
#tidy data principle #3: variables need to be in cells, not rows and columns
weather = pd.read_csv("data/weather-raw.csv")
weather
(weather.
melt(id_vars = ["id","year","month","element"],var_name = "day", value_name="temp").
pivot_table(index = ["id","year","month","day"],
columns = "element",
values = "temp").
reset_index().
head()
)
#problem: there are multiple types of data stored in 1 table
#tidy data principle #4: each table column needs to have a singular data type
billboard = pd.read_csv("data/billboard_cleaned.csv")
billboard.head(20)
#storing the unique song-artist combinations in billboard
billboard = billboard.set_index(["artist","track","time"])
#setting up songs
songs = pd.DataFrame.from_records(
columns=["id","artist","track","time"],
data =[
(a + 1, b, c, d)
for (a, (b,c,d)) in enumerate(billboard.index.unique())
],
)
songs.head(20)
ranking = billboard[["date","rank"]].copy()
ranking["id"] = songs.set_index(["artist","track","time"])
ranking = ranking.reset_index(drop=True).set_index("id")
songs = songs.set_index("id")
songs.head(20)
ranking.head(20)
| class-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
import matplotlib as mpl
import astropy.units as u
import emcee
from dust_extinction.averages import G03_SMCBar
from dust_extinction.parameter_averages import F19
from measure_extinction.stardata import StarData
from measure_extinction.extdata import ExtData
from measure_extinction.modeldata import ModelData
from measure_extinction.utils.fit_model import FitInfo
from measure_extinction.utils.helpers import get_full_starfile
from measure_extinction.utils.fit_model import get_best_fit_params, get_percentile_params
# -
# Specify the location of the model and observed data
file_path = "/home/kgordon/Python_git/extstar_data/"
# Define star specific parameters
starname = "azv23"
fstarname = f"{starname}.dat"
velocity = 0.0 # SMC radial velocity from NED
relband = "V"
# Read in the star data
# get the observed reddened star data
reddened_star = StarData(fstarname, path=f"{file_path}/DAT_files/")
band_names = reddened_star.data["BAND"].get_band_names()
data_names = reddened_star.data.keys()
# Plot the spectrum
fig, ax = plt.subplots(figsize=(13, 10))
reddened_star.plot(ax)
ax.set_xscale("log")
ax.set_yscale("log")
# Get the model data
# +
tlusty_models_fullpath = glob.glob("{}/Models/tlusty_*v10.dat".format(file_path))
# tlusty_models_fullpath = tlusty_models_fullpath[0:10]
tlusty_models = [
tfile[tfile.rfind("/") + 1 : len(tfile)] for tfile in tlusty_models_fullpath
]
# get the models with just the reddened star band data and spectra
modinfo = ModelData(
tlusty_models,
path="{}/Models/".format(file_path),
band_names=band_names,
spectra_names=data_names,
)
# -
# Setup the fit parameters
# +
# parameter names
pnames = ["logT","logg","logZ","Av","Rv","C2","C3","C4","x0","gamma","HI_gal","HI_mw"]
# initial starting position
# **customize for each star**
# AzV23 sptype = B3Ia -> logTeff, logg
# SMC metallicity -> 0.2 (1/5 solar) -> log(Z) =
params = [4.25, 3.09, -0.7, 0.75, 3.7, 2.5, 0.65, 0.26, 4.66, 0.86, 22.0, 19.0]
# min/max allowed values for each parameter
# some are based on the min/max of the stellar atmosphere grid
plimits = [
[modinfo.temps_min, modinfo.temps_max], # log(Teff)
[modinfo.gravs_min, modinfo.gravs_max], # log(g)
[modinfo.mets_min, modinfo.mets_max], # log(Z)
[0.0, 4.0], # Av
[2.0, 6.0], # Rv
[-0.1, 5.0], # C2
[0.0, 2.5], # C3
[0.0, 1.0], # C4
[4.5, 4.9], # xo
[0.6, 1.5], # gamma
[17.0, 24.0], # log(HI) internal to galaxy
[17.0, 22.0], # log(HI) MW foreground
]
# add Gaussian priors based on prior knowledge
# sptype -> log(Teff), log(g)
# galaxy metallicity -> log(Z)
ppriors = {}
ppriors["logT"] = (4.25, 0.1)
ppriors["logg"] = (3.1, 0.1)
ppriors["logZ"] = (-0.7, 0.1)
# -
# Create the weight arrays based on the observed uncertainties
# +
# cropping info for weights
# bad regions are defined as those were we know the models do not work
# or the data is bad
ex_regions = [
[8.23 - 0.1, 8.23 + 0.1], # geocoronal line
[8.7, 10.0], # bad data from STIS
[3.55, 3.6],
[3.80, 3.90],
[4.15, 4.3],
[6.4, 6.6],
[7.1, 7.3],
[7.45, 7.55],
[7.65, 7.75],
[7.9, 7.95],
[8.05, 8.1],
] / u.micron
# Per-spectrum fitting weights: inverse observational uncertainty for good
# points, zero elsewhere.
weights = {}
for cspec in data_names:
    # start with zero weight everywhere; only points with npts > 0 are usable
    weights[cspec] = np.full(len(reddened_star.data[cspec].fluxes), 0.0)
    gvals = reddened_star.data[cspec].npts > 0
    weights[cspec][gvals] = 1.0 / reddened_star.data[cspec].uncs[gvals].value
    # work in inverse wavelength to match the exclusion-region units
    x = 1.0 / reddened_star.data[cspec].waves
    for cexreg in ex_regions:
        # zero out regions where the models or the data are known to be bad
        weights[cspec][np.logical_and(x >= cexreg[0], x <= cexreg[1])] = 0.0
# make the photometric bands have higher weight
weights["BAND"] *= 10000.0
# -
# Package the fit info needed. FitInfo class defines the likelihood functions as well.
fitinfo = FitInfo(
pnames,
plimits,
weights,
parameter_priors=ppriors,
stellar_velocity=velocity,
)
# +
# simple function to turn the log(likelihood) into the chisqr;
# required because op.minimize searches for the minimum chisqr (not the max likelihood, as MCMC algorithms do)
def nll(*args):
    """Negative log-probability of the model given the data.

    Thin wrapper around the module-level ``fitinfo.lnprob`` so that
    ``scipy.optimize.minimize`` (a minimiser) can be used to find the
    maximum-likelihood parameters.
    """
    return -fitinfo.lnprob(*args)
# run the fit
result = op.minimize(
nll, params, method="Nelder-Mead", options={"maxiter": 1000}, args=(reddened_star, modinfo, fitinfo)
)
# check the fit output
print(result["message"])
# +
# save results
params = result["x"]
fit_params = params
params_best = params
pnames_extra = pnames
# print the best fit
for k, val in enumerate(params_best):
print("{} # {}".format(val, pnames_extra[k]))
# -
# Calculate and save the extinction curve
# +
# intrinsic sed
modsed = modinfo.stellar_sed(fit_params[0:3], velocity=velocity)
# dust_extinguished sed
ext_modsed = modinfo.dust_extinguished_sed(fit_params[3:10], modsed)
# hi_abs sed
hi_ext_modsed = modinfo.hi_abs_sed(
fit_params[10:12], [velocity, 0.0], ext_modsed
)
# create a StarData object for the best fit SED
modsed_stardata = modinfo.SED_to_StarData(modsed)
# create an extinction curve and save it
extdata = ExtData()
extdata.calc_elx(reddened_star, modsed_stardata, rel_band=relband)
col_info = {"av": fit_params[3], "rv": fit_params[4]}
extdata.save(starname + "_ext.fits", column_info=col_info)
# -
# Plot the spectra
# +
norm_model = np.average(hi_ext_modsed["BAND"])
norm_data = np.average(reddened_star.data["BAND"].fluxes)
# plotting setup for easier to read plots
fontsize = 18
font = {"size": fontsize}
mpl.rc("font", **font)
mpl.rc("lines", linewidth=1)
mpl.rc("axes", linewidth=2)
mpl.rc("xtick.major", width=2)
mpl.rc("xtick.minor", width=2)
mpl.rc("ytick.major", width=2)
mpl.rc("ytick.minor", width=2)
# setup the plot
fig, ax = plt.subplots(figsize=(13, 10))
# plot the bands and all spectra for this star
for cspec in modinfo.fluxes.keys():
if cspec == "BAND":
ptype = "o"
else:
ptype = "-"
# ax.plot(reddened_star.data[cspec].waves,
# weights[cspec], 'k-')
ax.plot(
reddened_star.data[cspec].waves,
reddened_star.data[cspec].fluxes / norm_data,
"k" + ptype,
label="data",
)
# print(reddened_star.data[cspec].waves)
# print(modinfo.waves[cspec])
ax.plot(
modinfo.waves[cspec], modsed[cspec] / norm_model, "b" + ptype, label=cspec
)
ax.plot(
modinfo.waves[cspec],
ext_modsed[cspec] / norm_model,
"r" + ptype,
label=cspec,
)
ax.plot(
modinfo.waves[cspec],
hi_ext_modsed[cspec] / norm_model,
"g" + ptype,
label=cspec,
)
# finish configuring the plot
ax.set_ylim(8e4 / norm_model, 2e9 / norm_model)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.3 * fontsize)
ax.set_ylabel(r"$F(\lambda)$ [$ergs\ cm^{-2}\ s\ \AA$]", fontsize=1.3 * fontsize)
ax.tick_params("both", length=10, width=2, which="major")
ax.tick_params("both", length=5, width=1, which="minor")
# ax.legend()
# use the whitespace better
fig.tight_layout()
# -
# Plot the extinction curve
# +
fig, ax = plt.subplots(figsize=(13, 10))
# convert from E(l-V) to A(l)/A(V)
print(extdata.type_rel_band != "V")
extdata.columns["AV"] = (params[3], 0.0)
extdata.trans_elv_alav()
extdata.plot(ax) #, alax=True)
ax.set_xscale("log")
ax.set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.3 * fontsize)
ax.set_ylim(0.0, 10.0)
ax.set_ylabel(r"$A(\lambda)/A(V)$", fontsize=1.3 * fontsize)
ax.tick_params("both", length=10, width=2, which="major")
ax.tick_params("both", length=5, width=1, which="minor")
# plot known extinction curves
mod_x = np.arange(0.3, 8.7, 0.1) / u.micron
smcbar = G03_SMCBar()
ax.plot(1.0 / mod_x, smcbar(mod_x), "k:")
f19_rv31 = F19(Rv=3.1)
ax.plot(1.0 / mod_x, f19_rv31(mod_x), "k-")
# -
# Run emcee MCMC sampler to define uncertainties (bonus section)
# +
p0 = params
ndim = len(p0)
#nwalkers = 2 * ndim
#nsteps = 50
#burn = 50
nwalkers = 100
nsteps = 500
burn = 500
# setting up the walkers to start "near" the inital guess
p = [p0 * (1 + 0.01 * np.random.normal(0, 1.0, ndim)) for k in range(nwalkers)]
# setup the sampler
sampler = emcee.EnsembleSampler(
nwalkers, ndim, fitinfo.lnprob, args=(reddened_star, modinfo, fitinfo)
)
# burn in the walkers
pos, prob, state = sampler.run_mcmc(p, burn)
# rest the sampler
sampler.reset()
# do the full sampling
pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)
# create the samples variable for later use
samples = sampler.chain.reshape((-1, ndim))
# get the best fit values
pnames_extra = pnames + ["E(B-V)", "N(HI)/A(V)", "N(HI)/E(B-V)"]
params_best = get_best_fit_params(sampler)
fit_params = params_best
print("best params")
print(params_best)
# get the 16, 50, and 84 percentiles
params_per = get_percentile_params(samples)
# save the best fit and p50 +/- uncs values to a file
# save as a single row table to provide a uniform format
#f = open(out_basename + "_fit_params.dat", "w")
#f.write("# best fit, p50, +unc, -unc\n")
for k, val in enumerate(params_per):
print(
"{} {} {} {} # {}".format(
params_best[k], val[0], val[1], val[2], pnames_extra[k]
)
)
# f.write(
# "{} {} {} {} # {}\n".format(
# params_best[k], val[0], val[1], val[2], pnames_extra[k]
# )
# )
# +
# intrinsic sed
modsed = modinfo.stellar_sed(fit_params[0:3], velocity=velocity)
# dust_extinguished sed
ext_modsed = modinfo.dust_extinguished_sed(fit_params[3:10], modsed)
# hi_abs sed
hi_ext_modsed = modinfo.hi_abs_sed(
fit_params[10:12], [velocity, 0.0], ext_modsed
)
# create a StarData object for the best fit SED
modsed_stardata = modinfo.SED_to_StarData(modsed)
# create an extinction curve and save it
extdata = ExtData()
extdata.calc_elx(reddened_star, modsed_stardata, rel_band=relband)
col_info = {"av": fit_params[3], "rv": fit_params[4]}
extdata.save(starname + "_ext.fits", column_info=col_info)
# +
norm_model = np.average(hi_ext_modsed["BAND"])
norm_data = np.average(reddened_star.data["BAND"].fluxes)
# plotting setup for easier to read plots
fontsize = 18
font = {"size": fontsize}
mpl.rc("font", **font)
mpl.rc("lines", linewidth=1)
mpl.rc("axes", linewidth=2)
mpl.rc("xtick.major", width=2)
mpl.rc("xtick.minor", width=2)
mpl.rc("ytick.major", width=2)
mpl.rc("ytick.minor", width=2)
# setup the plot
fig, ax = plt.subplots(figsize=(13, 10))
# plot the bands and all spectra for this star
for cspec in modinfo.fluxes.keys():
if cspec == "BAND":
ptype = "o"
else:
ptype = "-"
# ax.plot(reddened_star.data[cspec].waves,
# weights[cspec], 'k-')
ax.plot(
reddened_star.data[cspec].waves,
reddened_star.data[cspec].fluxes / norm_data,
"k" + ptype,
label="data",
)
# print(reddened_star.data[cspec].waves)
# print(modinfo.waves[cspec])
ax.plot(
modinfo.waves[cspec], modsed[cspec] / norm_model, "b" + ptype, label=cspec
)
ax.plot(
modinfo.waves[cspec],
ext_modsed[cspec] / norm_model,
"r" + ptype,
label=cspec,
)
ax.plot(
modinfo.waves[cspec],
hi_ext_modsed[cspec] / norm_model,
"g" + ptype,
label=cspec,
)
# finish configuring the plot
ax.set_ylim(8e4 / norm_model, 2e9 / norm_model)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.3 * fontsize)
ax.set_ylabel(r"$F(\lambda)$ [$ergs\ cm^{-2}\ s\ \AA$]", fontsize=1.3 * fontsize)
ax.tick_params("both", length=10, width=2, which="major")
ax.tick_params("both", length=5, width=1, which="minor")
# ax.legend()
# use the whitespace better
fig.tight_layout()
# +
fig, ax = plt.subplots(figsize=(13, 10))
# convert from E(l-V) to A(l)/A(V)
print(extdata.type_rel_band != "V")
extdata.columns["AV"] = (params[3], 0.0)
extdata.trans_elv_alav()
extdata.plot(ax) #, alax=True)
ax.set_xscale("log")
ax.set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.3 * fontsize)
ax.set_ylim(0.0, 10.0)
ax.set_ylabel(r"$A(\lambda)/A(V)$", fontsize=1.3 * fontsize)
ax.tick_params("both", length=10, width=2, which="major")
ax.tick_params("both", length=5, width=1, which="minor")
# plot known extinction curves
mod_x = np.arange(0.3, 8.7, 0.1) / u.micron
smcbar = G03_SMCBar()
ax.plot(1.0 / mod_x, smcbar(mod_x), "k:")
f19_rv31 = F19(Rv=3.1)
ax.plot(1.0 / mod_x, f19_rv31(mod_x), "k-")
# -
| notebooks/AzV 23 Extinction with Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Opinions and Gaze: Data Analysis (Step 3 of 3)
# This Jupyter notebook contains the data analysis for
# for "Seeing the other side: Conflict and controversy
# increase gaze coordination" (Paxton, Dale, & Richardson,
# *in preparation*).
# This notebook is the **last of three** notebooks for the
# "Opinions and Gaze" project. This must be run **after**
# the `oag-data_cleaning.ipynb` and `oag-data_processing.ipynb`
# files.
# To run this file from scratch, you will need:
# * `data/04-analysis_dataframes`: Directory of analysis- and
# plotting-ready dataframes, produced by `oag-data_processing.ipynb`.
# * `oag-plotting_df.csv`: Dataframe of real and baseline data.
# * `supplementary-code/`: Directory of additional functions and global
# variables.
# **Note**: Due to data sensitivity (per the Institutional
# Review Board of the University of California, Merced),
# only researchers from ICPSR member institutions may access
# study data through the approved link.
# ## Variable key
# Linear lag (continuous): `ot1`
# Quadratic lag (continuous): `ot2`
# Opinion congruence (factor, contrast-coded):
# - Agreement: `agree = .5`
# - Disagreement: `agree = -.5`
# Topic class (factor, contrast-coded):
# * Mixed-view: `viewtype = .5`
# * Dominant-view: `viewtype = -.5`
# Data type (factor, contrast-coded):
# * Real data: `data = .5`
# * Baseline (shuffled) data: `data = -.5`
# ## Table of contents
# * [Preliminaries](#Preliminaries)
# - [Import data and convert factors](#Import-data-and-convert-factors)
# * [Descriptive statistics](#Descriptive-statistics)
# - [Derive listener segment statistics](#Derive-listener-segment-statistics)
# - [Derive listener demographic statistics](#Derive-listener-demographic-statistics)
# * [Plotting](#Plotting)
# * [Data analysis](#Data-analysis)
# - [Plot-level analysis](#Plot-level-analysis)
# - [Planned analyses](#Planned-analyses)
# - [Exploratory analyses](#Exploratory-analyses)
# - [Baseline comparisons](#Baseline-comparisons)
# **Written by**: <NAME> (University of California, Berkeley)
# **Date last modified**: 11 April 2018
# ***
# # Preliminaries
# clear the space
rm(list=ls())
# read in the needed files and functions
source('../supplementary-code/libraries_and_functions-oag.r')
# ## Import data and convert factors
# read in plotting dataframe
plotting_file = file.path(analysis_data_path,
'oag-plotting_df.csv')
plotting_df = read.csv(plotting_file,
sep=",", header=TRUE)
# create a real-data analysis dataframe
# Real-data analysis dataframe: keep only the real (non-baseline) trials,
# set the factor level order used in plots, and Gaussianize the recurrence
# measure so it is suitable for linear modelling.
analysis_real_df = plotting_df %>%
  # filter out baseline
  dplyr::filter(data==max(data)) %>%
  # reorder agreement for plotting
  mutate(agree = factor(agree, levels=c(.5, -.5))) %>%
  # factorize everything else
  mutate_at(funs(factor),
            .vars = factor_variables[!factor_variables %in% c('agree',
                                                              'data')]) %>%
  # convert recurrence to normal distribution
  mutate(r = Gaussianize(r,
                         type='s'))
# create a real-data plotting dataframe
plotting_real_df = plotting_df %>%
# filter out baseline
dplyr::filter(data==max(data)) %>%
# reorder agreement for plotting
mutate(agree = factor(agree, levels=c(.5, -.5))) %>%
# factorize everything else
mutate_at(funs(factor),
.vars = factor_variables[!factor_variables %in% c('agree',
'data')])
# create a baseline-data analysis dataframe
analysis_baseline_df = plotting_df %>%
# filter out baseline
dplyr::filter(data==min(data)) %>%
# reorder agreement for plotting
mutate(agree = factor(agree, levels=c(.5, -.5))) %>%
# factorize everything else
mutate_at(funs(factor),
.vars = factor_variables[!factor_variables %in% c('agree',
'data')]) %>%
# convert recurrence to normal distribution
mutate(r = Gaussianize(r,
type='s'))
# create a baseline-data plotting dataframe
plotting_baseline_df = plotting_df %>%
# filter out baseline
dplyr::filter(data==min(data)) %>%
# reorder agreement for plotting
mutate(agree = factor(agree, levels=c(.5, -.5))) %>%
# factorize everything else
mutate_at(funs(factor),
.vars = factor_variables[!factor_variables %in% c('agree',
'data')])
# factorize the comparison plotting dataframe
analysis_joint_df = plotting_df %>%
# reorder agreement for plotting
mutate(agree = factor(agree, levels=c(.5 ,-.5))) %>%
# factorize everything else
mutate_at(funs(factor),
.vars = factor_variables[!factor_variables %in% 'agree']) %>%
# convert recurrence to normal distribution
mutate(r = Gaussianize(r,
type='s'))
# factorize the comparison plotting dataframe
plotting_df = plotting_df %>%
# reorder agreement for plotting
mutate(agree = factor(agree, levels=c(.5 ,-.5))) %>%
# factorize everything else
mutate_at(funs(factor),
.vars = factor_variables[!factor_variables %in% 'agree'])
# ***
# # Descriptive statistics
# This section produces basic descriptive statistics
# about individual speakers' segments and about listeners'
# demographic data.
# ## Derive listener segment statistics
# ### Total number of unique listeners with any recorded data
# Listeners who successfully had any recorded data
# will have had a survey saved with their data. We count those
# files to show how many listeners had any data recorded.
# How many unique **gaze files** do we have?
gaze_file_names = list.files('../data/01-input/listener-gaze-raw',
recursive=FALSE)
# participant IDs are the 5-digit sequences embedded in each filename
gaze_participants = str_extract_all(gaze_file_names,
'\\d{5}')
# str_extract_all() returns a list of character vectors (one per file)
gaze_participants = lapply(gaze_participants, trimws)
length(unique(gaze_participants))
# How many unique **questionnaire files** do we have?
questionnaire_file_names = list.files('../data/01-input/listener-responses-raw',
recursive=FALSE,
pattern='*.tsv')
questionnaire_participants = str_extract_all(questionnaire_file_names,
'\\d{5}')
questionnaire_participants = lapply(questionnaire_participants, trimws)
length(unique(questionnaire_participants))
# How many **unique participants with gaze and/or questionnaire
# files** do we have?
# union of the two ID lists, de-duplicated below
questions_or_gaze_participants = c(unique(gaze_participants),
unique(questionnaire_participants))
length(unique(questions_or_gaze_participants))
# ### Unfiltered listeners and missing data by segment
# Considering all listeners with any recorded data (i.e.,
# any listener with _at least some_ gaze data tracked
# during _at least one_ of the segments and with _some_
# included metadata) and all segments with any recorded
# data (i.e., no individual listeners' segments in which
# no samples were recorded):
# 1. how many listeners do we have overall,
# 1. how many listeners do we have per segment,
# 1. and what proportion of data are missing per segment?
# load in the missing data table
missing_data_filename = file.path(processed_data_path,
'listener-missing_data.csv')
missing_data = read.table(missing_data_filename,
sep=',',
header=TRUE)
# rows with NA `r_event` appear to be segment-level summary rows rather
# than event-level rows -- TODO confirm against `oag-data_processing.ipynb`
unfiltered_listeners = missing_data %>% ungroup() %>%
dplyr::filter(is.na(r_event))
# per-segment (topic x side) listener counts and mean missing proportion
unfiltered_stats = unfiltered_listeners %>% ungroup() %>%
group_by(topic, side) %>%
summarise(unique_listeners = n(),
proportion_missing = round(mean(proportion),
3))
unfiltered_stats
# **How many unique listeners were included in the
# unfiltered dataset**?
length(unique(unfiltered_listeners$listener))
# On average, **how many listeners were included on each
# segment in the unfiltered dataset**?
round(mean(unfiltered_stats$unique_listeners),2)
# On average, **what proportion of the gaze data was
# missing across the unfiltered dataset**?
round(mean(unfiltered_stats$proportion_missing),3)
# ### Filtered listeners and missing data by segment
# Considering just the listeners that we consider to have
# usable data for each segment (i.e., no more than 30%
# missing gaze data in the segment):
# 1. how many listeners do we have overall,
# 1. how many listeners do we have per segment,
# 1. and what proportion of data are missing per segment?
# keep only listener-segments with <= 30% missing gaze samples
filtered_listeners = missing_data %>% ungroup() %>%
dplyr::filter(is.na(r_event) & proportion<=.3)
filtered_stats = filtered_listeners %>% ungroup() %>%
group_by(topic, side) %>%
summarise(unique_listeners = n(),
proportion_missing = round(mean(proportion),
3))
filtered_stats
# **How many unique listeners are in the filtered dataset**?
length(unique(filtered_listeners$listener))
# On average, **how many listeners were included on each
# segment in the filtered dataset**?
round(mean(filtered_stats$unique_listeners),2)
# On average, **what proportion of the gaze data was
# missing in the filtered dataset**?
round(mean(filtered_stats$proportion_missing),3)
# ### Filtered segments and missing data by listener
# per-listener counts of usable segments and mean missing proportion
filtered_segments = filtered_listeners %>% ungroup() %>%
group_by(listener) %>%
summarise(segments = n(),
proportion_missing = mean(proportion))
# On average, **how many segments does each listener
# contribute to the filtered dataset**?
round(mean(filtered_segments$segments),2)
# On average, **what proportion of missing data does each listener
# have for segments included in the filtered dataset**?
round(mean(filtered_segments$proportion_missing),3)
# ### Discarded listeners
# How many listeners were discarded because they were
# **missing required questionnaire or opinion** data?

# create dataframe for missing files
missing_metadata_participants = data.frame()

# load in missing opinion dataframe, if it exists
missing_opinion_filename = file.path(processed_data_path,
                                     'listener-missing_opinions.csv')
if (file.exists(missing_opinion_filename)){
  # tag these rows with the reason they were dropped
  missing_opinions = read.table(missing_opinion_filename,
                                sep=",",
                                header=TRUE) %>%
    mutate(reason = 'missing_opinions')
  # append to dataframe
  missing_metadata_participants = rbind.data.frame(missing_metadata_participants,
                                                   missing_opinions)
}

# load in missing questionnaire dataframe, if it exists
missing_questionnaire_filename = file.path(processed_data_path,
                                           'listener-missing_questionnaire.csv')
if (file.exists(missing_questionnaire_filename)){
  # tag these rows with the reason they were dropped; without this column,
  # rbind.data.frame() errors when both files exist (columns must match)
  missing_questionnaire = read.table(missing_questionnaire_filename,
                                     sep=",",
                                     header=TRUE) %>%
    mutate(reason = 'missing_questionnaire')
  # append to dataframe
  missing_metadata_participants = rbind.data.frame(missing_metadata_participants,
                                                   missing_questionnaire)
}
length(unique(missing_metadata_participants$listener))

# How many listeners were completely discarded from the dataset
# due to having **more than 30% missing data in all trials?**
discarded_listeners = missing_data %>% ungroup() %>%
  dplyr::filter(is.na(r_event)) %>%
  # anyone with no usable (<= 30% missing) segment at all
  dplyr::filter(!(listener %in% unique(filtered_segments$listener)))
length(unique(discarded_listeners$listener))

# How many listeners were discarded due to **other equipment error**?
# (i.e., recruited but neither included nor accounted for above)
discarded_participants = c(unique(missing_metadata_participants$listener),
                           unique(discarded_listeners$listener))
recruited_participants = unique(questions_or_gaze_participants)
included_participants = unique(filtered_listeners$listener)
length(recruited_participants) -
  length(discarded_participants) -
  length(included_participants)
# ## Derive listener demographic statistics
# **Note**: All numeric categories were derived alphabetically.
# read in dataset
demographics_data = plotting_real_df %>% ungroup() %>%
# keep only the columns we need
dplyr::select(one_of(crqa_questionnaire_columns),
-topic_and_side,
-agree) %>%
# only one line per participant
distinct()
# ### What is the self-reported **gender distribution**?
# *Note: Participants were asked about gender but
# were provided with sex categories.*
gender_data = demographics_data %>% ungroup() %>%
group_by(gender) %>%
summarise(gender_counts = n(),
gender_proportion = round((n()/nrow(demographics_data)),
3))
# female = lower, male = higher
gender_data
# ### What is the self-reported **mean age**?
# strip non-numeric characters from free-text age responses, then average
age = demographics_data %>%
dplyr::select(age) %>%
mutate(age = gsub("[^0-9.]", "", age)) %>%
dplyr::filter(age != '') %>%
mutate(age = as.numeric(age)) %>%
.$age
round(mean(age),2)
# ### What is the self-reported **native language** distribution?
native_lang_data = demographics_data %>% ungroup() %>%
# convert all to lowercase
mutate(native_lang = tolower(native_lang)) %>%
mutate(native_lang = trimws(native_lang)) %>%
# get counts
group_by(native_lang) %>%
summarise(native_lang_counts = n(),
native_lang_proportion = round(n()/nrow(demographics_data),
3)) %>%
ungroup() %>%
# arrange alphabetically (ascending)
dplyr::arrange(native_lang)
# English = lowest, Spanish = middle, other = highest
native_lang_data
# ## Derive listener opinion statistics
# read in dataset
opinion_data = plotting_real_df %>% ungroup() %>%
# keep only the columns we need
dplyr::select(one_of(crqa_questionnaire_columns),
topic_and_side,
-gender,
-native_lang,
-age) %>%
# get one line per topic and side
distinct()
# ### What is the **distribution of self-reported agreement** of listeners with the speaker, regardless of topic and side?
agree_bias = opinion_data %>% ungroup() %>%
group_by(agree) %>%
summarise(agree_counts = n(),
agree_proportions = round(n()/nrow(opinion_data),
3))
# agree = .5, disagree = -.5
agree_bias
# ***
# # Plotting
# ## Distribution of opinion congruence by topic class
# prepare for histogram
agreement_plots = plotting_df %>% ungroup() %>%
# get just one line per listener per topic
dplyr::select(speaker, listener, topic_and_side, agree, viewtype) %>%
group_by(speaker, listener, topic_and_side, agree, viewtype) %>%
distinct() %>%
ungroup() %>%
# group for counts
group_by(agree, viewtype) %>%
summarise(counts = n()) %>%
ungroup() %>%
# convert counts to proportion
mutate(proportion = counts/sum(counts))
# create the plot
# NOTE(review): `labeller` is not a ggplot() argument (it belongs to
# facet_wrap/facet_grid), so `agree_labeller` below has no effect here.
# NOTE(review): the aesthetics map x to `agree` and fill to `viewtype`,
# but the x-axis labels name topic classes and the fill legend is titled
# "Opinion congruence" with breaks .5/-.5 -- the channels appear swapped
# relative to their labels; confirm against the intended figure.
agreement_distribution = ggplot(data=agreement_plots,
aes(x=as.factor(agree),
y=proportion,
fill=as.factor(viewtype)),
labeller = agree_labeller) +
geom_bar(stat="identity",
position='dodge')+
scale_fill_manual(name="Opinion congruence",
breaks=c(.5,-.5),
labels=c("Agree", "Disagree"),
values=c("#b2182b", "#67a9cf")) +
scale_x_discrete(labels=c("Mixed-view","Dominant-view")) +
theme(legend.position='bottom') +
xlab("Topic class") +
ylab("Proportion") +
ggtitle("Listener agreement by topic class\nand opinion congruence")
# save a high-resolution version
ggsave(plot = agreement_distribution,
height = 4,
width = 4,
filename = '../figures/gca-agreement_viewtype_distribution.jpg')
# save a smaller version of the plot
ggsave(plot = agreement_distribution,
height = 4,
width = 4,
dpi=100,
filename = '../figures/gca-agreement_viewtype_distribution-inline.jpg')
# 
# The plot above breaks down listeners' self-reported
# agreement (blue) or disagreement (red) with each segment, along
# with whether that segment was part of a mixed- (right)
# or dominant-view (left) topic.
# ## Individual segment plot for all listeners
# What do all listeners' individual segments look like?
# plot all individual listeners; one panel per listener, one DRP line per
# segment, colored by self-reported (dis)agreement with the speaker
all_listener_plot = ggplot(plotting_real_df,
aes(x=t,
y=r,
group=topic_and_side,
color=as.factor(agree))) +
facet_wrap(~listener) +
geom_path() +
scale_color_manual(name="Opinion congruence",
labels=c("Agree", "Disagree"),
values=c("#67a9cf", "#b2182b")) +
xlab("Lag (in 10Hz samples)") +
ylab("Recurrence (rec)") +
ggtitle("Gaze coordination between listeners and speakers
by lag and opinion congruence") +
# hide the per-listener strip labels (anonymity + visual clutter)
theme(strip.text.x = element_blank(),
strip.background = element_rect(colour="white",
fill="white"),
legend.position='bottom')
# save a high-resolution version
ggsave(plot = all_listener_plot,
height = 7,
width = 7,
filename = '../figures/gca-individual_trials.jpg')
# save an inline display version
ggsave(plot = all_listener_plot,
height = 7,
width = 7,
dpi=100,
filename = '../figures/gca-individual_trials-inline.jpg')
# 
# Each panel above presents all usable data from a single
# listener. Each line is a single diagonal recurrence profile
# between the listener and the speaker for a single audio
# segment. Lines are color-coded according to whether the
# listener rated having agreed with (blue) or disagreed with
# (red) the speaker after hearing their segment.
# ## Gaze coordination by opinion congruence and lag
# How does speaker-listener gaze coordination look when
# considering listeners' disagreement versus agreement?
# plot recurrence by agreement only
r_by_agreement = ggplot(data=plotting_real_df,
aes(x=t,
y=r,
color=agree,
group=agree)) +
# plot mean DRP curves
geom_smooth(method="loess",
se=TRUE) +
# add in lines for raw means
geom_line(data = plotting_real_df %>%
group_by(t, agree) %>%
summarise(r = mean(r)),
aes(x = t,
y = r,
color = agree,
group = agree)) +
# set color by agreement
# NOTE(review): `agree` is a factor with levels .5/-.5 (labels "0.5"/"-0.5"),
# so breaks=c("1","0") cannot match any level and these legend breaks/labels
# likely have no effect -- confirm the rendered legend.
scale_color_manual(name="Opinion congruence",
breaks=c("1","0"),
labels=c("Agree","Disagree"),
values=c("#67a9cf","#b2182b")) +
# set labels
xlab("Lag (in 10Hz samples)") +
ylab("Recurrence (rec)") +
ggtitle("Gaze coordination by lag
and opinion congruence") +
theme(strip.background = element_rect(colour="white",
fill="white"),
legend.position='bottom')
# save a high-resolution version
ggsave(plot = r_by_agreement,
height = 5,
width = 4,
filename = '../figures/gca-main_interaction_plot.jpg')
# save an inline display version
ggsave(plot = r_by_agreement,
height = 5,
width = 4,
dpi=100,
filename = '../figures/gca-main_interaction_plot-inline.jpg')
# 
# The figure above shows the aggregated diagonal
# recurrence profile (DRP) for the planned analysis,
# predicting recurrence (*rec*) with opinion
# congruence (listener agreement [blue] versus disagreement
# [red] with the speaker in each segment) and lag.
# ## Gaze coordination by opinion congruence, topic class, and lag
# How does gaze coordination look when considering opinion
# congruence (agreement versus disagreement) and topic
# class (dominant- versus mixed-view topics)?
# plot recurrence by agreement and viewtype; `agree` is recoded to
# readable panel labels before faceting
r_by_agreement_and_viewtype = ggplot(data=plotting_real_df %>%
mutate(agree = ifelse(agree==-.5,
"Disagree",
"Agree")),
aes(x=t,
y=r,
color=viewtype,
group=viewtype)) +
# add in lines for raw means
geom_line(data = plotting_real_df %>%
mutate(agree = ifelse(agree==-.5,
"Disagree",
"Agree")) %>%
group_by(t, agree, viewtype) %>%
summarise(r = mean(r)),
aes(x = t,
y = r,
color = viewtype,
group = viewtype)) +
# separate by agreement
facet_wrap(~ agree) +
# set color by dominant- versus mixed-view
scale_color_manual(name="Topic class",
breaks=c(-.5, .5),
labels=c("Dominant-view", "Mixed-view"),
values=c("#d95f02","#7570b3")) +
# set labels
xlab("Lag (in 10Hz samples)") +
ylab("Recurrence (rec)") +
ggtitle("Gaze coordination by lag,
opinion congruence, and topic class") +
theme(strip.background = element_rect(colour="white",
fill="white"),
legend.position='bottom') +
# plot mean DRP curves
geom_smooth(method="loess",
se=TRUE)
# save a high-resolution version
ggsave(plot = r_by_agreement_and_viewtype,
height = 5,
width = 4,
filename = '../figures/gca-exploratory_interaction_plot.jpg')
# save an inline display version
ggsave(plot = r_by_agreement_and_viewtype,
height = 5,
width = 4,
dpi=100,
filename = '../figures/gca-exploratory_interaction_plot-inline.jpg')
# 
# The figure above shows the aggregated diagonal
# recurrence profile (DRP) for the exploratory analysis,
# predicting recurrence (*rec*) with opinion
# congruence (listener agreement [left panel] versus
# disagreement [right panel] with the speaker in each segment),
# topic class (dominant-view [orange] or mixed-view [blue]), and lag.
# ***
# # Data analysis
# ## Plot-level analysis
# +
# create subsets for plot-level metric analysis
# (one row per listener-segment: t==0 keeps a single row per DRP)
one_liner = plotting_real_df %>%
dplyr::filter(t==0) %>%
# shift lag to account for window
# (recenters `maxlag` so 0 = simultaneous; presumably `win_size` comes
# from the sourced globals -- TODO confirm)
mutate(maxlag = maxlag-win_size-1)
# separate for agreement and disagreement
# (comparing the `agree` factor to a number works via its "0.5"/"-0.5" labels)
one_liner_agree = one_liner %>%
dplyr::filter(agree==.5)
one_liner_disagree = one_liner %>%
dplyr::filter(agree==-.5)
# -
# ### Maximum recurrence
# Is maximum recurrence different from 0? Yes.
# (one-sample t-tests against mu = 0)
t.test(one_liner$maxrec)
# Are the maximum recurrence values for both
# agreement and disagreement different from 0?
# Yes for each.
t.test(one_liner_agree$maxrec)
t.test(one_liner_disagree$maxrec)
# Does maximum recurrence differ between
# agreement and disagreement? No.
lm.maxrec.agree = lmer(maxrec ~ agree +
(1 + agree | listener) +
(1 + agree | topic_and_side),
data = one_liner)
pander_lme(lm.maxrec.agree)
# What is the mean maximum recurrence for agreement
# and for disagreement?
print(paste0('Mean maximum recurrence for agreement: ',
round(mean(one_liner_agree$maxrec), 3)))
print(paste0('Mean maximum recurrence for disagreement: ',
round(mean(one_liner_disagree$maxrec), 3)))
# ### Maximum lag
# Is maxlag different from 0 overall? No.
t.test(one_liner$maxlag)
# Is maximum lag for each agreement group different
# from 0? No for each.
t.test(one_liner_agree$maxlag)
t.test(one_liner_disagree$maxlag)
# Does maximum lag differ by agreement or disagreement?
# No.
# NOTE(review): random-effects structure is simpler here than in the
# maxrec model above ((1 | topic_and_side) vs (1 + agree | ...)) --
# presumably for convergence; confirm this was intentional.
lm.maxlag.agree = lmer(maxlag ~ agree +
(1 + agree | listener) +
(1 | topic_and_side),
data = one_liner)
pander_lme(lm.maxlag.agree)
# What is the mean maximum lag for agreement and
# for disagreement?
# (samples are at 10 Hz, hence the /10 conversion to seconds)
print(paste0('Mean maximum lag for agreement: ',
round(mean(one_liner_agree$maxlag), 2),
' samples (',
round(mean(one_liner_agree$maxlag)/10, 2),
' sec)'))
print(paste0('Mean maximum lag for disagreement: ',
round(mean(one_liner_disagree$maxlag), 2),
' samples (',
round(mean(one_liner_disagree$maxlag)/10, 2),
' sec)'))
# ## Planned analyses
# Here, we'll test the relations among recurrence (`r`),
# linear lag (`ot1`), quadratic lag (`ot2`), and opinion congruence
# (`agree`) that we had anticipated testing at the outset of the study.
# ### Two-way interaction model
# First, we'll model the data using all main terms and up
# to two-way interaction terms.
planned_model_twowayint = lmer(r ~ ot1 + ot2 + agree +
ot1:agree + ot2:agree + ot1:ot2 +
(1 + ot1 + ot2 + agree | listener) +
(1 + ot1 + ot2 + agree | topic_and_side),
data=analysis_real_df)
pander_lme(planned_model_twowayint)
# ### Full interaction model
# Next, let's include the main terms and all possible
# interaction terms.
planned_model_allint = lmer(r ~ ot1 * ot2 * agree +
(1 + ot1 + ot2 + agree | listener) +
(1 + ot1 + ot2 + agree | topic_and_side),
data=analysis_real_df)
pander_lme(planned_model_allint)
# ### Model comparison
# Which model better accounts for the data?
# (anova() on lmer fits refits with ML for the likelihood-ratio test)
anova(planned_model_twowayint,
planned_model_allint)
# The model with all possible interaction terms trends toward
# but does not significantly perform better than the model
# with only two-way interaction terms.
# ## Exploratory analyses
# Next, we'll look into the relations among recurrence (`r`),
# linear lag (`ot1`), quadratic lag (`ot2`), opinion congruence
# (`agree`), and topic class (`viewtype`) -- a new variable
# that we had not expected to explore at the outset of the study.
# ### Two-way interaction model
# First, we'll model the data using all main terms and up
# to two-way interaction terms.
exploratory_model_twowayint = lmer(r ~ ot1 + ot2 + agree + viewtype +
ot1:agree + ot2:agree +
ot1:viewtype + ot2:viewtype +
agree:viewtype + ot1:ot2 +
(1 + ot1 + ot2 + agree + viewtype | listener) +
(1 + ot1 + ot2 + agree + viewtype | topic_and_side),
data=analysis_real_df)
pander_lme(exploratory_model_twowayint)
# ### Full interaction model
# Next, let's include the main terms and all possible interaction terms.
exploratory_model_allint = lmer(r ~ ot1 * ot2 * agree * viewtype +
(1 + ot1 + ot2 + agree + viewtype | listener) +
(1 + ot1 + ot2 + agree + viewtype | topic_and_side),
data=analysis_real_df)
pander_lme(exploratory_model_allint)
# ### Model comparison
# Which model better accounts for the data?
anova(exploratory_model_twowayint,
exploratory_model_allint)
# Here, the full model accounts for the data
# significantly better than the model that
# includes only limited interaction terms.
# ### Post-hoc analyses
# Given the possible differences by opinion congruence and
# topic class, let's dig a bit more deeply into the exploratory
# model. What's actually driving the significant effects that
# we see?
# separate dataframes by type
# (split() names the list elements after the factor labels "-0.5"/"0.5")
topic_dfs = split(analysis_real_df,
analysis_real_df$viewtype)
dominant_df = topic_dfs$`-0.5`
mixed_df = topic_dfs$`0.5`
# **Dominant-view model** (`viewtype = -.5`)
exploratory_model_dominant = lmer(r ~ ot1 * ot2 * agree +
(1 + ot1 + ot2 + agree | listener) +
(1 + ot1 + ot2 + agree | topic_and_side),
data=dominant_df)
pander_lme(exploratory_model_dominant)
# We find no significant effects for dominant-view topics based on
# listener agreement or disagreement.
# **Mixed-view model** (`viewtype = .5`)
exploratory_model_mixed = lmer(r ~ ot1 * ot2 * agree +
(1 + ot1 + ot2 + agree | listener) +
(1 + ot1 + ot2 + agree | topic_and_side),
data=mixed_df)
pander_lme(exploratory_model_mixed)
# The mixed-view topics appear to be doing the majority of the heavy
# lifting with these data: All effects here are significant.
# ## Baseline comparisons
# Let's add a term to account for real (`data=.5`) versus baseline
# data (`data=-.5`) to the planned and exploratory models reported
# above. In each, we'll again compare models that include only
# up to two-way interactions with those that include all
# possible interaction terms to see which better account for the data.
# NOTE(review): the random-effects structures here are simpler than in
# the real-data-only models above -- presumably simplified for
# convergence on the doubled dataset; confirm this was intentional.
# ### Planned analysis
# #### Two-way interaction model
planned_model_baseline_twowayint = lmer(r ~ ot1 + ot2 + agree + data +
ot1:agree + ot2:agree +
ot1:data + ot2:data +
agree:data + ot1:ot2 +
(1 + agree | listener) +
(1 | topic_and_side),
data=analysis_joint_df)
pander_lme(planned_model_baseline_twowayint)
# #### Full interaction model
planned_model_baseline_allint = lmer(r ~ ot1 * ot2 * agree * data +
(1 + agree | listener) +
(1 | topic_and_side),
data=analysis_joint_df)
pander_lme(planned_model_baseline_allint)
# #### Model comparison
anova(planned_model_baseline_twowayint,
planned_model_baseline_allint)
# We're significantly better able to maximize log-likelihood
# of the model by including all possible interaction terms,
# rather than just including two-way interactions.
# ### Exploratory analysis
# #### Two-way interaction model
exploratory_model_baseline_twowayint = lmer(r ~ ot1 + ot2 + agree + viewtype + data +
ot1:agree + ot2:agree +
ot1:viewtype + ot2:viewtype +
ot1:ot2 + agree:viewtype +
ot1:data + ot2:data +
viewtype:data + agree:data +
(1 + data | listener) +
(1 + data | topic_and_side),
data=analysis_joint_df)
pander_lme(exploratory_model_baseline_twowayint)
# #### Full interaction model
exploratory_model_baseline_allint = lmer(r ~ ot1 * ot2 * agree * viewtype * data +
(1 + data | listener) +
(1 + data | topic_and_side),
data=analysis_joint_df)
pander_lme(exploratory_model_baseline_allint)
# #### Model comparison
anova(exploratory_model_baseline_twowayint,
exploratory_model_baseline_allint)
# Again, the model that includes all possible interaction
# terms is significantly better able to capture the data
# than the model that includes only two-way interaction terms.
# # Discussion
# Gaze patterns -- that is, the ways in which people look at
# things in their environment over time -- can reveal essential
# information about attention and understanding during conversation.
# When analyzing gaze patterns that are only tied to perception and
# information processing and *not* social signaling (i.e., looking
# at images while listening to a monologue without the speaker in
# the room), previous research has shown listeners who have more similar
# (or more *coordinated*) gaze patterns with speakers better understand
# the monologues.
# This experiment explores the degree to which the coupling of
# information-processing gaze can be affected by the broader
# context of the interaction. We designed the experiment to
# test specifically listeners' agreement or disagreement with
# the speaker, but we unexpectedly found that the data could
# shed light on broader social processes around sociopolitical
# issues -- namely, whether the issue has been largely settled
# in the social setting (i.e., "dominant-view" topics) or has
# two opposing views of similar popularity in that social
# setting (i.e., "mixed-view" topics).
# Contrary to our hypotheses, we found that gaze coupling *increased*
# when listeners disagreed with the speakers. When performing
# exploratory analyses of the unexpected social setting variable (i.e.,
# whether the topic was dominant-view or mixed-view), we found
# another intriguing effect: Not only did listeners in all mixed-view
# (or controversial) topics have greater gaze coordination with speakers,
# but listeners who *disagreed* with the speakers about controversial
# topics had the *highest* amount of gaze coordination -- *even though the
# listeners could not see what the speakers were seeing*. These coordination
# dynamics suggest that listeners may be trying to attend more to speakers
# who are providing controversial opinions or opinions that are different
# from their own; given that previous work has causally linked gaze
# coordination with understanding (Richardson & Dale, 2005), this may suggest
# that listeners are being more active in their attempts to take these
# speakers' perspectives and "see the other side."
| analyses/oag-data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
import os
import shutil
import math
import pydicom
import cv2
import random
# fixed seed so the random train/val/test splits below are reproducible
random.seed(10)
cmmd_manifest_directory = "/media/craig/Larry/python/manifest-1616439774456/"
# NOTE: [:-23] strips the trailing "manifest-1616439774456/" (23 chars);
# brittle -- must be updated if the manifest directory name ever changes.
# The result keeps its trailing slash.
parent_dir = (cmmd_manifest_directory[:-23]) #e.g. "/media/craig/Larry/python"
# -
# per-sample metadata: subject_id, classification, file_location columns
# are used below -- TODO confirm full schema against the CSV
df = pd.read_csv("./CMMD_metadata_subset.csv")
df
# +
# Split the metadata by diagnosis: one dataframe per class.
is_benign = df['classification'] == 'Benign'
benign_df = df.loc[is_benign]
malignant_df = df.loc[df['classification'] == 'Malignant']

# Destination folders for the raw DICOMs, one per class.
cmmd_dir = parent_dir
benign_loc = cmmd_dir + "cmmd_data/benign/"
malignant_loc = cmmd_dir + "cmmd_data/malignant/"

# Make sure both class directories exist before any copying happens.
for class_dir in (benign_loc, malignant_loc):
    Path(class_dir).mkdir(parents=True, exist_ok=True)

# File basenames that mark a second lesion case for the same patient.
matches = ["1-3.dcm", "1-4.dcm"]
def create_benign_malignant(df, dest_folder):
    """Copy each sample's DICOM from the manifest tree into `dest_folder`.

    Files named 1-3.dcm / 1-4.dcm (see module-level `matches`) are a second
    case for the same patient, so they are placed in a '<subject_id>_b/'
    directory to avoid clobbering the first case's files.
    """
    for _, row in tqdm(df.iterrows(), total=df.shape[0]):
        src = os.path.join(cmmd_dir, "manifest-1616439774456", row['file_location'])
        basename = os.path.basename(src)  # file name + extension
        # 3rd/4th image for this patient? use the "_b" subject directory
        suffix = "_b" if any(m in basename for m in matches) else ""
        subject_dir = os.path.join(dest_folder, row['subject_id'] + suffix)
        Path(subject_dir).mkdir(parents=True, exist_ok=True)
        shutil.copyfile(src, os.path.join(subject_dir, basename))

print("Building benign")
create_benign_malignant(benign_df, benign_loc)
print("Building malignant")
create_benign_malignant(malignant_df, malignant_loc)
# -
#method to move 20% of a directory into another location
#splits a dataset into train/test and/or train/validate
def create_test_dataset(data_location, destination):
    """Move a random 20% (rounded up) of the entries in `data_location`
    into `destination`.

    Sampling uses the module-level seeded `random` state, so a full run of
    the script always produces the same splits.
    """
    entries = os.listdir(data_location)  # snapshot once; reused for len + sample
    count = math.ceil(len(entries) / 5)  # 20% of the entries, rounded up
    for name in random.sample(entries, count):
        # os.path.join works with or without a trailing slash on the source
        shutil.move(os.path.join(data_location, name), destination)
# +
# Hold out a test set: 20% of each class (benign / malignant).
benign_testset_location = cmmd_dir+"TEST/benign/"
malignant_testset_location = cmmd_dir+"TEST/malignant/"
#create directory if doesnt exist
Path(benign_testset_location).mkdir(parents=True, exist_ok=True)
#create directory if doesnt exist
Path(malignant_testset_location).mkdir(parents=True, exist_ok=True)
print("Creating 20% test split for benign set...")
create_test_dataset(benign_loc, benign_testset_location)
print("Creating 20% test split for malignant set...")
create_test_dataset(malignant_loc, malignant_testset_location)
# +
# Hold out a validation set.
# NOTE: this moves 20% of what REMAINS after the test split,
# i.e. roughly 16% of the original data per class.
benign_valset_location = cmmd_dir+"VAL/benign/"
malignant_valset_location = cmmd_dir+"VAL/malignant/"
#create directory if doesnt exist
Path(benign_valset_location).mkdir(parents=True, exist_ok=True)
#create directory if doesnt exist
Path(malignant_valset_location).mkdir(parents=True, exist_ok=True)
create_test_dataset(benign_loc, benign_valset_location)
create_test_dataset(malignant_loc, malignant_valset_location)
# -
# Whatever remains in cmmd_data/ becomes the training set.
shutil.move(cmmd_dir+"cmmd_data", cmmd_dir+"TRAIN")
# +
def move_dcm_from_subdir(source, destination):
    """Flatten one level of per-subject subdirectories.

    Every file found under `source/<subject>/` is moved into `destination`
    and renamed to a running index ("1.dcm", "2.dcm", ...) so that the
    flattened names cannot collide across subjects.
    """
    Path(destination).mkdir(parents=True, exist_ok=True)
    counter = 1
    for subject in os.listdir(source):
        for filename in os.listdir(os.path.join(source, subject)):
            shutil.move(os.path.join(source, subject, filename),
                        os.path.join(destination, str(counter) + ".dcm"))
            counter += 1
# Flatten each split/class tree (one folder per subject) into a single
# directory per class; replaces six near-identical call blocks, preserving
# the original call order (TRAIN, VAL, TEST; benign before malignant).
for split in ("TRAIN", "VAL", "TEST"):
    for label in ("benign", "malignant"):
        move_dcm_from_subdir(source=cmmd_dir + split + "/" + label + "/",
                             destination=cmmd_dir + "cmmd_data/" + split + "/" + label + "/")
# +
def rm_dir(directory):
    """Remove a directory tree, printing (rather than raising) any failure.

    Note: parameter renamed from the misspelled `directiory`; all call
    sites in this script pass it positionally.
    """
    try:
        shutil.rmtree(directory)
    except OSError as e:
        print ("Error: %s - %s." % (e.filename, e.strerror))
# Remove the now-empty intermediate split directories.
rm_dir(cmmd_dir+"TRAIN")
rm_dir(cmmd_dir+"TEST")
rm_dir(cmmd_dir+"VAL")
# +
def convert_dicom_to_png(input_dir, output_dir):
    """Convert every DICOM under `input_dir/<classification>/` to PNG,
    mirroring the class subdirectories under `output_dir`.

    Output files are named 'img_<original name>.png'.
    """
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    for classification in os.listdir(input_dir):
        print("Working on "+classification+" for \n"+output_dir)
        class_in = os.path.join(input_dir, classification)
        class_out = os.path.join(output_dir, classification)
        # create the class output directory once, not per image
        Path(class_out).mkdir(parents=True, exist_ok=True)
        for dcm_image in tqdm(os.listdir(class_in)):
            ds = pydicom.read_file(os.path.join(class_in, dcm_image))  # read dicom image
            img = ds.pixel_array  # get image array
            # write png image
            cv2.imwrite(os.path.join(class_out, "img_" + dcm_image.replace('.dcm', '.png')),
                        img)
# Convert each split (TRAIN/TEST/VAL) from DICOM to PNG.
convert_dicom_to_png(input_dir = cmmd_dir + 'cmmd_data/TRAIN/',
output_dir = cmmd_dir + 'cmmd_data/PNG/TRAIN/')
convert_dicom_to_png(input_dir = cmmd_dir + 'cmmd_data/TEST/',
output_dir = cmmd_dir + 'cmmd_data/PNG/TEST/')
convert_dicom_to_png(input_dir = cmmd_dir + 'cmmd_data/VAL/',
output_dir = cmmd_dir + 'cmmd_data/PNG/VAL/')
#Tidy up directory
# Delete the DICOM copies and the original manifest now that PNGs exist.
rm_dir(cmmd_dir+"cmmd_data/TRAIN/")
rm_dir(cmmd_dir+"cmmd_data/TEST/")
rm_dir(cmmd_dir+"cmmd_data/VAL/")
rm_dir(cmmd_manifest_directory)
print("Data converted to PNG and filesystem is tidied.")
| 1_stratification_data_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Final: Classifying news stories as true or fake
#
# #### <NAME>, <NAME>, <NAME>
#
# With the rise of social media, everyone has an equal opportunity to create, share and distribute articles of their choosing. Some of these articles are written without sources or evidence for the claims they make. These "fake news" articles may have patterns of text that can make them identifiable as real or fake.
#
# Our goal is to classify whether a piece is fake or not _based on its substantive text content only_. The dataset is carefully prepared to remove any 'hints' to the model that detract from this goal (e.g., removing datelines).
#
# The data can be downloaded from Kaggle at https://www.kaggle.com/clmentbisaillon/fake-and-real-news-dataset. It is not present in the Github repo because of its size.
#
# We make heavy use of tidyverse R functionality. Especially useful here is the `tidytext` package, which drastically simplifies the calculation of TF-IDF matrices; and the `caret` library for cross-validated modeling. Load necessary libraries:
library(caret)        # cross-validated model training / tuning
library(dplyr)        # data-manipulation verbs
library(ggplot2)      # plotting
library(lubridate)    # easy dates
library(stringr)      # easy regex
library(textstem)     # stemming, lemmatization
library(tidytext)     # easy TF-IDF
library(tidyverse)
library(janitor)      # clean_names() for model-safe column names
library(topicmodels)
library(tm)           # text-mining utilities
# ## Load the data
#
# The data comes seperated in two CSVs of about equal size, one for true news stories, and other for fake. We load them, assign our dependent variable $y = \{fake, true\}$, and combine them.
# +
# Load the two CSVs and label each row with the dependent variable y.
fake <- read_csv('~/CUNY/DATA 620/final/Fake.csv') %>%
    mutate(y='fake')
real <- read_csv('~/CUNY/DATA 620/final/True.csv') %>%
    mutate(y='real')

# rename date to dt to keep R happy
# bug fix: the format string was '%B %d, $Y' -- '$Y' is not a valid lubridate
# code, so every date failed to parse; '%Y' parses the four-digit year.
df <- rbind(fake, real) %>%
    rename(dt = 'date') %>%
    mutate(dt = parse_date_time(dt, '%B %d, %Y'))
head(df, 1)
# -
# The dataset gives us the title of the text, the text itself, a classification of the news story, and the date of publishing.
#
# ## Exploratory analysis
#
# Before modeling, we get to know our dataset more thoroughly. First, inspect the distribution of the dependent variable, which shows a balanced dataset.
# Distribution of the dependent variable and of the subject labels.
table(df$y)
table(df$subject)
# The table below shows how the subject categories split between fake and real news. The subject labels separate the two classes almost perfectly: one class carries only `politicsNews` and `worldnews` while the other carries everything else. This suggests _subject should not be used as a model feature_.
prop.table(table(df$y, df$subject), 1)
#
# The `text` column will be omitted in the NLP analysis because it is simply too much data to be easily computed on the present machines. However, we can still try to create simple features using regular expressions from the text. We examine a few of these below:
# Presence of Twitter handle (row-wise proportions by class)
df$twitter_handle <- str_detect(df$text, '(?<!\\w)@[\\w+]{1,15}\\b')
prop.table(table(df$y, df$twitter_handle), 1)
# The above shows that there is an association between the presence of a twitter handle and the reality of a news story.
#
# Below shows a slightly weaker association with profanity (using a very crude and very incomplete regex):
# +
PROFANITY <- 'fuck|shit|bastard|bitch|whore|goddamn'
df$profanity <- str_detect( tolower(df$text), PROFANITY)
prop.table(table(df$y, df$profanity), 1)
# -
# Finally, we can count the number of all-caps words in the text. Interestingly, there's no real difference in the distributions. Below we will see that this is due to the presence of acronyms in real news stories, as well as dateline in all-caps, e.g., WASHINGTON (Reuters). A better solution would be to count the length (in words) of all-caps phrases which do not appear often in real news stories.
# +
# matches multi-letter all-caps runs, or isolated single capital letters
ALL_CAPS <- '(\\b[A-Z][A-Z]+|\\b[A-Z]\\b)'
df$all_caps <- str_count(df$text, ALL_CAPS)
ggplot(df %>% filter(all_caps > 5), aes(x=y, y=all_caps)) + geom_boxplot()
# -
# Finally, examine the distribution of fake and real news stories over time. There are no fake stories recorded until just after 2016, and following the spring of 2017 the real stories shoot up in quantity relative to the fake stories.
#
# This could suggest the optimal sample should be restricted to Jan. 2016 through June 2017, which would be about 26 thousand samples.
# Count distinct titles per day per class and plot both time series.
df %>%
    group_by(dt, y) %>%
    summarise(n=n_distinct(title)) %>%
    ggplot(aes(x=dt, y=n, colour=y)) + geom_line()
# We will perform the same operations as above on the title:
df$title_twitter_handle <- str_detect(df$title, '(?<!\\w)@[\\w+]{1,15}\\b')
df$title_all_caps <- str_count(df$title, ALL_CAPS)
df$title_profanity <- str_detect( tolower(df$title), PROFANITY)
# ## Data cleaning
#
# Using the story titles as IDs, there are duplicated stories. These are removed by keeping only the latest version:
# How many duplicated titles are there?
length(df$title); length(unique(df$title))
# +
# Keep only the latest version of each title, and restrict to the
# Jan 2016 - Jun 2017 window identified in the timeline plot above.
df <- df %>%
    select(y, title, dt) %>%
    arrange(title, desc(dt)) %>%
    group_by(title) %>%
    slice(which.max(dt)) %>%
    # bug fix: inside filter() the comparison must use the vectorised `&`;
    # the scalar `&&` either errors or silently tests only the first row.
    filter(dt > date("2016-01-01") & dt < date("2017-07-01")) %>%
    ungroup()

length(df$title); length(unique(df$title))
# -
# All of the real news stories are from Reuters, and all Reuters news stories begin with a dateline, e.g., WASHINGTON (Reuters). These are removed by a very simple Regex that matches the beginning of the story up to the first hyphen. It appears to be effective enough. Additionally, we remove any other mentions of 'Reuters' just to be sure, and numbers:
# NOTE(review): str_remove() removes only the FIRST match in each title;
# str_remove_all() may be intended for the digit and punctuation patterns -- confirm.
df <- df %>%
    mutate(title = str_remove(title, '^[^\\-]*\\-\\s+'),
        title = str_remove(tolower(title), 'reuters'),
        title = str_remove(title, '[0-9]+'),
        title = str_remove(title, '[:punct:]+'),
        title = str_trim(title, side = "both"),
        title = str_squish(title))
# Often, at this point in NLP process, we normally remove a list of stop words. However, it may be that there are differences in stop words between fake and real news. We thus retain them.
#
# ### Test/Training Split
#
# Now we're going to separate our test and training data. By keeping some data out of our model will ensure that we have data the model hasn't seen to get a sense of how our model will work on new data.
# Stratified 50/50 split on y; `indexed` holds the training row indices.
indexed <- createDataPartition(df$y, p=0.5, list=FALSE, times=1)
df_train <- df[indexed,]
df_test <- df[-indexed,]
# ### Tokenize
#
# However, we will be conducting other stemming, requiring tokenization. Recall that we are concerned only with the title for now due to data size limitations. We will include bigrams and trigrams as features.
# +
# Unigrams; drop=FALSE keeps the original title alongside each token.
tokens <- df_train %>%
    select(y, title) %>%
    unique() %>%
    unnest_tokens(word, title, drop=FALSE)
# Bigrams (title duplicated into title2 so the original survives unnesting).
tokens2 <- df_train %>%
    select(y, title) %>%
    unique() %>%
    mutate(title2 = title) %>%
    unnest_tokens(output = word, input = title2, token = "ngrams", n = 2)
# Trigrams.
tokens3 <- df_train %>%
    select(y, title) %>%
    unique() %>%
    mutate(title2 = title) %>%
    unnest_tokens(output = word, input = title2, token = "ngrams", n = 3)
tokens <- rbind(tokens, tokens2, tokens3)
head(tokens)
# -
# ### Stemming
#
# We will, of course, conduct stemming on the title, using the `textstem` library, which uses a stemmer based on the `libstemmer` C library developed in the early 2000s.
# +
# Stem every token (unigrams and n-grams alike) with textstem.
stems <- tokens %>%
    mutate(stemmed = stem_words(word))
head(stems)
# -
# ### Lemmatize
#
# Next, we clean up the tokens further by lemmatization, again using the `textstem` library:
# +
lemmas <- stems %>%
    mutate(lemma = lemmatize_words(stemmed))
head(lemmas)
# -
# ## Feature engineering
#
# Now we calculate TF-IDF. Those scores and all other features are incorporated to a data frame called `X` for later machine learning.
# Per-title lemma counts: one row per (title, y, lemma).
token_count <- lemmas %>%
    count(title, y, lemma, sort=TRUE) %>%
    ungroup()
head(token_count)
# NOTE(review): total_words is computed but not referenced again below -- confirm it is needed.
total_words <- token_count %>%
    group_by(title, y) %>%
    summarise(total = sum(n), .groups='keep')
head(total_words)
# +
# TF-IDF per lemma, treating each title as a document.
token_count <- token_count %>%
    bind_tf_idf(lemma, title, n)

# Remove tokens with less than 10 instances
tc <- token_count %>%
    group_by(lemma) %>%
    summarise(n=sum(n), .groups='keep') %>%
    filter(n > 10)

token_count <- token_count %>%
    filter(lemma %in% tc$lemma)
head(token_count)
# -
# Make into a wide, tidy data frame for machine learning
X <- token_count %>%
    select(title, y, lemma, tf_idf) %>%
    filter(lemma != 'title',
        lemma != 'y') %>% # remove because I'm already using 'title' and 'y'
    unique() %>%
    tidyr::spread(lemma, tf_idf) %>%
    janitor::clean_names() %>% # VERY HANDY FUNCTION!
    replace(is.na(.), 0) %>%
    select(-contains('_2'))
dim(X)
head(X[, c(1,2,601, 709, 804, 945)])
prop.table(table(X$y))
# Our final TF-IDF matrix has almost 11,000 articles (rows), 2607 token TF-IDF scores (columns), and a reasonably balanced distribution of fake and real news.
# ## Modeling
#
# With almost 2,600 potential variables, we would like to drop those to a size our computer can handle. First, remove low variance using the ratio of a variable's unique values to its sample size. By default, `nearZeroVar` suggests all the variables have sparse variance; this is fixed by modifying the default value for `uniqueCut`:
# Remove low variance variables
# uniqueCut=.12 loosens caret's default so that not every sparse TF-IDF
# column is flagged; the flagged columns are dropped from X.
low_variance <- nearZeroVar(X, uniqueCut=.12)
print(low_variance)
length(low_variance)
X <- X[,-low_variance]
ncol(X)
# ### Random Forest
#
# **TEMPLATE FOR MODELING**
#
# Using the `ranger` package, a faster and more modern implementation of random forests
#
# Maximize for ROC:
# +
# Repeated cross-validation; twoClassSummary yields ROC/Sens/Spec metrics.
ctrl <- trainControl(method='repeatedcv',
    number=2, # number of folds for each run: change this for final run
    repeats=1, # number of times to repeat CV: change this for final run
    classProbs=TRUE,
    savePredictions=TRUE,
    summaryFunction = twoClassSummary)

# good guesses for mtry are sqrt(p) and log2(p)
tunegrid <- expand.grid(mtry=c(9, 20),
    splitrule=c('gini'),
    min.node.size=c(1, 5))

set.seed(1804)  # fix the resampling for reproducibility
m0 <- train(y ~ .,
    data=X,
    tuneGrid=tunegrid,
    method='ranger',       # fast random-forest implementation
    num.trees=1000,
    importance='impurity',
    trControl=ctrl,
    metric='ROC')          # select the tuning combination maximizing ROC
print(m0)
# -
print(m0)
# +
# shows the performance on each repetition of the modeling process
# can get mean performance, std dev of each performance, or plot
# a histogram, giving complete overview of performance on multiple
# slices and dices of the training dataset
head(m0$resample)

# Averaged cross-validation scores, most representative of performance
mean(m0$resample$ROC); mean(m0$resample$Sens); mean(m0$resample$Spec)
# -
# Per-variable importance from the fitted forest (impurity-based).
varImp(m0)
# Our most important variable is 'video', a tag that Reuters uses in all articles with a video that our fake news outlets didn't. This might not be effective for reproducibility but we will see in the accuracy measures. Followed by 'u' and 'trump'. The spelling of 'u' might give fake news outlets away but for Trump, the number of times the word is used in the title might give away fake news outlets as well. 'To' and 'the' are interesting in how their usage may differ between journalists and bots. 'Said' is very interesting because it implies a source that could be very useful in identifying real news articles.
#
#
# The following graph is an analysis of how likely a word is associated with a real or fake article based on the TF-IDF. We give an example of the word 'and'.
# +
# NOTE(review): yy is logical here, not a factor, despite the comment -- confirm Hmisc accepts it.
Y <- X %>%
    mutate(yy = if_else(y == 'fake', TRUE, FALSE)) # has to be a factor for Hmisc
library(Hmisc)
# Spike-histogram of the TF-IDF of 'and' against the fake indicator, with a lowess trend.
ggplot(Y, aes(x=and, y=yy )) +
    histSpikeg(yy ~ and, lowess=TRUE, data=Y)
# -
# ## Accuracy
#
# First we pre-process our test data.
# +
# Pre-process the held-out set for evaluation.
# bug fix: this cell previously tokenized df_train again, so the "accuracy"
# below was measured on training text; it must operate on df_test.
# NOTE(review): unlike the training pipeline above, this one removes stop
# words, so train and test features are built inconsistently -- confirm intent.
tokens <- df_test %>%
    select(y, title) %>%
    unique() %>%
    unnest_tokens(word, title, drop=FALSE) %>%
    filter(!word %in% stop_words$word)

tokens2 <- df_test %>%
    select(y, title) %>%
    unique() %>%
    mutate(title2 = title) %>%
    unnest_tokens(output = bigram, input = title2, token = "ngrams", n = 2) %>%
    separate(bigram, c("word1", "word2"), sep = " ") %>%
    filter(!word1 %in% stop_words$word) %>%
    filter(!word2 %in% stop_words$word) %>%
    unite(word, word1, word2, sep = " ") %>%
    filter(!word == "NA NA")

tokens3 <- df_test %>%
    select(y, title) %>%
    unique() %>%
    mutate(title2 = title) %>%
    unnest_tokens(output = trigram, input = title2, token = "ngrams", n = 3) %>%
    separate(trigram, c("word1", "word2", "word3"), sep = " ") %>%
    filter(!word1 %in% stop_words$word) %>%
    filter(!word2 %in% stop_words$word) %>%
    filter(!word3 %in% stop_words$word) %>%
    unite(word, word1, word2, word3, sep = " ") %>%
    filter(!word == "NA NA NA")

words <- rbind(tokens, tokens2, tokens3)
# +
# Stem, lemmatize, count per (title, y, lemma), then compute TF-IDF.
# bug fix: the previous pipeline ran group_by()/summarise(total=sum(n))
# before bind_tf_idf(), which collapsed the frame to one row per title and
# dropped the `lemma` and `n` columns bind_tf_idf() needs, so it could not run.
token_count <- words %>%
    mutate(stemmed = stem_words(word)) %>%
    mutate(lemma = lemmatize_words(stemmed)) %>%
    count(title, y, lemma, sort=TRUE) %>%
    ungroup() %>%
    bind_tf_idf(lemma, title, n)

# Mirror the training-set filter: keep lemmas with more than 10 occurrences.
tc <- token_count %>%
    group_by(lemma) %>%
    summarise(n=sum(n), .groups='keep') %>%
    filter(n > 10)

token_count <- token_count %>%
    filter(lemma %in% tc$lemma)
# +
# Widen the evaluation TF-IDF into one row per title.
# NOTE(review): predict(m0, df_test_X) requires df_test_X to carry exactly the
# training feature columns; columns derived independently here may not align -- confirm.
df_test_X <- token_count %>%
    select(title, y, lemma, tf_idf) %>%
    filter(lemma != 'title',
        lemma != 'y') %>% # remove because I'm already using 'title' and 'y'
    unique() %>%
    tidyr::spread(lemma, tf_idf) %>%
    janitor::clean_names() %>% # VERY HANDY FUNCTION!
    replace(is.na(.), 0) %>%
    select(!contains('_2'))
# -
pred_m0 <- predict(m0, df_test_X)
# confusionMatrix() needs both vectors as factors with matching levels.
df_test_X$y <- as.factor(df_test_X$y)
pred_m0 <- as.factor(pred_m0)
# Lastly we view our results.
# +
confusionMatrix(pred_m0, df_test_X$y)
# -
# ## Conclusion
#
# Our accuracy, sensitivity and specificity are outstanding. Some considerations for future modeling would be a 50/50 split of fake and real data sources. The model may have overfit with the word video. We would also like to include real news articles for multiple sources as journalists and editors from Reuters may have other tendencies that the model recognized that would not appear in other real outlets.
| final/data620final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# imports
import pandas as pd
import matplotlib.pyplot as plt
import warnings
# NOTE(review): blanket suppression hides pandas' SettingWithCopyWarning
# triggered by the growth-column assignments below -- confirm it is intentional.
warnings.simplefilter('ignore')

# Load the gapminder-style table: country, year, population, gdpPerCapita columns are used below.
data = pd.read_csv('../data/raw/countries.csv')
data.head()
# split data into us and china
# bug fix: .copy() gives each subset its own frame, so the growth columns
# assigned later modify real data instead of a view of `data`
# (this was the chained-assignment warning being globally suppressed above).
us = data[data.country == 'United States'].copy()
china = data[data.country == 'China'].copy()
# us gdp per cap by year
# Simple line plot of GDP per capita against year for the US subset.
plt.plot(us.year,us.gdpPerCapita)
plt.title('us gdp per cap by year')
plt.xlabel('year')
plt.ylabel('gdp per cap')
plt.show()
# compare gdp per capita over time for us and china
plt.plot(us.year,us.gdpPerCapita)
plt.plot(china.year,china.gdpPerCapita)
plt.legend(['United States','China'])  # label fix: was 'United State'
plt.title('us and china gdp per cap by year')
plt.xlabel('year')
plt.ylabel('gdp per capita')
plt.show()
# +
# compare change in gdp per capita over time for us and china

# get change in gdp per cap over time by dividing by starting value
# (value in each country's earliest recorded year)
china['growthGdpPerCapita'] = china.gdpPerCapita/china.gdpPerCapita[china.year == min(china.year)].iloc[0]
us['growthGdpPerCapita'] = us.gdpPerCapita/us.gdpPerCapita[us.year == min(us.year)].iloc[0]

# plot
plt.plot(us.year,us.growthGdpPerCapita)
plt.plot(china.year,china.growthGdpPerCapita)
plt.legend(['United States','China'])  # label fix: was 'United State'
plt.title('us and china change in gdp per cap over time')
plt.xlabel('year')
plt.ylabel('gdp per capita %')
plt.show()
# +
# compare change in population over time for us and china

# subplot 1 population (absolute counts)
plt.subplot(2,1,1)
plt.plot(us.year,us.population)
plt.plot(china.year,china.population)
plt.legend(['United States','China'])  # label fix: was 'United State'
plt.title('us and china population over time')
plt.xlabel('year')
plt.ylabel('population')

# subplot 2 change in population (relative to each country's first year)
plt.subplot(2,1,2)
china['growthPopulation'] = china.population/china.population[china.year == min(china.year)].iloc[0]
us['growthPopulation'] = us.population/us.population[us.year == min(us.year)].iloc[0]
plt.plot(us.year,us.growthPopulation)
plt.plot(china.year,china.growthPopulation)
plt.title('us and china % change in population over time')
plt.xlabel('year')
plt.ylabel('change in population %')
plt.tight_layout()
plt.show()
# -
| notebooks/line_timeseries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/russenoire/100daysofalgorithms/blob/master/001_towersofhanoi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="0qZxeiArfwpQ" colab_type="text"
# # /pseudocode
# ```
# pegs:
# source = "A"
# dest = "B"
# spare = "C"
# disc = 4
#
# def solve_hanoi(discs, source, dest, spare):
# if disc is 1:
# move disc from source to dest
# else:
# solve_hanoi(disc - 1, source, dest, spare)
# move disc from source to dest
# solve_hanoi(disc - 1, spare, dest, source)
# ```
# + [markdown] id="N7r2vSN7fwpW" colab_type="text"
# # /algorithm
# + id="ldBHdqg0fwpa" colab_type="code" colab={}
def solve_hanoi(disc, source="left", spare="right", dest="center"):
    """Print the move sequence that transfers `disc` discs from `source` to `dest`.

    Classic recursion: park the smaller tower on the spare peg, move the
    largest disc, then bring the smaller tower onto it.
    """
    if disc > 1:
        # move the disc-1 tower out of the way, onto the spare peg
        solve_hanoi(disc - 1, source, dest, spare)
    print("move disc %d from %s to %s" %(disc, source, dest))
    if disc > 1:
        # bring the disc-1 tower from the spare peg onto the moved disc
        solve_hanoi(disc - 1, spare, source, dest)
# + [markdown] id="cD3bbWK9fwqZ" colab_type="text"
# # /examples
# + id="1EqROLxUfwqc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="d39bf18a-c688-4122-f04e-9bcfb709d7bf"
solve_hanoi(3)
# + id="K4_EqA38fwsM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fd86f77a-6ebc-4986-928b-8fd41657f338"
solve_hanoi(1)
# + id="hbEHpdjAfwsh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="86cf296c-462b-4cfc-bb09-11db5ffccfbf"
solve_hanoi(4)
# + id="-7gCNPesfwts" colab_type="code" colab={}
| 001_towersofhanoi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ### Homework part I: Prohibited Comment Classification (3 points)
#
# 
#
# __In this notebook__ you will build an algorithm that classifies social media comments into normal or toxic.
# Like in many real-world cases, you only have a small (10^3) dataset of hand-labeled examples to work with. We'll tackle this problem using both classical nlp methods and embedding-based approach.
# +
import pandas as pd
# comments.tsv is tab-separated with (at least) comment_text and should_ban columns.
data = pd.read_csv("comments.tsv", sep='\t')
texts = data['comment_text'].values
target = data['should_ban'].values

data[50::200]
# -
from sklearn.model_selection import train_test_split
# Fixed seed keeps the 50/50 split reproducible across runs.
texts_train, texts_test, y_train, y_test = train_test_split(texts, target, test_size=0.5, random_state=42)
# __Note:__ it is generally a good idea to split data into train/test before anything is done to them.
#
# It guards you against possible data leakage in the preprocessing stage. For example, should you decide to select words present in obscene tweets as features, you should only count those words over the training set. Otherwise your algoritm can cheat evaluation.
# ### Preprocessing and tokenization
#
# Comments contain raw text with punctuation, upper/lowercase letters and even newline symbols.
#
# To simplify all further steps, we'll split text into space-separated tokens using one of nltk tokenizers.
# +
from nltk.tokenize import TweetTokenizer
tokenizer = TweetTokenizer()


def preprocess(text):
    """Lower-case *text* and rejoin its TweetTokenizer tokens with single spaces."""
    # def instead of a name-bound lambda (PEP 8 E731): same behaviour, better tracebacks
    return ' '.join(tokenizer.tokenize(text.lower()))


text = 'How to be a grown-up at work: replace "fuck you" with "Ok, great!".'
print("before:", text,)
print("after:", preprocess(text),)
# +
# task: preprocess each comment in train and test
# (implemented the <YOUR CODE> placeholders; the asserts below check the result)
texts_train = [preprocess(text) for text in texts_train]
texts_test = [preprocess(text) for text in texts_test]
# -
assert texts_train[5] == 'who cares anymore . they attack with impunity .'
assert texts_test[89] == 'hey todds ! quick q ? why are you so gay'
assert len(texts_test) == len(y_test)
# ### Solving it: bag of words
#
# 
#
# One traditional approach to such problem is to use bag of words features:
# 1. build a vocabulary of frequent words (use train data only)
# 2. for each training sample, count the number of times a word occurs in it (for each word in vocabulary).
# 3. consider this count a feature for some classifier
#
# __Note:__ in practice, you can compute such features using sklearn. Please don't do that in the current assignment, though.
# * `from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer`
# +
# task: find up to k most frequent tokens in texts_train,
# sort them by number of occurences (highest first)
k = 10000

from collections import Counter  # local import: only this cell needs it

# count whitespace tokens over the (already preprocessed) training texts only,
# then keep the k most frequent as the bag-of-words vocabulary
token_counts = Counter(token for text in texts_train for token in text.split())
bow_vocabulary = [token for token, _ in token_counts.most_common(k)]

print('example features:', sorted(bow_vocabulary)[::100])
# -
def text_to_bow(text):
    """ convert text string to an array of token counts. Use bow_vocabulary. """
    # map each vocabulary token to its column index; out-of-vocabulary tokens are skipped
    token_to_index = {token: i for i, token in enumerate(bow_vocabulary)}
    counts = np.zeros(len(bow_vocabulary), dtype='float32')
    for token in text.split():
        index = token_to_index.get(token)
        if index is not None:
            counts[index] += 1
    return np.array(counts, 'float32')
X_train_bow = np.stack(list(map(text_to_bow, texts_train)))
X_test_bow = np.stack(list(map(text_to_bow, texts_test)))

# k_max = number of distinct tokens in the whole training corpus
k_max = len(set(' '.join(texts_train).split()))
assert X_train_bow.shape == (len(texts_train), min(k, k_max))
assert X_test_bow.shape == (len(texts_test), min(k, k_max))
assert np.all(X_train_bow[5:10].sum(-1) == np.array([len(s.split()) for s in texts_train[5:10]]))
assert len(bow_vocabulary) <= min(k, k_max)
assert X_train_bow[6, bow_vocabulary.index('.')] == texts_train[6].split().count('.')
# Machine learning stuff: fit, predict, evaluate. You know the drill.
from sklearn.linear_model import LogisticRegression
bow_model = LogisticRegression().fit(X_train_bow, y_train)
# +
from sklearn.metrics import roc_auc_score, roc_curve

# Plot train and test ROC curves for the bag-of-words model.
for name, X, y, model in [
    ('train', X_train_bow, y_train, bow_model),
    ('test ', X_test_bow, y_test, bow_model)
]:
    proba = model.predict_proba(X)[:, 1]
    auc = roc_auc_score(y, proba)
    plt.plot(*roc_curve(y, proba)[:2], label='%s AUC=%.4f' % (name, auc))

plt.plot([0, 1], [0, 1], '--', color='black',)  # chance diagonal
plt.legend(fontsize='large')
plt.grid()
# -
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
#
# ### Solving it better: word vectors
#
# Let's try another approach: instead of counting per-word frequencies, we shall map all words to pre-trained word vectors and average over them to get text features.
#
# This should give us two key advantages: (1) we now have 10^2 features instead of 10^4 and (2) our model can generalize to word that are not in training dataset.
#
# We begin with a standard approach with pre-trained word vectors. However, you may also try
# * training embeddings from scratch on relevant (unlabeled) data
# * multiplying word vectors by inverse word frequency in dataset (like tf-idf).
# * concatenating several embeddings
# * call `gensim.downloader.info()['models'].keys()` to get a list of available models
# * clusterizing words by their word-vectors and try bag of cluster_ids
#
# __Note:__ loading pre-trained model may take a while. It's a perfect opportunity to refill your cup of tea/coffee and grab some extra cookies. Or binge-watch some tv series if you're slow on internet connection
# +
import gensim.downloader
# Large download on first run; gensim caches the model locally afterwards.
embeddings = gensim.downloader.load("fasttext-wiki-news-subwords-300")
# If you're low on RAM or download speed, use "glove-wiki-gigaword-100" instead. Ignore all further asserts.
# +
def vectorize_sum(comment):
    """
    implement a function that converts preprocessed comment to a sum of token vectors
    """
    embedding_dim = embeddings.wv.vectors.shape[1]
    features = np.zeros([embedding_dim], dtype='float32')
    # sum the embedding of every token; silently skip out-of-vocabulary tokens
    # NOTE(review): assumes a gensim-3 style object where `embeddings.wv`
    # supports `in` and indexing -- confirm against the installed gensim version
    for token in comment.split():
        if token in embeddings.wv:
            features += embeddings.wv[token]
    return features


assert np.allclose(
    vectorize_sum("who cares anymore . they attack with impunity .")[::70],
    np.array([ 0.0108616 , 0.0261663 , 0.13855131, -0.18510573, -0.46380025])
)
# -
X_train_wv = np.stack([vectorize_sum(text) for text in texts_train])
X_test_wv = np.stack([vectorize_sum(text) for text in texts_test])
# +
wv_model = LogisticRegression().fit(X_train_wv, y_train)

# Overlay ROC curves: bag-of-words vs word-vector features, train and test.
for name, X, y, model in [
    ('bow train', X_train_bow, y_train, bow_model),
    ('bow test ', X_test_bow, y_test, bow_model),
    ('vec train', X_train_wv, y_train, wv_model),
    ('vec test ', X_test_wv, y_test, wv_model)
]:
    proba = model.predict_proba(X)[:, 1]
    auc = roc_auc_score(y, proba)
    plt.plot(*roc_curve(y, proba)[:2], label='%s AUC=%.4f' % (name, auc))

plt.plot([0, 1], [0, 1], '--', color='black',)  # chance diagonal
plt.legend(fontsize='large')
plt.grid()

assert roc_auc_score(y_test, wv_model.predict_proba(X_test_wv)[:, 1]) > 0.92, "something's wrong with your features"
# -
# If everything went right, you've just managed to reduce misclassification rate by a factor of two.
# This trick is very useful when you're dealing with small datasets. However, if you have hundreds of thousands of samples, there's a whole different range of methods for that. We'll get there in the second part.
| week05_nlp/part1_common.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lauraAriasFdez/SentimentAnalysis/blob/main/project_4_gram.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8VhWejcf1xxQ"
# ### 1. Connect to Google Drive
#
# + colab={"base_uri": "https://localhost:8080/"} id="uaL7Wx-N1yI_" outputId="e75508aa-1465-449b-9199-a610551b3087"
# MAIN DIRECTORY STILL TO DO
from google.colab import drive
drive.mount('/content/gdrive')
# + id="d_i18n_q1z14"
# Path to the sentiment CSV inside the mounted Drive.
data_file = "/content/gdrive/MyDrive/CSCI4511W/project/sentiments.csv"
# + id="Ii-eEvWz12x7"
import pandas as pd
import numpy as np
cols = ['sentiment','id','date','query_string','user','text']
sms_data = pd.read_csv(data_file, encoding='latin-1',header=None,names=cols)

# relabel: source uses 0 = negative, 4 = positive; remap 4 -> 1 so labels are 0/1
sms_data.sentiment = sms_data.sentiment.replace({0: 0, 4: 1})
labels = sms_data[sms_data.columns[0]]
# + [markdown] id="XxYGvMX116Sw"
# ### 2. Preprocess Data
# + id="n6e0vbFw18DY" colab={"base_uri": "https://localhost:8080/"} outputId="c7c0cda0-29c6-424f-ee9e-b55c71193e58"
# !pip install texthero
import texthero as hero
# + id="MlrCDSjP1-E9"
# Cleaning pipeline applied to each tweet, in order.
custom_cleaning = [
    #Replace not assigned values with empty space
    hero.preprocessing.fillna,
    hero.preprocessing.lowercase,
    hero.preprocessing.remove_digits,
    hero.preprocessing.remove_punctuation,
    hero.preprocessing.remove_diacritics,
    hero.preprocessing.remove_stopwords,
    hero.preprocessing.remove_whitespace,
    hero.preprocessing.stem          # stem last, on the cleaned tokens
]
content = hero.clean(sms_data['text'], pipeline = custom_cleaning)
# + [markdown] id="dDz5Pr_o2Ang"
# ### 4-NGRAM
#
# + id="yCBaYRvZ2CjY"
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

# Raw 4-gram counts: ngram_range=(4,4) means only exact 4-token sequences become features.
count_vect = CountVectorizer(ngram_range=(4,4))
n_gram_data = count_vect.fit_transform(content)
# Stratified 70/30 split with a fixed seed for reproducibility.
n_gram_x_train,n_gram_x_test,y_train,y_test = train_test_split(n_gram_data,labels,test_size = 0.3, stratify=labels,random_state=100)
# + [markdown] id="AvveD9852b5E"
# ### Naive Bayes
# + colab={"base_uri": "https://localhost:8080/"} id="cFO6t-G-2d6O" outputId="5173b8c6-da4d-4505-d165-419097754ce7"
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import f1_score

# label fix: the vectorizer above uses ngram_range=(4,4), so this is 4-gram, not 3-gram
print("NAIVE BAYES + 4-GRAM______________________________________________________________")
clf_multinomialnb = MultinomialNB()
clf_multinomialnb.fit(n_gram_x_train,y_train)
y_pred = clf_multinomialnb.predict(n_gram_x_test)
print(classification_report(y_test,y_pred))
f1_score(y_test,y_pred)
# + [markdown] id="giWddnVq2nYq"
# ### Linear SVM
# + colab={"base_uri": "https://localhost:8080/"} id="HeMUxzo42mle" outputId="533a51cd-5dd4-4097-980e-309cd4f32e9f"
from sklearn.svm import LinearSVC

# Linear SVM on the 4-gram count features.
# label fix: the vectorizer uses ngram_range=(4,4), so this is 4-gram, not 3-gram
print("LINEAR SVM + 4 GRAM______________________________________________________________")
linearsvc = LinearSVC()
linearsvc.fit(n_gram_x_train,y_train)
y_pred = linearsvc.predict(n_gram_x_test)
print(classification_report(y_test,y_pred))
f1_score(y_test,y_pred)
# + [markdown] id="frmpxAnU2uWj"
# ### Logistic Regression
# + id="zGhN3G502x8b" colab={"base_uri": "https://localhost:8080/"} outputId="f655c183-00b4-4387-f723-70b2e74c0a6e"
from sklearn.linear_model import LogisticRegression
# Logistic regression on the 4-gram counts.
# (Header corrected from "3 GRAMS"; the vectorizer uses ngram_range=(4, 4).)
print("LOGISTIC REGRESSION + 4-GRAM______________________________________________________________")
logisticRegr = LogisticRegression()
logisticRegr.fit(n_gram_x_train, y_train)
y_pred = logisticRegr.predict(n_gram_x_test)
print(classification_report(y_test, y_pred))
# Displayed by the notebook: F1 on the held-out split.
f1_score(y_test, y_pred)
| project_4_gram.ipynb |
;; -*- coding: utf-8 -*-
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .clj
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Lein-Clojure
;; language: clojure
;; name: lein-clojure
;; ---
;; # Fine-tuning Sentence Pair Classification with BERT
;;
;; **This tutorial is based off of the Gluon NLP one here https://gluon-nlp.mxnet.io/examples/sentence_embedding/bert.html**
;;
;; Pre-trained language representations have been shown to improve many downstream NLP tasks such as question answering, and natural language inference. To apply pre-trained representations to these tasks, there are two strategies:
;;
;; feature-based approach, which uses the pre-trained representations as additional features to the downstream task.
;; fine-tuning based approach, which trains the downstream tasks by fine-tuning pre-trained parameters.
;; While feature-based approaches such as ELMo [3] (introduced in the previous tutorial) are effective in improving many downstream tasks, they require task-specific architectures. Devlin, Jacob, et al. proposed BERT [1] (Bidirectional Encoder Representations from Transformers), which fine-tunes deep bidirectional representations on a wide range of tasks with minimal task-specific parameters, and obtained state-of-the-art results.
;;
;; In this tutorial, we will focus on fine-tuning with the pre-trained BERT model to classify semantically equivalent sentence pairs. Specifically, we will:
;;
;; load the state-of-the-art pre-trained BERT model and attach an additional layer for classification,
;; process and transform sentence pair data for the task at hand, and
;; fine-tune BERT model for sentence classification.
;;
;;
;; ## Preparation
;;
;; To run this tutorial locally, in the example directory:
;;
;; 1. Get the model and supporting data by running `get_bert_data.sh`.
;; 2. This Jupyter Notebook uses the lein-jupyter plugin to be able to execute Clojure code in project setting. The first time that you run it you will need to install the kernel with`lein jupyter install-kernel`. After that you can open the notebook in the project directory with `lein jupyter notebook`.
;; ## Load requirements
;;
;; We need to load up all the namespace requires
(ns bert.bert-sentence-classification
(:require [bert.util :as bert-util]
[clojure-csv.core :as csv]
[clojure.java.shell :refer [sh]]
[clojure.string :as string]
[org.apache.clojure-mxnet.callback :as callback]
[org.apache.clojure-mxnet.context :as context]
[org.apache.clojure-mxnet.dtype :as dtype]
[org.apache.clojure-mxnet.eval-metric :as eval-metric]
[org.apache.clojure-mxnet.io :as mx-io]
[org.apache.clojure-mxnet.layout :as layout]
[org.apache.clojure-mxnet.module :as m]
[org.apache.clojure-mxnet.ndarray :as ndarray]
[org.apache.clojure-mxnet.optimizer :as optimizer]
[org.apache.clojure-mxnet.symbol :as sym]))
;; # Use the Pre-trained BERT Model
;;
;; In this tutorial we will use the pre-trained BERT model that was exported from GluonNLP via the `scripts/bert/staticbert/static_export_base.py`. For convenience, the model has been downloaded for you by running the `get_bert_data.sh` file in the root directory of this example.
;; ## Get BERT
;;
;; Let’s first take a look at the BERT model architecture for sentence pair classification below:
;;
;; 
;;
;; where the model takes a pair of sequences and pools the representation of the first token in the sequence. Note that the original BERT model was trained for masked language model and next sentence prediction tasks, which includes layers for language model decoding and classification. These layers will not be used for fine-tuning sentence pair classification.
;;
;; Let's load the pre-trained BERT using the module API in MXNet.
;; +
(def model-path-prefix "data/static_bert_base_net")
;; the vocabulary used in the model
(def vocab (bert-util/get-vocab))
;; the maximum length of a (padded) token sequence fed to the model
(def seq-length 128)
;; load the exported BERT base symbol and parameters saved at epoch 0
(def bert-base (m/load-checkpoint {:prefix model-path-prefix :epoch 0}))
;; -
;; ## Model Definition for Sentence Pair Classification
;;
;; Now that we have loaded the BERT model, we only need to attach an additional layer for classification. We can do this by defining a fine tune model from the symbol of the base BERT model.
;; +
(defn fine-tune-model
  "Attach a classification head to a pre-trained network symbol.
   msymbol: the pretrained network symbol
   num-classes: the number of classes for the fine-tune datasets
   dropout: the dropout rate"
  [msymbol {:keys [num-classes dropout]}]
  (let [dropped (sym/dropout {:data msymbol :p dropout})
        dense   (sym/fully-connected "fc-finetune" {:data dropped
                                                    :num-hidden num-classes})]
    (sym/softmax-output "softmax" {:data dense})))
(def model-sym (fine-tune-model (m/symbol bert-base) {:num-classes 2 :dropout 0.1}))
;; -
;; # Data Preprocessing for BERT
;;
;; ## Dataset
;;
;; For demonstration purpose, we use the dev set of the Microsoft Research Paraphrase Corpus dataset. The file is named ‘dev.tsv’ and was downloaded as part of the data script. Let’s take a look at the raw dataset.
(-> (sh "head" "-n" "5" "data/dev.tsv")
:out
println)
;; The file contains 5 columns, separated by tabs (i.e. '\t').
;; The first line of the file explains each of these columns: 0. the label indicating whether the two sentences are semantically equivalent 1. the id of the first sentence in this sample 2. the id of the second sentence in this sample 3. the content of the first sentence 4. the content of the second sentence
;;
;; For our task, we are interested in the 0th, 3rd and 4th columns.
;; +
(def raw-file
(csv/parse-csv (string/replace (slurp "data/dev.tsv") "\"" "")
:delimiter \tab
:strict true))
(def data-train-raw (->> raw-file
(mapv #(vals (select-keys % [3 4 0])))
(rest) ; drop header
(into [])))
(def sample (first data-train-raw))
(println (nth sample 0)) ;;;sentence a
(println (nth sample 1)) ;; sentence b
(println (nth sample 2)) ;; 1 means equivalent, 0 means not equivalent
;; -
;; To use the pre-trained BERT model, we need to preprocess the data in the same way it was trained. The following figure shows the input representation in BERT:
;;
;; 
;;
;; We will do pre-processing on the inputs to get them in the right format and to perform the following transformations:
;; - tokenize the input sequences
;; - insert [CLS] at the beginning
;; - insert [SEP] between sentence one and sentence two, and at the end - generate segment ids to indicate whether a token belongs to the first sequence or the second sequence.
;; - generate valid length
;; +
(defn pre-processing
  "Preprocesses the sentences in the format that BERT is expecting.
   ctx and idx->token are accepted for API symmetry but are not used here."
  [ctx idx->token token->idx train-item]
  (let [[sentence-a sentence-b label] train-item
        ;;; pre-processing tokenize sentence
        token-1 (bert-util/tokenize (string/lower-case sentence-a))
        token-2 (bert-util/tokenize (string/lower-case sentence-b))
        ;; NOTE(review): valid-length counts only the raw sentence tokens,
        ;; not the [CLS]/[SEP] markers added below — confirm this matches
        ;; what the exported model expects.
        valid-length (+ (count token-1) (count token-2))
        ;;; generate token types [0000...1111...0000]
        qa-embedded (into (bert-util/pad [] 0 (count token-1))
                          (bert-util/pad [] 1 (count token-2)))
        token-types (bert-util/pad qa-embedded 0 seq-length)
        ;;; make BERT pre-processing standard:
        ;;; [CLS] sentence-a [SEP] sentence-b [SEP], then pad to seq-length
        token-2 (conj token-2 "[SEP]")
        token-1 (into [] (concat ["[CLS]"] token-1 ["[SEP]"] token-2))
        tokens (bert-util/pad token-1 "[PAD]" seq-length)
        ;;; pre-processing - token to index translation
        indexes (bert-util/tokens->idxs token->idx tokens)]
    {:input-batch [indexes
                   token-types
                   [valid-length]]
     ;; label "0" -> not equivalent, anything else -> equivalent
     :label (if (= "0" label)
              [0]
              [1])
     :tokens tokens
     :train-item train-item}))
(def idx->token (:idx->token vocab))
(def token->idx (:token->idx vocab))
(def dev (context/default-context))
(def processed-datas (mapv #(pre-processing dev idx->token token->idx %) data-train-raw))
(def train-count (count processed-datas))
(println "Train Count is = " train-count)
(println "[PAD] token id = " (get token->idx "[PAD]"))
(println "[CLS] token id = " (get token->idx "[CLS]"))
(println "[SEP] token id = " (get token->idx "[SEP]"))
(println "token ids = \n"(-> (first processed-datas) :input-batch first))
(println "segment ids = \n"(-> (first processed-datas) :input-batch second))
(println "valid length = \n" (-> (first processed-datas) :input-batch last))
(println "label = \n" (-> (second processed-datas) :label))
;; -
;; Now that we have all the input-batches for each row, we are going to slice them up column-wise and create NDArray Iterators that we can use in training
;; +
(defn slice-inputs-data
  "Each sentence pair had to be processed as a row. This breaks all
   the rows up into a column for creating a NDArray"
  [processed-datas n]
  (vec (mapcat #(nth (:input-batch %) n) processed-datas)))
(def prepared-data {:data0s (slice-inputs-data processed-datas 0)
:data1s (slice-inputs-data processed-datas 1)
:data2s (slice-inputs-data processed-datas 2)
:labels (->> (mapv :label processed-datas)
(flatten)
(into []))
:train-num (count processed-datas)})
(def batch-size 32)
(def train-data
(let [{:keys [data0s data1s data2s labels train-num]} prepared-data
data-desc0 (mx-io/data-desc {:name "data0"
:shape [train-num seq-length]
:dtype dtype/FLOAT32
:layout layout/NT})
data-desc1 (mx-io/data-desc {:name "data1"
:shape [train-num seq-length]
:dtype dtype/FLOAT32
:layout layout/NT})
data-desc2 (mx-io/data-desc {:name "data2"
:shape [train-num]
:dtype dtype/FLOAT32
:layout layout/N})
label-desc (mx-io/data-desc {:name "softmax_label"
:shape [train-num]
:dtype dtype/FLOAT32
:layout layout/N})]
(mx-io/ndarray-iter {data-desc0 (ndarray/array data0s [train-num seq-length]
{:ctx dev})
data-desc1 (ndarray/array data1s [train-num seq-length]
{:ctx dev})
data-desc2 (ndarray/array data2s [train-num]
{:ctx dev})}
{:label {label-desc (ndarray/array labels [train-num]
{:ctx dev})}
:data-batch-size batch-size})))
train-data
;; -
;; # Fine-tune BERT Model
;;
;; Putting everything together, now we can fine-tune the model with a few epochs. For demonstration, we use a fixed learning rate and skip validation steps.
;; +
(def num-epoch 3)

;; NOTE(review): this def shadows the `fine-tune-model` fn defined earlier;
;; it works because the fn was already applied above, but consider renaming.
(def fine-tune-model (m/module model-sym {:contexts [dev]
                                          :data-names ["data0" "data1" "data2"]}))

;; Fix: the Adam option was misspelled `:episilon`, so it was silently
;; ignored and the optimizer fell back to its default epsilon.
(m/fit fine-tune-model {:train-data train-data :num-epoch num-epoch
                        :fit-params (m/fit-params {:allow-missing true
                                                   :arg-params (m/arg-params bert-base)
                                                   :aux-params (m/aux-params bert-base)
                                                   :optimizer (optimizer/adam {:learning-rate 5e-6 :epsilon 1e-9})
                                                   :batch-end-callback (callback/speedometer batch-size 1)})})
| contrib/clojure-package/examples/bert/fine-tune-bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import sklearn as sk
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import StandardScaler
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
df_train.head()
df_train.describe()
df_train.info()
df_train.dtypes
df_train.columns
# Features most correlated with the math score (the prediction target).
corr_NU_MATH = pd.DataFrame(df_train.corr()['NU_NOTA_MT'].sort_values(ascending=False))
corr_NU_MATH.head(20)
# Use the other exam scores as predictors of the math score.
x = df_train[['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_REDACAO']]
y = df_train[['NU_NOTA_MT']]
x.isna().sum()/x.shape[0]
# Missing scores are imputed with 0. Reassigning instead of
# `fillna(0, inplace=True, axis=1)` avoids SettingWithCopyWarning on the
# sliced frames; `axis` is irrelevant when filling with a scalar.
x = x.fillna(0)
y = y.fillna(0)
x.isna().sum()
x.head()
# # Linear Regression
# +
# Hold out 30% of the rows for evaluation; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
# Ordinary least-squares baseline on the four exam scores.
linear_regression = LinearRegression()
linear_regression_train = linear_regression.fit(x_train, y_train)
# -
y_pred = linear_regression.predict(x_test)
# R^2 on the held-out split (bare expression: displayed by the notebook).
r2_score(y_test, y_pred)
linear_mse = mean_squared_error(y_test, y_pred)
# Overlay predicted vs. actual grade distributions.
sns.distplot(y_pred, color='r', label='PREDICTED GRADES', hist=False)
sns.distplot(y_test, color='g', label='REAL GRADES', hist=False)
plt.show()
# # Ridge Regression
# +
# Standardise the features. Fit the scaler on the training split only and
# reuse its statistics for the test split: calling `fit_transform` on the
# test data (as before) re-fits the scaler and leaks test-set statistics
# into the evaluation.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
# +
ridge_regression = Ridge(alpha=1, random_state=42)
ridge_regression.fit(x_train_scaled, y_train)
# -
y_pred = ridge_regression.predict(x_test_scaled)
# R^2 on the held-out split (displayed by the notebook).
r2_score(y_test, y_pred)
ridge_mse = mean_squared_error(y_test, y_pred)
sns.distplot(y_pred, color='r', label='PREDICTED GRADES', hist=False)
sns.distplot(y_test, color='g', label='REAL GRADES', hist=False)
plt.show()
# # Lasso Regressor
# +
lasso_regression = Lasso(alpha=0.5)
# Scale with the training-set statistics only; `fit_transform` on the test
# split (as before) re-fits the scaler and leaks test-set information.
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
lasso_regression.fit(x_train_scaled, y_train)
# -
y_pred = lasso_regression.predict(x_test_scaled)
lasso_mse = mean_squared_error(y_test, y_pred)
# R^2 on the held-out split (displayed by the notebook).
r2_score(y_test, y_pred)
# Compare the three models' mean squared errors.
linear_mse, ridge_mse, lasso_mse
sns.distplot(y_pred, color='r', label='PREDICTED GRADES', hist=False)
sns.distplot(y_test, color='g', label='REAL GRADES', hist=False)
plt.show()
# # Test
x_answer = df_test[['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_REDACAO']]
# Reassign instead of `fillna(0, inplace=True)` on a sliced frame
# (avoids SettingWithCopyWarning).
x_answer = x_answer.fillna(0)
y_answer = linear_regression.predict(x_answer)
# Grades cannot be negative; clip predictions at zero.
y_answer[y_answer < 0] = 0
df_test['NU_NOTA_MT'] = np.round(y_answer, 2)
df_test = df_test[['NU_INSCRICAO', 'NU_NOTA_MT']]
df_test
df_test.to_csv('answer.csv', index=False)
| Desafio-7/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from collections import OrderedDict
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import scipy as sp
from theano import shared
# -
# %config InlineBackend.figure_format = 'retina'
az.style.use('arviz-darkgrid')
# #### Code 11.1
trolley_df = pd.read_csv('Data/Trolley.csv', sep=';')
trolley_df.head()
# #### Code 11.2
# +
ax = (trolley_df.response
.value_counts()
.sort_index()
.plot(kind='bar'))
ax.set_xlabel("response", fontsize=14);
ax.set_ylabel("Frequency", fontsize=14);
# -
# #### Code 11.3
# +
ax = (trolley_df.response
.value_counts()
.sort_index()
.cumsum()
.div(trolley_df.shape[0])
.plot(marker='o'))
ax.set_xlim(0.9, 7.1);
ax.set_xlabel("response", fontsize=14)
ax.set_ylabel("cumulative proportion", fontsize=14);
# -
# #### Code 11.4
resp_lco = (trolley_df.response
.value_counts()
.sort_index()
.cumsum()
.iloc[:-1]
.div(trolley_df.shape[0])
.apply(lambda p: np.log(p / (1. - p))))
# +
ax = resp_lco.plot(marker='o')
ax.set_xlim(0.9, 7);
ax.set_xlabel("response", fontsize=14)
ax.set_ylabel("log-cumulative-odds", fontsize=14);
# -
# #### Code 11.5
# Ordered-logit model with cutpoints only (no predictors).
with pm.Model() as m11_1:
    # Six ordered cutpoints for the 7 response categories; the `ordered`
    # transform keeps them monotonically increasing during sampling.
    a = pm.Normal(
        'a', 0., 10.,
        transform=pm.distributions.transforms.ordered,
        shape=6, testval=np.arange(6) - 2.5)
    # Responses are recoded from 1..7 to 0..6 for OrderedLogistic.
    resp_obs = pm.OrderedLogistic(
        'resp_obs', 0., a,
        observed=trolley_df.response.values - 1
    )
with m11_1:
    map_11_1 = pm.find_MAP()
# #### Code 11.6
# MAP estimates of the cutpoints (log-cumulative-odds scale).
map_11_1['a']
# (removed stray `daf` line — an undefined name that raised NameError)
# #### Code 11.7
# Cutpoints on the cumulative-probability scale.
sp.special.expit(map_11_1['a'])
# #### Code 11.8
with m11_1:
    trace_11_1 = pm.sample(1000, tune=1000)
# Fix: the keyword was misspelled `rount_to`; az.summary rejects unknown
# keyword arguments, so this call raised a TypeError.
az.summary(trace_11_1, var_names=['a'], credible_interval=.89, round_to=2)
# #### Code 11.9
def ordered_logistic_proba(a):
    """Map ordered-logit cutpoints `a` to the implied category probabilities.

    The inverse-logit of each cutpoint is a cumulative probability; padding
    with 0 and 1 and differencing yields the per-category probabilities.
    """
    cumulative = np.concatenate(([0.], sp.special.expit(a), [1.]))
    return np.diff(cumulative)
ordered_logistic_proba(trace_11_1['a'].mean(axis=0))
# #### Code 11.10
(ordered_logistic_proba(trace_11_1['a'].mean(axis=0)) \
* (1 + np.arange(7))).sum()
# #### Code 11.11
ordered_logistic_proba(trace_11_1['a'].mean(axis=0) - 0.5)
# #### Code 11.12
(ordered_logistic_proba(trace_11_1['a'].mean(axis=0) - 0.5) \
* (1 + np.arange(7))).sum()
# #### Code 11.13
# +
# Wrap the predictors in theano shared variables so they can be swapped
# for counterfactual data later (posterior predictions, Code 11.17-19).
action = shared(trolley_df.action.values)
intention = shared(trolley_df.intention.values)
contact = shared(trolley_df.contact.values)
with pm.Model() as m11_2:
    # Ordered cutpoints, initialised at the posterior means from m11_1.
    a = pm.Normal(
        'a', 0., 10.,
        transform=pm.distributions.transforms.ordered,
        shape=6,
        testval=trace_11_1['a'].mean(axis=0)
    )
    # Main-effect slopes for action, intention and contact.
    bA = pm.Normal('bA', 0., 10.)
    bI = pm.Normal('bI', 0., 10.)
    bC = pm.Normal('bC', 0., 10.)
    phi = bA * action + bI * intention + bC * contact
    resp_obs = pm.OrderedLogistic(
        'resp_obs', phi, a,
        observed=trolley_df.response.values - 1
    )
# -
with m11_2:
    map_11_2 = pm.find_MAP()
# #### Code 11.14
with pm.Model() as m11_3:
a = pm.Normal(
'a', 0., 10.,
transform=pm.distributions.transforms.ordered,
shape=6,
testval=trace_11_1['a'].mean(axis=0)
)
bA = pm.Normal('bA', 0., 10.)
bI = pm.Normal('bI', 0., 10.)
bC = pm.Normal('bC', 0., 10.)
bAI = pm.Normal('bAI', 0., 10.)
bCI = pm.Normal('bCI', 0., 10.)
phi = bA * action + bI * intention + bC * contact \
+ bAI * action * intention \
+ bCI * contact * intention
resp_obs = pm.OrderedLogistic(
'resp_obs', phi, a,
observed=trolley_df.response - 1
)
with m11_3:
map_11_3 = pm.find_MAP()
# #### Code 11.15
def get_coefs(map_est):
    """Flatten a MAP estimate into an ordered name -> value mapping.

    Cutpoints come first (a_0 ... a_k), then the slope coefficients;
    slopes absent from the estimate are reported as NaN so that models
    with different predictors line up in one comparison table.
    """
    coefs = OrderedDict()
    for i, cutpoint in enumerate(map_est['a']):
        coefs[f'a_{i}'] = cutpoint
    for slope in ('bA', 'bI', 'bC', 'bAI', 'bCI'):
        coefs[slope] = map_est.get(slope, np.nan)
    return coefs
(pd.DataFrame.from_dict(
OrderedDict([
('m11_1', get_coefs(map_11_1)),
('m11_2', get_coefs(map_11_2)),
('m11_3', get_coefs(map_11_3))
]))
.astype(np.float64)
.round(2))
# #### Code 11.16
with m11_2:
trace_11_2 = pm.sample(1000, tune=1000)
with m11_3:
trace_11_3 = pm.sample(1000, tune=1000)
# +
comp_df = pm.compare({m11_1:trace_11_1,
m11_2:trace_11_2,
m11_3:trace_11_3})
comp_df.loc[:,'model'] = pd.Series(['m11.1', 'm11.2', 'm11.3'])
comp_df = comp_df.set_index('model')
comp_df
# -
# #### Code 11.17-19
pp_df = pd.DataFrame(np.array([[0, 0, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 1],
[0, 1, 0],
[0, 1, 1]]),
columns=['action', 'contact', 'intention'])
pp_df
# +
action.set_value(pp_df.action.values)
contact.set_value(pp_df.contact.values)
intention.set_value(pp_df.intention.values)
with m11_3:
pp_trace_11_3 = pm.sample_ppc(trace_11_3, samples=1500)
# +
PP_COLS = [f'pp_{i}' for i, _ in enumerate(pp_trace_11_3['resp_obs'])]
pp_df = pd.concat((pp_df,
pd.DataFrame(pp_trace_11_3['resp_obs'].T, columns=PP_COLS)),
axis=1)
# -
pp_cum_df = (pd.melt(
pp_df,
id_vars=['action', 'contact', 'intention'],
value_vars=PP_COLS, value_name='resp'
)
.groupby(['action', 'contact', 'intention', 'resp'])
.size()
.div(1500)
.rename('proba')
.reset_index()
.pivot_table(
index=['action', 'contact', 'intention'],
values='proba',
columns='resp'
)
.cumsum(axis=1)
.iloc[:, :-1])
pp_cum_df
for (plot_action, plot_contact), plot_df in pp_cum_df.groupby(level=['action', 'contact']):
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot([0, 1], plot_df, c='C0');
ax.plot([0, 1], [0, 0], '--', c='C0');
ax.plot([0, 1], [1, 1], '--', c='C0');
ax.set_xlim(0, 1);
ax.set_xlabel("intention");
ax.set_ylim(-0.05, 1.05);
ax.set_ylabel("probability");
ax.set_title(
"action = {action}, contact = {contact}".format(
action=plot_action, contact=plot_contact
)
);
# #### Code 11.20
# +
# define parameters
PROB_DRINK = 0.2 # 20% of days
RATE_WORK = 1. # average 1 manuscript per day
# sample one year of production
N = 365
# -
drink = np.random.binomial(1, PROB_DRINK, size=N)
y = (1 - drink) * np.random.poisson(RATE_WORK, size=N)
# #### Code 11.21
drink_zeros = drink.sum()
work_zeros = (y == 0).sum() - drink_zeros
# +
bins = np.arange(y.max() + 1) - 0.5
plt.hist(y, bins=bins);
plt.bar(0., drink_zeros, width=1., bottom=work_zeros, color='C1', alpha=.5);
plt.xticks(bins + 0.5);
plt.xlabel("manuscripts completed");
plt.ylabel("Frequency");
# -
# #### Code 11.22
with pm.Model() as m11_4:
ap = pm.Normal('ap', 0., 1.)
p = pm.math.sigmoid(ap)
al = pm.Normal('al', 0., 10.)
lambda_ = pm.math.exp(al)
y_obs = pm.ZeroInflatedPoisson('y_obs', 1. - p, lambda_, observed=y)
with m11_4:
map_11_4 = pm.find_MAP()
map_11_4
# #### Code 11.23
sp.special.expit(map_11_4['ap']) # probability drink
np.exp(map_11_4['al']) # rate finish manuscripts, when not drinking
# #### Code 11.24
def dzip(x, p, lambda_, log=True):
    """Zero-inflated Poisson likelihood (book code 11.24).

    P(x=0) = p + (1-p)*exp(-lambda); P(x=k>0) = (1-p)*Poisson(k | lambda).

    Bug fix: the previous `p**(x == 0) + (1 - p) * pmf(x)` evaluates to
    `1 + (1 - p) * pmf(x)` for x > 0 (since p**0 == 1), which is not a
    probability. Only the x == 0 branch was correct.
    """
    x = np.asarray(x)
    like = np.where(x == 0,
                    p + (1. - p) * np.exp(-lambda_),
                    (1. - p) * sp.stats.poisson.pmf(x, lambda_))
    return np.log(like) if log else like
# #### Code 11.25
PBAR = 0.5
THETA = 5.
a = PBAR * THETA
b = (1 - PBAR) * THETA
# +
p = np.linspace(0, 1, 100)
plt.plot(p, sp.stats.beta.pdf(p, a, b));
plt.xlim(0, 1);
plt.xlabel("probability");
plt.ylabel("Density");
# -
# #### Code 11.26
admit_df = pd.read_csv('Data/UCBadmit.csv', sep=';')
admit_df.head()
with pm.Model() as m11_5:
a = pm.Normal('a', 0., 2.)
pbar = pm.Deterministic('pbar', pm.math.sigmoid(a))
theta = pm.Exponential('theta', 1.)
admit_obs = pm.BetaBinomial(
'admit_obs',
pbar * theta, (1. - pbar) * theta,
admit_df.applications.values,
observed=admit_df.admit.values
)
with m11_5:
trace_11_5 = pm.sample(1000, tune=1000)
# #### Code 11.27
pm.summary(trace_11_5, alpha=.11).round(2)
# #### Code 11.28
np.percentile(trace_11_5['pbar'], [2.5, 50., 97.5])
# #### Code 11.29
# +
pbar_hat = trace_11_5['pbar'].mean()
theta_hat = trace_11_5['theta'].mean()
p_plot = np.linspace(0, 1, 100)
plt.plot(
p_plot,
sp.stats.beta.pdf(p_plot, pbar_hat * theta_hat, (1. - pbar_hat) * theta_hat)
);
plt.plot(
p_plot,
sp.stats.beta.pdf(
p_plot[:, np.newaxis],
trace_11_5['pbar'][:100] * trace_11_5['theta'][:100],
(1. - trace_11_5['pbar'][:100]) * trace_11_5['theta'][:100]
),
c='C0', alpha=0.1
);
plt.xlim(0., 1.);
plt.xlabel("probability admit");
plt.ylim(0., 3.);
plt.ylabel("Density");
# -
# #### Code 11.30
with m11_5:
pp_trace_11_5 = pm.sample_ppc(trace_11_5)
# +
x_case = np.arange(admit_df.shape[0])
plt.scatter(
x_case,
pp_trace_11_5['admit_obs'].mean(axis=0) \
/ admit_df.applications.values
);
plt.scatter(x_case, admit_df.admit / admit_df.applications);
high = np.percentile(pp_trace_11_5['admit_obs'], 95, axis=0) \
/ admit_df.applications.values
plt.scatter(x_case, high, marker='x', c='k');
low = np.percentile(pp_trace_11_5['admit_obs'], 5, axis=0) \
/ admit_df.applications.values
plt.scatter(x_case, low, marker='x', c='k');
# -
# #### Code 11.31
# +
mu = 3.
theta = 1.
x = np.linspace(0, 10, 100)
plt.plot(x, sp.stats.gamma.pdf(x, mu / theta, scale=theta));
# +
import platform
import sys
import IPython
import matplotlib
import scipy
print("This notebook was createad on a computer {} running {} and using:\nPython {}\nIPython {}\nPyMC3 {}\nNumPy {}\nPandas {}\nSciPy {}\nMatplotlib {}\n".format(platform.machine(), ' '.join(platform.linux_distribution()[:2]), sys.version[:5], IPython.__version__, pm.__version__, np.__version__, pd.__version__, scipy.__version__, matplotlib.__version__))
| Rethinking/Chp_11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Migration of old table structure to the new one
#
# + [markdown] deletable=true editable=true
# ### Table columns:
# + deletable=true editable=true
import requests
import numpy as np
from itertools import product
import pandas as pd
import json
# + deletable=true editable=true
payload = {'q': 'select * from water_risk_indicators limit 2'}
r = requests.get('https://wri-01.carto.com/api/v2/sql', params=payload)
tableStructure= pd.read_json(json.dumps(r.json()['rows']), orient='records')
tableStructure.head(0)
# + [markdown] deletable=true editable=true
# ### Template sql that we will use to generate the migration query
# + [markdown] deletable=true editable=true
# Sql example for projected data:
# ```sql
# SELECT basinid, 'water supply' as indicator, bt2024tl as label,'optimistic' as model, 'year' as period, null as period_value, 'absolute' as type, bt2024tl as value, 2020 as year FROM aqueduct_projections_20150309
# ```
#
# Sql template for projected data:
# ```sql
# SELECT basinid, {{indicator}} as indicator, {II}{YY}{SS}{R}l as label, {{scenario}} as model, {{period}} as period, {{period_value}} as period_value, {{data_type}} as type, {II}{YY}{SS}{R}r as value, {{year}} as year FROM aqueduct_projections_20150309
# ```
#
# Sql example for current conditions data:
# ```sql
# SELECT basinid, 'water supply' as indicator, bws_cat as label,'historic' as model, 'year' as period, null as period_value, 'absolute' as type, bws as value, 2014 as year FROM aqueduct_global_dl_20150409
# ```
# Sql template for current conditions data:
#
# ```sql
# SELECT basinid, {{indicator}} as indicator, {II}_cat as label,'historic' as model, 'year' as period, null as period_value, 'absolute' as type, {II} as value, 2014 as year FROM aqueduct_global_dl_20150409
# ```
# + [markdown] deletable=true editable=true
#
# {{indicator}}
# {{scenario}}
# {{period_type}}
# {{period_value}}
# {{data_type}}
# {{year}}
#
# For future projections:
#
# {II}{YY}{SS}{T}{X}
# {II} indicator code
# {YY} year code
# {SS} scenario code
# {T} data type code
# {X} suffix
#
# {{indicator}} / {II} Indicator codes
#
# water stress (ws)
# seasonal variability (sv)
# water demand (ut)
# water supply (bt)
#
# {{period}} will be one of this:
#
# month,
# year,
# quarter
#
# {{period_value}} will depend on period_type value:
#
# month: 1:12
# year: null
# quarter: 1:4
#
# {{scenario}} / {SS} Scenario codes:
#
# historic ( bs )
# pessimistic ( 38 ) ssp3 rcp85
# optimistic ( 24 ) ssp2 rcp45
# bau ( 28 ) ssp2 rcp85
#
# {{data_type}} / {T} Data types:
#
# absolute ( t )
# change from baseline ( c )
# uncertainity ( u )
#
# {{year}} / {YY} Year codes:
#
# 2014 ( 00 )
# 2020 ( 20 )
# 2030 ( 30 )
# 2040 ( 40 )
#
#
# {X} Suffixes
#
# l label string
# r raw value
# + deletable=true editable=true
## 'SELECT basinid, {{indicator}} as indicator, {II}{YY}{SS}{R}l as label, {{scenario}} as model, {{period_type}} as period, {{period_value}} as period_value, {{data_type}} as type, {II}{YY}{SS}{R}r as value, {{year}} as year FROM aqueduct_projections_20150309'
sqlTemplate =["SELECT basinid, ",' as indicator, ', 'l::text as label, ', ' as model, ', ' as period, ', ' as period_value, ',' as type, ','r as value, ', ' as year FROM aqueduct_projections_20150309']
freplacedict={
'indicator':{
'{{indicator}}': ['\'water_stress\'', '\'seasonal_variability\'', '\'water_demand\'', '\'water_supply\''],
'{II}': ['ws', 'sv', 'ut', 'bt']
},
'scenario':{
'{{scenario}}': ['\'historic\'','\'pessimistic\'','\'optimistic\'','\'bau\''],
'{SS}': ['00','38','24','28']
},
'period_type':{
'{{period_type}}': [ '\'year\'','\'month\'', '\'quarter\'']
},
'period_value':{
'{{period_value}}': {
'month':{
'min': 1,
'max': 12
},
'year': 'NULL',
'quarter':{
'min': 1,
'max': 4
}
}
},
'data_type':{
'{{data_type}}': ['\'absolute\'','\'change_from_baseline\'','\'uncertainity\''],
'{T}': ['t','c','u']
},
'year':{
'{{year}}': [2014, 2020, 2030, 2040],
'{YY}': ['bs','20','30','40']
},
};
# + deletable=true editable=true
d = ['\'Baseline Water Stress\'',
    '\'Interannual Variability\'',
    '\'Seasonal Variability\'',
    '\'Flood Occurrence\'',
    '\'Drought Severity\'',
    '\'Upstream Storage\'',
    '\'Groundwater Stress\'',
    '\'Return Flow Ratio\'',
    '\'Upstream Protected Land\'',
    '\'Media Coverage\'',
    '\'Access to Water\'',
    '\'Threatened Amphibians\'']
# Slugify the display names into quoted SQL string literals:
# lowercase, trim surrounding whitespace, spaces -> underscores.
r = [name.lower().strip(' \t\n\r').replace(' ', '_') for name in d]
print(r)
# + deletable=true editable=true
#SELECT basinid, {{indicator}} as indicator, {II}_cat as label,'historic' as model, 'year' as period, null as period_value, 'absolute' as type, {II} as value, 2014 as year FROM aqueduct_global_dl_20150409
current_sqlTemplate=['SELECT basinid, ',' as indicator, ', '_cat as label,\'historic\' as model, \'year\' as period, null as period_value, \'absolute\' as type, ', ' as value, 2014 as year FROM aqueduct_global_dl_20150409', '']
currentplacedict={
'indicator':{
'{{indicator}}': ["'baseline_water_stress'", "'interannual_variability'", "'seasonal_variability'", "'flood_occurrence'", "'drought_severity'", "'upstream_storage'", "'groundwater_stress'", "'return_flow_ratio'", "'upstream_protected_land'", "'media_coverage'", "'access_to_water'", "'threatened_amphibians'"],
'{II}': ['bws','wsv','sv','hfo','dro','stor','gw','wri','eco_s','mc','wcg','eco_v']
},
'scenario':{
'{{scenario}}': ['\'historic\''],
},
'period_type':{
'{{period_type}}': [ '\'year\'','\'month\'', '\'quarter\'']
},
'period_value':{
'{{period_value}}': {
'month':{
'min': 1,
'max': 12
},
'year': 'NULL',
'quarter':{
'min': 1,
'max': 4
}
}
},
'data_type':{
'{{data_type}}': ['\'absolute\'','\'scores\''],
'{T}': ['','s']
},
'year':{
'{{year}}': [2014]
},
};
# + deletable=true editable=true
#{{indicator}},{II},{II}
def iter_items_c(dictionary):
    """Build per-indicator replacement rows for the current-conditions SQL
    template, skipping 'bws' (handled separately).

    Each row is [indicator name, column code, column code, ''] — the code
    appears twice because the template uses it for both label and value.
    """
    names = dictionary['indicator']['{{indicator}}']
    codes = dictionary['indicator']['{II}']
    return [[name, code, code, '']
            for name, code in zip(names, codes)
            if code != 'bws']
# + deletable=true editable=true
## 'SELECT basinid, {{indicator}} as indicator, {II}{YY}{SS}{R}l as label, {{scenario}} as model, {{period_type}} as period, {{period_value}} as period_value, {{data_type}} as type, {II}{YY}{SS}{R}r as value, {{year}} as year FROM aqueduct_projections_20150309'
def iter_items(dictionary):
    """Enumerate replacement rows for the future-projection SQL template.

    Iterates every (data_type, indicator, year, scenario) combination and
    keeps it when it is either a genuine projection (non-baseline year AND
    non-historic scenario) or the historic baseline restricted to the
    indicator/data-type combinations that actually have baseline columns.
    """
    replaceList=[]
    for t in range(3):
        for x, y, s in product(range(4), repeat=3):
            # (a) projection: year != 'bs' and scenario != '00'
            # (b) baseline:   year == 'bs' and scenario == '00', excluding
            #     sv/bt/ut indicators and change/uncertainty data types
            if ( (dictionary['year']['{YY}'][y]!='bs' and dictionary['scenario']['{SS}'][s]!='00') or (dictionary['year']['{YY}'][y] =='bs' and dictionary['scenario']['{SS}'][s] =='00' and (dictionary['indicator']['{II}'][x]!='sv' and dictionary['indicator']['{II}'][x]!='bt' and dictionary['indicator']['{II}'][x]!='ut' and dictionary['data_type']['{T}'][t]!='c' and dictionary['data_type']['{T}'][t]!='u'))):
                # Column name in the source table: {II}{YY}{SS}{T}
                column = dictionary['indicator']['{II}'][x] + dictionary['year']['{YY}'][y] + dictionary['scenario']['{SS}'][s] + dictionary['data_type']['{T}'][t]
                replaceList.append([dictionary['indicator']['{{indicator}}'][x],
                                   column,
                                   dictionary['scenario']['{{scenario}}'][s],
                                   dictionary['period_type']['{{period_type}}'][0],
                                   dictionary['period_value']['{{period_value}}']['year'],
                                   dictionary['data_type']['{{data_type}}'][t],
                                   column,
                                   dictionary['year']['{{year}}'][y],
                                   ''
                                  ])
    return replaceList
# + deletable=true editable=true
def sql_generator(sql_sentence, replacement):
    """Interleave template fragments with their replacement values into one
    SQL string: fragment[0] + value[0] + fragment[1] + value[1] + ..."""
    pieces = []
    for fragment, value in zip(sql_sentence, replacement):
        pieces.append('{}{}'.format(fragment, value))
    return ''.join(pieces)
# + deletable=true editable=true
import sys
sys.modules['itertools']
# + deletable=true editable=true
# Generate the UNION members for the future-projection table.
items = iter_items(freplacedict)
sql_sentences = []
for item in items:
    sql_sentences.append(sql_generator(sqlTemplate, item))
# + deletable=true editable=true
# NOTE(review): the projection sentences built above are discarded here —
# `sql_sentences` is reset before the current-conditions sentences are
# generated. Confirm whether the projection UNION was meant to be emitted
# as well (or in a separate cell).
citems = iter_items_c(currentplacedict)
sql_sentences = []
for item in citems:
    sql_sentences.append(sql_generator(current_sqlTemplate, item))
' UNION ALL '.join(sql_sentences)
# + [markdown] deletable=true editable=true
# gu
# shape_leng
# shape_area
# basinid
# country
# basin_name
# withdrawal
# consumptio
# ba
# bws
# bws_s
# bws_cat
# wsv
# wsv_s
# wsv_cat
# sv
# sv_s
# sv_cat
# hfo
# hfo_s
# hfo_cat
# dro
# dro_s
# dro_cat
# bt
# stor
# stor_s
# stor_cat
# gw
# gw_s
# gw_cat
# wri
# wri_s
# wri_cat
# eco_s
# eco_s_s
# eco_s_cat
# mc
# mc_s
# mc_cat
# eco_v
# eco_v_s
# eco_v_cat
# wcg
# wcg_s
# wcg_cat
# def_pqual
# def_regrep
# w_semico
# _default
# w_constr
# w_chem
# w_power
# w_mine
# w_oilgas
# def_pquant
# w_agr
# w_foodbv
# w_tex
# owr_cat
# + [markdown] deletable=true editable=true
# | to mantain | to Change |
# |---|---|
# |5. Extremely high (>1.0) | Extremely high (>80%) |
# |4. High (0.75-1.0) | High (40-80%) |
# |3. Medium to high (0.5-0.75)| Medium-high (20-40%) |
# |2. Low to medium (0.25-0.5) | Low-medium (10-20%) |
# |1. Low (<0.25) | Low (<10%) |
# |Arid and low water use | |
# |No data | |
# + deletable=true editable=true
| Aqueduct/lab/insert_query.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Connector for YouTube
#
# In this example, we will be going over how to use Connector with youTube.
# ## Prerequisites
#
# connector is a component in the dataprep library that aims to simplify the data access by providing a standard API set. The goal is to help the users skip the complex API configuration. In this tutorial, we demonstrate how to use connector library with YouTube.
#
# If you haven't installed dataprep, run command `pip install dataprep` or execute the following cell.
# +
# Run me if you'd like to install
# #!pip install dataprep
# -
# In order for you to get the YouTube API working, you need to first have a [Google Account](https://accounts.google.com/signup/v2/webcreateaccount?continue=https%3A%2F%2Faccounts.google.com%2FManageAccount%3Fnc%3D1&flowName=GlifWebSignIn&flowEntry=SignUp).
#
# # Obtaining access token from youTube
#
# Assuming you have a google account, you can then fetch an API token following these three simple steps:
#
# 1. Login to [Google Cloud Platform](https://console.developers.google.com/) using your google account. On your dashboard, click on **Select a Project** and choose an existing project if you have one, or click on **New Project** - provide a project name and organization as required.
#
# 2. Next, click on **Enable APIs and Services** button on the top left corner of the window. Scroll down to find **YouTube Data API v3** and **Enable** the API service. Ensure that the API service is enabled as shown in the image below.
#
# 
#
# 3. Under **APIs & Services** on your project Navigation Menu, navigate to the **credentials** section. Click on **Create Credentials** on top of the window and select **API Key**. This will generate an API key that can be used to search data from YouTube using Connector!
#
# 
# # Initialize connector
#
# To initialize run the following code. Copy and paste the **YouTube Access Token Secret** into the **access_token** variable and ensure the connector path is correct. This returns an object establishing a connection with YouTube. Once you have that running you can use the built in functions available in connector.
# + tags=[]
from dataprep.connector import Connector
# Paste your YouTube Data API key here (see the steps above for obtaining one).
auth_token = "<your_access_token>"
# Establish a connection to the YouTube Data API through dataprep's connector.
dc = Connector('youtube', _auth={"access_token":auth_token})
dc
# -
# # Functionalities
#
# Connector has several functions you can perform to gain insight on the data downloaded from YouTube.
# ### Connector.info
# The info method gives information and guidelines of using the connector. There are 3 sections in the response and they are table, parameters and examples.
# >1. Table - The table(s) being accessed.
# >2. Parameters - Identifies which parameters can be used to call the method. For YouTube,
# * **q** is a required parameter that acts as a filter to fetch relevant video content.
# * **part** parameter is mandatory to retrieve any resource from YouTube. This parameter allows you to fetch partial resource components that your application actually uses. (Ex: snippet, contentDetails, player, statistics, etc). To know more about the part parameter, please visit [YouTube Developer Documentation](https://developers.google.com/youtube/v3/getting-started#part).
# * **type** is an optional parameter that allows you to specify the type of data (Ex: videos, channels, or playlists). Not specifying the type fetches all types of content related to your search query.
# * **maxResults** is an optional parameter used to specify the number of items to fetch per request.
# >3. Examples - Shows how you can call the methods in the Connector class.
# + tags=[]
# Show the connector's available tables, accepted parameters and example calls.
dc.info()
# -
# ### Connector.show_schema
# The show_schema method returns the schema of the website data to be returned in a Dataframe. There are two columns in the response. The first column is the column name and the second is the datatype.
#
# As an example, lets see what is in the tweets table.
# + tags=[]
# Column names and data types of the "videos" table returned by the connector.
dc.show_schema("videos")
# -
# ### Connector.query
# The query method downloads the website data and displays it in a Dataframe. The parameters must meet the requirements as indicated in connector.info for the operation to run. You can use the **maxResults** parameter to specify the number of vidoes/channels/playlists to be fetched. Each request can currently fetch a maximum of 50 items.
#
# When the data is received from the server, it will either be in a JSON or XML format. The connector reformats the data in pandas Dataframe for the convenience of downstream operations.
#
# As an example, let's try to fetch **40 videos** related to **Data Science** from YouTube.
# #### Searching for Videos related to Data Science
# Fetch up to 40 "Data Science" video search results into a pandas DataFrame.
df = dc.query("videos", q="Data Science", part="snippet", type='videos', maxResults=40)
df
# # That's all for now.
# If you are interested in writing your own configuration file or modifying an existing one, refer to the [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs).
| examples/DataConnector_Youtube.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Dib2001/Face-Mask-Detection/blob/master/Ai.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TUArtJnEsNzT"
import cv2
import numpy as np
import HandTrackingModule as htm
import time
import autopy
##########################
# Capture resolution, active-frame margin and cursor smoothing factor.
wCam, hCam = 640, 480
frameR = 100 # Frame Reduction
smoothening = 7
#########################
# FPS timing state and previous/current smoothed cursor positions.
pTime = 0
plocX, plocY = 0, 0
clocX, clocY = 0, 0
# NOTE(review): camera index 1 assumes an external webcam; use 0 for the default device.
cap = cv2.VideoCapture(1)
cap.set(3, wCam)
cap.set(4, hCam)
detector = htm.handDetector(maxHands=1)
wScr, hScr = autopy.screen.size()
# print(wScr, hScr)
# Main loop: track one hand and drive the OS mouse cursor with it.
while True:
    # 1. Find hand Landmarks
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bbox = detector.findPosition(img)
    # 2. Get the tip of the index and middle fingers
    if len(lmList) != 0:
        x1, y1 = lmList[8][1:]
        x2, y2 = lmList[12][1:]
        # print(x1, y1, x2, y2)
        # 3. Check which fingers are up
        fingers = detector.fingersUp()
        # print(fingers)
        cv2.rectangle(img, (frameR, frameR), (wCam - frameR, hCam - frameR),
                      (255, 0, 255), 2)
        # 4. Only Index Finger : Moving Mode
        if fingers[1] == 1 and fingers[2] == 0:
            # 5. Convert Coordinates (camera frame -> screen coordinates)
            x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
            y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
            # 6. Smoothen Values (exponential smoothing toward the target)
            clocX = plocX + (x3 - plocX) / smoothening
            clocY = plocY + (y3 - plocY) / smoothening
            # 7. Move Mouse (x mirrored so motion feels natural)
            autopy.mouse.move(wScr - clocX, clocY)
            cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
            plocX, plocY = clocX, clocY
        # 8. Both Index and middle fingers are up : Clicking Mode
        if fingers[1] == 1 and fingers[2] == 1:
            # 9. Find distance between fingers
            length, img, lineInfo = detector.findDistance(8, 12, img)
            print(length)
            # 10. Click mouse if distance short (pinch gesture)
            if length < 40:
                cv2.circle(img, (lineInfo[4], lineInfo[5]),
                           15, (0, 255, 0), cv2.FILLED)
                autopy.mouse.click()
    # 11. Frame Rate
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                (255, 0, 0), 3)
    # 12. Display
    cv2.imshow("Image", img)
    cv2.waitKey(1)
# + id="XydzWbFiux-o"
import cv2
import mediapipe as mp
import time
import math
import numpy as np
class handDetector():
    """Thin wrapper around MediaPipe Hands: detection, pixel-landmark
    extraction, finger-up queries and landmark-to-landmark distances."""

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        # mode/maxHands/detectionCon/trackCon are forwarded to MediaPipe Hands.
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        # NOTE(review): positional args assume the older Hands() signature
        # (mode, max_hands, detection_conf, tracking_conf); newer mediapipe
        # releases insert model_complexity here -- confirm installed version.
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        # MediaPipe landmark ids of the five fingertips (thumb..pinky).
        self.tipIds = [4, 8, 12, 16, 20]

    def findHands(self, img, draw=True):
        """Run hand detection on a BGR frame; optionally draw the skeleton.

        Stores the raw result in self.results for later findPosition() calls.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Return ([id, x, y] pixel landmarks, bounding box) for one hand.

        Must be called after findHands() (reads self.results). Returns an
        empty list and empty bbox when no hand is detected.
        """
        xList = []
        yList = []
        bbox = []
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                # Landmarks are normalized to [0, 1]; convert to pixel coords.
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                # print(id, cx, cy)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
            xmin, xmax = min(xList), max(xList)
            ymin, ymax = min(yList), max(yList)
            bbox = xmin, ymin, xmax, ymax
            if draw:
                cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
                              (0, 255, 0), 2)
        return self.lmList, bbox

    def fingersUp(self):
        """Return a 5-element 0/1 list (thumb..pinky) of raised fingers.

        Requires findPosition() to have populated self.lmList.
        """
        fingers = []
        # Thumb: judged by x-coordinate of tip vs. the joint below it.
        # NOTE(review): this comparison assumes one hand orientation
        # (e.g. a right hand facing the camera) -- confirm for your use case.
        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Fingers: tip above (smaller y than) the joint two ids below = up.
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        # totalFingers = fingers.count(1)
        return fingers

    def findDistance(self, p1, p2, img, draw=True,r=15, t=3):
        """Return (pixel distance between landmarks p1 and p2, annotated img,
        [x1, y1, x2, y2, cx, cy]) where (cx, cy) is the midpoint."""
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
        length = math.hypot(x2 - x1, y2 - y1)
        return length, img, [x1, y1, x2, y2, cx, cy]
def main():
    """Smoke test: open the webcam, track a hand and print thumb-tip coords."""
    pTime = 0
    cTime = 0
    # NOTE(review): camera index 1 assumes an external webcam; 0 is the default.
    cap = cv2.VideoCapture(1)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList, bbox = detector.findPosition(img)
        if len(lmList) != 0:
            # Landmark 4 is the thumb tip.
            print(lmList[4])
        # FPS counter drawn onto the frame.
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)
if __name__ == "__main__":
    main()
| Ai.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Зависимости
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.neural_network import MLPRegressor, MLPClassifier
from sklearn.metrics import mean_squared_error, f1_score
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
# +
# Seed every known source of randomness so that results are reproducible.
my_code = "Suminov"
seed_limit = 2 ** 32
# Derive a per-student integer seed from the name (must fit in 32 bits).
my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit
# NOTE(review): setting PYTHONHASHSEED after interpreter start does not
# affect string hashing for the current process -- confirm intent.
os.environ['PYTHONHASHSEED']=str(my_seed)
random.seed(my_seed)
np.random.seed(my_seed)
tf.compat.v1.set_random_seed(my_seed)
# Single-threaded TF session to avoid nondeterminism from parallel ops.
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# -
# Read the dataset from file
example_data = pd.read_csv("datasets/Fish.csv")
example_data.head()
# Size of the validation and test splits (20% of the data each)
val_test_size = round(0.2*len(example_data))
print(val_test_size)
# Create the train, validation and test splits
random_state = my_seed
train_val, test = train_test_split(example_data, test_size=val_test_size, random_state=random_state)
train, val = train_test_split(train_val, test_size=val_test_size, random_state=random_state)
print(len(train), len(val), len(test))
# +
# Rescale numeric columns to [0, 1] and one-hot encode the species column.
# The scaler is fitted on the training split only (avoids data leakage).
num_columns = ['Weight', 'Length1', 'Length2', 'Length3', 'Height', 'Width']
ord_columns = ['Species']
ct = ColumnTransformer(transformers=[
    ('numerical', MinMaxScaler(), num_columns),
    ('ordinal', OneHotEncoder(), ord_columns)])
ct.fit(train)
# -
# Transform each split and wrap the result back into a DataFrame
sc_train = pd.DataFrame(ct.transform(train))
sc_test = pd.DataFrame(ct.transform(test))
sc_val = pd.DataFrame(ct.transform(val))
sc_train
# Restore column names: 6 numeric columns + 7 one-hot species columns
column_names = num_columns + list(range(7))
sc_train.columns = column_names
sc_test.columns = column_names
sc_val.columns = column_names
# Regression setup: the first five numeric columns are the inputs,
# the sixth numeric column is the target.
x_labels = num_columns[:-1]
y_labels = num_columns[-1]
print(x_labels)
print(y_labels)
# +
# Select the required columns for each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_labels]
y_test = sc_test[y_labels]
y_val = sc_val[y_labels]
# -
# Regression network built with sklearn's MLPRegressor
reg = MLPRegressor(alpha=0.0, batch_size=16, epsilon=1e-07, max_iter=50)
reg.get_params()
# Train the network
reg.fit(x_train, y_train)
# Evaluate on the validation split
pred_val = reg.predict(x_val)
mse1 = mean_squared_error(y_val, pred_val)
print(mse1)
# +
# Equivalent regression network built with keras
model = Sequential()
model.add(Dense(100, input_dim=5, activation='relu', use_bias=False))
model.add(Dense(1, activation='linear', use_bias=False))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
model.summary()
# -
# Train the network
history = model.fit(x_train, y_train, validation_data = (x_val, y_val), epochs=50, batch_size=16, verbose=0)
# Evaluate on the validation split
pred_val2 = model.predict(x_val)
mse2 = mean_squared_error(y_val, pred_val2)
print(mse2)
# +
# Same keras network with a dropout layer for regularization
drop = Sequential()
drop.add(Dense(100, input_dim=5, activation='relu', use_bias=False))
drop.add(Dropout(rate=0.5))
drop.add(Dense(1, activation='linear', use_bias=False))
drop.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
drop.summary()
# -
# Train the network
history = drop.fit(x_train, y_train, validation_data = (x_val, y_val), epochs=50, batch_size=16, verbose=0)
# Evaluate on the validation split
pred_val3 = drop.predict(x_val)
mse3 = mean_squared_error(y_val, pred_val3)
print(mse3)
# Compare validation MSE of the three regressors
print(mse1, mse2, mse3)
# Final check on the test split (keras model without dropout)
pred_test = model.predict(x_test)
mse = mean_squared_error(y_test, pred_test)
print(mse)
# +
# Task 1 - classification.
# All six numeric columns are the inputs; the one-hot encoded
# species (7 columns) is the target.
# -
x_labels = num_columns
y_labels = list(range(7))
print(x_labels)
print(y_labels)
# +
# Select the required columns for each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_labels]
y_test = sc_test[y_labels]
y_val = sc_val[y_labels]
# +
# Build the classifier two ways (sklearn and keras) and compare them.
# For keras, use loss and metrics = 'categorical_crossentropy'.
# -
# sklearn
clas = MLPClassifier(alpha=0.0, batch_size=16, epsilon=1e-07, max_iter=50)
clas.get_params()
clas.fit(x_train, y_train)
pred_val = clas.predict(x_val)
# NOTE(review): MSE on 0/1 one-hot predictions is used as the comparison
# metric here -- accuracy or F1 would be the more conventional choice.
mse1 = mean_squared_error(y_val, pred_val)
print(mse1)
# +
# keras
model = Sequential()
model.add(Dense(100, input_dim=6, activation='relu', use_bias=False))
model.add(Dense(7, activation='softmax', use_bias=False))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_crossentropy'])
model.summary()
# -
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=50, batch_size=16, verbose=0)
pred_val2 = model.predict(x_val)
mse2 = mean_squared_error(y_val, pred_val2)
print(mse2)
# +
# Task 2 - using a dropout layer.
# +
drop = Sequential()
drop.add(Dense(50, input_dim=6, activation='relu', use_bias=False))
drop.add(Dropout(rate=0.2))
drop.add(Dense(18, input_dim=6, activation='relu', use_bias=False))
drop.add(Dropout(rate=0.2))
drop.add(Dense(7, activation='softmax', use_bias=False))
drop.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_crossentropy'])
drop.summary()
# -
history = drop.fit(x_train, y_train, validation_data = (x_val, y_val), epochs=50, batch_size=16, verbose=0)
pred_val3 = drop.predict(x_val)
mse3 = mean_squared_error(y_val, pred_val3)
print(mse3)
# Compare the three classifiers on the validation split
print(mse1, mse2, mse3)
# +
# Build a keras classifier with a dropout layer; compare the networks with
# and without dropout, and try to tune the dropout parameters so that the
# regularized network outperforms the plain one.
# -
# Final check on the test split
pred_test = model.predict(x_test)
mse = mean_squared_error(y_test, pred_test)
print(mse)
| 2021 Весенний семестр/Практическое задание 5/Суминов - ПР5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Resolução de problemas computacionais com Grafos
# Vértices são os pontos, arestas são as linhas
# $$ F + V = A + C$$
# ## Matriz de adjacencia
# Adjacency matrix of a directed graph: matriz[i][j] == 1 iff edge i -> j.
matriz = [[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0]]


def calc_vertice(matriz):
    """Return the number of vertices (columns of the adjacency matrix)."""
    return len(matriz[0])


def calc_aresta(matriz):
    """Return the number of directed edges (sum of all matrix entries),
    computed recursively one row at a time; an empty matrix has 0 edges."""
    if matriz == []:
        return 0
    return sum(matriz[0]) + calc_aresta(matriz[1:])


def vertices_arestas(matriz):
    """Return a (vertices, edges) pair for the adjacency matrix.

    Bug fix: the original returned the vertex count twice
    (calc_vertice called for both tuple elements) instead of the edge
    count in the second position.
    """
    return (calc_vertice(matriz), calc_aresta(matriz))


vertices_arestas(matriz)
| python/Grafos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="JfOIB1KdkbYW"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="Ojb0aXCmBgo7"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="M9Y4JZ0ZGoE4"
# # Super resolution with TensorFlow Lite
# + [markdown] id="-uF3N4BbaMvA"
# ## Overview
# + [markdown] id="isbXET4vVHfu"
# The task of recovering a high resolution (HR) image from its low resolution counterpart is commonly referred to as Single Image Super Resolution (SISR).
#
# The model used here is ESRGAN
# ([ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks](https://arxiv.org/abs/1809.00219)). And we are going to use TensorFlow Lite to run inference on the pretrained model.
#
# The TFLite model is converted from this
# [implementation](https://tfhub.dev/captain-pool/esrgan-tf2/1) hosted on TF Hub. Note that the model we converted upsamples a 50x50 low resolution image to a 200x200 high resolution image (scale factor=4). If you want a different input size or scale factor, you need to re-convert or re-train the original model.
# + [markdown] id="2dQlTqiffuoU"
# ## Setup
# + [markdown] id="qKyMtsGqu3zH"
# Since we are going to need to do some color space transformation, let's install OpenCV first.
# + id="7YTT1Rxsw3A9"
# !pip install opencv-python matplotlib tensorflow
# + [markdown] id="Clz5Kl97FswD"
# Import dependencies.
# + id="2xh1kvGEBjuP"
import tensorflow as tf
import tensorflow_hub as hub
import cv2
import matplotlib.pyplot as plt
print(tf.__version__)
# + [markdown] id="i5miVfL4kxTA"
# Download and convert the ESRGAN model
# + id="X5PvXIXRwvHj"
# Load the pretrained ESRGAN model from TF Hub and convert it to TFLite.
model = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
concrete_func = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
# Fix the input signature to one 50x50 RGB image (x4 upscaling -> 200x200).
concrete_func.inputs[0].set_shape([1, 50, 50, 3])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func], model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# Save the TF Lite model.
with tf.io.gfile.GFile('ESRGAN.tflite', 'wb') as f:
  f.write(tflite_model)
esrgan_model_path = './ESRGAN.tflite'
# + [markdown] id="jH5-xPkyUEqt"
# Download a test image (insect head).
# + id="suWiStTWgK6e"
test_img_path = tf.keras.utils.get_file('lr.jpg', 'https://raw.githubusercontent.com/tensorflow/examples/master/lite/examples/super_resolution/android/app/src/main/assets/lr-1.jpg')
# + [markdown] id="rgQ4qRuFNpyW"
# ## Generate a super resolution image using TensorFlow Lite
# + id="J9FV4btf02-2"
# Read the low-resolution test image and convert OpenCV's BGR to RGB.
lr = cv2.imread(test_img_path)
lr = cv2.cvtColor(lr, cv2.COLOR_BGR2RGB)
# Add a batch dimension and cast to float32 for the converted model.
lr = tf.expand_dims(lr, axis=0)
lr = tf.cast(lr, tf.float32)
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=esrgan_model_path)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Run the model
interpreter.set_tensor(input_details[0]['index'], lr)
interpreter.invoke()
# Extract the output and postprocess it: drop the batch dimension,
# clamp to the valid pixel range and convert back to 8-bit.
output_data = interpreter.get_tensor(output_details[0]['index'])
sr = tf.squeeze(output_data, axis=0)
sr = tf.clip_by_value(sr, 0, 255)
sr = tf.round(sr)
sr = tf.cast(sr, tf.uint8)
# + [markdown] id="EwddQrDUNQGO"
# ## Visualize the result
# + id="aasKuozt1gNd"
# Show the low-res input next to the ESRGAN output and a bicubic baseline.
lr = tf.cast(tf.squeeze(lr, axis=0), tf.uint8)
plt.figure(figsize = (1, 1))
plt.title('LR')
plt.imshow(lr.numpy());
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.title(f'ESRGAN (x4)')
plt.imshow(sr.numpy());
# Bicubic upscaling to the same 200x200 size for a visual comparison.
bicubic = tf.image.resize(lr, [200, 200], tf.image.ResizeMethod.BICUBIC)
bicubic = tf.cast(bicubic, tf.uint8)
plt.subplot(1, 2, 2)
plt.title('Bicubic')
plt.imshow(bicubic.numpy());
# + [markdown] id="0kb-fkogObjq"
# ## Performance Benchmarks
# + [markdown] id="tNzdgpqTy5P3"
# Performance benchmark numbers are generated with the tool
# [described here](https://www.tensorflow.org/lite/performance/benchmarks).
#
# <table>
# <thead>
# <tr>
# <th>Model Name</th>
# <th>Model Size </th>
# <th>Device </th>
# <th>CPU</th>
# <th>GPU</th>
# </tr>
# </thead>
# <tr>
# <td rowspan = 3>
# super resolution (ESRGAN)
# </td>
# <td rowspan = 3>
# 4.8 Mb
# </td>
# <td>Pixel 3</td>
# <td>586.8ms*</td>
# <td>128.6ms</td>
# </tr>
# <tr>
# <td>Pixel 4</td>
# <td>385.1ms*</td>
# <td>130.3ms</td>
# </tr>
#
# </table>
#
# **4 threads used*
| lite/examples/super_resolution/ml/super_resolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Noteworthy stuff
#
# some [source diving](https://nethackwiki.com/wiki/Source_diving)
# * `nle/include/hack.h:349` -- stdin user confirmation
# * auto-confirmation? requires game logic, level-design, and interaction analysis
# * somehow intercept the stdin stream
# * `nle/src/allmain.c:53-65` -- real world effects seeping into the game
# * `nle/src/rnd.c:64` -- biased uniform ${1..N}$ random nubmer generator
# * $\mathrm{uin64} \mod N$ is biased, should use rejection sampling over the next power-of-two
#
# * `nle/src/drawing.c:142` the topology, objects and other glyphs
import numpy
import torch
import gym
# * autopickup `@` -- toggle
# * safe movement `m` -- key comb
#
# +
import nle
from collections import namedtuple
# Bottom Line statistics namedtuple
BLStats = namedtuple('BLStats', [
'x',
'y',
'strength_percentage',
'str', # 'strength',
'dex', # 'dexterity',
'con', # 'constitution',
'int', # 'intelligence',
'wis', # 'wisdom',
'cha', # 'charisma',
'score',
'hitpoints',
'max_hitpoints',
'depth',
'gold',
'energy',
'max_energy',
'armor_class',
'monster_level',
'experience_level',
'experience_points',
'time',
'hunger_state',
'carrying_capacity',
'dungeon_number',
'level_number',
])
# -
class NLEWrapper(gym.ObservationWrapper):
    """Observation wrapper that replaces the raw 'blstats' array with a
    named BLStats tuple for readable field access."""

    def observation(self, observation):
        # NOTE: mutates the observation dict in place before returning it.
        observation['blstats'] = BLStats._make(observation['blstats'])
        return observation

    @property
    def last_observation(self):
        """Re-derive and wrap the most recent observation from the raw env."""
        return self.observation(self.env._get_observation(self.env.last_observation))

    def __getattr__(self, name):
        # delegate the missing instance and class attr's to the underlying env
        return getattr(self.env, name)
# Let's create an aliased `npy-pyt` context to play around with
# +
from rlplay.engine.core import context
# Wrapped NLE environment plus an aliased npy/pyt rollout context over it.
env = NLEWrapper(gym.make("NetHackScore-v0"))
ctx = context(env)
obs = env.last_observation
# +
# Keyboard-char -> raw action and raw action -> action-index lookup tables.
# TODO(review): control characters (act < 0x20) are not filtered here.
ctoa = {chr(act): act for act in env.env._actions}
aton = {act: j for j, act in enumerate(env.env._actions)}
# +
# Render, take one keyboard-driven step, then peek at the 10x10 glyph
# window centred near the player's (x, y) position.
env.render()
obs, rew, done, info = env.step(aton[ctoa[input('>>> ')]])
env.render()
# 333 is the `@` glyph
bls = obs['blstats']
obs['glyphs'][
    bls.y - 5:bls.y + 5,
    bls.x - 5:bls.x + 5,
]
# -
# ```pre
# array([[ , , , , , , , , , ],
# [ , , , , , , , , , ],
# [ , , , , , , , , , ],
# [ , , , , , , , , , ],
# [ , , , , 2380, 2362, 2371, 2361, 2361, 2361],
# [ , , , , 2380, '@', 397, 2378, 2378, 2378],
# [ , , , , , 2360, 2378, 2378, 2378, 1215],
# [ , , , , , 2360, 2378, 2378, 2378, 2378],
# [ , , , , , 2364, 2361, 2361, 2361, 2361],
# [ , , , , , , , , , ]],
# dtype=int16)
# ```
# +
# 2359 -- UNK
# 2380 -- `#` -- walkable
# 397 -- door
# 2378 -- floor
# +
bls = obs['blstats']
obs['glyphs'][bls.y-5:bls.y+5,
bls.x-5:bls.x+5,]
# +
from plyr import suply
# suply(numpy.ndarray.shape.__get__, obs)
suply(lambda x: x.shape + (x.dtype,), obs)
# +
from nle import nethack
glyph = torch.nn.Embedding(nethack.MAX_GLYPH, embedding_dim=32, )
# -
x = glyph(ctx.pyt.obs['glyphs'].long())
ctx.npy.obs['glyphs']
env.render()
x
BLStats(*obs['blstats'].tolist())
obs['message'].view('S256')
obs['inv_glyphs'] # 55 -- items' look (viz.) ID
# obs['inv']
obs['inv_strs'].view('S80')
env.print_action_meanings()
obs, rew, done, info = env.step(0
)
env.render()
obs['glyphs']
# +
# obs['blstats']
# -
obs['glyphs'].shape
| stage/goal-setter-NLE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 8.4 文献引用格式
#
# Bibtex的最大特点是采用了标准化的数据库,对于论文、著作以及其他类型的文献,我们可以自定义文献的引用格式。Bibtex的样式会改变所引用文献的引用格式。
#
# ### 8.4.1 几种标准样式
#
# 一般而言,LaTeX中有一系列标准样式 (standard styles) 可供选择和使用。具体而言,这些标准样式对应的文件包括:
#
# - `plain.bst`
# - `acm.bst`:对应于Association for Computing Machinery期刊。
# - `ieeetr.bst`:对应于IEEE Transactions期刊。
# - `alpha.bst`
# - `abbrv.bst`
# - `siam.bst`:对应于SIAM。
#
# 当然,实际上还有很多`.bst`文件,这里给出的几个文件只是最为常用的。不得不提的是`natbib`工具包,这一工具包对一系列引用命令进行了标准化,而这种标准化不受不同文献样式的影响。
#
# > 参考[Choosing a BibTeX Style](https://www.reed.edu/cis/help/LaTeX/bibtexstyles.html)
# 【回放】[**8.3 Bibtex用法**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-8/section3.ipynb)
#
# 【继续】[**8.5 xxxx**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-8/section5.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-8/section4.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# # Monitoria: Cálculo Numérico (EAMB-018A)
# Data: 28/04/2021
#
#
# - **Monitores**:
# - <NAME>
# - <NAME>
# - <NAME>
#
#
# - **Assuntos abordados**:
# - Dúvidas sobre o trabalho da AB1 (SEL)
#
#
# - **Links úteis**:
# - [Operações com matrizes em Julia](https://tutorescn.github.io/)
# - [MATLAB-Python-Julia cheatsheet](https://cheatsheets.quantecon.org/)
# ## Como utilizar os valores tabelados?
# 
# 
# 
# ### Por exemplo, para o sistema com 5 elementos (molas)
# Number of elements (springs) in the system
n = 5
s, f = 1.0e+7, 2.0e+5 # Values read from the table
k = s * n # Stiffness of each individual spring
# Assemble the vector b of constants (load applied at the last node)
b = [0, 0, 0, 0, f]
# or, equivalently, for a general n:
b = zeros(n)
b[n] = f;
b
# +
# Assemble the global matrix A of coefficients (finite-element style).
A = zeros(n, n)

# The first element is anchored, so it only contributes to A[1, 1].
A[1, 1] += k

# Every remaining element couples consecutive nodes (i, i + 1), adding +k
# on the diagonal and -k off-diagonal. This loop replaces the original
# copy-pasted per-element blocks (elements 2..5, all with stiffness k)
# and produces the same matrix while working for any number of elements n.
for i = 1:n-1
    j = i + 1
    A[i, i] += k
    A[i, j] -= k
    A[j, i] -= k
    A[j, j] += k
end

A
# -
# Solve the linear system A * u = b for the nodal displacements
u5 = A \ b
# Example of a repetition structure (loop): accumulate the sum 1..6
soma = 0.
for i = 1:6
    soma += i # soma = soma + i
end
soma
# ## Como calcular a norma de um vetor
x = rand(5)
# Accumulate squared entries, then take the square root (L2 norm by hand)
nL2 = 0. # L2 norm of a vector
for x_ = x
    nL2 += x_^2
end
nL2 = sqrt(nL2)
using LinearAlgebra
# Compare with the built-in norms: L2 (explicit and default) and L1
norm(x, 2), norm(x), norm(x, 1)
# ## Como calcular a norma de uma matriz
?norm
?opnorm
A = rand(10, 10)
# norm(A, 2) is the elementwise (Frobenius-style) norm; opnorm(A, 2) is
# the induced operator norm of the matrix.
norm(A, 2)
opnorm(A, 2)
# ## Como medir o tempo computacional
# +
A = rand(10, 10)
b = rand(10)
# Solve the system A * x = b, timing each solve; the first @time also
# includes compilation, later runs reflect the steady-state cost.
@time x = A \ b
@time x = A \ b
@time x = A \ b
@time x = A \ b
# @elapsed returns the elapsed time as a value instead of printing it
t = @elapsed x = A \ b
println("t = ", t, " secs.")
# Time a whole begin/end block of consecutive solves.
# NOTE(review): the block actually contains six solves although the
# variable is named t5 -- confirm which was intended.
t5 = @elapsed begin
    x = A \ b
    x = A \ b
    x = A \ b
    x = A \ b
    x = A \ b
    x = A \ b
end
println("t5 = ", t5, " secs.")
# -
# ## Como construir gráficos de barras
using StatsPlots
# +
# Example data: bar labels, group names and a random results matrix
xlabels = ["n 1", "n 5", "n 3"]
groups = ["Grupo 1", "Grupo 2", "Grupo 3"]
nlabels, ngroups = length(xlabels), length(groups)
results = rand(nlabels, ngroups)
# -
# Repeat labels/groups so they match the column-major flattened results
name = repeat(xlabels, outer=ngroups)
leg = repeat(groups, inner=nlabels)
groupedbar(name, results, group=leg, xlabel="O que estou variando", ylabel="Resultados observados")
| calcnum-monitoria-28042021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Kaggle competition slug used to locate the input data directory
COMP_NAME = 'predict-west-nile-virus'
# NOTE(review): wildcard import is fastai's notebook convention (it also
# provides Path); avoid in library code.
from fastai.tabular.all import *
WORKING_DIR = Path(".").absolute()
INPUT_DIR = Path("../input").absolute()
ARCHIVE_DIR = Path("../archive").absolute()
MODELS_DIR = Path("../models").absolute()
DATA_DIR = Path("../data").absolute()
submission_path = DATA_DIR
# Path.BASE_PATH makes subsequent path listings print relative to `path`
path = Path(INPUT_DIR/COMP_NAME); Path.BASE_PATH = path; path.ls()
import pandas as pd
import numpy as np
from sklearn import ensemble, preprocessing
# +
# Load dataset
# train = pd.read_csv('../input/train.csv')
# test = pd.read_csv('../input/test.csv')
# sample = pd.read_csv('../input/sampleSubmission.csv')
# weather = pd.read_csv('../input/weather.csv')
# -
# Load the competition data and the weather data
train = pd.read_csv(path/'train.csv')
test = pd.read_csv(path/'test.csv')
sample = pd.read_csv(path/'sampleSubmission.csv')
weather = pd.read_csv(path/'weather.csv')
# Get labels
labels = train.WnvPresent.values
labels[100:110]
# Not using codesum for this benchmark
weather = weather.drop('CodeSum', axis=1)
weather.columns
weather.head()
# Split station 1 and 2 and join horizontally: one row per date with
# both stations' readings side by side (_x/_y suffixed columns)
weather_stn1 = weather[weather['Station']==1]
weather_stn2 = weather[weather['Station']==2]
weather_stn1 = weather_stn1.drop('Station', axis=1)
weather_stn2 = weather_stn2.drop('Station', axis=1)
weather = weather_stn1.merge(weather_stn2, on='Date')
weather.columns
weather.head()
# replace some missing values ('M', '-') and trace readings ('T', with
# varying leading whitespace) with the sentinel -1
weather = weather.replace('M', -1)
weather = weather.replace('-', -1)
weather = weather.replace('T', -1)
weather = weather.replace(' T', -1)
weather = weather.replace(' T', -1)
# +
# Functions to extract month and day from dataset
# You can also use parse_dates of Pandas.
def create_month(x):
    """Return the month field of a 'YYYY-MM-DD' date string."""
    parts = x.split('-')
    return parts[1]


def create_day(x):
    """Return the day field of a 'YYYY-MM-DD' date string."""
    parts = x.split('-')
    return parts[2]
# -
# Add month/day string columns derived from the Date column
train['month'] = train.Date.apply(create_month)
train['day'] = train.Date.apply(create_day)
test['month'] = test.Date.apply(create_month)
test['day'] = test.Date.apply(create_day)
# Add integer latitude/longitude columns (coarse spatial bins)
train['Lat_int'] = train.Latitude.apply(int)
train['Long_int'] = train.Longitude.apply(int)
test['Lat_int'] = test.Latitude.apply(int)
test['Long_int'] = test.Longitude.apply(int)
train.columns
test.columns
train.head()
# drop address columns (and, from train, the target and the
# leakage-prone NumMosquitos column that is absent from test)
train = train.drop(['Address', 'AddressNumberAndStreet','WnvPresent', 'NumMosquitos'], axis = 1)
test = test.drop(['Id', 'Address', 'AddressNumberAndStreet'], axis = 1)
train.columns
test.columns
train.head()
# Merge with weather data, then drop the join key
train = train.merge(weather, on='Date')
test = test.merge(weather, on='Date')
train = train.drop(['Date'], axis = 1)
test = test.drop(['Date'], axis = 1)
train.columns
test.columns
# Convert categorical data to numbers.
# Each encoder is fitted on train+test together so both splits share
# one consistent label mapping.
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train['Species'].values) + list(test['Species'].values))
train['Species'] = lbl.transform(train['Species'].values)
test['Species'] = lbl.transform(test['Species'].values)
train.columns
test.columns
train.head()
lbl.classes_
lbl.fit(list(train['Street'].values) + list(test['Street'].values))
train['Street'] = lbl.transform(train['Street'].values)
test['Street'] = lbl.transform(test['Street'].values)
lbl.classes_
lbl.fit(list(train['Trap'].values) + list(test['Trap'].values))
train['Trap'] = lbl.transform(train['Trap'].values)
test['Trap'] = lbl.transform(test['Trap'].values)
lbl.classes_
# Inspect how many rows carry the -1 missing-value sentinel
train.loc[:,(train == -1).any(axis=0)].count()
train.count()
# drop columns with -1s
# NOTE(review): the mask `(df != -1).any(axis=0)` keeps every column with
# at least one non--1 value, so only columns that are entirely -1 are
# actually dropped -- confirm this matches the intent of the comment.
train = train.loc[:,(train != -1).any(axis=0)]
test = test.loc[:,(test != -1).any(axis=0)]
# Random Forest Classifier
clf = ensemble.RandomForestClassifier(n_jobs=-1, n_estimators=1000, min_samples_split=2)
clf.fit(train, labels)
# create predictions and submission file; column 1 of predict_proba is
# the probability of the positive (virus present) class
predictions = clf.predict_proba(test)[:,1]
sample['WnvPresent'] = predictions
# !mkdir -p {submission_path}/abishek-beat-the-benchmark
sample.to_csv(submission_path/'abishek-beat-the-benchmark'/'submission1.csv', index=False)
sample.head()
| working/baseline-beat-the-benchmark-play1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PythonData] *
# language: python
# name: conda-env-PythonData-py
# ---
#Import Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
import pymongo
# ## NASA Mars News
# URL of page to be scraped
url = 'https://mars.nasa.gov/news/'
# Retrieve page with the requests module
response = requests.get(url)
#response.headers
#response.content
# Create BeautifulSoup object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
# +
# Examine the results, then determine element that contains sought info
#print(soup.prettify())
# -
# First paragraph result returned for first article on page
# NOTE(review): .find returns None if the class is absent (e.g. after a site
# redesign), which would raise AttributeError here — depends on page layout.
news_paragraph = soup.find('div', class_="rollover_description_inner").text.strip()
news_paragraph
# First title result returned for first article on page
news_title = soup.find('div', class_="content_title").a.text.strip()
news_title
# ## JPL Mars Space Images - Featured Image
#
# Launch a visible (non-headless) Chrome session driven by splinter.
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
# Scrape page into Soup
html = browser.html
image_soup = BeautifulSoup(html, 'html.parser')
image_elem = image_soup.find_all('div', class_='carousel_items')
# Locate the featured-image button inside the first carousel item
# (the variable keeps the original "botton" spelling).
feature_image_botton = image_elem[0].find('a', class_="button fancybox")
feature_image_botton
# +
# Find the website source of the image by clicking through to the detail page
browser.links.find_by_partial_text('FULL IMAGE').click()
browser.links.find_by_partial_text('more info').click()
# -
# Delay the extraction of information since the website takes several seconds to load completely
browser.is_element_present_by_css("figure.lede", wait_time=1)
# +
# Store the html of the current website
image_html = browser.html
# +
# Scrape page into Soup
image_soup = BeautifulSoup(image_html, 'html.parser')
# +
# Retrieve the figure tag
figure_tag= image_soup.find_all('figure', class_='lede')
# -
# Retrieve the image source (relative URL) from the figure tag's anchor
image_source = figure_tag[0].find('a')['href']
image_source
# +
# Store the image url
# Prepend the site domain to form an absolute URL.
website_url = 'https://www.jpl.nasa.gov'
featured_image_url = website_url + image_source
featured_image_url
# -
# ## Mars Facts
# Use splinter to obtain the information from the website
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://space-facts.com/mars/'
browser.visit(url)
# Use pandas to read the html table data on the page into a list of DataFrames
tables = pd.read_html(url)
#tables
# Read the first table in the list into a pandas dataframe and name columns.
# (The original assigned `df = tables[0]` and set the columns twice with
# identical statements; done once here.)
df = tables[0]
df.columns = ['Parameter', 'Value']
# +
df.set_index('Parameter', inplace=True)
df
# +
# Convert the dataframe into an html table, strip the end of line newlines and
# write the result to an html file to view
# BUG FIX: the original called `mars_facts.to_html(...)`, but `mars_facts`
# was never defined anywhere — the facts table lives in `df`.
html_mars_facts = df.to_html(classes = 'table table-striped', header =True, index=True,justify='left')
print(html_mars_facts)
# +
# # Inspect the result in a browser
# df.to_html('table.html')
# # !explorer table.html
# -
# ## Mars Hemispheres
# Launch a fresh browser session for the USGS Astrogeology search page.
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
# Get page html and make beautifulsoup object
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Get the html containing the titles and put into a list
title_list = soup.find_all('div', class_='description')
title_list
# Loop through the div objects and scrape titles and urls of hires images
# Initiate the list to store dictionaries
hemisphere_image_urls = []
for title in title_list:
    # Navigate browser back to the search page then click the title link to
    # reach the hires image page.
    # NOTE(review): click_link_by_partial_text is deprecated in newer splinter
    # releases in favour of browser.links.find_by_partial_text(...).click()
    # (the form used earlier in this notebook) — confirm installed version.
    browser.visit(url)
    browser.click_link_by_partial_text(title.a.h3.text)
    # Grab the destination page html and make into BeautifulSoup object
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # Parse the hires image source(src) relative url then append to domain name
    # for absolute url
    img_url_list = soup.find('img', class_='wide-image')
    img_url = f"https://astrogeology.usgs.gov{img_url_list['src']}"
    # Create dictionary with returned values and add dict to hemisphere_image_urls list
    post = {
        'title': title.a.h3.text,
        'image_url': img_url
    }
    hemisphere_image_urls.append(post)
hemisphere_image_urls
| .ipynb_checkpoints/mission_to_mars (2)-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Data Preparation
#
# Photon and spacecraft data are all that a user needs for the analysis. For the definition of LAT data products, see the information in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data/LAT_DP.html).
#
# The LAT data can be extracted from the Fermi Science Support Center web site as described in the section [Extract LAT data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/extract_latdata.html). Preparing these data for analysis depends on the type of analysis you wish to perform (e.g. point source, extended source, GRB spectral analysis, timing analysis, etc). The different cuts to the data are described in detail in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data/LAT_DP.html).
# Data preparation consists of two steps:
# * ([gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt)): Used to make cuts based on columns in the event data file such as time, energy, position, zenith angle, instrument coordinates, event class, and event type (new in Pass 8).
# * ([gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt)): In addition to cutting the selected events, gtmktime makes cuts based on the spacecraft file and updates the Good Time Interval (GTI) extension.
#
#
# Here we give an example of how to prepare the data for the analysis of a point source. For your particular source analysis you have to prepare your data performing similar steps, but with the cuts suggested in Cicerone for your case.
# ## 1. Event Selection with gtselect
#
# In this section, we look at making basic data cuts using [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt). By default, gtselect prompts for cuts on:
# * Time
# * Energy
# * Position (RA,Dec,radius)
# * Maximum Zenith Angle
#
# However, by using the following hidden parameters (or using the '_Show Advanced Parameters_' check box in GUI mode), you can also make cuts on:
#
# * Minimum Event class ID (``evclsmin``)
# * Maximum Event class ID (``evclsmax``)
# * Event conversion type ID (``convtype``)
# * Minimum pulse phase (``phasemin``)
# * Maximum pulse phase (``phasemax``)
# For this example, we use data that was extracted using the procedure described in the [Extract LAT Data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/extract_latdata.html) tutorial. The original selection used the following information:
#
# * Search Center (RA,DEC) = (193.98,-5.82)
# * Radius = 20 degrees
# * Start Time (MET) = 239557417 seconds (2008-08-04 T15:43:37)
# * Stop Time (MET) = 255398400 seconds (2009-02-04 T00:00:00)
# * Minimum Energy = 100 MeV
# * Maximum Energy = 500000 MeV
#
# The LAT operated in survey mode for that period of time. We provide the user with the photon and spacecraft data files extracted in the same method as described in the [Extract LAT data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/extract_latdata.html) tutorial:
#
# 1. L1506091032539665347F73_PH00.fits
# 2. L1506091032539665347F73_PH01.fits
# 3. L1506091032539665347F73_SC00.fits
# !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/dataPreparation/L1506091032539665347F73_PH00.fits
# !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/dataPreparation/L1506091032539665347F73_PH01.fits
# !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/dataPreparation/L1506091032539665347F73_SC00.fits
# !mkdir data
# !mv *.fits ./data
# If more than one photon file was returned by the [LAT Data Server](https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi), we will need to provide an input file list in order to use all the event data files in the same analysis. This text file can be generated by typing:
# !ls ./data/*_PH* > ./data/events.txt
# !cat ./data/events.txt
# This input file list can be used in place of a single input events (or FT1) file by placing an `@` symbol before the text filename. The output from [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt) will be a single file containing all events from the combined file list that satisfy the other specified cuts.
#
# For a simple point source analysis, it is recommended that you only include events with a high probability of being photons. This cut is performed by selecting "source" class events with the the [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt) tool by including the hidden parameter ``evclass`` on the command line. For LAT Pass 8 data, `source` events are specified as event class 128 (the default value).
#
# Additionally, in Pass 8, you can supply the hidden parameter `evtype` (event type) which is a sub-selection on `evclass`. For a simple analysis, we wish to include all front+back converting events within all PSF and Energy subclasses. This is specified as `evtype` 3 (the default value).
#
# The recommended values for both `evclass` and `evtype` may change as LAT data processing develops.
#
# Now run [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt) to select the data you wish to analyze. For this example, we consider the "source class" photons within a 20 degree acceptance cone of the blazar 3C 279. We apply the **gtselect** tool to the data file as follows:
# + language="bash"
# gtselect evclass=128 evtype=3
# infile = @./data/events.txt
# outfile = ./data/3C279_region_filtered.fits
# ra = 193.98
# dec = -5.82
# rad = 20
# tmin = INDEF
# tmax = INDEF
# emin = 100
# emax = 500000
# zmax = 90
#
# #### Parameters:
# # Input file or files (if multiple files are in a .txt file,
# # don't forget the @ symbol)
# # Output file
# # RA for new search center
# # Dec or new search center
# # Radius of the new search region
# # Start time (MET in s)
# # End time (MET in s)
# # Lower energy limit (MeV)
# # Upper energy limit (MeV)
# # Maximum zenith angle value (degrees)
# -
# The filtered data will be found in the file `./data/3C279_region_filtered.fits`.
#
# **Note**: If you don't want to make a selection on a given parameter, just enter a zero (0) as the value.
#
# In this step we also selected the maximum zenith angle value as suggested in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data_Exploration/Data_preparation.html). Gamma-ray photons coming from the Earth limb ("albedo gammas") are a strong source of background. You can minimize this effect with a zenith angle cut. The value of `zmax` = 90 degrees is suggested for reconstructing events above 100 MeV and provides a sufficient buffer between your region of interest (ROI) and the Earth's limb.
#
# In the next step, [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) will remove any time period for which our ROI overlaps this buffer region. While increasing the buffer (reducing `zmax`) may decrease the background rate from albedo gammas, it will also reduce the amount of time your ROI is completely free of the buffer zone and thus reduce the livetime on the source of interest.
# **Notes**:
#
# * The RA and Dec of the search center must exactly match that used in the dataserver selection. If they are not the same, multiple copies of the source position will appear in your prepared data file which will cause later stages of analysis to fail. See "DSS Keywords" below.
#
#
# * The radius of the search region selected here must lie entirely within the region defined in the dataserver selection. They can be the same values, with no negative effects.
#
#
# * The time span selected here must lie within the time span defined in the dataserver selection. They can be the same values with no negative effects.
#
#
# * The energy range selected here must lie within the energy range defined in the dataserver selection. They can be the same values with no negative effects.
# **BE AWARE**: [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt) writes descriptions of the data selections to a series of _Data Sub-Space_ (DSS) keywords in the `EVENTS` extension header.
#
# These keywords are used by the exposure-related tools and by [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) for calculating various quantities, such as the predicted number of detected events given by the source model. These keywords MUST be same for all of the filtered event files considered in a given analysis.
#
# [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) will check to ensure that all of the DSS keywords are the same in all of the event data files. For a discussion of the DSS keywords see the [Data Sub-Space Keywords page](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/dss_keywords.html).
# There are multiple ways to view information about your data file. For example:
# * You may obtain the value of start and end time of your file by using the fkeypar tool. This tool is part of the [FTOOLS](http://heasarc.nasa.gov/lheasoft/ftools/ftools_menu.html) software package and is used to read the value of a FITS header keyword and write it to an output parameter file. For more information on `fkeypar`, type:
# `fhelp fkeypar`
# * The [gtvcut](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtvcut.txt) tool can be used to view the DSS keywords in a given extension, where the EVENTS extension is assumed by default. This is an excellent way to find out what selections have been made already on your data file (by either the dataserver, or previous runs of gtselect).
#
# * NOTE: If you wish to view the (very long) list of good time intervals (GTIs), you can use the hidden parameter `suppress_gtis=no` on the command line. The full list of GTIs is suppressed by default.
# ## 2. Time Selection with gtmktime
#
# Good Time Intervals (GTIs):
#
# * A GTI is a time range when the data can be considered valid. The GTI extension contains a list of these GTI's for the file. Thus the sum of the entries in the GTI extension of a file corresponds to the time when the data in the file is "good."
#
# * The initial list of GTI's are the times that the LAT was collecting data over the time range you selected. The LAT does not collect data while the observatory is transiting the South Atlantic Anomaly (SAA), or during rare events such as software updates or spacecraft maneuvers.
#
# **Notes**:
# * Your object will most likely not be in the field of view during the entire time that the LAT was taking data.
#
# * Additional data cuts made with gtmktime will update the GTIs based on the cuts specified in both gtmktime and gtselect.
#
# * The Fermitools use the GTIs when calculating exposure. If the GTIs have not been properly updated, the exposure correction made during science analysis may be incorrect.
# [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) is used to update the GTI extension and make cuts based on spacecraft parameters contained in the spacecraft (pointing and livetime history) file. It reads the spacecraft file and, based on the filter expression and specified cuts, creates a set of GTIs. These are then combined (logical and) with the existing GTIs in the Event data file, and all events outside this new set of GTIs are removed from the file. New GTIs are then written to the GTI extension of the new file.
#
# Cuts can be made on any field in the spacecraft file by adding terms to the filter expression using C-style relational syntax:
#
# ! -> not, && -> and, || -> or, ==, !=, >, <, >=, <=
#
# ABS(), COS(), SIN(), etc., also work
#
# >**NOTE**: Every time you specify an additional cut on time, ROI, zenith angle, event class, or event type using [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt), you must run [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) to reevaluate the GTI selection.
#
# Several of the cuts made above with **gtselect** will directly affect the exposure. Running [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) will select the correct GTIs to handle these cuts.
# It is also especially important to apply a zenith cut for small ROIs (< 20 degrees), as this brings your source of interest close to the Earth's limb. There are two different methods for handling the complex cut on zenith angle:
#
# * One method is to exclude time intervals where the buffer zone defined by the zenith cut intersects the ROI from the list of GTIs. In order to do that, run [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) and answer "yes" at the prompt:
# ```
# > gtmktime
# ...
# > Apply ROI-based zenith angle cut [] yes
# ```
# >**NOTE**: If you are studying a very broad region (or the whole sky) you would lose most (all) of your data when you implement the ROI-based zenith angle cut in [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt).
# >
# >In this case you can allow all time intervals where the cut intersects the ROI, but the intersection lies outside the FOV. To do this, run _gtmktime_ specifying a filter expression defining your analysis region, and answer "no" to the question regarding the ROI-based zenith angle cut:
# >
# >`> Apply ROI-based zenith angle cut [] no`
# >
# >Here, RA_of_center_ROI, DEC_of_center_ROI and radius_ROI correspond to the ROI selection made with gtselect, zenith_cut is defined as 90 degrees (as above), and limb_angle_minus_FOV is (zenith angle of horizon - FOV radius) where the zenith angle of the horizon is 113 degrees.
# * Alternatively, you can apply the zenith cut to the livetime calculation while running [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtltcube.txt). This is the method that is currently recommended by the LAT team (see the [Livetimes and Exposure](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/Exposure.html) section of the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/)), and is the method we will use most commonly in these analysis threads. To do this, answer "no" at the [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) prompt:
# `> Apply ROI-based zenith angle cut [] no`
#
# You'll then need to specify a value for gtltcube's `zmax` parameter when calculating the livetime cube:
#
# `> gtltcube zmax=90`
# [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) also provides the ability to exclude periods when some event has negatively affected the quality of the LAT data. To do this, we select good time intervals (GTIs) by using a logical filter for any of the [quantities in the spacecraft file](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data/LAT_Data_Columns.html#SpacecraftFile). Some possible quantities for filtering data are:
#
# * `DATA_QUAL` - quality flag set by the LAT instrument team (1 = ok, 2 = waiting review, 3 = good with bad parts, 0 = bad)
#
# * `LAT_CONFIG` - instrument configuration (0 = not recommended for analysis, 1 = science configuration)
#
# * `ROCK_ANGLE` - can be used to eliminate pointed observations from the dataset.
#
# >**NOTE**: A history of the rocking profiles that have been used by the LAT can be found in the [SSC's LAT observations page.](https://fermi.gsfc.nasa.gov/ssc/observations/types/allsky/)
# The current [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) filter expression recommended by the LAT team is:
#
# **(DATA_QUAL>0)&&(LAT_CONFIG==1).**
#
# >**NOTE**: The "DATA_QUAL" parameter can be set to different values, based on the type of object and analysis the user is interested into (see this page of the Cicerone for the most updated detailed description of the parameter's values). Typically, setting the parameter to 1 is the best option. For GRB analysis, on the contrary, the parameter should be set to ">0".
#
# Here is an example of running [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) on the 3C 279 filtered events file. For convienience, we rename the spacecraft file to `spacecraft.fits`.
# !mv ./data/L1506091032539665347F73_SC00.fits ./data/spacecraft.fits
# Now, we run **gtmktime**:
# + language="bash"
# gtmktime
# ./data/spacecraft.fits
# (DATA_QUAL>0)&&(LAT_CONFIG==1)
# no
# ./data/3C279_region_filtered.fits
# ./data/3C279_region_filtered_gti.fits
#
# #### Parameters specified above are:
# # Spacecraft file
# # Filter expression
# # Apply ROI-based zenith angle cut
# # Event data file
# # Output event file name
# -
# !ls ./data/
# The filtered event file, [3C279_region_filtered_gti.fits,](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/dataPreparation/3C279_region_filtered_gti.fits) output from [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt) can be downloaded from the Fermi SSC site.
#
# After the data preparation, it is advisable to examine your data before beginning detailed analysis. The [Explore LAT data](3.ExploreLATData.ipynb) tutorial has suggestions on methods of getting a quick preview of your data.
| DataSelection/2.DataPreparation/2.DataPreparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.8 64-bit (''d2l'': venv)'
# language: python
# name: python3
# ---
import torch
from torch import nn
import d2l.torch as d2l
import torch.nn.functional as F
class Residual(nn.Module):
    """A ResNet residual block: two 3x3 convolutions with batch norm on the
    main path, plus a skip connection that is optionally projected by a
    strided 1x1 convolution when the shape changes."""

    def __init__(self, in_channels, out_channels, use_1x1_conv=False, strides=1):
        super().__init__()
        # Main path: 3x3 conv (possibly strided) followed by a second 3x3 conv.
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=3, padding=1)
        # Skip path: 1x1 projection when channels/stride change, else identity.
        if use_1x1_conv:
            self.conv3 = nn.Conv2d(in_channels, out_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # Main branch: conv -> BN -> ReLU -> conv -> BN.
        residual = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        # Project the input when the shapes would otherwise mismatch.
        shortcut = self.conv3(x) if self.conv3 else x
        # Add-and-activate completes the block.
        return F.relu(shortcut + residual)
def resnet_block(in_channels, out_channels, num_residuals, first_block=False):
    """Build one ResNet stage as a list of `num_residuals` Residual blocks.

    Every stage except the first after the stem (`first_block=True`) starts
    with a downsampling block (stride 2, 1x1 projection on the skip path);
    all remaining blocks preserve channel count and spatial size.
    """
    return [
        Residual(in_channels, out_channels, use_1x1_conv=True, strides=2)
        if (position == 0 and not first_block)
        else Residual(out_channels, out_channels)
        for position in range(num_residuals)
    ]
# +
# Stem: 7x7 conv + BN + ReLU + 3x3 max-pool (single input channel for
# Fashion-MNIST grayscale images).
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm2d(64), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# Four stages of two residual blocks each: channel width doubles and spatial
# size halves at each stage boundary after the first (ResNet-18 layout).
b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))
# Head: global average pooling + one linear layer for 10 classes.
net = nn.Sequential(b1, b2, b3, b4, b5,
                    nn.AdaptiveAvgPool2d((1,1)),
                    nn.Flatten(),nn.Linear(512, 10))
net
# -
# Dry run with a dummy batch: print the output shape after each top-level stage.
X = torch.randn((1,1,224,224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__,'output shape:\t',X.shape)
# Train on Fashion-MNIST (resized to 96x96) using d2l's training helper;
# d2l.try_gpu() falls back to CPU when no GPU is available.
lr, num_epochs, batch_size = 0.05, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
| chapter 7 Modern Convolutional Networks/ResNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''base'': conda)'
# name: python3
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# ---
# author: <NAME> (<EMAIL>)
# ---
# + [markdown] cell_id="00000-f923ca3c-736b-4733-ab4c-30e895dbf05b" deepnote_cell_type="markdown" tags=[]
# The solution below uses an example dataset about the teeth of 10 guinea pigs at three Vitamin C dosage levels (in mg) with two delivery methods (orange juice vs. ascorbic acid). (See how to quickly load some sample data.)
# + cell_id="00000-70935978-8842-4734-b091-b8d698af4e32" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=16 execution_start=1626020427072 source_hash="3efdab3d" tags=[]
# Load the ToothGrowth sample dataset (guinea-pig tooth length by Vitamin C
# dose and delivery method) from the rdatasets package.
from rdatasets import data
df = data('ToothGrowth')
# + [markdown] cell_id="00002-8a82368b-fd17-410f-9a7d-deb52f81e9a4" deepnote_cell_type="markdown" tags=[]
# To plot the interaction effects among tooth length, supplement, and dosage, we can use the `pointplot` function in the Seaborn package.
# + cell_id="00002-830deff4-2683-4a78-b30c-13022ddadf59" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=254 execution_start=1626020707618 source_hash="9f02b94c" tags=[]
import seaborn as sns
import matplotlib.pyplot as plt
# pointplot draws the mean `len` at each dose, one line per supplement;
# crossing lines indicate an interaction between dose and delivery method.
sns.pointplot(x='dose',y='len',hue='supp',data=df)
plt.legend(loc='lower right') # Default is upper right, which overlaps the data here.
plt.show()
# + [markdown] cell_id="00003-71adff77-a18a-498d-ad55-74558955c41e" deepnote_cell_type="markdown" tags=[]
# Looking at the output, we first see that there is an interaction effect because the two supp lines intersect. We also see that there is a difference in length when giving 0.5mg and 1mg dosage of either of the two delivery methods. However, there is barely any difference between the delivery methods when the dosage level is 2mg.
| database/tasks/How to plot interaction effects of treatments/Python, using Matplotlib and Seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Step 1: Data Acquisition
# The below data is can be found at https://figshare.com/articles/dataset/Untitled_Item/5513449 and https://docs.google.com/spreadsheets/d/1CFJO2zna2No5KqNm9rPK5PCACoXKzb-nycJFhV689Iw/edit?usp=sharing.
#
import pandas as pd
import json
from pandas import json_normalize
import numpy as np
import requests
# reading in the csv files found in the raw-data folder
page_data = pd.read_csv("raw-data/page_data.csv")
population_data = pd.read_csv("raw-data/WPDS_2020_data - WPDS_2020_data.csv")
# previewing page data file
page_data.head()
# previewing population data file
population_data.head()
# ## Step 2: Data Cleaning
# Next I cleaned the data. For page_data I removed pages that started with "Template:" as these are not Wikipedia articles. I then mapped each country to its corresponding region, assuming that countries can be found underneath the region it corresponds to in the population_data file. I then separated the countries from the regions so they could be joined with the page_data file later on.
# removing pages that start with "Template:" (these are not real articles)
page_data = page_data[page_data["page"].str.startswith("Template:")==False]
# filtering out regions by assuming all countries have type equal to country
population_country_lvl = population_data[population_data['Type'] == "Country"]
# outputting cleaned data files into csvs in the clean-data folder
page_data.to_csv("clean-data/page_data.csv", index=False)
population_country_lvl.to_csv("clean-data/population_country_lvl.csv", index=False)
# creating new list for country and region
country_region_map = []
# mapping all countries to the region that is above them in the population data file:
# for each country row, scan upward until the first non-country row — that row
# is taken to be the country's region header. NOTE(review): this assumes every
# country appears directly below its region in the CSV; the backward scan is
# O(n^2) worst case but the file is small.
for i in population_data.index[population_data["Type"]=="Country"].tolist():
    j = i
    while population_data.iloc[j]["Type"] == "Country":
        j = j - 1
    country_region_map.append((population_data.iloc[j]["Name"], population_data.iloc[i]['Name']))
# converting list into data frame with columns Region and Country
country_region_map = pd.DataFrame(data = country_region_map, columns=['Region', 'Country'])
# retaining region information in separate file for use in analysis section
# outputting to csv in clean-data folder
country_region_map.to_csv("clean-data/country_region_map.csv")
# ## Step 3: Getting Article Quality Predictions
# Next I got the predicted quality scores for each Wikipedia article using the ORES API, a machine learning tool that predicts the quality score of a given Wikipedia article. The documentation for the ORES API can be found here: https://ores.wikimedia.org/v3/#!/scoring/get_v3_scores_context_revid_model For my purposes I used the value for prediction as the quality score.
# API endpoint for enwiki and articlequality model; {rev_id} is filled in
# per batch by api_call below.
endpoint = 'https://ores.wikimedia.org/v3/scores/enwiki?models=articlequality&revids={rev_id}'
# My github and email contact information, sent as User-Agent/From headers
# so Wikimedia can identify the caller.
headers = {
    'User-Agent': 'https://github.com/geiercc',
    'From': '<EMAIL>'
}
# api_call takes in an endpoint, an int i and an int n which are used for signifying
# the start and end points of a batch GET call. api_call returns a response
def api_call(endpoint, i, n):
# calling multiple rev_ids at once in a batch
rev_id = "|".join(str(rev_id) for rev_id in page_data.rev_id.iloc[i:n])
call = requests.get(endpoint.format(rev_id = rev_id), headers=headers)
response = call.json()
return response
# +
# creating separate lists for articles that have a score associated with them and those that don't
no_score = []
ores_data = []
# api can only GET 50 responses at a time so I create a loop to go through all the rev_ids in page_data
for x in range(len(page_data["rev_id"]) // 50 + 1):
    # updating start and end numbers for this batch of at most 50 rev_ids
    i = x * 50
    n = min((x + 1) * 50, len(page_data["rev_id"]))
    response = api_call(endpoint, i, n)
    # BUG FIX: the original wrapped this in an extra `for rev in response:`
    # over the top-level JSON keys, which shadowed `rev` and would re-run the
    # inner loop once per key; the scores live only under
    # response["enwiki"]["scores"].
    for rev in response["enwiki"]["scores"]:
        entry = response["enwiki"]["scores"][rev]["articlequality"]
        if entry.get("score") is None:
            # no prediction for this rev_id — keep the error message for auditing
            no_score.append(entry["error"]["message"])
        else:
            ores_data.append([rev, entry["score"]["prediction"]])
# +
# outputting the rev_ids with scores and rev_ids with no scores to separate csv files in the clean-data folder
ores_data = pd.DataFrame(ores_data, columns=['revision_id', 'article_quality_est.'])
ores_data.to_csv("clean-data/ores_data.csv")
no_score = pd.DataFrame(data = no_score, columns = ['Error Message'])
no_score.to_csv("clean-data/no_quality_score.csv")
# -
# ## Step 4: Combining the Datasets
#
# Next I merge the ORES data for each article with the wikipedia page data and the population data.
# The rows that do not have matching data are output in a CSV file called wp_wpds_countries-no_match.csv while the remaining rows are output into a csv called wp_wpds_politicians_by_country.csv
# merging page_data with population data in an inner join to get rid of rows that don't match up
wp_wpds_politicians = pd.merge(page_data, population_country_lvl, left_on = "country", right_on = "Name", how="inner")
# finding rows that don't match through the indicator and keeping those that are in either right only or left only but not both
wp_wpds_politicians_no_match = pd.merge(page_data, population_country_lvl, left_on = "country", right_on = "Name", how='outer', indicator=True)
wp_wpds_politicians_no_match = wp_wpds_politicians_no_match[wp_wpds_politicians_no_match["_merge"].isin(["right_only", "left_only"])]
# outputting these unmatched rows to csv in the clean-data folder
wp_wpds_politicians_no_match.to_csv("clean-data/wp_wpds_countries-no_match.csv", index=False)
# merging ores_data with the merged page data and population data, making sure the rev_id is the same type
# using an inner join so all retained articles have a score
ores_data = ores_data.astype({"revision_id": np.int64})
wp_wpds_politicians = pd.merge(wp_wpds_politicians, ores_data, left_on = "rev_id", right_on = "revision_id", how="inner")
# keeping relevant columns - country, page, revision_id, article_quality_est., and Population
wp_wpds_politicians_by_country = wp_wpds_politicians[["country", "page", "revision_id", "article_quality_est.", "Population"]]
# renaming page column to article_name for clarity
wp_wpds_politicians_by_country = wp_wpds_politicians_by_country.rename(columns={"page": "article_name"})
# outputting this final merged file to a csv in results called wp_wpds_politicians_by_country.csv
wp_wpds_politicians_by_country.to_csv("results/wp_wpds_politicians_by_country.csv", index = False)
# ## Step 5: Analysis
#
# Calculating proportions, as a percentage, of articles per population and high-quality articles for each country and geographic region. High-quality is defined as an article with either a Featured Article or Good Article rating.
# calculating the total article count for each country
article_count = wp_wpds_politicians_by_country.groupby('country').count()['article_name']
# calculating the number of quality (FA- or GA-rated) articles for each country
quality_count = wp_wpds_politicians_by_country[(wp_wpds_politicians_by_country['article_quality_est.'] == "FA") | (wp_wpds_politicians_by_country['article_quality_est.'] == "GA")].groupby('country').count()['article_name']
# converting both article and quality counts to data frames
article_count = article_count.to_frame()
quality_count = quality_count.to_frame()
# renaming column to country_article_count
article_count = article_count.rename(columns={"article_name": "country_article_count"})
# renaming column to quality_count
quality_count = quality_count.rename(columns={"article_name": "quality_count"})
# adding country_article_count column to cleaned final dataframe
wp_wpds_politicians_analysis = pd.merge(wp_wpds_politicians_by_country, article_count, on = "country")
# selecting relevant columns country, Population, and country_article_count
wp_wpds_politicians_analysis = wp_wpds_politicians_analysis[["country", "Population", "country_article_count"]]
# calculating articles per population percentage from country_article_count and Population
wp_wpds_politicians_analysis["articles-per-pop"] = (wp_wpds_politicians_analysis["country_article_count"] / wp_wpds_politicians_analysis["Population"]) * 100.0
# dropping duplicate country rows (the frame above still has one row per article)
country_coverage = wp_wpds_politicians_analysis.drop_duplicates()
# getting descending sort
country_coverage_desc = country_coverage.sort_values(by = "articles-per-pop", ascending=False)
# getting ascending sort
country_coverage_asc = country_coverage.sort_values(by = "articles-per-pop", ascending=True)
# getting top 10 and bottom 10 countries by coverage
top_10_countries_by_coverage = country_coverage_desc.head(10)
bottom_10_countries_by_coverage = country_coverage_asc.head(10)
# merging new dataframe with article count with the quality counts,
# using a left join to account for countries with no quality articles
wp_wpds_politicians_analysis_with_quality = pd.merge(wp_wpds_politicians_analysis, quality_count, on = "country", how = "left")
# replacing NaN values with 0 for countries with no quality articles
wp_wpds_politicians_analysis_with_quality = wp_wpds_politicians_analysis_with_quality.replace(np.nan, 0)
# calculating the quality article percentage
wp_wpds_politicians_analysis_with_quality["quality_articles_pct"] = (wp_wpds_politicians_analysis_with_quality["quality_count"] / wp_wpds_politicians_analysis_with_quality["country_article_count"]) * 100.0
# selecting the relevant columns of country, Population, country_article_count, quality_count, and quality_articles_pct
wp_wpds_politicians_analysis_for_quality = wp_wpds_politicians_analysis_with_quality[["country", "Population", "country_article_count", "quality_count", "quality_articles_pct"]]
# getting ascending and descending sort for quality article percentage
country_level_quality_asc = wp_wpds_politicians_analysis_for_quality.sort_values(by = "quality_articles_pct", ascending=True).drop_duplicates()
country_level_quality_desc = wp_wpds_politicians_analysis_for_quality.sort_values(by = "quality_articles_pct", ascending=False).drop_duplicates()
# getting top and bottom 10 countries by quality article percentage
bottom_10_countries_by_quality = country_level_quality_asc.head(10)
top_10_countries_by_quality = country_level_quality_desc.head(10)
# dropping all duplicate country rows for region level analysis
region_level_analysis = wp_wpds_politicians_analysis_with_quality.drop_duplicates()
# merging this with the mapping from earlier of a country to its region
region_level_analysis = pd.merge(region_level_analysis, country_region_map, left_on="country", right_on="Country")
# grouping by region to get the sum of all the counts for each region instead of each country
region_level_analysis = region_level_analysis.groupby('Region').sum()
# calculating the regional level article and quality percentages
region_level_analysis["regional_article_pct"] = (region_level_analysis["country_article_count"] / region_level_analysis["Population"]) * 100.0
region_level_analysis["regional_quality_pct"] = (region_level_analysis["quality_count"] / region_level_analysis["country_article_count"]) * 100.0
# renaming country_article_count column to signify it is on a regional level
region_level_analysis = region_level_analysis.rename(columns={"country_article_count": "regional_article_count"})
# selecting relevant columns
region_level_analysis = region_level_analysis[["Population", "regional_article_count", "quality_count", "regional_article_pct", "regional_quality_pct"]]
# getting descending sorted rows for article coverage percentage and quality coverage percentage
region_level_analysis_article_count = region_level_analysis.sort_values(by = "regional_article_pct", ascending=False)
region_level_analysis_quality_count = region_level_analysis.sort_values(by = "regional_quality_pct", ascending=False)
# ## Step 6: Results
# Embedding 6 tables in the Jupyter notebook with the results of the analysis.
# ### Top 10 Countries By Coverage
# Below are the 10 highest-ranked countries in terms of number of politician articles as a proportion of country population
top_10_countries_by_coverage
# ### Bottom 10 Countries By Coverage
# Below are the 10 lowest-ranked countries in terms of number of politician articles as a proportion of country population
#
bottom_10_countries_by_coverage
# ### Top 10 countries by relative quality
# Below are the 10 highest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
top_10_countries_by_quality
# ### Bottom 10 countries by relative quality
# Below are the 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality. Note that these comprise countries with 0 quality articles and that there are other countries with 0 quality articles that would be "tied" with these for bottom 10.
#
bottom_10_countries_by_quality
# ### Geographic regions by coverage
# Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population
display(region_level_analysis_article_count)
# ### Geographic regions by high quality article coverage
# Ranking of geographic regions (in descending order) in terms of the relative proportion of politician articles from countries in each region that are of GA and FA-quality
display(region_level_analysis_quality_count)
| data-512-a2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Scope of a variable**
# +
# Local variables are declared inside function and can not be accessed outside of the function
def local_var_example():
'''Local variables are declared inside function and can not be accessed outside of the function'''
name = 'Anil'
print(global_var_example.__doc__)
print(name, 'is accessed inside the function', local_var_example.__name__)
local_var_example()
# Outside of the function local_var_example and it will throw NameError
#print(name, 'is accessed outside the function', local_var_example.__name__)
# +
# Global variables are declared outside a function and can be read both inside and outside of it
last_name = 'Adhikari'

def global_var_example():
    '''Global variables are declared outside function and can be accessed both inside and outside of the function'''
    # Show this function's own docstring, then read the module-level variable.
    doc_text = global_var_example.__doc__
    fn_name = global_var_example.__name__
    print(doc_text)
    print(last_name, 'is accessed inside the function', fn_name)

global_var_example()
# The same global remains visible at module level as well.
print(last_name, 'is accessed outside the function', global_var_example.__name__)
| 03 Deep Dive- Functions and OOPs/03_scope_of_variable.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Print the versions of the key dependencies so a test run records exactly
# which software stack produced its results.
import cv2
print(cv2.__version__)
import sys
print(sys.version)
import geopandas
print(geopandas.__version__)
import multiprocessing
import joblib
print(joblib.__version__)
# multiprocessing is stdlib and has no __version__ attribute; print the module itself
print(multiprocessing)
| test/notebooks/SoftwareVersions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import plotly
import data_xray
from data_xray import file_io as fio
# Scan a Nanonis data folder for grid files, metadata only (get_data=False).
fio.GetData.find_grids("/Users/5nm/Dropbox (ORNL)/JSTM/Data - JTSTM/2021-07-09 Large vortex map on FeSe", get_data=False)
# NOTE(review): `dat` is never defined in this notebook — the next two calls
# raise NameError as written; they look like leftovers from an older API.
dat.FindGrids("/Users/5nm/Dropbox (ORNL)/JSTM/Data - JTSTM/2021-07-09 Large vortex map on FeSe", get_data=False)
dat.SelectFolder()
fio.GetData
from data_xray import nanonisio as nio
import data_xray as dx
from data_xray import file_io as fio
import plotly.io as pio
# Render plotly figures inline in the connected notebook.
pio.renderers.default = "notebook_connected"
grids = dx.file_io.FindGrids("/Users/5nm/Dropbox (ORNL)/JSTM/Data - JTSTM/2021-07-09 Large vortex map on FeSe");
grids[0].ds
# Plot one slice (index 9) of the first grid's data cube.
grids[0].ds.cf[:,:,9].plot()
import plotly.express as px
# Animated image stack stepping through the 'bias' axis.
px.imshow(grids[0].ds.cf,animation_frame='bias')
dx.file_io.GetFile()
# NOTE(review): `_` (the interactive "last result") only works in a live
# REPL/notebook session — these two lines fail when run as a script.
_
_[0]
# Conditional-expression demo: a == 2 because 1 > 0 is true.
a = 2 if 1 > 0 else 3
a
import numpy as np
class hello:
    """Toy class demonstrating instance, class, and static methods."""

    def __init__(self, message='hello'):
        # Store the greeting text on the instance.
        self.message = message

    def doit(self):
        # Emit the stored message.
        print(self.message)

    @classmethod
    def doitnow(cls, message):
        # Build a throwaway instance, let it speak, then echo the argument.
        cls("wow").doit()
        print(message)

    @staticmethod
    def doitStatic(message):
        # No instance or class state involved — just print.
        print(message)

hello.doitStatic("hello")
hello().__dict__
hello.doitnow("hello")
# %pip install tensorflow
# %pip install probflow
# +
# Imports
import probflow as pf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# Helper: standard-normal samples cast to float32 (probflow/TF expect float32)
randn = lambda *x: np.random.randn(*x).astype('float32')
# Generate some data: a noisy quadratic, y = 2*x^2 - 1 + noise
x = randn(100)
y = 2*(x**2) -1 + randn(100)
# Plot it
plt.plot(x, y, '.')
# -
class SimpleLinearRegression(pf.ContinuousModel):
    """Probabilistic regression model with Normal likelihood.

    NOTE(review): despite the name, the mean is quadratic in x:
    mean = (x*w)^2 + b.
    """

    def __init__(self):
        # Learnable parameters: weight, bias, and observation noise scale.
        self.w = pf.Parameter(name='Weight')
        self.b = pf.Parameter(name='Bias')
        self.s = pf.ScaleParameter(name='Std')

    def __call__(self, x):
        # Observation distribution given inputs x.
        mean = (x * self.w()) ** 2 + self.b()
        return pf.Normal(mean, self.s())
model = SimpleLinearRegression()
model.fit(x, y)
# +
# Make predictions at two extreme inputs
x_test = np.array([-3, 3]).astype('float32')
preds = model.predict(x_test)
# Plot em
plt.plot(x_test, preds, 'r')
plt.plot(x, y, '.')
# -
model.residuals_plot(x, y)
model.posterior_plot(ci=0.95)
# +
# Compute 90% predictive intervals (ci=0.9; the original heading said 95%)
x_eval = np.linspace(-3, 3, 100).astype('float32')
lb, ub = model.predictive_interval(x_eval, ci=0.9)
# Plot em
plt.fill_between(x_eval, lb, ub, alpha=0.2)
plt.plot(x, y, '.')
# -
model.pred_dist_plot(x_eval[:1], ci=0.95)
# +
# Draw sample fits from the model's posterior predictive
x_eval = np.array([-3, 3]).astype('float32')
samples = model.predictive_sample(x_eval, n=100)
# Plot em: one faint red line per posterior sample
x_plot = np.broadcast_to(x_eval[:, np.newaxis], samples.T.shape)
plt.plot(x_plot, samples.T, 'r', alpha=0.1)
plt.plot(x, y, '.')
# -
model.r_squared_plot(x, y)
# +
# Imports
import probflow as pf
import numpy as np
import matplotlib.pyplot as plt
# float32 helpers: uniform, standard normal, and column-wise z-score
rand = lambda *x: np.random.rand(*x).astype('float32')
randn = lambda *x: np.random.randn(*x).astype('float32')
zscore = lambda x: (x-np.mean(x, axis=0))/np.std(x, axis=0)
# Create the data: a damped sinusoid on x in [-5, 5) plus Gaussian noise
N = 1024
x = 10*rand(N, 1)-5
y = np.sin(x)/(1+x*x) + 0.05*randn(N, 1)
# Normalize
x = zscore(x)
y = zscore(y)
# Plot it
plt.plot(x, y, '.')
# -
# NOTE(review): the next line discards the sinusoid target above and replaces
# y with a noisy parabola — only this second target is actually fitted below.
y = [xx**2 + 5 + .8*np.random.randn() for xx in x]
y = np.asarray(y).astype(np.float32)
plt.plot(x,y, 'r.')
plt.plot(x,y,'b.')
# +
import tensorflow as tf
class DenseLayer(pf.Module):
    """A single fully connected layer with probabilistic weights and biases."""

    def __init__(self, d_in, d_out):
        # Weight matrix (d_in x d_out) and a broadcastable bias row.
        self.w = pf.Parameter([d_in, d_out])
        self.b = pf.Parameter([1, d_out])

    def __call__(self, x):
        # Affine transform: x @ W + b.
        affine = x @ self.w()
        return affine + self.b()
# -
class DenseNetwork(pf.Module):
    """A stack of DenseLayers with ReLU activations and a linear final layer.

    `dims` lists the layer widths, e.g. [1, 20, 20, 1].
    """

    def __init__(self, dims):
        n_layers = len(dims) - 1  # number of layers
        # Pair consecutive widths to size each layer.
        self.layers = [DenseLayer(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:])]
        # ReLU on all hidden layers; identity on the output layer.
        self.activations = [tf.nn.relu] * (n_layers - 1) + [lambda x: x]

    def __call__(self, x):
        for layer, activation in zip(self.layers, self.activations):
            x = activation(layer(x))
        return x
class DenseRegression(pf.ContinuousModel):
    """Neural-network regression: Normal likelihood with a DenseNetwork mean."""

    def __init__(self, dims):
        self.net = DenseNetwork(dims)
        # Single shared observation-noise scale.
        self.s = pf.ScaleParameter([1, 1])

    def __call__(self, x):
        loc = self.net(x)
        return pf.Normal(loc, self.s())
# Two hidden layers of 20 units each; 1-D input and output.
model = DenseRegression([1, 20,20, 1])
model.fit(x, y, epochs=1000, lr=0.02)
# +
# Test points to predict: an evenly spaced grid over the observed x range
x_test = np.linspace(min(x), max(x), 101).astype('float32').reshape(-1, 1)
# Predict them!
preds = model.predict(x_test)
# Plot it
plt.plot(x, y, '.', label='Data')
plt.plot(x_test, preds, 'r.', label='Predictions')
# +
# Compute 95% confidence intervals
lb, ub = model.predictive_interval(x_test, ci=0.95)
# Plot em!
plt.fill_between(x_test[:, 0], lb[:, 0], ub[:, 0],
                 alpha=0.2, label='95% ci')
plt.plot(x, y, '.', label='Data')
# -
# probflow_trainer
# +
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
import probflow as pf
# +
# Generate some data: three Gaussian blobs of 1024 points each,
# shifted to three distinct 2-D centers.
N = 3*1024
X = np.random.randn(N, 2).astype('float32')
X[:1024, :] += [2, 0]
X[1024:2048, :] -= [2, 4]
X[2048:, :] += [-2, 4]
# Plot the data
plt.plot(X[:, 0], X[:, 1], '.', alpha=0.2)
# -
class GaussianMixtureModel(pf.Model):
    """Mixture of k diagonal-covariance Gaussians in d dimensions."""

    def __init__(self, k, d):
        # Component means, per-dimension scales, and mixture weights.
        self.mu = pf.Parameter([k, d])
        self.sigma = pf.ScaleParameter([k, d])
        self.theta = pf.DirichletParameter(k)

    def __call__(self):
        components = tfd.MultivariateNormalDiag(self.mu(), self.sigma())
        return pf.Mixture(components, probs=self.theta())
# Fit a 3-component mixture in 2-D to the three blobs generated above.
model = GaussianMixtureModel(3, 2)
model.fit(X, lr=0.03, epochs=500, batch_size=1024)
| testing/functions_uses.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/zaq9/simpleOption/blob/master/example/ratio_mar.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="wkoUZQTi4mrU" colab_type="code" colab={}
# When using this from a browser via Google Colab, the package must be
# installed on the first run (takes about 20 seconds).
# !pip install simpleOption
# + id="T3e989fb42AD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="6649a1ca-a7d3-466b-a27c-d9d62d8af833"
from simpleOption import *
import pandas as pd
import numpy as np
# FIX: jupytext's light format requires IPython magics to be escaped as
# comments; the previous bare "% matplotlib inline" is a SyntaxError when
# this file is executed as plain Python (cf. the "# %pip" cells elsewhere).
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
# Ratio spread: long 1x March 22000 call, short 2x March 22250 calls.
p = Portfolio(
"""
03/C22000[1]
03/C22250[-2]
""")
# Underlying price grid for the valuation/payoff curves.
x = np.arange(21300, 22400)
setting(21300, 16, 20190221)  # market snapshot 1 (spot 21300, IV assumed at 16%)
plt.plot(x, np.vectorize(p.v)(x), label= 'Ratio_feb21' )
setting(evaluationDate=20190225)
plt.plot(x, np.vectorize(p.v)(x), label= 'Ratio_feb25' )
setting(evaluationDate=20190305)
plt.plot(x, np.vectorize(p.v)(x), label= 'Ratio_Mar05' )
# Terminal payoff at expiry for comparison.
plt.plot(x, np.vectorize(p.pay)(x), label= 'Payoff',linestyle="dashed" )
plt.legend(loc="best")
| example/ratio_mar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ranking of articles using open-access citation-metadata
# Scholarly publications have seen exponential growth in past decades, while only linear growth is estimated for the number of research topics. This suggests that numerous articles appear every day within each research field. Researchers invest their time and energy in keeping up to date with the current state of the art. Research is a continuous process that builds on past work, attributed through citations. There are numerous reasons why a research article gets cited, as well as critics who argue that citations should not be used to assess the value of current work. However, given the current information overload, it is not easy to keep abreast of all the published work. Researchers in the 20th century could dig through all the available literature to find the latest trends, but today's researcher faces more material on any topic than could be read in a lifetime. They need access to current research as soon as it happens, but the citation-count metrics currently in practice limit this approach: to use citation-based metrics, articles must first acquire a reasonable number of citations, which can vary from field to field. Our main contribution is to use a heterogeneous network that includes the article, author and journal to recommend articles in a research field.
# # Import
# +
import Ranking # from https://github.com/bilal-dsu/Ranking/
from matplotlib import pyplot
from scipy.stats import spearmanr
import json, os,sys,snap
import csv
import pandas as pd
import snap
import numpy as np
import re
from itertools import combinations
from os import path
import seaborn as sns
import matplotlib.pyplot as plt
import measures # from https://github.com/dkaterenchuk/ranking_measures
# -
# # Initializations Total Citations
# The original graph is filtered to work with nodes between the year 2000 till 2018, termed Total Citations (TC). Further, we remove any journals with zero out-degree since they do not participate in the ranking method.
# +
# following input files must be present in the CWD
metaDataCSV = "MetaData 2000-2018.csv"
ArticleGraph = "ArticleGraph.graph"
ArticleHash = "ArticleHash.hash"
# following files will be created in the CWD
# journal-level citation network, graph and node-id hash
JournalCitationTXT = "JournalCitation.txt"
JournalGraph = "JournalGraph.graph"
JournalHash = "JournalHash.hash"
JournalSubGraph = "JournalSubGraph.graph"
SubMetaDataCSV = "SubMetaData.csv"
# author- and article-level citation networks
AuthorCitationTXT = "AuthorCitation.txt"
ArticleCitationTXT = "ArticleCitation.txt"
AuthorGraph = "AuthorGraph.graph"
AuthorHash = "AuthorHash.hash"
# per-entity info and rank outputs
AuthorInfoCSV = "AuthorInfo.csv"
JournalInfoCSV = "JournalInfo.csv"
ArticleInfoCSV = "ArticleInfo.csv"
AuthorRankCSV = "AuthorRank.csv"
JournalRankCSV = "JournalRank.csv"
ArticleRankCSV = "ArticleRank.csv"
# network statistics outputs
ArticlesGraphStats="ArticleGraphStats.csv"
JournalGraphStats="JournalGraphStats.csv"
AuthorGraphStats="AuthorGraphStats.csv"
GraphStatsOverall="GraphStatsOverall.csv"
# -
# # Generate Total Citations
# Build the journal citation network, restrict it to the TC subgraph,
# then build the author and article citation networks from the same metadata.
Ranking.generateJournanalCitationNetworkText(metaDataCSV, JournalCitationTXT)
Ranking.generateJournalCitationGraph(JournalCitationTXT, JournalGraph, JournalHash)
Ranking.generateSubGraph(JournalHash, JournalGraph, JournalSubGraph, metaDataCSV, SubMetaDataCSV)
Ranking.generateAuthorArticleCitationNetworkText(SubMetaDataCSV, AuthorCitationTXT, ArticleCitationTXT)
Ranking.generateAuthorArticleGraph(AuthorCitationTXT, AuthorGraph, AuthorHash, ArticleCitationTXT,
                                   ArticleGraph, ArticleHash)
# # Initializations Early Citations
# To evaluate the ranking technique, we take nodes of the year 2005 and apply a cut-off on citations till 2010, termed Early Citations (EC). The cutoff window is configurable. Only a few past years are considered to give equal chance to early career researchers.
# +
# Provide values for Early Citations cutoff
RankYearStart = 2005
RankYearEnd = 2005
CutOffStart = 2000
CutOffEnd = 2010

def _span_file(prefix, start, end, suffix):
    """Return a '<prefix><start>-<end><suffix>' file name for a year span."""
    return f"{prefix}{start}-{end}{suffix}"

# following files will be created in the CWD
# (names are built with the helper above instead of 22 repeated
# "str() + ..." concatenations; the resulting values are unchanged)
metaDataRankYearCSV = _span_file("metaData", RankYearStart, RankYearEnd, ".csv")
metaDataCutOffYearCSV = _span_file("metaData", CutOffStart, CutOffEnd, ".csv")
JournalCutOffYearTXT = _span_file("Journal", CutOffStart, CutOffEnd, ".txt")
JournalCutOffYearGraph = _span_file("Journal", CutOffStart, CutOffEnd, ".graph")
JournalCutOffYearHash = _span_file("Journal", CutOffStart, CutOffEnd, ".hash")
JournalCutOffYearInfoCSV = _span_file("Journal", CutOffStart, CutOffEnd, "Info.csv")
JournalCutOffYearRankCSV = _span_file("Journal", CutOffStart, CutOffEnd, "Rank.csv")
JournalCutOffYearSubGraph = _span_file("JournalSubGraph", CutOffStart, CutOffEnd, ".graph")
ArticleCutOffYearTXT = _span_file("Article", CutOffStart, CutOffEnd, ".txt")
ArticleCutOffYearGraph = _span_file("Article", CutOffStart, CutOffEnd, ".graph")
ArticleCutOffYearHash = _span_file("Article", CutOffStart, CutOffEnd, ".hash")
ArticleCutOffYearInfoCSV = _span_file("Article", CutOffStart, CutOffEnd, "Info.csv")
ArticleCutOffYearRankCSV = _span_file("Article", CutOffStart, CutOffEnd, "Rank.csv")
AuthorCutOffYearTXT = _span_file("Author", CutOffStart, CutOffEnd, ".txt")
AuthorCutOffYearGraph = _span_file("Author", CutOffStart, CutOffEnd, ".graph")
AuthorCutOffYearHash = _span_file("Author", CutOffStart, CutOffEnd, ".hash")
AuthorCutOffYearInfoCSV = _span_file("Author", CutOffStart, CutOffEnd, "Info.csv")
AuthorCutOffYearRankCSV = _span_file("Author", CutOffStart, CutOffEnd, "Rank.csv")
AuthorGraphStatsCutOffYear = _span_file("AuthorGraphStats", CutOffStart, CutOffEnd, ".csv")
ArticleGraphStatsCutOffYear = _span_file("ArticleGraphStats", CutOffStart, CutOffEnd, ".csv")
JournalGraphStatsCutOffYear = _span_file("JournalGraphStats", CutOffStart, CutOffEnd, ".csv")
GraphStatsCutOffYear = _span_file("GraphStats", CutOffStart, CutOffEnd, ".csv")
# -
# # Generate Early Citations
# Build the EC (cut-off window) journal, author and article networks.
Ranking.generateTemporalNetwork(SubMetaDataCSV, RankYearStart,RankYearEnd, CutOffStart, CutOffEnd,
                                metaDataRankYearCSV, metaDataCutOffYearCSV, ArticleHash, ArticleGraph)
Ranking.generateJournanalCitationNetworkText(metaDataCutOffYearCSV, JournalCutOffYearTXT)
Ranking.generateJournalCitationGraph(JournalCutOffYearTXT, JournalCutOffYearGraph, JournalCutOffYearHash)
Ranking.generateAuthorArticleCitationNetworkText(metaDataCutOffYearCSV, AuthorCutOffYearTXT, ArticleCutOffYearTXT)
Ranking.generateAuthorArticleGraph(AuthorCutOffYearTXT, AuthorCutOffYearGraph, AuthorCutOffYearHash,
                                   ArticleCutOffYearTXT, ArticleCutOffYearGraph, ArticleCutOffYearHash)
# # Calculate Rank
# The rank of a journal or author is given by the PageRank measure in Equation 1.
# \begin{equation}
# \label{eq:Rank}
# R(i) = ((1-\alpha)/n) + \alpha * \sum_{\substack{j}} R(j)\frac{a_{ij}}{N_i}
# \end{equation}
# where, n is the total no. of nodes in the network,
#
# $\alpha$ $\epsilon$ $(0 , 1)$ is damping factor (set to $0.85$),
#
# $a_{ij}$ is 1 if node (i) cites node (j) and 0 otherwise,
#
# $N_i$ is the total neighbours of node i.
#
# The rank of journal and author is transferred to the article given by Equation 2, thereby, inflating the rank of the article which was cited by any influential journal or author. The rank of the article "a" published in journal "b" by the author(s) "c" is:
# \begin{equation} \label{eq:ArticleRank}
# AR(a) = ((1-\beta) * R(b) + \beta * \frac {\sum_{\substack{i}} R (i)}{c})
# \end{equation}
# where, $\beta$ $\epsilon$ $(0 , 1)$ is adjustment for weight-age of author and journal influence (set to $0.5$).
# Rank the TC networks, then the EC (cut-off window) networks.
Ranking.generateAuthorJournalRank(AuthorHash, AuthorGraph, AuthorInfoCSV, JournalHash, JournalSubGraph, JournalInfoCSV, JournalGraphStats, AuthorGraphStats)
Ranking.generateArticleRank(JournalInfoCSV, SubMetaDataCSV, ArticleGraph, ArticleHash, AuthorInfoCSV, ArticleInfoCSV, ArticlesGraphStats)
Ranking.generateAuthorJournalRank(AuthorCutOffYearHash, AuthorCutOffYearGraph, AuthorCutOffYearInfoCSV,
                                  JournalCutOffYearHash, JournalCutOffYearGraph, JournalCutOffYearInfoCSV, JournalGraphStatsCutOffYear,
                                  AuthorGraphStatsCutOffYear)
Ranking.generateArticleRank(JournalCutOffYearInfoCSV, metaDataCutOffYearCSV, ArticleCutOffYearGraph, ArticleCutOffYearHash,
                            AuthorCutOffYearInfoCSV, ArticleCutOffYearInfoCSV, ArticleGraphStatsCutOffYear)
# # Analysis
# On the temporal citation network, we correlate the EC rank of publications with the rank calculated using TC. It is used as a baseline for evaluating the ranking mechanism. To identify whether our technique captures key articles with a high EC rank that went on to attain a high rank in TC, we apply Spearman's rank correlation. Our preliminary analysis suggests that the ranking technique is stable. The rank calculated with EC correlates with rank calculated with TC. However, there is no significant correlation with citation count, suggesting that the technique does not rely on merely counting citations. It essentially means that instead of only counting citations the value of a citation coming from a reputable journal gets a higher rank.
Ranking.generateGraphStats(JournalGraphStats, AuthorGraphStats, ArticlesGraphStats, GraphStatsOverall)
Ranking.generateGraphStats(JournalGraphStatsCutOffYear, AuthorGraphStatsCutOffYear,
                           ArticleGraphStatsCutOffYear , GraphStatsCutOffYear)
Ranking.correlationAnalysis(AuthorInfoCSV, AuthorCutOffYearInfoCSV, JournalInfoCSV,
                            JournalCutOffYearInfoCSV, ArticleInfoCSV, ArticleCutOffYearInfoCSV, metaDataRankYearCSV)
| Ranking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import random
from PIL import Image
import cv2
import numpy as np
# Native pixel size of each icon image.
WIDTH = 707
HEIGHT = 557
# Load every icon once, keyed by file name without its 4-character
# extension (e.g. ".png").
icon_files = os.listdir('icons/')
icons = {}
for f in icon_files:
    icons[f[:-4]] = Image.open(f'icons/{f}')
icons = list(icons.items())
# Background canvas that icons get pasted onto; RGBA so pastes can use alpha.
background = Image.open('background.jpg')
background = background.convert('RGBA')
# 3x3 grid of (col, row) cells an icon may be placed in.
QUADRANTS = [
    (0, 0),
    (1, 0),
    (2, 0),
    (0, 1),
    (1, 1),
    (2, 1),
    (0, 2),
    (1, 2),
    (2, 2),
]
def remove_background(image):
    """Black out low-saturation (grayish) pixels of a PIL image.

    Pixels whose HSV saturation falls in [0, 60] (any hue, any value) are
    treated as background and zeroed; everything else is kept. Returns a
    new PIL Image.
    """
    pixels = np.array(image)
    # NOTE(review): cvtColor is called with COLOR_BGR2HSV although the array
    # comes from PIL (RGB channel order) — confirm this is intentional.
    hsv = cv2.cvtColor(pixels, cv2.COLOR_BGR2HSV)
    # Hue 0-179 (full range), saturation 0-60 (low), value 0-255 (full range):
    # together these select the desaturated "background" pixels.
    lower_bound = np.array([0, 0, 0])
    upper_bound = np.array([179, 60, 255])
    background_mask = cv2.inRange(hsv, lower_bound, upper_bound)
    keep_mask = cv2.bitwise_not(background_mask)
    foreground = cv2.bitwise_and(pixels, pixels, mask=keep_mask)
    return Image.fromarray(foreground)
# +
# Synthesize 10,000 composite training images: paste 1-5 distinct random
# icons onto the background at distinct grid cells, then downscale to 224x224.
DIRECTORY = 'images_224/'
random.seed(5)  # fixed seed so the generated dataset is reproducible
# !rm -rf $DIRECTORY
# !mkdir -p $DIRECTORY
for i in range(10000):
    used_icons = []
    image = background.copy()
    # fresh copies per image so the removals below don't leak across iterations
    quadrants = list(QUADRANTS)
    usable_icons = list(icons)
    for _ in range(random.randint(1, 5)):
        # pick a distinct icon each time
        icon_name, icon = random.choice(usable_icons)
        usable_icons.remove((icon_name, icon))
        used_icons.append(icon_name)
        # resize, keeping the icon's native aspect ratio
        width = random.randint(200, 500)
        height = int(HEIGHT * (width / WIDTH))
        icon = icon.resize((width, height))
        # rotate by a random angle
        icon = icon.rotate(random.randint(0, 360))
        # position: take a random still-free grid cell
        q = random.choice(quadrants)
        quadrants.remove(q)
        x = q[0] * 250
        y = q[1] * 250
        # use the icon itself as the paste mask to respect its transparency
        image.paste(icon, (x, y), icon)
    image = image.convert('RGB')
    image = image.resize((224, 224), Image.LANCZOS)
    # BUG FIX: the label string was built but never used — the save path
    # contained a literal "(unknown)" placeholder. Encode the pasted icon
    # names into the file name so every image carries its labels.
    filename = '_'.join(used_icons)
    image.save(f'{DIRECTORY}/{i:05d}_{filename}.png')
# -
# -
| notebooks/icons/generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="DqjH9ukPywr-" colab_type="text"
# # Download the Dataset and Imports
# + id="q_DmcYyHycfH" colab_type="code" outputId="77d5f7f7-3c59-4b36-cba9-87d681cae5af" colab={"base_uri": "https://localhost:8080/", "height": 121}
# !curl --output dataset.zip https://archive.ics.uci.edu/ml/machine-learning-databases/00462/drugsCom_raw.zip
# !unzip dataset.zip
# # !rm dataset.zip
# + id="d6QBBCUnykJ8" colab_type="code" outputId="d6b3c901-964b-4e9f-b13f-69dd3d50e7ec" colab={"base_uri": "https://localhost:8080/"}
import pandas as pd
import html
from nltk.tokenize import word_tokenize,RegexpTokenizer
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
nltk.download('punkt')
import gensim
import numpy as np
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
# + [markdown] id="zpgivv-3zoBf" colab_type="text"
# #Loading Data and Preprocessing
# Tokenization,removing stopwords
# + id="JlQqiCrizOjJ" colab_type="code" outputId="8d859425-de26-4075-fb5d-409504e86757" colab={"base_uri": "https://localhost:8080/", "height": 284}
# Loading Data
def load_dataset(filename, classes=3):
    """Load a drugsCom review TSV and derive a sentiment label.

    Keeps only the review text and its rating, strips the surrounding
    quote characters kept by the raw file, unescapes HTML entities, and
    maps ratings to labels: <=4 -> 0 (negative), 5-6 -> 1 (neutral),
    >=7 -> 2 (positive).

    Parameters
    ----------
    filename : str
        Path to the tab-separated raw file.
    classes : int
        Accepted for interface compatibility with getData(), which calls
        load_dataset(filename, classes); only the 3-class mapping above is
        implemented (TODO: support classes=2).

    Returns
    -------
    pandas.DataFrame with columns ['review', 'rating', 'label'].
    """
    df = pd.read_csv(filename, sep='\t')
    df.drop(df.columns[0], axis=1, inplace=True)  # unnamed index column
    df.drop(['drugName', 'condition', 'date', 'usefulCount'], axis=1, inplace=True)
    df['review'] = df['review'].apply(lambda x: x[1:-1])  # strip wrapping quote chars
    df['review'] = df['review'].apply(html.unescape)
    df['rating'] = df['rating'].astype('int64')
    # Use .loc instead of chained indexing (df.label[...] = ...), which only
    # updates by accident and triggers SettingWithCopyWarning.
    df['label'] = df['rating']
    df.loc[df['rating'] <= 4, 'label'] = 0
    df.loc[(df['rating'] >= 5) & (df['rating'] <= 6), 'label'] = 1
    df.loc[df['rating'] >= 7, 'label'] = 2
    return df
df_train = load_dataset("drugsComTrain_raw.tsv")
df_test = load_dataset("drugsComTest_raw.tsv")
# + id="DH-MKgxtzwFF" colab_type="code" colab={}
stop_words = set(stopwords.words('english'))
tokenizer = RegexpTokenizer(r'\w+')
# + id="UZP4w-Fozzb4" colab_type="code" colab={}
def func(row):
    """Tokenize a row's review and drop English stopwords, lowercased.

    Returns the list of lowercase word tokens with stopwords removed.
    Relies on the module-level `tokenizer` (RegexpTokenizer) and
    `stop_words` (NLTK English stopword set, which is all-lowercase).
    """
    tokens = tokenizer.tokenize(row['review'])
    # Lowercase BEFORE the stopword test: the original compared the raw
    # token, so capitalized stopwords ("The", "And", ...) slipped through
    # even though the output was lowercased anyway.
    return [t.lower() for t in tokens if t.lower() not in stop_words]
df_train['processed'] = df_train.apply (lambda row: func(row), axis=1)
df_test['processed'] = df_test.apply (lambda row: func(row), axis=1)
# + [markdown] id="ONdAOZ00cFA8" colab_type="text"
# #Embedings
# + [markdown] colab_type="text" id="BMELCU3VPpvS"
# ## Glove embeddings
# + [markdown] id="r99TFj4eDqD8" colab_type="text"
# ### Download Glove embeddings
# + id="_ux6hL9qRGZW" colab_type="code" outputId="155fef3a-5e28-40b1-b832-1ae98692d96b" colab={"base_uri": "https://localhost:8080/", "height": 104}
# !wget "http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip"
# !unzip glove.6B.zip
# !rm glove.6B.zip
# + [markdown] id="6QKB2ccxJ_9L" colab_type="text"
# ### Util functions
# + id="9bl9-7Zsfm2j" colab_type="code" colab={}
def embed(processed, w2v_dict, default_vec):
    """Map each token in *processed* to its embedding vector.

    Tokens missing from *w2v_dict* fall back to *default_vec*.
    Returns a 2-D ndarray with one row per token.
    """
    looked_up = [w2v_dict[token] if token in w2v_dict else default_vec
                 for token in processed]
    return np.array(looked_up)
def embed_using_glove(df, embed_dim=50):
    """Attach a 'vectors' column of GloVe embeddings to *df*.

    Reads glove.6B.<embed_dim>d.txt from the working directory, builds a
    word -> vector dict, and embeds each row's 'processed' token list via
    embed(). Unknown tokens get the mean GloVe vector.

    Returns the mean embedding vector (reused as padding placeholder).
    """
    glove_file = 'glove.6B.' + str(embed_dim) + 'd.txt'
    # Hidden dimension from the first line only; the original scanned the
    # whole file just to compute an unused vector count.
    with open(glove_file, 'r') as f:
        hidden_dim = len(f.readline().split(' ')) - 1
    # Build the word -> vector dict and accumulate the mean embedding,
    # parsing each line once (the original parsed every line twice).
    avg_vec = np.zeros((hidden_dim,), dtype=np.float32)
    w2v_dict = {}
    count = 0
    with open(glove_file, 'r') as f:
        for line in f:
            fields = line.split(' ')
            vec = np.array([float(n) for n in fields[1:]], dtype=np.float32)
            w2v_dict[fields[0]] = vec
            avg_vec += vec
            count += 1
    avg_vec /= count
    df['vectors'] = df.apply(lambda row: embed(row['processed'], w2v_dict, avg_vec), axis=1)
    return avg_vec
# + [markdown] id="ke6uwaoIz6Az" colab_type="text"
# ### Embedding the tokens using Glove
# Embeding_D = 50
# + id="gtRtl4FKz1Ck" colab_type="code" colab={}
EMBEDDING_D =50
# + id="bqqm7eMb0EVv" colab_type="code" colab={}
GLOVE_FILE = 'glove.6B.'+str(EMBEDDING_D)+'d.txt'
# Get number of vectors and hidden dim
# (full scan: n_vec = line count, hidden_dim = tokens-1 on the last line)
with open(GLOVE_FILE, 'r') as f:
    for i, line in enumerate(f):
        pass
n_vec = i + 1
hidden_dim = len(line.split(' ')) - 1
# + id="zGfiI1G00HWv" colab_type="code" outputId="5f39ba94-797e-4b8a-edc8-6be442374126" colab={"base_uri": "https://localhost:8080/", "height": 208}
# Second pass: build the word -> vector dict and accumulate the mean GloVe
# vector, used below as the placeholder for out-of-vocabulary tokens.
avg_vec = np.zeros((hidden_dim), dtype=np.float32)
w2v_dict = {}
count = 0
with open(GLOVE_FILE, 'r') as f:
    for line in f:
        w2v_dict[line.split(' ')[:1][0]] = np.array([float(n) for n in line.split(' ')[1:]], dtype=np.float32)
        avg_vec += np.array([float(n) for n in line.split(' ')[1:]], dtype=np.float32)
        count+=1
avg_vec/=count
avg_vec
# + id="k_7eXHHJ0fha" colab_type="code" outputId="bee34719-93e9-4f18-e1ba-b9c97a5cc85a" colab={"base_uri": "https://localhost:8080/", "height": 424}
def foo(row, s=None, e=None):
    """Embed tokens row[s:e] using the module-level GloVe lookup.

    Unknown tokens map to the module-level mean vector `avg_vec`.
    s and e default to None (i.e. the whole token list): the call sites in
    this notebook pass only the token list, which made the original
    3-required-argument signature raise TypeError.
    """
    vectors = []
    for w in row[s:e]:
        if(w in w2v_dict):
            vectors.append(w2v_dict[w])
        else:
            vectors.append(avg_vec)
    return np.array(vectors)
df['vectors'] = df.apply (lambda row: foo(row['processed']), axis=1)
df
# + [markdown] colab_type="text" heading_collapsed=true id="OwG4T9TRMvaQ"
# ## Word2Vec embeddings
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="O5sARRHwM0Fl"
# ### Download the Google word2vec embeddings
# + colab_type="code" hidden=true id="24KezgZuRbwk" outputId="9e03d2e4-534c-4889-b1b6-7b0346c13ca0" colab={"base_uri": "https://localhost:8080/", "height": 69}
# !curl --output word2vec.bin.gz https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz
# !gunzip word2vec.bin.gz
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="f_Koey_WSjiY"
# ### Load the word2vec embeddings into memory
# + colab_type="code" hidden=true id="3Iiw7NcRSqjA" outputId="4ea549f1-6b3c-46cb-a064-acb3ba11b5be" colab={"base_uri": "https://localhost:8080/", "height": 73}
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="cNUQxRghW70T"
# ### Util funcitons
# + colab_type="code" hidden=true id="yyQQerUzW68n" colab={}
def embed_w2v(processed, w2v_dict, default_vec):
    """Replace every token with its word2vec embedding.

    Tokens absent from *w2v_dict* (a dict or gensim KeyedVectors) fall
    back to *default_vec*. Returns a 2-D ndarray, one row per token.
    """
    def lookup(token):
        return w2v_dict[token] if token in w2v_dict else default_vec
    return np.array([lookup(token) for token in processed])
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="1epAFPfIxWC-"
# ### Get equal-class versions of train and test dataset
# + id="pRK6atCUdLNN" colab_type="code" colab={}
# df_test_w2v['label'].value_counts()
# df_test_w2v
# + colab_type="code" hidden=true id="Aqw1UeLhxWDR" colab={}
# df_train_w2v = pd.concat([df_train[df_train['label']==0].sample(n=4829).reset_index(drop=True),df_train[df_train['label']==1].sample(n=4829).reset_index(drop=True),df_train[df_train['label']==2].sample(n=4829).reset_index(drop=True)],ignore_index=True)
# df_test_w2v = pd.concat([df_test[df_test['label']==0].sample(n=4829).reset_index(drop=True),df_test[df_test['label']==1].sample(n=4829).reset_index(drop=True),df_test[df_test['label']==2].sample(n=4829).reset_index(drop=True)],ignore_index=True)
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="HHE64eMvfUg9"
# ### Convert the tokens into Word2Vec embeddings
# + colab_type="code" hidden=true id="sUw50_VeUU5c" colab={}
# w2v_avg = np.average(w2v_model.vectors, axis=0)
# df_train_w2v['vectors'] = df_train_w2v.apply(lambda row: embed_w2v(row['processed'], w2v_model, w2v_avg), axis=1)
# df_test_w2v['vectors'] = df_test_w2v.apply(lambda row: embed_w2v(row['processed'], w2v_model, w2v_avg), axis=1)
# + [markdown] heading_collapsed=true hidden=true id="unLYkdPwYLtR" colab_type="text"
# ### Save the Embeddings to file
# + colab_type="code" hidden=true id="LH6rJMjryvzC" colab={}
# df_train_w2v.to_pickle("train_w2v.pckl")
# df_test_w2v.to_pickle("test_w2v.pckl")
# + id="3ND-rucOSrUR" colab_type="code" colab={}
def embeding_using_w2v(df):
    """Embed df['processed'] with the pretrained GoogleNews word2vec model.

    Adds a 'vectors' column (one ndarray of token embeddings per row) and
    returns the mean embedding vector, used downstream as the padding /
    unknown-word placeholder.
    """
    from gensim.models import KeyedVectors
    # NOTE(review): reloads the ~3.4 GB binary model from disk on every call.
    w2v_model = KeyedVectors.load_word2vec_format('word2vec.bin', binary=True)
    w2v_avg = np.average(w2v_model.vectors, axis=0)
    # The original applied this exact line twice back to back; once suffices.
    df['vectors'] = df.apply(lambda row: embed_w2v(row['processed'], w2v_model, w2v_avg), axis=1)
    return w2v_avg
# + [markdown] id="ug4lJ29I_Kks" colab_type="text"
# ## Elmo embeddings
# + [markdown] id="fvtbnbXTXkJc" colab_type="text"
# ### Install allennlp
# + id="wX4DiLtJXnxI" colab_type="code" outputId="0d763e9c-f708-4a7a-c680-17a1079b9593" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install allennlp
# + [markdown] id="Hv2O9HmcoSV7" colab_type="text"
# ### Imports
# + id="LhYYiddBjbfK" colab_type="code" colab={}
from allennlp.commands.elmo import ElmoEmbedder
import sys
# + [markdown] id="vne2Ej3koUES" colab_type="text"
# ### Util functions
# + id="uhMhSy3xXoeN" colab_type="code" colab={}
avg_elmo_embedding = ['1']
def create_elmo_embeddings(elmo, documents, max_sentences = 1000):
    """Return per-document ELMo embeddings (the 3 ELMo layers averaged).

    elmo: an allennlp ElmoEmbedder.
    documents: DataFrame with a 'processed' column of token lists.
    max_sentences: cap on how many documents to embed; <= 0 means all.
    Prints a textual progress bar to stdout while embedding.
    """
    num_sentences = min(max_sentences, len(documents)) if max_sentences > 0 else len(documents)
    print("\n\n:: Lookup of " + str(num_sentences) + " ELMo representations. This takes a while ::")
    embeddings = []
    tokens = documents['processed'].to_numpy()
    documentIdx = 0
    for elmo_embedding in elmo.embed_sentences(tokens):
        document = documents.iloc[documentIdx]  # NOTE(review): fetched but never used
        # Average the 3 layers returned from ELMo
        avg_elmo_embedding = np.average(elmo_embedding, axis=0)
        embeddings.append(avg_elmo_embedding)
        # Some progress info
        documentIdx += 1
        percent = 100.0 * documentIdx / num_sentences
        line = '[{0}{1}]'.format('=' * int(percent / 2), ' ' * (50 - int(percent / 2)))
        status = '\r{0:3.0f}%{1} {2:3d}/{3:3d} sentences'
        sys.stdout.write(status.format(percent, line, documentIdx, num_sentences))
        if max_sentences > 0 and documentIdx >= max_sentences:
            break
    return embeddings
# + [markdown] colab_type="text" id="qmrAEErXwpDA"
# ### Get smaller versions of train and test dataset
# + id="6dzCGQ9MZN7u" colab_type="code" colab={}
# df_train_elmo = df_train.sample(n=int(df_train.shape[0]/20)).reset_index(drop=True)
# df_test_elmo = df_test.sample(n=int(df_test.shape[0]/20)).reset_index(drop=True)
# + [markdown] id="uqmLae0uoXe7" colab_type="text"
# ### Get ELMo embeddings
# + id="2frbjy9yokoS" colab_type="code" colab={}
# elmo = ElmoEmbedder(cuda_device=0) #Set cuda_device to the ID of your GPU if you have one
# df_train_elmo['elmo_embeddings'] = create_elmo_embeddings(elmo, df_train_elmo, -1)
# df_test_elmo['elmo_embeddings'] = create_elmo_embeddings(elmo, df_test_elmo, -1)
# + id="eUw1U_vTUi7A" colab_type="code" colab={}
def embeding_using_elmo(df):
    """Embed df['processed'] with ELMo and return a zero placeholder vector.

    Adds a 'vectors' column of averaged ELMo embeddings (1024-d) and
    returns a 1024-d zero vector, used downstream as the padding /
    context placeholder (analogous to the GloVe/word2vec mean vectors).
    """
    elmo = ElmoEmbedder(cuda_device=0)  # Set cuda_device to the ID of your GPU if you have one
    df['vectors'] = create_elmo_embeddings(elmo, df, -1)
    # BUG FIX: np.full_like(range(1024), 0.0) inherits an *integer* dtype
    # from the range array, yielding int zeros where downstream padding
    # code expects a float32 vector.
    return np.zeros(1024, dtype=np.float32)
# + [markdown] id="lsBWWstumTyv" colab_type="text"
# # GetData
# + id="ejdTuYy6OMe3" colab_type="code" colab={}
def getleftContext(df,w2v_avg):
    """Build the left-context tensor for the RCNN.

    Pads each document's embedding sequence to `sequence_length`, prepends
    one w2v_avg vector per document and drops the last timestep, so that
    position t holds the embedding of token t-1 (w2v_avg acts as the
    "no previous word" placeholder).

    NOTE(review): relies on module-level `sequence_length` and keras's
    `pad_sequences`; leaves debug prints in place.
    """
    left_context_as_array = df['vectors'].to_numpy()
    # One placeholder vector per document, shaped (n_docs, 1, embed_dim).
    app = []
    for i in range(left_context_as_array.shape[0]):
        app.append([w2v_avg,])
    app = np.array(app)
    left_context_as_array = pad_sequences(left_context_as_array, maxlen=sequence_length,dtype='float32')
    print('left context shape')
    print(app.shape)
    print(left_context_as_array.shape)
    # Shift right: prepend the placeholder, then cut the final timestep.
    left_context_as_array = np.concatenate([app,left_context_as_array],axis = 1)
    print(left_context_as_array.shape)
    left_context_as_array = left_context_as_array[:,:-1,:]
    print('left_context shape',left_context_as_array.shape)
    return left_context_as_array
def getrightContext(df,w2v_avg):
    """Build the right-context tensor for the RCNN.

    Pads each document's embedding sequence to `sequence_length`, appends
    one w2v_avg vector per document and drops the first timestep, so that
    position t holds the embedding of token t+1 (w2v_avg acts as the
    "no next word" placeholder).
    """
    right_context_as_array = df['vectors'].to_numpy()
    # One placeholder vector per document, shaped (n_docs, 1, embed_dim).
    app = []
    for i in range(right_context_as_array.shape[0]):
        app.append([w2v_avg,])
    app = np.array(app)
    right_context_as_array = pad_sequences(right_context_as_array, maxlen=sequence_length,dtype='float32')
    # for el in right_context_as_array:
    #   el = np.vstack([el[1:],[w2v_avg,]])
    # Shift left: append the placeholder, then cut the first timestep.
    right_context_as_array = np.concatenate([right_context_as_array,app],axis = 1)
    print(right_context_as_array.shape)
    right_context_as_array = right_context_as_array[:,1:,:]
    print('right_context shape',right_context_as_array.shape)
    return right_context_as_array
# + id="0lHLzIavkPeO" colab_type="code" outputId="2aadfcf8-91cd-457f-cbc3-5811bff2d601" colab={"base_uri": "https://localhost:8080/", "height": 86}
a = np.array([[[1,2],[3,4],[5,6]]])
b = np.array([[[4,5],],])
np.concatenate([b,a],axis=1)
# + id="fssYrFmO0yVX" colab_type="code" colab={}
sequence_length = 100
EMBEDDING_D = 300
# + id="r83PZ5gIltcI" colab_type="code" colab={}
def getData(normalize = False,k = -1,k_test = 1000,classes=3,embeding='glove'):
    """Load, label, tokenize, embed and pad the drugsCom train/test sets.

    normalize: downsample every class to the size of the smallest class.
    k / k_test: random subsample sizes for train / test (-1 = keep all).
    classes: number of label classes (forwarded to load_dataset).
    embeding: 'glove' (50-d), 'w2v' (300-d) or 'elmo' (1024-d).

    Returns (train_data, test_data, train_labels, test_labels) where each
    *_data is a dict with keys 'doc', 'l_c', 'r_c' (document tensor plus
    left/right context tensors) and labels are one-hot arrays.

    NOTE(review): load_dataset as originally defined above takes a single
    argument; these 2-argument calls require it to accept `classes`.
    """
    df_train = load_dataset("drugsComTrain_raw.tsv",classes)
    df_test = load_dataset("drugsComTest_raw.tsv",classes)
    print(df_train.groupby('label').size())
    print(df_test.groupby('label').size())
    if(normalize):
        # Balance classes by sampling each down to the smallest class size.
        mn = df_train.groupby('label').size().min()
        df_train = pd.concat([df_train[df_train['label']==0].sample(n=mn).reset_index(drop=True),df_train[df_train['label']==1].sample(n=mn).reset_index(drop=True),df_train[df_train['label']==2].sample(n=mn).reset_index(drop=True)],ignore_index=True)
        mn = df_test.groupby('label').size().min()
        df_test = pd.concat([df_test[df_test['label']==0].sample(n=mn).reset_index(drop=True),df_test[df_test['label']==1].sample(n=mn).reset_index(drop=True),df_test[df_test['label']==2].sample(n=mn).reset_index(drop=True)],ignore_index=True)
    if(k!=-1):
        df_train = df_train.sample(n=k).reset_index(drop=True)
    if(k_test!=-1):
        df_test = df_test.sample(n=k_test).reset_index(drop=True)
    # Tokenize + stopword-filter each review (adds 'processed' column).
    df_train['processed'] = df_train.apply (lambda row: func(row), axis=1)
    df_test['processed'] = df_test.apply (lambda row: func(row), axis=1)
    # Each embed_* helper adds a 'vectors' column and returns the
    # placeholder vector used for context padding.
    # NOTE(review): the EMBEDDING_D assignments below create a *local*
    # variable that is never read again — they have no effect.
    if(embeding == 'glove'):
        w2v_avg = embed_using_glove(df_train, 50)
        w2v_avg = embed_using_glove(df_test, 50)
        EMBEDDING_D = 50
    if(embeding == 'w2v'):
        w2v_avg = embeding_using_w2v(df_train)
        w2v_avg = embeding_using_w2v(df_test)
        EMBEDDING_D = 300
    if(embeding == 'elmo'):
        w2v_avg = embeding_using_elmo(df_train)
        w2v_avg = embeding_using_elmo(df_test)
        EMBEDDING_D = 1024
    # df_train['vectors'] = df_train.apply (lambda row: foo(row['processed']), axis=1)
    # df_test['vectors'] = df_test.apply (lambda row: foo(row['processed']), axis=1)
    # Pad every document to the module-level sequence_length.
    train_data = pad_sequences(df_train['vectors'], maxlen=sequence_length,dtype='float32')
    test_data = pad_sequences(df_test['vectors'], maxlen=sequence_length,dtype='float32')
    l_c = getleftContext(df_train,w2v_avg)
    r_c = getrightContext(df_train,w2v_avg)
    train_data = {'doc': train_data,'l_c':l_c,'r_c':r_c}
    l_c = getleftContext(df_test,w2v_avg)
    r_c = getrightContext(df_test,w2v_avg)
    test_data = {'doc': test_data,'l_c':l_c,'r_c':r_c}
    # One-hot encode the labels.
    train_labels = pd.get_dummies(df_train['label']).to_numpy().reshape(-1,classes)
    test_labels = pd.get_dummies(df_test['label']).to_numpy().reshape(-1,classes)
    print(df_train.groupby('label').size())
    print(df_test.groupby('label').size())
    return train_data,test_data,train_labels,test_labels
# + [markdown] id="ogGIQ6gUXfr9" colab_type="text"
# #Model
# + id="rfelJyWp2mFI" colab_type="code" colab={}
from tensorflow import keras
from keras.layers import Conv1D,Concatenate,Reshape,Dense,Dropout,MaxPool1D,Input,LSTM,concatenate
from keras.models import Model
from keras.layers import Lambda
from keras import backend
import tensorflow as tf
import keras.backend as K
import os
# os.environ['TF_KERAS'] = '1'
# + id="3yvNmMGj2pJC" colab_type="code" colab={}
def RCNN(embed = 'glove', num_classes = 3):
    """Build and compile the RCNN text-classification model.

    Parameters
    ----------
    embed : str
        Embedding family the inputs were produced with: 'glove' -> 50-d,
        'w2v' -> 300-d, anything else -> 1024-d (ELMo). Fixes the input
        feature width.
    num_classes : int
        Number of output classes (default 3, matching getData's labels).
        Previously hard-coded, which blocked using getData(classes=...).

    Returns a compiled keras Model taking inputs named 'doc', 'l_c', 'r_c'
    (the keys produced by getData) and emitting softmax class probabilities.
    """
    if(embed == 'glove'):
        EMBEDDING_D = 50
    elif(embed == 'w2v'):
        EMBEDDING_D = 300
    else:
        EMBEDDING_D = 1024
    hidden_dim_1 = 200  # LSTM units in each context encoder
    hidden_dim_2 = 100  # width of the 1x1 "semantic" convolution
    # Three aligned inputs: the document plus its left/right context tensors.
    document = Input(shape = (sequence_length,EMBEDDING_D),name='doc')
    left_context = Input(shape = (sequence_length,EMBEDDING_D ),name='l_c')
    right_context = Input(shape = (sequence_length,EMBEDDING_D ),name='r_c')
    # I use LSTM RNNs instead of vanilla RNNs as described in the paper.
    forward = LSTM(hidden_dim_1, return_sequences = True)(left_context)
    backward = LSTM(hidden_dim_1, return_sequences = True, go_backwards = True)(right_context)
    # Keras returns the output sequences in reverse order, so flip them back.
    backward = Lambda(lambda x: K.reverse(x,axes=1) )(backward)
    together = concatenate([forward, document, backward], axis = 2)
    semantic = Conv1D(hidden_dim_2, kernel_size = 1, activation = "tanh")(together)
    # Keras's own max-pooling layers cannot handle variable length input,
    # so the max over the time axis is taken with a Lambda.
    pool_rnn = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (hidden_dim_2, ))(semantic)
    output = Dense(num_classes, input_dim = hidden_dim_2, activation = "softmax")(pool_rnn)
    model = Model(inputs = [document, left_context, right_context], outputs = output)
    model.compile(optimizer = "adadelta", loss = "categorical_crossentropy", metrics = ["accuracy"])
    return model
# + [markdown] id="muDTCw0UbBNc" colab_type="text"
# # Training model
# + [markdown] id="QV70dG6Fc1qV" colab_type="text"
# ##Glove
# + id="KyjWOPLOXvp4" colab_type="code" outputId="4705b35a-56c8-4238-c45c-e7166124b0a6" colab={"base_uri": "https://localhost:8080/", "height": 1000}
xTrain,xVal,yTrain,yVal = getData(normalize=True,k=10000)
model = RCNN('glove')
# + id="6a-LYVtJ8MF8" colab_type="code" outputId="3b7e1531-520f-46ee-be8d-774f9580125b" colab={"base_uri": "https://localhost:8080/", "height": 555}
training = model.fit(x=xTrain, y = yTrain, epochs=15, batch_size=128,validation_split=0.1)
# + id="t3t7kBjWvSMA" colab_type="code" outputId="adfd5c67-03db-475a-f3c4-6b78d8d47392" colab={"base_uri": "https://localhost:8080/", "height": 191}
from sklearn.metrics import classification_report
import numpy as np
Y_test = np.argmax(yVal, axis=1) # Convert one-hot to index
y_pred = model.predict(xVal)
y_pred = y_pred.argmax(axis=-1)
print(classification_report(Y_test, y_pred))
# + id="MdCe73C_wSmD" colab_type="code" outputId="5b996d4d-962e-421b-e927-6edc6cf1b1c2" colab={"base_uri": "https://localhost:8080/", "height": 337}
from sklearn import metrics
cm=metrics.confusion_matrix(Y_test,y_pred)
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
df_cm = pd.DataFrame(cm, index = ['negative','neutral','positive'],
columns = ['negative','neutral','postive'])
plt.figure(figsize = (5,5))
ax = sn.heatmap(df_cm, annot=True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
# + [markdown] id="03cblYnhvHt_" colab_type="text"
# ## Word2Vec
# + colab_type="code" outputId="3131ad7a-0ae3-486e-9179-de98a705a1dc" id="QU9BO8754vbD" colab={"base_uri": "https://localhost:8080/", "height": 1000}
xTrain,xVal,yTrain,yVal = getData(embeding='w2v',normalize=True,k=10000)
model = RCNN('w2v')
# + colab_type="code" outputId="197eaa72-8828-49ef-d0f9-f9237d67cca0" id="Bs5nt1gX4vbS" colab={"base_uri": "https://localhost:8080/", "height": 555}
training = model.fit(x=xTrain, y = yTrain, epochs=15, batch_size=128,validation_split=0.1)
# + colab_type="code" outputId="ca7790a5-6ad0-487d-dc0f-5442b8fb928f" id="5sv1R0pA4vbd" colab={"base_uri": "https://localhost:8080/", "height": 191}
from sklearn.metrics import classification_report
import numpy as np
Y_test = np.argmax(yVal, axis=1) # Convert one-hot to index
y_pred = model.predict(xVal)
y_pred = y_pred.argmax(axis=-1)
print(classification_report(Y_test, y_pred))
# + colab_type="code" outputId="f30f504f-ebf8-4ac9-df63-59f933e93378" id="kkPBkfA64vbl" colab={"base_uri": "https://localhost:8080/", "height": 337}
from sklearn import metrics
cm=metrics.confusion_matrix(Y_test,y_pred)
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
df_cm = pd.DataFrame(cm, index = ['negative','neutral','positive'],
columns = ['negative','neutral','postive'])
plt.figure(figsize = (5,5))
ax = sn.heatmap(df_cm, annot=True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
# + [markdown] id="u5DW7vNtvNOL" colab_type="text"
# ## Elmo
# + colab_type="code" outputId="f765a513-59f6-41fd-a0b3-e5eb343ededc" id="HXn_WD6m_DVy" colab={"base_uri": "https://localhost:8080/", "height": 1000}
xTrain,xVal,yTrain,yVal = getData(embeding='elmo',normalize=True,k=5000)
model = RCNN('elmo')
# + colab_type="code" outputId="e39922df-d2c3-42c2-abf2-2f60a5fe6105" id="vJ5jzd0J_DWA" colab={"base_uri": "https://localhost:8080/", "height": 905}
training = model.fit(x=xTrain, y = yTrain, epochs=15, batch_size=128,validation_split=0.1)
# + colab_type="code" outputId="3d423d3a-910a-4df4-cc32-4c7111b36b06" id="brdbQruZ_DWK" colab={"base_uri": "https://localhost:8080/", "height": 191}
from sklearn.metrics import classification_report
import numpy as np
Y_test = np.argmax(yVal, axis=1) # Convert one-hot to index
y_pred = model.predict(xVal)
y_pred = y_pred.argmax(axis=-1)
print(classification_report(Y_test, y_pred))
# + colab_type="code" outputId="18f64b35-cdd0-4ca3-b93c-b868608a2b34" id="cyFp1Ixe_DWT" colab={"base_uri": "https://localhost:8080/", "height": 337}
from sklearn import metrics
cm=metrics.confusion_matrix(Y_test,y_pred)
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
df_cm = pd.DataFrame(cm, index = ['negative','neutral','positive'],
columns = ['negative','neutral','postive'])
plt.figure(figsize = (5,5))
ax = sn.heatmap(df_cm, annot=True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
# + id="hIENpce4Ht5z" colab_type="code" colab={}
| RCNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Action of boundary and co-boundary maps on a chain
#
# __*Definition.*__ An abstract simplicial complex $K$ is a collection of finite sets that is closed
# under set inclusion, i.e. if $\sigma \in K$ and $\tau \subseteq \sigma$, then $\tau \in K$.
#
# __*Definition.*__ The boundary operator $\partial_d : C_d(K) \rightarrow C_{d-1}(K)$ is the linear function defined for each oriented $d$-simplex $\sigma = [v_0, ..., v_d]$ by
# \begin{equation}
# \partial_d (\sigma) = \partial_d [v_0, ..., v_d] = \sum_{i=0}^d (-1)^i [v_0, ..., \hat{v}_i, ..., v_d],
# \end{equation}
# where $[v_0, ... \hat{v}_i, ..., v_d]$ is the subset of $[v_0, ..., v_d]$ obtained by removing the vertex $v_i$.
#
# Let $S_d(K)$ be the set of all oriented $d$-simplices of the simplicial complex $K$ (i.e. the set of basis elements of $C_d(K)$), and let $\tau \in S_{d-1}(K)$. Then define the two sets
# \begin{align}
# S^+_d (K) &= \{ \sigma \in S_d(K) \, | \text{ the coefficient of } \tau \text{ in } \partial_d (\sigma) \text{ is } +1 \} \\
# S^-_d (K) &= \{ \sigma \in S_d(K) \, | \text{ the coefficient of } \tau \text{ in } \partial_d (\sigma) \text{ is } -1 \}
# \end{align}
#
# __*Lemma (see [1])*__ Let $\partial^*_d$ be the adjoint of the boundary operator $\partial_d$, and let $\tau \in S_{d-1}(K)$. Then
# \begin{equation}
# \partial^*_d (\tau) = \sum_{\sigma' \in S^+_d (K, \tau)} \sigma' - \sum_{\sigma'' \in S^-_d (K, \tau)} \sigma''.
# \end{equation}
#
#
# In this section we are going to see how the boundary map $\partial_d$ and the co-boundary map $\partial^*_d$ act on a chain.
# ## References <a class="anchor" id="refs"></a>
#
# 1. [R.Gustavson, Laplacians of Covering Complexes](https://scholar.rose-hulman.edu/cgi/viewcontent.cgi?article=1099&context=rhumj)
# 2. [From Topological Data Analysis to Deep Learning: No Pain No Gain](https://towardsdatascience.com/from-tda-to-dl-d06f234f51d)
# 3. [<NAME> and <NAME>, Random walks on simplicial complexes and harmonics](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5324709/)
#
# 4. [<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Random Walks on Simplicial Complexes and the normalized Hodge 1-Laplacian](https://arxiv.org/pdf/1807.05044.pdf)
# 5. [GUDHI Library](http://gudhi.gforge.inria.fr/doc/latest/index.html)
#
# 6. [<NAME>, Bochner's Method for Cell Complexes and Combinatorial Ricci Curvature](https://link.springer.com/content/pdf/10.1007%2Fs00454-002-0743-x.pdf)
| examples/theory_simplicial_diffusion.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Racket
# language: racket
# name: racket
# ---
# **Exercise 1.16:** Design a procedure that evolves an iterative exponentiation process that uses successive squaring and uses a logarithmic number of steps, as does `fast-expt`. (Hint: Using the observation that ${(b^{n/2})^2}={(b^2)^{n/2}}$, keep, along with the exponent $n$ and the base $b$, an additional state variable $a$, and define the state transformation in such a way that the product ${ab^n}$ is unchanged from state to state. At the beginning of the process $a$ is taken to be 1, and the answer is given by the value of $a$ at the end of the process. In general, the technique of defining an _invariant quantity_ that remains unchanged from state to state is a powerful way to think about the design of iterative algorithms.)
(define (fast-expt b n) ; original of SICP, recursive
  ; Compute b^n by successive squaring: O(log n) multiplications.
  ; NOTE(review): relies on `square`, not shown here (built into Racket 7+).
  (cond ((= n 0) 1)
        ((even? n) (square (fast-expt b (/ n 2))))
        (else (* b (fast-expt b (- n 1))))))
# ## 답
#
# 힌트에서는 새로운 상태 a를 만들어서 ${ab^n}$ 이 항상 같도록 하라는 수수께끼 같은 힌트를 줬다. 어떻게 하라는 건지 잘 모르겠어서 일단 a를 넣고 식을 무작정 세워 봤다. 힌트를 참조해서 2와 n/2의 위치도 바꿔 봤다.
#
# $$ab^n=a(b^2)^{n/2}$$
# $$ab^n=abb^{n-1}$$
#
# 이제 이걸 코드로 옮겨 보자.
(define (f-expt b n)
  ; Iterative fast exponentiation (SICP exercise 1.16).
  ; Invariant: a * b^n equals the original b^n at every step, so when
  ; n reaches 0 the accumulator a holds the answer.
  (let iter ((a 1) (b b) (n n))
    (displayln (list a b n)) ; trace each (a b n) state for the write-up
    (cond ((= n 0) a)
          ((even? n) (iter a (* b b) (/ n 2)))
          (else (iter (* a b) b (- n 1))))))
(f-expt 9 7)
| 1/16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
import glob
files = glob.glob("data/ml-latest-small/*csv")
names = [file.split("/")[-1].split(".csv")[0] for file in files]
pd_dict = {}
for file, name in zip(files, names):
pd_dict[name] = pd.read_csv(file)
movies = pd_dict["movies"]
ratings = pd_dict["ratings"]
# Reproducibly sample 50 distinct users and keep only their ratings.
user_ids = ratings["userId"]
n_user_ids = 50
np.random.seed(42)
selected_user_ids = np.random.choice(np.unique(list(user_ids)), size=n_user_ids, replace=False)
# Vectorized membership test: Series.isin does one pass, unlike the
# original per-row `in` scan over the sampled array (O(rows * sample)).
selection_boolean = user_ids.isin(selected_user_ids)
# use the boolean above to select rows in ratings
ratings_subset = ratings[selection_boolean]
# set index for ratings_subset
ratings_subset.set_index("movieId", inplace=True)
# set index for movies
movies.set_index("movieId", inplace=True)
# join ratings_subset and movies
ratings_join = ratings_subset.join(movies)
ratings_subset
ratings_join
# select only userid, rating and title
ratings_movie = ratings_join[["userId", "rating", "title"]]
ratings_movie
# Pivot the table (reshape to wide format)
ratings_movie = ratings_movie.pivot_table(index="userId", columns="title", aggfunc="mean")
# Save this reshaped table
ratings_movie
# Fill missing values with the mean rating of each movie (column).
# BUG FIX: the original passed ratings.mean() — the column means of the
# *raw* ratings frame (userId/movieId/rating/timestamp), whose labels
# cannot match the pivoted movie-title columns, so no NaN was filled.
ratings_movie.fillna(ratings_movie.mean(), inplace=True)
ratings_movie
# Save reshaped file
ratings_movie.reset_index(inplace=True)
ratings_movie.columns = [i[1] for i in ratings_movie.columns]
ratings_movie.columns = ["userId" if column == "" else column for column in list(ratings_movie.columns)]
ratings_movie.set_index("userId", inplace=True)
ratings_movie.to_csv("ratings_movie_reshaped.csv")
# Define a non-negative matrix factorization with 20 components
nmf = NMF(n_components=20, max_iter=5000)
# Fit the ratings to the nmf
nmf.fit(ratings_movie)
# save the model
filename = 'nmf_movies.sav'
import pickle
pickle.dump(nmf, open(filename, "wb"))
sample_movies = np.random.choice(ratings_movie.columns, replace=False, size=10)
own_ratings = {}
for sample_movie in sample_movies:
own_ratings[sample_movie] = np.random.randint(1, 6)
own_ratings
own_ratings = {'Omen, The (1976)': 5,
'Parent Trap, The (1998)': 4,
'<NAME>: Back to the Future (Ivan Vasilievich menyaet professiyu) (1973)': 5,
'Mutant Aliens (2001)': 5,
'Death Note: Desu nôto (2006–2007)': 4,
'Affair to Remember, An (1957)': 3,
'Superman II (1980)': 3,
'English Patient, The (1996)': 3,
'Midnight Run (1988)': 4,
'Best of the Best (1989)': 2}
# Read ratings movie reshaped
reshaped_data = pd.read_csv("ratings_movie_reshaped.csv", index_col=0)
# Get movies
movies = list(reshaped_data.columns)
# Create a full own ratings dictionary: every catalogue movie, with None
# standing in for movies the user has not rated.
own_ratings_full = {movie: own_ratings.get(movie) for movie in movies}
# Create a dataframe for the above movies
own_ratings_df = pd.DataFrame(own_ratings_full, index=[0])
own_ratings_df
# Fill missing values
own_ratings_df.fillna(own_ratings_df.iloc[0].mean(), inplace=True)
own_ratings_df
# Calculate matrix P
P = nmf.transform(own_ratings_df)
Pdf = pd.DataFrame(P)
Pdf
# Extract matrix Q (movie-genre)
Q = nmf.components_
Qdf = pd.DataFrame(Q)
Qdf.columns = reshaped_data.columns
# Generating predicted ratings by multiplying P and Q.T
predicted_ratings = Pdf.dot(Qdf)
predicted_ratings
# Remove already rated movies
predicted_ratings.drop(list(own_ratings.keys()), axis=1, inplace=True)
predictions = predicted_ratings.T
predictions.columns = ["rating"]
top_3 = list(predictions.sort_values("rating", ascending=False).iloc[:3].index)
top_3
| ipython_folder/sampling_movies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Metadata consumed by connect_notebook_to_post below.
name = '2016-09-30-scripts-and-modules'
title = 'Using Python scripts and modules '
tags = 'basics'
author = '<NAME>'
# +
# nb_tools is a helper module local to this blog repository; it returns an
# HTML snippet linking this notebook to the published post.
from nb_tools import connect_notebook_to_post
from IPython.core.display import HTML
html = connect_notebook_to_post(name, title, tags, author)
# -
# You can type all the instructions in the Python interpreter. But for longer sets of instructions you definitely need to change track and write the code in text files, that are usually called *scripts*.
# ## Ways of running scripts
# There are several ways of executing, or running, a script. If you frequently work in a command line, you would run a Python script just by typing
#
# ```bash
# $ python some_script.py
# ```
#
# where `some_script` is of course the name of your script.
# On Unix machines, if the script starts with **`#!/usr/bin/env python`** and the script is executable, you can just type the name of the script to run it:
# ```bash
# $ ./demo.py
# ```
# If you want to run a script from a Python 2 interpreter, you can use the `execfile` command:
# ```python
# >>> execfile('some_script.py')
# ```
# In Python 3, `execfile` was removed; use `exec(open('some_script.py').read())` instead.
# The IPython console offers another way of running a script. Yes, you guessed it, it's just literally `run` command:
#
# ```ipython
# # %run demo.py
# ```
# In this case, not only the script was executed, but also the variables defined in the script are now available inside the interpreter's namespace.
# ## Debugging and profiling in IPython
# This section is shamelessly taken from [here](https://github.com/drivendata/data-science-is-software/blob/master/notebooks/lectures/3.0-refactoring.ipynb).
# #### Standard Python debugger: pdb
# Interrupt execution with:
# * `%debug` magic: drops you out into the most recent error stacktrace in pdb
# * `import q; q.d()`: drops you into pdb, even outside of IPython
#
# Interrupt execution on an `Exception` with the `%pdb` magic. Use pdb, the Python debugger, to debug inside a notebook.
#
# **Key commands for pdb are:**
#
# * `p`: Evaluate and print Python code
# * `w`: Where in the stack trace am I?
# * `u`: Go up a frame in the stack trace.
# * `d`: Go down a frame in the stack trace.
# * `c`: Continue execution
# * `q`: Stop execution
# #### IPython profiler
# Sometimes your code is slow. See which functions are called, how many times, and how long they take!
# The `%prun` magic reports these to you right in the Jupyter notebook!
# #### The world beyond Jupyter
# Modern graphical IDEs are shipped with built-in profiling and debugging interfaces. One of the most powerful Python IDEs is PyCharm. It has tons of integrations with the normal development flow. Some of the features include:
# * git integration
# * interactive graphical debugger
# * flake8 linting
# * smart refactoring/go to
# ## Reusing code by importing modules
# If you want to write larger and better organized programs (compared to simple scripts), where some objects are defined, (variables, functions, classes) and that you want to reuse several times, you have to create your own modules.
#
# Let us create a module demo contained in the file demo.py:
# ```python
# # A demo module
#
#
# def show_me_a():
# """Prints a."""
# print('a')
#
# def show_me_b():
# """Prints b."""
# print('b')
#
# c = 2
# d = 2
# ```
# In this file, we defined two functions `show_me_a` and `show_me_b`. Suppose we want to call the `show_me_a` function from the interpreter. We could execute the file as a script, but since we just want to have access to the function `show_me_a`, we are rather going to import it as a module. The syntax is as follows.
# ```ipython
# In [1]: import demo
#
#
# In [2]: demo.show_me_a()
# a
# ```
# ### Pythonic import statements
# #### <font color='green'>Good</font>
# import <font color='green'>sys</font>
#
# from os import <font color='green'>path</font>
#
# import statistics <font color='green'>as stats</font>
#
# from custom_package import <font color='green'>mode</font>
#
# from statistics import <font color='green'>mean, median</font>
# #### <font color='red'>Bad:</font> silently overwrites previous imports
# from pylab import <font color='red'><b>*</b></font>
# ### Module caching
# Modules are cached: if you modify ``demo.py`` and re-import it in the
# old session, you will get the old one.
#
# Solution:
#
# ```ipython
# In [1]: reload(demo)
# ```
#
# In Python 3 instead ``reload`` is not builtin, so you have to import the ``importlib`` module first and then do:
#
# ```python
# In [1]: importlib.reload(demo)
# ```
# #### Auto-reloading in IPython
# ```ipython
# # %load_ext autoreload
# # always reload modules marked with "%aimport"
# # %autoreload 1
# # reload all
# # %autoreload 2
# ```
# Let us test it out! First we import the module using the magic:
# %load_ext autoreload
# %autoreload 2
# Import the local demo.py module; with autoreload mode 2 every module is
# re-imported automatically before execution, so edits to demo.py take effect.
import demo
demo.show_me_a()
# Then we change that function to so that it prints something else:
# ```python
# # A demo module
#
#
# def show_me_a():
# """Prints a."""
# print('Something else')
#
# def show_me_b():
# """Prints b."""
# print('b')
#
# c = 2
# d = 2
# ```
# Now `demo.show_me_a()` prints out "Something else" instead of "a".
# ## `'__main__'` and how to use it
# Sometimes we want code to be executed when a module is run directly, but not when it is imported by another module. `if __name__ == '__main__'` allows us to check whether the module is being run directly.
# So now if the script demo.py looks like this:
# ```python
# def show_me_a():
# """Prints a."""
# print('Something else')
#
# def show_me_b():
# """Prints b."""
# print('b')
#
# # show_me_b() runs on import
# show_me_b()
#
# if __name__ == '__main__':
# # show_me_a() is only executed when the module is run directly.
# show_me_a()
# ```
# ## Using packages and creating your own modules
# In order to import your local modules, you must do three things:
# * put the .py file in a separate folder
# * add an empty `__init__.py` file to the folder
# * add that folder to the Python path with `sys.path.append()`
# If you are getting too good at writing code and it's becoming useful for other projects or people, you should consider refactoring it into a standalone package. You can then make it available online via PyPi or Anaconda. There are great templates out there. To name but a few:
# * [Cookiecutter](https://github.com/wdm0006/cookiecutter-pipproject)
# * [Shablona](https://github.com/uwescience/shablona)
# ## Resources
# * [TalkPython course "Write Pythonic Code Like a Seasoned Developer"](https://training.talkpython.fm/courses/details/write-pythonic-code-like-a-seasoned-developer)
# * [SciPy lectures](http://www.scipy-lectures.org/intro/language/reusing_code.html)
# * [Data Science is Software. SciPy 2016 Tutorial by <NAME> & <NAME>](https://www.youtube.com/watch?v=EKUy0TSLg04&index=10&list=WL)
HTML(html)
| content/notebooks/2016-09-30-scripts-and-modules.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .fs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: F#
// language: fsharp
// name: ifsharp
// ---
// # Ordering and simulations (bounded model checking)
//
// In each of the examples so far we have looked at systems that are effectively static in time. We
// don't consider how entities change over time, just their relationships with one another. This is
// useful in some situations but, particularly in biology, we want to know how systems develop over
// time. Here we will start to look at some examples where we describe this type of system.
//
// This can be referred to as bounded model checking; we look for solutions up to a bound (a number
// of steps taken). Our solutions are therefore restricted to the bound; sometimes this is fine, but
// in others it can be a limitation. Increasing the bound makes the solutions slower and harder to
// find as the size of state space increases. In a worst case scenario an increase of a single step
// may transform the problem from one that can be solved in ms to one that takes years! There may
// therefore be a largest theoretical bound that is greater than the bound that can practically be
// tested, and you should be aware that the results may not hold for larger, untestable bounds.
//
// ## Getting started
//
// In this section we will be using Z3 as a library. To start we will need to download the files if they are not already available. These first cells download Z3 as a zip, extract it, and load it into memory. We then reference the extracted file and open it as a module.
//
// ** If the first cell does not run, manually download z3 from the link for your operating system and unzip it to a folder with the notebooks called "z3" **
// +
#r "System.IO.Compression.FileSystem.dll"
open System
open System.IO
open System.IO.Compression
open System.Net
//Specify Tls version to avoid cryptic connection errors
System.Net.ServicePointManager.SecurityProtocol <- SecurityProtocolType.Tls12 ||| SecurityProtocolType.Tls11
// WebClient used below to fetch the Z3 release archive from GitHub
let wc = new WebClient()
// Minimal OS tag used to pick the right Z3 binary distribution.
type OS =
        | OSX
        | Windows
        | Linux
// Map the numeric PlatformID onto our OS tag
// (4 or 128 -> Linux, 6 -> OSX, anything else -> Windows).
let getOS =
    match int Environment.OSVersion.Platform with
    | 4 | 128 -> Linux
    | 6 -> OSX
    | _ -> Windows
// Download and unpack Z3 on first run only; skipped if z3/LICENSE.txt already
// exists. (`not (...)` replaces the non-idiomatic `true <> ...` comparison.)
if not (System.IO.File.Exists("z3/LICENSE.txt")) then
    match getOS with
    | Linux ->
        wc.DownloadFile("https://github.com/Z3Prover/z3/releases/download/z3-4.6.0/z3-4.6.0-x64-ubuntu-16.04.zip", @"z3.zip")
        // This will take a while
        ZipFile.ExtractToDirectory("z3.zip", ".")
        System.IO.Directory.Move("z3-4.6.0-x64-ubuntu-16.04", "z3")
    | Windows ->
        wc.DownloadFile("https://github.com/Z3Prover/z3/releases/download/z3-4.6.0/z3-4.6.0-x64-win.zip", @"z3.zip")
        // This will take a while
        ZipFile.ExtractToDirectory("z3.zip", ".")
        System.IO.Directory.Move("z3-4.6.0-x64-win", "z3")
    | _ -> () // OSX: no prebuilt binary is fetched here (see cell note above)
// -
// NOTE(review): this cell was a garbled, syntactically invalid duplicate of
// the TLS setup earlier in the notebook; restored to a valid re-assertion.
open System.Net
ServicePointManager.SecurityProtocol <- SecurityProtocolType.Tls11 ||| SecurityProtocolType.Tls12
#r "z3/bin/Microsoft.Z3.dll"
open Microsoft.Z3
// # Die Hard with a Vengeance
//
// You have a 3 litre jug and a 5 litre jug, and need to measure 4 litres of water. You can empty
// the jugs onto the ground and into each other, and you can fill them from each other and the tap.
// Without measuring the volumes explicitly, how do you get 4 litres?
//
// Now we have variables that change with time. The way we create them is not different from before
// but we need to consider the initial state and the relationships between timepoints.
//
// In previous examples where variables did not change we created constants with the name of the
// variable; now we will add the time explicitly to the variable name. So in the initial case we
// will have just two variables "Five-0" and "Three-0". Consistent naming is important so that we
// can encode the behaviour in a loop; other times will be written as "Five-%t" where %t is the time.
// Assert that between times t and t' exactly one legal action happens:
// do nothing, fill a jug from the tap, empty a jug onto the ground, or pour
// between jugs. (Unused locals zTwo/zFour removed from the original.)
let assertUpdate (ctx:Context) (s:Solver) t t' =
    //Convenience integers
    let zZero = ctx.MkInt(0)
    let zFive = ctx.MkInt(5)
    let zThree = ctx.MkInt(3)
    //Jug-volume variables at the two consecutive timepoints
    let fiveState = ctx.MkIntConst(sprintf "Five-%d" t)
    let threeState = ctx.MkIntConst(sprintf "Three-%d" t)
    let fiveState' = ctx.MkIntConst(sprintf "Five-%d" t')
    let threeState' = ctx.MkIntConst(sprintf "Three-%d" t')
    //Simple updates; do nothing, fill from tap, empty to ground
    let doNothingFive = ctx.MkEq(fiveState,fiveState')
    let doNothingThree = ctx.MkEq(threeState,threeState')
    let fillFive = ctx.MkEq(fiveState',zFive)
    let fillThree = ctx.MkEq(threeState',zThree)
    let emptyFive = ctx.MkEq(fiveState',zZero)
    let emptyThree = ctx.MkEq(threeState',zZero)
    //Complex update: pouring between jugs conserves the total volume and
    //(combined with fill/empty below) must leave one jug full or empty.
    let transfer = ctx.MkEq(ctx.MkAdd(fiveState,threeState),ctx.MkAdd(fiveState',threeState'))
    //List all of the possible updates, turn them into one disjunction,
    //and add it to the solver.
    let possibleUpdates = [|
                            ctx.MkAnd(doNothingFive,fillThree)
                            ctx.MkAnd(doNothingFive,emptyThree)
                            ctx.MkAnd(fillFive,doNothingThree)
                            ctx.MkAnd(emptyFive,doNothingThree)
                            ctx.MkAnd(transfer,emptyFive)
                            ctx.MkAnd(transfer,emptyThree)
                            ctx.MkAnd(transfer,fillFive)
                            ctx.MkAnd(transfer,fillThree)
                            |]
    let constraints = ctx.MkOr(possibleUpdates)
    s.Add(constraints)
// ## Initialising the model- assertUpdate
//
// We then initialise the model, by specifying how full the jugs are initially. Each jug starts off empty
// so we set the variables "Five-0" and "Three-0" to be equal to zero.
//
// We then define the transitions that the jugs can make according to the actions we can perform.
// This is done in the step function that asserts how the jugs update between two times, and the
// bounds of the jugs (i.e. the total amount of water they can hold). The update itself is specified
// in assertUpdate. We can do a limited number of things;
//
// * Empty each of the jugs (*emptyThree,emptyFive*)
// * Do nothing to each of the jugs (*doNothingThree,doNothingFive*)
// * Fill each of the jugs (*fillThree,fillFive*)
// * Transfer fluid from one jug to the other, leading to either one jug being filled or one emptied (*transfer*)
//
// In the last case the total volume of water must stay the same, and one of the jugs must be either
// emptied or filled. We can then add all of the different options to an "Or" expression, and add this
// as a constraint.
// ## Additional constraints- assertBounds
//
// Arguably we don't need it, but *assertBounds* ensures that the jugs stay in their defined limits. This may prevent certain bugs
// Keep both jug volumes within their physical capacities at time t:
// 0 <= five <= 5 and 0 <= three <= 3.
let assertBounds (ctx:Context) (s:Solver) t =
    let zero = ctx.MkInt(0)
    let five = ctx.MkIntConst(sprintf "Five-%d" t)
    let three = ctx.MkIntConst(sprintf "Three-%d" t)
    let inRange =
        ctx.MkAnd([|
                    ctx.MkGe(five, zero)
                    ctx.MkLe(five, ctx.MkInt(5))
                    ctx.MkGe(three, zero)
                    ctx.MkLe(three, ctx.MkInt(3))
                    |])
    s.Add(inRange)
// Finally we define functions that tie these together. Step asserts that between two given times, t and t', an action is taken
// Advance the system one action: assert a legal update between times t and
// t', and keep both timepoints within the jugs' capacities.
let step ctx s t t' =
    assertUpdate ctx s t t'
    assertBounds ctx s t
    assertBounds ctx s t'
// *setState* is a convenience function that allows us to specify the state of the system at a given time. This is important for defining the initial state and the final state.
// +
// Pin the two jug variables at time t to the given volumes
// (used by `initial` to fix the starting state).
let setState (ctx:Context) (s:Solver) t (three:int) (five:int) =
    let fiveVar = ctx.MkIntConst(sprintf "Five-%d" t)
    let threeVar = ctx.MkIntConst(sprintf "Three-%d" t)
    let pinned =
        ctx.MkAnd([|
                    ctx.MkEq(fiveVar, ctx.MkInt(five))
                    ctx.MkEq(threeVar, ctx.MkInt(three))
                    |])
    s.Add(pinned)
// Both jugs start empty.
let initial ctx s t =
    setState ctx s t 0 0
// Goal condition: the five-litre jug holds exactly four litres at time t.
let final (ctx:Context) (s:Solver) t =
    let fiveVar = ctx.MkIntConst(sprintf "Five-%d" t)
    s.Add(ctx.MkEq(fiveVar, ctx.MkInt(4)))
// -
// To test different bounds we then then use a loop and add a new step for each turn of the loop, testing
// at each stage for a solution. If we run the main function with a maximum bound of 10 we can find a
// solution quickly, in only 6 steps!
// Iterative-deepening search: extend the trace one step at a time (up to
// maxBound steps) and ask Z3 whether the goal state is reachable.
let main maxBound =
    let ctx = new Context()
    let s = ctx.MkSolver()
    initial ctx s 0
    let rec core i =
        if i = maxBound then printf "No results within bound of %d\n" maxBound else
        // Permanently assert the transition from step i-1 to i ...
        step ctx s (i-1) i
        // ... but push/pop the goal so it can be retracted before deepening.
        s.Push()
        final ctx s i
        let r = s.Check()
        match r with
        | Status.UNSATISFIABLE ->
            s.Pop()
            printf "Unsat- No answer with a bound of %d\n" i
            core (i+1)
        | Status.SATISFIABLE ->
            // NOTE(review): the model is read after Pop(); Z3 keeps the model
            // of the most recent Check(), but reading it before Pop() would be
            // safer — confirm against the Z3 .NET API docs.
            s.Pop()
            printf "Sat- Got a result at bound %d\n" i
            printf "3Jug\t5Jug\n"
            // Print the witness trace: jug volumes at every timepoint.
            for t=0 to i do
                let threeState = s.Model.ConstInterp(ctx.MkIntConst(sprintf "Three-%d" t))
                let fiveState = s.Model.ConstInterp(ctx.MkIntConst(sprintf "Five-%d" t))
                printf "%O\t%O\n" threeState fiveState
        | _ -> failwith "Unknown response from Z3"
    core 1
main 10
// ## Exercises
//
// 1. Now imagine that some updates are not allowed; for example, you couldn't empty the 3 litre jug without transferring the contents to the other jug. What happens to the solution if you prevent those from occurring? Modify the code to find out
// 2. Within a small bound, are there any update types you must have?
| Timing Z3 example.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,jl:hydrogen
# text_representation:
# extension: .jl
# format_name: hydrogen
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# %%
using BenchmarkTools
using Base.Sort
# %%
function f!(a, b)
    # Baseline strategy for the benchmark: append each incoming value,
    # then re-sort the whole vector from scratch.
    for item in b
        push!(a, item)
        sort!(a)
    end
    return a
end
function g!(a, b)
    # Bulk strategy: splice all of b onto the end, then sort once.
    sort!(append!(a, b))
end
function h!(a, b)
    # Incremental strategy: `a` stays sorted throughout, so binary-search
    # each value's insertion point and splice it in.
    for item in b
        pos = searchsortedfirst(a, item)
        insert!(a, pos, item)
    end
    return a
end
# %%
# Test data: a sorted vector of 1000 floats plus 10 new values to insert.
a, b = sort(rand(1000)), rand(10)
# %%
# Sanity check: all three strategies must produce the same sorted vector.
f!(copy(a), b) == g!(copy(a), b) == h!(copy(a), b)
# %%
@btime f!(A, $b) setup=(A = copy(a));
# %%
@btime g!(A, $b) setup=(A = copy(a));
# %%
@btime h!(A, $b) setup=(A = copy(a));
# %%
# Same three benchmarks, but with the destination pre-sized via sizehint!.
@btime f!(A, $b) setup=(A = copy(a); sizehint!(A, length(a)+length(b)));
# %%
@btime g!(A, $b) setup=(A = copy(a); sizehint!(A, length(a)+length(b)));
# %%
@btime h!(A, $b) setup=(A = copy(a); sizehint!(A, length(a)+length(b)));
# %%
# NOTE(review): A is a global that is never reset between benchmark samples,
# so it keeps growing while @btime repeats the body — timings here are skewed.
A = copy(a)
@btime begin
    for i in 1:10
        push!(A, b[i])
        sort!(A)
    end
    A
end
# %%
# Same loop, but with B re-created by the setup expression for each sample.
@btime begin
    for i in 1:10
        push!(B, b[i])
        sort!(B)
    end
    B
end setup=(B = copy(a); sizehint!(B, length(a)+length(b)));
# %%
| 0019/searchsortedfirst.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9 (tensorflow)
# language: python
# name: tensorflow
# ---
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_3_python_collections.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # T81-558: Applications of Deep Neural Networks
# **Module 1: Python Preliminaries**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 1 Material
#
# * Part 1.1: Course Overview [[Video]](https://www.youtube.com/watch?v=IzZSwS45vt4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_1_overview.ipynb)
# * Part 1.2: Introduction to Python [[Video]](https://www.youtube.com/watch?v=czq5d53vKvo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_2_intro_python.ipynb)
# * **Part 1.3: Python Lists, Dictionaries, Sets and JSON** [[Video]](https://www.youtube.com/watch?v=kcGx2I5akSs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_3_python_collections.ipynb)
# * Part 1.4: File Handling [[Video]](https://www.youtube.com/watch?v=FSuSLCMgCZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_4_python_files.ipynb)
# * Part 1.5: Functions, Lambdas, and Map/Reduce [[Video]](https://www.youtube.com/watch?v=jQH1ZCSj6Ng&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_5_python_functional.ipynb)
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
# +
# Detect whether we are running inside Google Colab: the import only succeeds
# on Colab and raises ImportError everywhere else.
try:
    from google.colab import drive
    # %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except ImportError:
    # Narrowed from a bare `except:` so unrelated errors are not swallowed.
    print("Note: not using Google CoLab")
    COLAB = False
# -
# # Part 1.3: Python Lists, Dictionaries, Sets, and JSON
#
# Like most modern programming languages, Python includes Lists, Sets, Dictionaries, and other data structures as built-in types. The syntax appearance of both of these is similar to JSON. Python and JSON compatibility is discussed later in this module. This course will focus primarily on Lists, Sets, and Dictionaries. It is essential to understand the differences between these three fundamental collection types.
#
# * **Dictionary** - A dictionary is a mutable unordered collection that Python indexes with name and value pairs.
# * **List** - A list is a mutable ordered collection that allows duplicate elements.
# * **Set** - A set is a mutable unordered collection with no duplicate elements.
# * **Tuple** - A tuple is an immutable ordered collection that allows duplicate elements.
#
# Most Python collections are mutable, meaning the program can add and remove elements after definition. An immutable collection cannot add or remove items after definition. It is also essential to understand that an ordered collection means that items maintain their order as the program adds them to a collection. This order might not be any specific ordering, such as alphabetic or numeric.
#
# Lists and tuples are very similar in Python and are often confused. The significant difference is that a list is mutable, but a tuple isn’t. So, we include a list when we want to contain similar items and a tuple when we know what information goes into it ahead of time.
#
# Many programming languages contain a data collection called an array. The array type is noticeably absent in Python. Generally, the programmer will use a list in place of an array in Python. Arrays in most programming languages were fixed-length, requiring the program to know the maximum number of elements needed ahead of time. This restriction leads to the infamous array-overrun bugs and security issues. The Python list is much more flexible in that the program can dynamically change the size of a list.
#
# The next sections will look at each collection type in more detail.
#
# ### Lists and Tuples
#
# For a Python program, lists and tuples are very similar. Both lists and tuples hold an ordered collection of items. It is possible to get by as a programmer using only lists and ignoring tuples.
#
# The primary difference that you will see syntactically is that a list is enclosed by square braces [], and a tuple is enclosed by parenthesis (). The following code defines both list and tuple.
# +
# A list uses square brackets, a tuple parentheses; both hold the same
# ordered elements here.
l = ['a', 'b', 'c', 'd']
t = ('a', 'b', 'c', 'd')
print(l)
print(t)
# -
# The primary difference you will see programmatically is that a list is mutable, which means the program can change it. A tuple is immutable, which means the program cannot change it. The following code demonstrates that the program can change a list. This code also illustrates that Python indexes lists starting at element 0. Accessing element one modifies the second element in the collection. One advantage of tuples over lists is that tuples are generally slightly faster to iterate over than lists.
# +
l[1] = 'changed'
#t[1] = 'changed' # This would result in an error
# Lists are mutable, so the assignment above works; tuples are immutable,
# so uncommenting the tuple assignment raises a TypeError.
print(l)
# -
# Like many languages, Python has a for-each statement. This statement allows you to loop over every element in a collection, such as a list or a tuple.
# Iterate over a collection.
# Python's for statement is a for-each loop: s is bound to each element in turn.
for s in l:
    print(s)
# The **enumerate** function is useful for enumerating over a collection and having access to the index of the element that we are currently on.
# Iterate over a collection, and know where your index. (Python is zero-based!)
# Fixed: the original wrote `for i, l in enumerate(l)`, which rebound the loop
# variable over the list `l` itself, leaving `l` pointing at the last element
# after the loop. A distinct name prints the same output without clobbering it.
for i, item in enumerate(l):
    print(f"{i}:{item}")
# A **list** can have multiple objects added, such as strings. Duplicate values are allowed. **Tuples** do not allow the program to add additional objects after definition.
# Manually add items, lists allow duplicates
c = []
c.append('a')
c.append('b')
c.append('c')
c.append('c')
# Both 'c' entries are kept: lists do not de-duplicate.
print(c)
# Ordered collections, such as lists and tuples, allow you to access an element by its index number, as done in the following code. Unordered collections, such as dictionaries and sets, do not allow the program to access them in this way.
print(c[1])
# A **list** can have multiple objects added, such as strings. Duplicate values are allowed. Tuples do not allow the program to add additional objects after definition. The programmer must specify an index for the insert function, an index. These operations are not allowed for tuples because they would result in a change.
# Insert at a specific index
c = ['a', 'b', 'c']
c.insert(0, 'a0')
print(c)
# Remove the first occurrence of a value
c.remove('b')
print(c)
# Remove at index
del c[0]
print(c)
# ## Sets
# A Python **set** holds an unordered collection of objects, but sets do *not* allow duplicates. If a program adds a duplicate item to a set, only one copy of each item remains in the collection. Adding a duplicate item to a set does not result in an error. Any of the following techniques will define a set.
# Three ways to make a set (the first is empty); each assignment replaces
# the previous one, so only the last survives.
s = set()
s = { 'a', 'b', 'c'}
s = set(['a', 'b', 'c'])
print(s)
# A **list** is always enclosed in square braces [], a **tuple** in parenthesis (), and similarly a **set** is enclosed in curly braces. Programs can add items to a **set** as they run. Programs can dynamically add items to a **set** with the **add** function. It is important to note that the **append** function adds items to lists, whereas the **add** function adds items to a **set**.
# Manually add items, sets do not allow duplicates
# Sets add, lists append. I find this annoying.
c = set()
c.add('a')
c.add('b')
c.add('c')
c.add('c')
# The second add('c') is a silent no-op: a set keeps one copy of each value.
print(c)
# ## Maps/Dictionaries/Hash Tables
#
# Many programming languages include the concept of a map, dictionary, or hash table. These are all very related concepts. Python provides a dictionary that is essentially a collection of name-value pairs. Programs define dictionaries using curly braces, as seen here.
# +
d = {'name': "Jeff", 'address':"123 Main"}
print(d)
print(d['name'])
# The `in` operator on a dict tests keys, not values.
if 'name' in d:
    print("Name is defined")
if 'age' in d:
    print("age defined")
else:
    print("age undefined")
# -
# Be careful that you do not attempt to access an undefined key, as this will result in an error. You can check to see if a key is defined, as demonstrated above. You can also access the dictionary and provide a default value, as the following code demonstrates.
d.get('unknown_key', 'default')
# You can also access the individual keys and values of a dictionary.
# +
d = {'name': "Jeff", 'address':"123 Main"}
# All of the keys
# (keys() and values() return live view objects, not lists)
print(f"Key: {d.keys()}")
# All of the values
print(f"Values: {d.values()}")
# -
# Dictionaries and lists can be combined. This syntax is closely related to [JSON](https://en.wikipedia.org/wiki/JSON). Dictionaries and lists together are a good way to build very complex data structures. While Python allows quotes (") and apostrophe (') for strings, JSON only allows double-quotes ("). We will cover JSON in much greater detail later in this module.
#
# The following code shows a hybrid usage of dictionaries and lists.
# +
# Python list & map structures
# A list of "records": each customer is a dict, and the optional "pets"
# field holds a variable-length list.
customers = [
    {"name": "Jeff & <NAME>", "pets": ["Wynton", "Cricket", "Hickory"]},
    {"name": "<NAME>", "pets": ["rover"]},
    {"name": "<NAME>"},
]
print(customers)
for customer in customers:
    # get() supplies a fallback for customers with no "pets" key
    pets = customer.get('pets', 'no pets')
    print(f"{customer['name']}:{pets}")
# -
# The variable **customers** is a list that holds three dictionaries that represent customers. You can think of these dictionaries as records in a table. The fields in these individual records are the keys of the dictionary. Here the keys **name** and **pets** are fields. However, the field **pets** holds a list of pet names. There is no limit to how deep you might choose to nest lists and maps. It is also possible to nest a map inside of a map or a list inside of another list.
# ## More Advanced Lists
#
# Several advanced features are available for lists that this section introduces. One such function is **zip**. Two lists can be combined into a single list by the **zip** command. The following code demonstrates the **zip** command.
# +
a = [1,2,3,4,5]
b = [5,4,3,2,1]
# zip() is lazy: printing it shows a zip object, not the paired contents.
print(zip(a,b))
# -
# To see the results of the **zip** function, we convert the returned zip object into a list. As you can see, the **zip** function returns a list of tuples. Each tuple represents a pair of items that the function zipped together. The order in the two lists was maintained.
# +
a = [1,2,3,4,5]
b = [5,4,3,2,1]
# Materializing the zip object shows the element-wise pairs as tuples.
print(list(zip(a,b)))
# -
# The usual method for using the zip command is inside of a for-loop. The following code shows how a for-loop can assign a variable to each collection that the program is iterating.
# +
a = [1,2,3,4,5]
b = [5,4,3,2,1]
# Tuple-unpack each pair directly in the loop header.
for x,y in zip(a,b):
    print(f'{x} - {y}')
# -
# Usually, both collections will be of the same length when passed to the zip command. It is not an error to have collections of different lengths. As the following code illustrates, the zip command will only process elements up to the length of the smaller collection.
# +
a = [1,2,3,4,5]
b = [5,4,3]
# zip stops at the shorter input: only three pairs are produced.
print(list(zip(a,b)))
# -
# Sometimes you may wish to know the current numeric index when a for-loop is iterating through an ordered collection. Use the **enumerate** command to track the index location for a collection element. Because the **enumerate** command deals with numeric indexes of the collection, it will assign arbitrary indexes to elements from unordered collections.
#
# Consider how you might construct a Python program to change every element greater than 5 to the value of 5. The following program performs this transformation. The enumerate command allows the loop to know which element index it is currently on, thus allowing the program to be able to change the value of the current element of the collection.
# Clamp every element greater than 5 down to 5, in place, using the index
# that enumerate provides.
a = [2, 10, 3, 11, 10, 3, 2, 1]
for i, x in enumerate(a):
    if x>5:
        a[i] = 5
print(a)
# The comprehension command can dynamically build up a list. The comprehension below counts from 0 to 9 and adds each value (multiplied by 10) to a list.
# List comprehension: multiples of 10 from 0 through 90.
lst = [x*10 for x in range(10)]
print(lst)
# A dictionary can also be a comprehension. The general format for this is:
#
# ```
# dict_variable = {key:value for (key,value) in dictonary.items()}
# ```
#
# A common use for this is to build up an index to symbolic column names.
text = ['col-zero','col-one', 'col-two', 'col-three']
# enumerate yields (index, name); swapping them in the target pattern
# builds a name -> index lookup table.
lookup = {key:value for (value,key) in enumerate(text)}
print(lookup)
# This can be used to easily find the index of a column by name.
print(f'The index of "col-two" is {lookup["col-two"]}')
# ## An Introduction to JSON
#
# Data stored in a CSV file must be flat; it must fit into rows and columns. Most people refer to this type of data as structured or tabular. This data is tabular because the number of columns is the same for every row. Individual rows may be missing a value for a column; however, these rows still have the same columns.
#
# This data is convenient for machine learning because most models, such as neural networks, also expect incoming data to be of fixed dimensions. Real-world information is not always so tabular. Consider if the rows represent customers. These people might have multiple phone numbers and addresses. How would you describe such data using a fixed number of columns? It would be useful to have a list of these courses in each row that can be variable length for each row or student.
#
# JavaScript Object Notation (JSON) is a standard file format that stores data in a hierarchical format similar to eXtensible Markup Language (XML). JSON is nothing more than a hierarchy of lists and dictionaries. Programmers refer to this sort of data as semi-structured data or hierarchical data. The following is a sample JSON file.
#
# ```
# {
# "firstName": "John",
# "lastName": "Smith",
# "isAlive": true,
# "age": 27,
# "address": {
# "streetAddress": "21 2nd Street",
# "city": "New York",
# "state": "NY",
# "postalCode": "10021-3100"
# },
# "phoneNumbers": [
# {
# "type": "home",
# "number": "212 555-1234"
# },
# {
# "type": "office",
# "number": "646 555-4567"
# },
# {
# "type": "mobile",
# "number": "123 456-7890"
# }
# ],
# "children": [],
# "spouse": null
# }
# ```
#
# The above file may look somewhat like Python code. You can see curly braces that define dictionaries and square brackets that define lists. JSON does require there to be a single root element. A list or dictionary can fulfill this role. JSON requires double-quotes to enclose strings and names. Single quotes are not allowed in JSON.
#
# JSON files are always legal JavaScript syntax. JSON is also generally valid as Python code, as demonstrated by the following Python program.
# The same JSON document as above, hard-coded as Python literals.
# Note the Python spellings: True (not true) and None (not null).
jsonHardCoded = {
    "firstName": "John",
    "lastName": "Smith",
    "isAlive": True,
    "age": 27,
    "address": {
        "streetAddress": "21 2nd Street",
        "city": "New York",
        "state": "NY",
        "postalCode": "10021-3100"
    },
    "phoneNumbers": [
        {
            "type": "home",
            "number": "212 555-1234"
        },
        {
            "type": "office",
            "number": "646 555-4567"
        },
        {
            "type": "mobile",
            "number": "123 456-7890"
        }
    ],
    "children": [],
    "spouse": None
}
# Generally, it is better to read JSON from files, strings, or the Internet than hard coding, as demonstrated here. However, for internal data structures, sometimes such hard-coding can be useful.
#
# Python contains support for JSON. When a Python program loads a JSON the root list or dictionary is returned, as demonstrated by the following code.
# +
import json

# Parse a JSON string into a Python dict, then print each name component.
json_string = '{"first":"Jeff","last":"Heaton"}'
obj = json.loads(json_string)
for label in ("First", "Last"):
    print(f"{label} name: {obj[label.lower()]}")
# -
# Python programs can also load JSON from a file or URL.
# +
import requests

# Fetch a JSON document over HTTP; Response.json() parses the body into
# Python lists/dicts. NOTE: performs network I/O, so it needs internet access.
r = requests.get("https://raw.githubusercontent.com/jeffheaton/"
                 +"t81_558_deep_learning/master/person.json")
print(r.json())
# -
# Python programs can easily generate JSON strings from Python objects of dictionaries and lists.
# Serialize a plain Python dict back into a JSON string.
python_obj = dict(first="Jeff", last="Heaton")
print(json.dumps(python_obj))
# A data scientist will generally encounter JSON when they access web services to get their data. A data scientist might use the techniques presented in this section to convert the semi-structured JSON data into tabular data for the program to use with a model such as a neural network.
| t81_558_class_01_3_python_collections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="L603ykxcs4hc"
# # Deep Q-Network implementation
#
# This notebook shamelessly demands you to implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
# + id="godLUdqzs4hr"
import sys, os

# Colab-only bootstrap: pin TensorFlow 1.x and download the course helper
# modules (grading.py, submit.py, framebuffer.py, replay_buffer.py) once.
# NOTE(review): the "#  %" / "# !" lines are jupytext-commented notebook
# magics / shell escapes; as plain Python these if-bodies are empty.
if 'google.colab' in sys.modules:
    # %tensorflow_version 1.x
    if not os.path.exists('.setup_complete'):
        # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash
        # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
        # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week4_approx/submit.py
        # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week4_approx/framebuffer.py
        # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week4_approx/replay_buffer.py
        # !touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
#
# The original condition was `type(os.environ.get("DISPLAY")) is not str or
# len(os.environ.get("DISPLAY")) == 0`; both an unset (None) and an empty
# DISPLAY are falsy, so a single truthiness test is equivalent and avoids
# the double environment lookup.
if not os.environ.get("DISPLAY"):
    # !bash ../xvfb start
    os.environ['DISPLAY'] = ':1'
# + [markdown] id="5XY10Pxhs4hs"
# __Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
# + id="FhAm_dIAs4ht"
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="Je82q5eFs4hu"
# ### Let's play some old videogames
# 
#
# This time we're gonna apply approximate q-learning to an Atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before.
#
# + [markdown] id="Jbsg7LV_s4hv"
# ### Processing game image
#
# Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn them.
#
# We can thus save a lot of time by preprocessing game image, including
# * Resizing to a smaller shape, 64 x 64
# * Converting to grayscale
# * Cropping irrelevant image parts (top & bottom)
# + id="UjrKQMn1s4hv"
from gym.core import ObservationWrapper
from gym.spaces import Box
import cv2
class PreprocessAtari(ObservationWrapper):
    """Crop, resize to 64x64x1, grayscale, and rescale Atari frames to [0, 1]."""

    def __init__(self, env):
        """A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
        ObservationWrapper.__init__(self, env)
        self.img_size = (64, 64, 1)  # target observation shape (height, width, channels)
        self.observation_space = Box(0.0, 1.0, self.img_size)

    def _to_gray_scale(self, rgb_image, channel_weights=[0.8, 0.1, 0.1]):
        # Weighted mix of the RGB channels; weights sum to 1 so output stays in range.
        # NOTE(review): mutable (list) default argument -- harmless if never
        # modified, but a tuple would be safer.
        <YOUR CODE>

    def observation(self, img):
        """what happens to each observation"""
        # Here's what you need to do:
        #  * Crop image, remove irrelevant parts.
        #  * Resize image to self.img_size. Use cv2.resize or any other library you want,
        #    e.g. PIL or Keras. Do not use skimage.transform.resize because it is roughly
        #    6x slower than cv2.resize.
        #  * Cast image to grayscale.
        #  * Convert image pixels to (0, 1) range, float32 type.
        <YOUR CODE>
        return <YOUR CODE>
# + id="bQ0n8eDvs4hv"
import gym
# spawn game instance for tests
env = gym.make("BreakoutNoFrameskip-v4") # create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
# test observation
assert obs.ndim == 3, "observation must be [height, width, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs)) > 2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <= 1, "convert image pixels to (0,1) range"
assert np.max(obs) >= 0.5, "It would be easier to see a brighter observation"
assert np.mean(obs) >= 0.1, "It would be easier to see a brighter observation"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs, interpolation='none', cmap='gray')
# + [markdown] id="FJ6lliiQs4hx"
# ### Frame buffer
#
# Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure an object's velocity.
#
# To do so, we introduce a buffer that stores 4 last images. This time everything is pre-implemented for you.
# + id="F2yJpqbbs4hx"
from framebuffer import FrameBuffer
def make_env():
    """Build the Breakout env wrapped with preprocessing and a 4-frame buffer."""
    raw_env = gym.make("BreakoutNoFrameskip-v4")
    return FrameBuffer(PreprocessAtari(raw_env), n_frames=4, dim_order='tensorflow')
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
# + id="XQLY_cyvs4h-"
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0, 2, 1]).reshape([state_dim[0], -1]))
# + [markdown] id="rfIiBo0Ks4iD"
# ### Building a network
#
# We now need to build a neural network that can map images to state q-values. This network will be called on every agent's step so it better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.
#
# You can build any architecture you want, but for reference, here's something that will more or less work:
# + [markdown] id="e5b4bGqvs4iD"
# 
# + id="xTdFlTGks4iD"
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
# + id="uNNVyPQ6s4iE"
from keras.layers import Conv2D, Dense, Flatten
class DQNAgent:
    """Q-network template with epsilon-greedy action selection (body left as exercise)."""

    def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
        """A simple DQN agent"""
        with tf.variable_scope(name, reuse=reuse):
            <YOUR CODE: define your network body here. Please make sure you don't use any layers created elsewhere>

            # prepare a graph for agent step
            self.state_t = tf.placeholder('float32', [None, ] + list(state_shape))
            self.qvalues_t = self.get_symbolic_qvalues(self.state_t)

        # all trainable variables created under this agent's variable scope
        self.weights = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
        self.epsilon = epsilon

    def get_symbolic_qvalues(self, state_t):
        """takes agent's observation, returns qvalues. Both are tf Tensors"""
        <YOUR CODE: apply your network layers here>
        qvalues = <YOUR CODE: symbolic tensor for q-values>

        # sanity checks: expect a [batch, n_actions] numeric tensor
        assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
            "please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
        assert int(qvalues.shape[1]) == n_actions

        return qvalues

    def get_qvalues(self, state_t):
        """Same as symbolic step except it operates on numpy arrays"""
        sess = tf.get_default_session()
        return sess.run(self.qvalues_t, {self.state_t: state_t})

    def sample_actions(self, qvalues):
        """pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
        epsilon = self.epsilon
        batch_size, n_actions = qvalues.shape

        random_actions = np.random.choice(n_actions, size=batch_size)
        best_actions = qvalues.argmax(axis=-1)

        # per-sample coin flip: explore with probability epsilon, exploit otherwise
        should_explore = np.random.choice([0, 1], batch_size, p=[1-epsilon, epsilon])
        return np.where(should_explore, random_actions, best_actions)
# + id="W9jd-Mn4s4iF"
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
# + [markdown] id="MQnRpVSws4iF"
# Now let's try out our agent to see if it raises any errors.
# + id="cyyeVD1ss4iF"
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
    """Play n_games full games and return the mean total reward.

    If greedy, always take argmax over q-values; otherwise use the agent's
    own (epsilon-greedy) sampling. Each game is capped at t_max steps.
    """
    session_rewards = []
    for _ in range(n_games):
        state = env.reset()
        total = 0
        for _ in range(t_max):
            qvalues = agent.get_qvalues([state])
            if greedy:
                action = qvalues.argmax(axis=-1)[0]
            else:
                action = agent.sample_actions(qvalues)[0]
            state, r, done, _ = env.step(action)
            total += r
            if done:
                break
        session_rewards.append(total)
    return np.mean(session_rewards)
# + id="f_I3bxNus4iF"
evaluate(env, agent, n_games=1)
# + [markdown] id="zoLLHSlls4iG"
# ### Experience replay
# For this assignment, we provide you with experience replay buffer. If you implemented experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__.
#
# 
# + [markdown] id="-EVPVcNMs4iH"
# #### The interface is fairly simple:
# * `exp_replay.add(obs, act, rw, next_obs, done)` - saves (s,a,r,s',done) tuple into the buffer
# * `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples.
# * `len(exp_replay)` - returns number of elements stored in replay buffer.
# + id="CzpA5Nsps4iH"
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
# + id="gtRJ-DGMs4iH"
def play_and_record(agent, env, exp_replay, n_steps=1):
    """
    Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
    Whenever game ends, add record with done=True and reset the game.
    It is guaranteed that env has done=False when passed to this function.

    PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"

    :returns: return sum of rewards over time
    """
    # initial state: the frame buffer already holds the current stacked observation
    s = env.framebuffer

    # Play the game for n_steps as per instructions above
    <YOUR CODE>
# + id="Wi7TpTEws4iH"
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, (
"play_and_record should have added exactly 10000 steps, " +
"but instead added %i") % len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, (
"Please make sure you restart the game whenever it is 'done' " +
"and record the is_done correctly into the buffer." +
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"
) % (np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s" % str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape)
assert [int(i) in (0, 1) for i in is_dones], "is_done should be strictly True or False"
assert [0 <= a <= n_actions for a in act_batch], "actions should be within [0, n_actions]"
print("Well done!")
# + [markdown] id="hRws_LPvs4iH"
# ### Target networks
#
# We also employ the so called "target network" - a copy of neural network weights to be used for reference Q-values:
#
# The network itself is an exact copy of agent network, but it's parameters are not trained. Instead, they are moved here from agent's actual network every so often.
#
# $$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
#
# 
#
#
# + id="YHWzsjexs4iI"
target_network = DQNAgent("target_network", state_dim, n_actions)
# + id="noz_dqHrs4iJ"
def load_weigths_into_target_network(agent, target_network):
    """Build tf assign ops that copy agent weights into the target network.

    Returns the list of assign ops; the caller runs them in a session
    (build these ops once -- see the training loop below).
    """
    return [
        tf.assign(w_target, w_agent, validate_shape=True)
        for w_agent, w_target in zip(agent.weights, target_network.weights)
    ]
# + id="3Uzde0Rns4iJ"
# create the tf copy graph only once.
copy_step = load_weigths_into_target_network(agent, target_network)
sess.run(copy_step)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)])
print("It works!")
# + [markdown] id="iK5qbXA7s4iJ"
# ### Learning with... Q-learning
# Here we write a function similar to `agent.update` from tabular q-learning.
# + id="p2MzW444s4iM"
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)        # s
actions_ph = tf.placeholder(tf.int32, shape=[None])                   # a
rewards_ph = tf.placeholder(tf.float32, shape=[None])                 # r
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)   # s'
is_done_ph = tf.placeholder(tf.float32, shape=[None])                 # 1.0 iff episode ended

# mask that zeroes out the bootstrap term for terminal transitions
is_not_done = 1 - is_done_ph
gamma = 0.99  # discount factor for future rewards
# + [markdown] id="5Yz6wZlws4iN"
# Take q-values for actions agent just took
# + id="7HVwdP3Ks4iN"
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
# + [markdown] id="IEsWnv3ys4iN"
# Compute Q-learning TD error:
#
# $$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$
#
# With Q-reference defined as
#
# $$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot max_{a'} Q_{target}(s', a') $$
#
# Where
# * $Q_{target}(s',a')$ denotes q-value of next state and next action predicted by __target_network__
# * $s, a, r, s'$ are current state, action, reward and next state respectively
# * $\gamma$ is a discount factor defined two cells above.
# + id="SmunyRTMs4iS"
next_qvalues_target = <YOUR CODE: compute q-values for NEXT states with target network>
next_state_values_target = <YOUR CODE: compute state values by taking max over next_qvalues_target for all actions>
reference_qvalues = <YOUR CODE: compute Q_reference(s,a) as per formula above>

# Define loss function for sgd.
# Mean squared TD error; gradients must flow only into the agent's weights
# (reference_qvalues acts as a fixed target, hence var_list=agent.weights).
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)

train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
# + id="Gx0D4eats4iW"
sess.run(tf.global_variables_initializer())
# + id="PkZK6vvqs4iW"
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
# + [markdown] id="V-_XI4QYs4iW"
# ### Main loop
#
# It's time to put everything together and see if it learns anything.
# + id="6ZOC8VmLs4iW"
from tqdm import trange
import pandas as pd
from IPython.display import clear_output
import matplotlib.pyplot as plt
# %matplotlib inline
def moving_average(x, span=100, **kw):
    """Exponentially-weighted moving average of x (pandas ewm with the given span)."""
    series = pd.Series(np.asarray(x))
    return series.ewm(span=span, **kw).mean().values
mean_rw_history = []
td_loss_history = []
# + id="aNw3R2kqs4iX"
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
    """Sample a transition batch from replay and map it onto the training placeholders."""
    placeholders = (obs_ph, actions_ph, rewards_ph, next_obs_ph, is_done_ph)
    # ReplayBuffer.sample returns (obs, actions, rewards, next_obs, is_done)
    # in exactly this order, so a positional zip builds the feed dict.
    return dict(zip(placeholders, exp_replay.sample(batch_size)))
# + id="jo3ZpMr2s4iX"
for i in trange(10**5):
    # play: collect 10 fresh environment steps into the replay buffer
    play_and_record(agent, env, exp_replay, 10)

    # train: one SGD step on a random minibatch of stored transitions
    _, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
    td_loss_history.append(loss_t)

    # adjust agent parameters every 500 iterations
    if i % 500 == 0:
        # You could think that loading weights onto a target network is simply
        # load_weigths_into_target_network(agent, target_network)
        # but actually calling this function repeatedly creates a TF copy operator
        # again and again, which bloats memory consumption with each training step.
        # Instead, you should create 'copy_step' once.
        sess.run(copy_step)
        # decay exploration, floored at 1%
        agent.epsilon = max(agent.epsilon * 0.99, 0.01)
        # evaluate on a fresh env so the training env's episode is not disturbed
        mean_rw_history.append(evaluate(make_env(), agent, n_games=3))

    # progress report every 100 iterations
    if i % 100 == 0:
        clear_output(True)
        print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))

        plt.subplot(1, 2, 1)
        plt.title("mean reward per game")
        plt.plot(mean_rw_history)
        plt.grid()

        assert not np.isnan(loss_t)
        # NOTE(review): plt.figure is created *after* the first subplot, so the
        # two subplots land on different figures -- confirm this layout is intended.
        plt.figure(figsize=[12, 4])
        plt.subplot(1, 2, 2)
        plt.title("TD loss history (moving average)")
        plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
        plt.grid()
        plt.show()
# + id="-bwOlbvbs4iX"
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
# + [markdown] id="MvgvmaIhs4iX"
# __How to interpret plots:__
#
#
# This ain't supervised learning, so don't expect anything to improve monotonously.
# * __TD loss__ is the MSE between agent's current Q-values and target Q-values. It may slowly increase or decrease, it's ok. The "not ok" behavior includes going NaN or staying at exactly zero before agent has perfect performance.
# * __mean reward__ is the expected sum of r(s,a) agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...).
# * In basic q-learning implementation it takes 5-10k steps to "warm up" agent before it starts to get better.
# * __buffer size__ - this one is simple. It should go up and cap at max size.
# * __epsilon__ - agent's willingness to explore. If you see that agent's already at 0.01 epsilon before its average reward is above 0 - __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.
# * Also please ignore first 100-200 steps of each plot - they're just oscillations because of the way moving average works.
#
# At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.
#
# __Training will take time.__ A lot of it actually. An optimistic estimate is to say it's gonna start winning (average reward > 10) after 10k steps.
#
# But hey, look on the bright side of things:
#
# 
# + [markdown] id="vYfdG3A-s4iZ"
# ### Video
# + id="2eJh2hKKs4iZ"
# Don't forget to reset epsilon back to previous value if you want to go on training
agent.epsilon = 0
# + id="6WbSVzVWs4jC"
# Record sessions
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
# + id="n7Js0LEvs4jD"
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from IPython.display import HTML
video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(video_names[-1])) # You can also try other indices
# + [markdown] id="A-5390zrs4jD"
# ## More
#
# If you want to play with DQN a bit more, here's a list of things you can try with it:
#
# ### Easy:
# * Implementing __double q-learning__ shouldn't be a problem if you've already have target networks in place.
# * You will probably need `tf.argmax` to select best actions
# * Here's an original [article](https://arxiv.org/abs/1509.06461)
#
# * __Dueling__ architecture is also quite straightforward if you have standard DQN.
# * You will need to change network architecture, namely the q-values layer
# * It must now contain two heads: V(s) and A(s,a), both dense layers
# * You should then add them up via elemwise sum layer.
# * Here's an [article](https://arxiv.org/pdf/1511.06581.pdf)
# + [markdown] id="qqzZjuMss4jE"
# ### Hard: Prioritized experience replay
#
# In this section, you're invited to implement prioritized experience replay
#
# * You will probably need to provide a custom data structure
# * Once pool.update is called, collect the pool.experience_replay.observations, actions, rewards and is_alive and store them in your data structure
# * You can now sample such transitions in proportion to the error (see [article](https://arxiv.org/abs/1511.05952)) for training.
#
# It's probably more convenient to explicitly declare inputs for "sample observations", "sample actions" and so on to plug them into q-learning.
#
# Prioritized (and even normal) experience replay should greatly reduce amount of game sessions you need to play in order to achieve good performance.
#
# While it's effect on runtime is limited for atari, more complicated envs (further in the course) will certainly benefit for it.
#
# Prioritized experience replay only supports off-policy algorithms, so pls enforce `n_steps=1` in your q-learning reference computation (default is 10).
# + id="kP0Fr4wXs4jE"
from submit import submit_breakout
env = make_env()
submit_breakout(agent, env, evaluate, '<EMAIL>', 'YourAssignmentToken')
| Practical_Reinforcement_learning/week_4/notebooks/dqn_atari.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import required packages
from __future__ import print_function, division
#import cv2
import dlib
import time
from skimage import io
import os
import shutil
import torch
import pandas as pd
import torchvision
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.stats.api as sms
from datetime import datetime
from torch.utils.data import Dataset, DataLoader
from os import listdir
from os.path import isfile, join
from collections import namedtuple
from skimage.transform import rescale
from skimage.transform import resize
import sys
import glob
import PIL
import os
import shutil
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn.functional as F
import math
import PIL
import imgaug as ia
from imgaug import augmenters as iaa
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from PIL import Image
from pathlib import Path
from collections import Counter
import imgaug as ia
from imgaug import augmenters as iaa
import cv2
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
print(os.getcwd())
#np.random.seed(198467)
torch.cuda.empty_cache()
# +
# OpenFace AU intensities plus expression/gender/race/age predictions for ExpW.
frame = pd.read_csv('ExpW_OpenFace_result_with_expression_gender_race_age.csv')

# The 13 AU intensity columns used in this analysis.
AU_intensity_OpenFace_common2 = ['AU01_r', 'AU02_r', 'AU04_r', 'AU05_r', 'AU06_r', 'AU09_r', 'AU10_r',
       'AU12_r', 'AU15_r', 'AU17_r', 'AU20_r', 'AU25_r', 'AU26_r']

# Per-AU intensity cutoffs; intensity above the cutoff counts as "active" (1).
# NOTE(review): thresholds appear tuned elsewhere -- confirm their provenance.
optimal_threshold = np.array([4.4, 4.8, 2.4, 3.7, 1.8, 2.8, 0.2, 1.3, 1.3, 5. , 5. , 0.7, 5. ])
for i in range(len(AU_intensity_OpenFace_common2)):
    frame[AU_intensity_OpenFace_common2[i] + "_c"] = frame[AU_intensity_OpenFace_common2[i]].apply(lambda x: 0 if x <= optimal_threshold[i] else 1)

frame = frame.dropna()
frame = frame[['ImageName', 'Expression', 'gender_preds', 'AU06_r_c', 'AU12_r_c']]
# Binary label: 1 = expression code 3, 0 = everything else.
frame["Expression"] = frame["Expression"].apply(lambda x: 1 if x == 3 else 0)
frame = frame.dropna()
# Two-character activation code, e.g. '10' = AU06 active, AU12 inactive.
frame['AU'] = frame.AU06_r_c.astype(int).astype(str) + frame.AU12_r_c.astype(int).astype(str)
def convert_AU_to_4types(s):
    """Map the 2-char AU06/AU12 activation code to a categorical value 0-3.

    Unknown codes yield None (matching the original fall-through behavior).
    """
    return {'00': 0, '10': 1, '01': 2, '11': 3}.get(s)
frame.AU = frame.AU.apply(convert_AU_to_4types)
frame.head()
# -
frame.shape
frame.Expression.value_counts()
frame.gender_preds.value_counts()
frame.AU.value_counts()
frame['ImageName'] = frame['ImageName'].apply(lambda x: '/data/ExpW_cropped_by_their_coordinates/' + x) # change ImageName to full path
frame_copy = frame.copy()
result_rows_list = []
for k in range(1,6):
# Split into train/validation/test sets
frame = frame_copy.sample(n = 20000, random_state = k).reset_index(drop=True) # shuffle data frame
n_images = len(frame)
n_train = int(0.8 * n_images)
n_val = int((n_images - n_train) / 2)
n_test = n_images - n_train - n_val
train_frame = frame[0 : n_train].reset_index(drop=True)
val_frame = frame[n_train : n_train + n_val].reset_index(drop=True)
test_frame = frame[n_train + n_val : ].reset_index(drop=True)
print("{} train faces, {} validation faces, {} test faces".format(len(train_frame), len(val_frame), len(test_frame)))
# Data loaders and transforms for training
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second
# image.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image.
class ImgAugTransform:
def __init__(self):
self.aug = iaa.Sequential(
[
#
# Apply the following augmenters to most images.
#
iaa.Fliplr(0.5), # horizontally flip 50% of all images
#iaa.Flipud(0.2), # vertically flip 20% of all images
# crop some of the images by 0-10% of their height/width
sometimes(iaa.Crop(percent=(0, 0.05))),
# Apply affine transformations to some of the images
# - scale to 80-120% of image height/width (each axis independently)
# - translate by -20 to +20 relative to height/width (per axis)
# - rotate by -45 to +45 degrees
# - shear by -16 to +16 degrees
# - order: use nearest neighbour or bilinear interpolation (fast)
# - mode: use any available mode to fill newly created pixels
# see API or scikit-image for which modes are available
# - cval: if the mode is constant, then use a random brightness
# for the newly created pixels (e.g. sometimes black,
# sometimes white)
iaa.Affine(
scale={"x": (1, 1.1), "y": (1, 1.1)}, # scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -10 to +10 percent (per axis)
rotate=(-15, 15), # rotate by -15 to +15 degrees
shear=(-8, 8), # shear by -8 to +8 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
#cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=['edge'] # use any of scikit-image's warping modes (see 2nd image from the top for examples)
),
#
# Execute 0 to 5 of the following (less important) augmenters per
# image. Don't execute all of them, as that would often be way too
# strong.
#
iaa.SomeOf((0, 5),
[
# Convert some images into their superpixel representation,
# sample between 20 and 200 superpixels per image, but do
# not replace all superpixels with their average, only
# some of them (p_replace).
sometimes(
iaa.Superpixels(
p_replace=(0, 0.1),
n_segments=(50, 200)
)
),
# Blur each image with varying strength using
# gaussian blur (sigma between 0 and 3.0),
# average/uniform blur (kernel size between 2x2 and 7x7)
# median blur (kernel size between 3x3 and 11x11).
iaa.OneOf([
iaa.GaussianBlur((0, 3.0)),
iaa.AverageBlur(k=(2, 7)),
iaa.MedianBlur(k=(3, 11)),
]),
# Sharpen each image, overlay the result with the original
# image using an alpha between 0 (no sharpening) and 1
# (full sharpening effect).
iaa.Sharpen(alpha=(0, 0.3), lightness=(0.75, 1.5)),
# Same as sharpen, but for an embossing effect.
iaa.Emboss(alpha=(0, 0.3), strength=(0, 2)),
# Search in some images either for all edges or for
# directed edges. These edges are then marked in a black
# and white image and overlayed with the original image
# using an alpha of 0 to 0.7.
sometimes(iaa.OneOf([
iaa.EdgeDetect(alpha=(0, 0.3)),
iaa.DirectedEdgeDetect(
alpha=(0, 0.3), direction=(0.0, 1.0)
),
])),
# Add gaussian noise to some images.
# In 50% of these cases, the noise is randomly sampled per
# channel and pixel.
# In the other 50% of all cases it is sampled once per
# pixel (i.e. brightness change).
iaa.AdditiveGaussianNoise(
loc=0, scale=(0.0, 0.05*255), per_channel=0.5
),
# Either drop randomly 1 to 10% of all pixels (i.e. set
# them to black) or drop them on an image with 2-5% percent
# of the original size, leading to large dropped
# rectangles.
iaa.OneOf([
iaa.Dropout((0.01, 0.02), per_channel=0.5),
#iaa.CoarseDropout(
# (0.03, 0.15), size_percent=(0.02, 0.05),
# per_channel=0.2
#),
]),
# Invert each image's chanell with 5% probability.
# This sets each pixel value v to 255-v.
#iaa.Invert(0.05, per_channel=True), # invert color channels
# Add a value of -10 to 10 to each pixel.
iaa.Add((-15, 15), per_channel=0.5),
# Change brightness of images (50-150% of original value).
iaa.Multiply((0.75, 1.25), per_channel=0.5),
# Improve or worsen the contrast of images.
iaa.ContrastNormalization((0.75, 1.75), per_channel=0.5),
# Convert each image to grayscale and then overlay the
# result with the original with random alpha. I.e. remove
# colors with varying strengths.
iaa.Grayscale(alpha=(0.0, 1.0)),
# In some images move pixels locally around (with random
# strengths).
#sometimes(
# iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=0.25)
#),
# In some images distort local areas with varying strength.
sometimes(iaa.PiecewiseAffine(scale=(0.005, 0.01)))
],
# do all of the above augmentations in random order
random_order=True
)
],
# do all of the above augmentations in random order
random_order=True
)
def __call__(self, img):
img = np.array(img)
return self.aug.augment_image(img)
class ImageDataset(Dataset):
    """Map-style dataset yielding (image, expression_label, AU_vector).

    Each row of `data_frame` names an image file plus its expression label
    and AU (action-unit) annotation; images are loaded with dlib, resized
    to 224x224 for the ResNet backbone, and optionally transformed.
    """

    def __init__(self, data_frame, transform=None):
        self.data_frame = data_frame
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        # Map the dataset index to one row of the annotation frame.
        file_name = self.data_frame.loc[idx, 'ImageName']
        expression_label = self.data_frame.loc[idx, 'Expression']
        au_label = self.data_frame.loc[idx, 'AU']
        # Load as an RGB ndarray (H*W*C) and resize for the ResNet input.
        picture = dlib.load_rgb_image(file_name)
        picture = cv2.resize(picture, (224, 224))
        if self.transform:
            picture = self.transform(picture)
        # Labels are converted to float32 torch tensors; this fixes the
        # ordering of the returned tuple.
        expression_tensor = torch.from_numpy(np.asarray(expression_label, dtype=np.float32))
        au_tensor = torch.from_numpy(np.asarray(au_label, dtype=np.float32))
        return (picture, expression_tensor, au_tensor)
# Training-time preprocessing: imgaug augmentation on the ndarray, back to
# PIL, then tensor conversion and ImageNet normalization for ResNet.
transform_train_data = transforms.Compose([
    ImgAugTransform(),
    lambda x: PIL.Image.fromarray(x),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
transformed_train_dataset = ImageDataset(data_frame=train_frame,
                                         transform=transform_train_data
                                         )
train_dataloader = DataLoader(transformed_train_dataset, batch_size=32,
                              shuffle=True, num_workers=8)
# Validation/test preprocessing: no augmentation, only ImageNet normalization.
transform_val_data = transforms.Compose(([transforms.ToPILImage(),
                                          transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                          ]))
transformed_val_dataset = ImageDataset(data_frame=val_frame,
                                       transform=transform_val_data
                                       )
val_dataloader = DataLoader(transformed_val_dataset, batch_size=32,
                            shuffle=True, num_workers=8)
transform_test_data = transforms.Compose(([transforms.ToPILImage(),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                           ]))
transformed_test_dataset = ImageDataset(data_frame=test_frame,
                                        transform=transform_test_data
                                        )
# Test loader is NOT shuffled so predictions line up with test_frame rows.
test_dataloader = DataLoader(transformed_test_dataset, batch_size=32,
                             shuffle=False, num_workers=8)
# Training setup: loaders/sizes keyed by split, plus the compute device.
torch.cuda.is_available()
dataloaders = {'train': train_dataloader, 'test': test_dataloader}
dataset_sizes = {'train': len(transformed_train_dataset), 'test': len(transformed_test_dataset)}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#Following cell:
#
#* model = pretrained resnet imagenet model
#* criterion - loss function (cross entropy loss)
#* optimizer - optimization algorithm (Adam)
#* epochs - number of training epochs
from pytorch_metric_learning import miners, losses
class Identity(nn.Module):
    """No-op module that forwards its input unchanged.

    Used to replace `resnet.fc` so the backbone emits raw pooled features.
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # Pass the tensor straight through.
        return x
class ResNet50_base(nn.Module):
    """ImageNet-pretrained ResNet50 with its final fc layer replaced by a
    no-op, so the forward pass yields the pooled backbone features."""

    def __init__(self, hidden_size=2048, dropout=0.5):
        # hidden_size/dropout are kept for signature compatibility; the
        # current configuration does not use them.
        super().__init__()
        backbone = torchvision.models.resnet50(pretrained=True)
        backbone.fc = Identity()
        self.resnet = backbone

    def require_all_grads(self):
        """Mark every parameter of the backbone as trainable."""
        for parameter in self.parameters():
            parameter.requires_grad = True

    def forward(self, x):
        """Return the backbone feature vector for a batch of images."""
        return self.resnet(x)
class Similarity_AU_model():
    """Facial-expression classifier trained with an auxiliary similarity loss.

    A ResNet50 backbone (final fc replaced by identity) produces 2048-d
    features; a linear head maps them to 2 expression logits.  The training
    objective is cross-entropy on the expression label plus `alpha` times a
    hard-mined triplet loss computed against the AU (action-unit) labels,
    which pulls feature vectors of AU-similar faces together.

    NOTE(review): relies on module-level globals `train_dataloader`,
    `val_dataloader`, `test_dataloader` and `k` (checkpoint suffix) being
    defined before instantiation / training — confirm in the enclosing loop.
    """

    def __init__(self):
        self.training_ratio = 3
        self.alpha = 100  # weight of the triplet-loss term
        self.epoch = 0
        self.best_dev_mAP = 0.
        self.train_loader = train_dataloader
        self.dev_loader = val_dataloader
        self.test_loader = test_dataloader
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # (The original also built an unused pretrained ResNet50 here, which
        # downloaded/allocated a second backbone for nothing; removed.)
        self.base_network = ResNet50_base().to(self.device)
        self.fc_network = nn.Linear(2048, 2).to(self.device)
        self.criterion = nn.CrossEntropyLoss()
        # Miner/loss objects carry no learned state, so build them once
        # instead of re-instantiating them for every mini-batch.
        self.miner = miners.MultiSimilarityMiner(epsilon=0.1)
        self.triplet_loss_func = losses.TripletMarginLoss(margin=1)
        self.base_optimizer = torch.optim.Adam(
            params=filter(lambda p: p.requires_grad, self.base_network.parameters()),
            lr=1e-4)
        self.fc_optimizer = torch.optim.Adam(
            params=filter(lambda p: p.requires_grad, self.fc_network.parameters()),
            lr=1e-4)

    def _criterion(self, output, expression):
        """Cross-entropy between squeezed logits and integer expression labels."""
        return self.criterion(torch.squeeze(output), expression.long())

    def _triplet_loss(self, features, AU):
        """Mine hard pairs on the AU labels and score them with the triplet loss."""
        hard_pairs = self.miner(features, AU)
        return self.triplet_loss_func(features, AU, hard_pairs)

    def state_dict(self):
        """Checkpoint dict covering both networks, both optimizers and epoch."""
        state_dict = {
            'base_network': self.base_network.state_dict(),
            'fc_network': self.fc_network.state_dict(),
            'base_optimizer': self.base_optimizer.state_dict(),
            'fc_optimizer': self.fc_optimizer.state_dict(),
            'epoch': self.epoch
        }
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore network weights (optimizer state is intentionally skipped)."""
        self.base_network.load_state_dict(state_dict['base_network'])
        self.fc_network.load_state_dict(state_dict['fc_network'])

    def inference(self, output):
        """Map logits to probabilities via sigmoid and move them to numpy."""
        predict_prob = torch.sigmoid(output)
        return predict_prob.cpu().numpy()

    def _train(self, loader):
        """Train the model for one epoch over `loader`."""
        self.base_network.train()
        self.fc_network.train()
        train_class_loss = 0
        total = 0
        class_correct = 0
        for i, (images, expression, AU) in enumerate(loader):
            images, expression, AU = images.to(self.device), expression.to(self.device), AU.to(self.device)
            self.base_optimizer.zero_grad()
            self.fc_optimizer.zero_grad()
            features = self.base_network(images)
            class_outputs = torch.squeeze(self.fc_network(features))
            class_loss = self._criterion(class_outputs, expression)
            triplet_loss = self._triplet_loss(features, AU)
            total += expression.size(0)
            # Predicted class = argmax over the two logits.
            _, class_predicted = class_outputs.max(1)
            class_correct += class_predicted.eq(expression.long()).sum().item()
            # Combined objective: classification + weighted similarity term.
            loss = class_loss + self.alpha * triplet_loss
            loss.backward()
            self.fc_optimizer.step()
            self.base_optimizer.step()
            train_class_loss += loss.item()
            print('Training epoch {}: [{}|{}], class loss:{}, class accuracy: {}'
                  .format(self.epoch, i+1, len(loader),
                          loss.item(), 100.*class_correct/total))
        self.epoch += 1

    def _test(self, loader):
        """Evaluate without gradients.

        Returns (total loss, concatenated logits, accuracy %, concatenated
        feature vectors).
        """
        self.base_network.eval()
        self.fc_network.eval()
        test_class_loss = 0
        total = 0
        class_correct = 0
        feature_list = []
        class_output_list = []
        with torch.no_grad():
            for i, (images, expression, AU) in enumerate(loader):
                images, expression, AU = images.to(self.device), expression.to(self.device), AU.to(self.device)
                features = self.base_network(images)
                class_outputs = torch.squeeze(self.fc_network(features))
                class_loss = self._criterion(class_outputs, expression)
                triplet_loss = self._triplet_loss(features, AU)
                loss = class_loss + self.alpha * triplet_loss
                test_class_loss += loss.item()
                total += expression.size(0)
                _, class_predicted = class_outputs.max(1)
                class_correct += class_predicted.eq(expression.long()).sum().item()
                class_output_list.append(class_outputs)
                feature_list.append(features)
        return test_class_loss, torch.cat(class_output_list), 100.*class_correct/total, torch.cat(feature_list)

    def public_test(self, images):
        """Forward one prepared batch and return softmax probabilities.

        dim=0 because a single squeezed sample yields a 1-D logit vector.
        """
        self.base_network.eval()
        self.fc_network.eval()
        features = self.base_network(images)
        class_outputs = torch.squeeze(self.fc_network(features))
        return F.softmax(class_outputs, dim=0)

    def train(self):
        """Full training schedule: three stages with decreasing learning rates.

        After every epoch the model is evaluated on the dev loader and the
        current weights are checkpointed to 'hardtriplets_margin1_<k>.pth'.
        """
        start_time = datetime.now()
        learning_rates = [1e-4, 1e-5, 1e-6]
        epochs = [6, 4, 4]
        for learning_rate, epoch in zip(learning_rates, epochs):
            print()
            print('learning rate:', learning_rate)
            print('epoch number:', epoch)
            # Fresh optimizers per stage; only the fc head gets weight decay.
            self.base_optimizer = torch.optim.Adam(
                params=filter(lambda p: p.requires_grad, self.base_network.parameters()),
                lr=learning_rate)
            self.fc_optimizer = torch.optim.Adam(
                params=filter(lambda p: p.requires_grad, self.fc_network.parameters()),
                lr=learning_rate, weight_decay=1e-5)
            for i in range(epoch):
                print()
                self._train(self.train_loader)
                dev_class_loss, dev_class_output, dev_class_accuracy, _ = self._test(self.dev_loader)
                print('Testing epoch, class loss:{}, class accuracy: {}'
                      .format(dev_class_loss/len(self.dev_loader), dev_class_accuracy))
                torch.save(self.state_dict(), os.path.join("./", 'hardtriplets_margin1_' + str(k) + '.pth'))
        duration = datetime.now() - start_time
        print('Finish training epoch {}, dev class loss: {}, time used: {}'
              .format(self.epoch, dev_class_loss/len(self.dev_loader), duration))

    def test(self):
        """Reload the latest checkpoint and evaluate on the test loader.

        Returns (logits, features) as numpy arrays.
        """
        state_dict = torch.load(os.path.join("./", 'hardtriplets_margin1_' + str(k) + '.pth'))
        self.load_state_dict(state_dict)
        # BUG FIX: the feature tensor returned by _test() was discarded, so
        # the `test_feature` used in the return below raised NameError;
        # capture it here instead.
        test_class_loss, test_class_output, test_class_accuracy, test_feature = self._test(self.test_loader)
        test_predict_prob = self.inference(test_class_output)
        print(test_class_accuracy)
        return test_class_output.cpu().numpy(), test_feature.cpu().numpy()
# Train the combined model, then score every test image individually.
model = Similarity_AU_model()
model.train()
# Evaluate on Test Set
# Same preprocessing as the val/test loaders, plus an explicit resize.
trans = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
face_names = []
scores = []
preds = []
true_labels = []
true_gender_labels = []
for index, row in test_frame.iterrows():
    if index % 200 == 0:
        print(index)  # progress indicator
    image_name = row['ImageName']
    image = dlib.load_rgb_image(image_name)
    image = trans(image)
    image = image.view(1, 3, 224, 224)  # add a batch dimension
    image = image.to(device)
    outputs = model.public_test(image)
    outputs = outputs.cpu().detach().numpy()
    outputs = np.squeeze(outputs)
    score = outputs * 1  # copy of the 2-class softmax probabilities
    pred = np.argmax(score)  # predicted class index
    face_names.append(image_name)
    scores.append(score)
    preds.append(pred)
    true_labels.append(row['Expression'])
    true_gender_labels.append(row['gender_preds'])
# Collect per-image results into one frame for the metrics below.
test_result = pd.DataFrame(list(zip(face_names, scores, preds, true_labels, true_gender_labels)),
                           columns = ['ImageName', 'ExpressionScore', 'Prediction', 'Expression', 'Gender'])
test_result.head(10)
# Accuracy
test_result['CorrectOrNot'] = (test_result.Prediction == test_result.Expression)
# Per-run metrics; `k` is the experiment's random-state id from the
# enclosing loop.
dict_row = {}
dict_row['model_random_state'] = k
dict_row['test_accuracy'] = test_result.CorrectOrNot.mean()
# Ground-truth and predicted positive rates, split by gender.
dict_row['test_male_true_proportion'] = (test_result[test_result.Gender == "Male"].Expression==1).mean()
dict_row['test_female_true_proportion'] = (test_result[test_result.Gender == "Female"].Expression==1).mean()
dict_row['test_male_predicted_proportion'] = (test_result[test_result.Gender == "Male"].Prediction==1).mean()
dict_row['test_female_predicted_proportion'] = (test_result[test_result.Gender == "Female"].Prediction==1).mean()
# ExpressionScore is the softmax 2-vector; index 1 is the positive class.
dict_row['test_male_average_score'] = test_result[test_result.Gender == "Male"].ExpressionScore.apply(lambda x: x[1]).mean()
dict_row['test_female_average_score'] = test_result[test_result.Gender == "Female"].ExpressionScore.apply(lambda x: x[1]).mean()
# Apply on Chicago Face database (out-of-domain evaluation set).
chicago_df = pd.read_csv('ChicagoFace_selected_evaluation_set2.csv')
chicago_df.head()
def expression_string_to_num(s):
    """Map an expression label string to its numeric code.

    Happy -> 3, Angry -> 0, Fear -> 2; everything else -> 6 (neutral/other).
    """
    codes = {"Happy": 3, "Angry": 0, "Fear": 2}
    return codes.get(s, 6)
# Derived labels: numeric expression code and a binary happy indicator (code 3).
chicago_df["Expression_num"] = chicago_df.Expression.apply(expression_string_to_num)
chicago_df["happy"] = chicago_df.Expression_num.apply(lambda x: 1 if x == 3 else 0)
#model = Similarity_AU_model()
#model.load_state_dict(torch.load('./model_relabeled/tripletloss/happiness_model_forcing_similarity_by_Adjusted_AU_triplet_100.pth'))
# Same preprocessing as for the in-domain test set.
trans = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
face_names = []
scores = []
preds = []
true_labels = []
for index, row in chicago_df.iterrows():
    #if index >= 100: break
    if index % 100 == 0:
        print(index)  # progress indicator
    image_name = row['ImageName']
    image = dlib.load_rgb_image(image_name)
    image = trans(image)
    image = image.view(1, 3, 224, 224)  # add a batch dimension
    image = image.to(device)
    outputs = model.public_test(image)
    outputs = outputs.cpu().detach().numpy()
    outputs = np.squeeze(outputs)
    score = outputs * 1  # copy of the softmax probabilities
    pred = np.argmax(score)
    face_names.append(image_name)
    scores.append(score)
    preds.append(pred)
    true_labels.append(row['happy'])
chicago_result = pd.DataFrame([face_names, true_labels, preds, scores]).T
chicago_result.columns = ['ImageName', 'Expression_num', 'expression_preds', 'expression_scores']
# happiness_score = softmax probability of the positive (happy) class.
chicago_result["happiness_score"] = chicago_result.expression_scores.apply(lambda x: x[1])
# Rank-based relabeling: choose a threshold so exactly as many faces are
# labeled happy as in the ground truth.
num_happy = (chicago_df.happy == 1).sum()
threshold = chicago_result.sort_values(by = ['happiness_score'], ascending = False).reset_index(drop = True).iloc[num_happy]['happiness_score']
chicago_result['expression_preds_relabeled'] = chicago_result.happiness_score.apply(lambda x : 1 if x > threshold else 0)
chicago_result['CorrectOrNot'] = (chicago_result.expression_preds_relabeled == chicago_result.Expression_num)
dict_row['selected_2_chicago_accuracy'] = chicago_result.CorrectOrNot.mean()
chicago_df_merged = pd.merge(chicago_df, chicago_result, on = ['ImageName'], how = 'left')
chicago_df_merged.columns = ['ImageName', 'Gender', 'Race', 'Expression', 'AU06_r_c', 'AU12_r_c',
                             'AU', 'Expression_num_6','happy', 'Expression_num', 'expression_preds', 'expression_scores',
                             'happiness_score','expression_preds_relabeled', 'CorrectOrNot']
chicago_df_merged.head()
pd.crosstab(chicago_result.Expression_num, chicago_result.expression_preds_relabeled)
chicago_male_frame = chicago_df_merged.loc[chicago_df_merged['Gender'] == 'M']
chicago_female_frame = chicago_df_merged.loc[chicago_df_merged['Gender'] == 'F']
# Accuracy between males and females
dict_row['selected_2_chicago_accuracy_male'] = (chicago_male_frame.expression_preds_relabeled == chicago_male_frame.Expression_num).mean()
dict_row['selected_2_chicago_accuracy_female'] = (chicago_female_frame.expression_preds_relabeled == chicago_female_frame.Expression_num).mean()
# True proportion
# NOTE(review): this uses the male subframe only; if the overall base rate
# was intended it should use chicago_df_merged — confirm.
dict_row['selected_2_chicago_true_proportion'] = chicago_male_frame.happy.mean()
# Predicted-positive rates after rank-based relabeling
dict_row['selected_2_chicago_male_predicted_proportion'] = chicago_male_frame.expression_preds_relabeled.mean()
dict_row['selected_2_chicago_female_predicted_proportion'] = chicago_female_frame.expression_preds_relabeled.mean()
dict_row['selected_2_chicago_bias'] = chicago_female_frame.expression_preds_relabeled.mean() - chicago_male_frame.expression_preds_relabeled.mean()
# Predicted-positive rates from the raw argmax predictions
dict_row['selected_2_chicago_male_predicted_proportion_raw'] = chicago_male_frame.expression_preds.mean()
dict_row['selected_2_chicago_female_predicted_proportion_raw'] = chicago_female_frame.expression_preds.mean()
dict_row['selected_2_chicago_bias_raw'] = chicago_female_frame.expression_preds.mean() - chicago_male_frame.expression_preds.mean()
# Average Happiness Score
dict_row['selected_2_chicago_male_score'] = chicago_male_frame.happiness_score.mean()
dict_row['selected_2_chicago_female_score'] = chicago_female_frame.happiness_score.mean()
# Average Happiness Score among neutral faces (Expression_num_6 == 6)
dict_row['selected_2_chicago_male_score_neutral_faces'] = chicago_male_frame[chicago_male_frame.Expression_num_6 == 6].happiness_score.mean()
dict_row['selected_2_chicago_female_score_neutral_faces'] = chicago_female_frame[chicago_female_frame.Expression_num_6 == 6].happiness_score.mean()
# Append this run's metrics and persist the cumulative results.
result_rows_list.append(dict_row)
results_df = pd.DataFrame(result_rows_list)
print(results_df)
results_df.to_csv('model_evaluation_result.csv', index = False)
# Free GPU memory before the next experiment iteration.
del model
torch.cuda.empty_cache()
| auc_fer/Table 4 - ResNet50 Happy AUC-FER.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### lecture Feb 4, 2021
# - Data cleaning with pandas, part 2
# - no quiz this weekend
# - no class next Thursday (Feb 11)
# - midterm available next Thu and next Sunday (Feb 14)
# #### Today
# - merging datasets
# - dataframe aggregation, you want to find the mean, median, max, min...
# - kaggle to read some notebooks.
pwd
import pandas as pd
import numpy as np
flights = pd.read_csv("flights.csv")
flights.info() # 19 variables, sample size = 336776 (number of rows)
# (a) Find all flights that
# (i) Had an arrival delay of two or more hours
# ("arr_delay" variable is measured in minutes)
# (ii) Flew to Houston (IAH or HOU)
# ("dest" is either "IAH" or "HOU".)
# (iii) Were operated by United or Delta (You also need the dataset "airlines.csv")
# a(i)
# NOTE(review): "two or more hours" strictly means arr_delay >= 120; this
# uses > 120 — confirm which was intended.
flights['flight'][flights["arr_delay"]>120]
# a(ii) flew to houston
flights["flight"][(flights["dest"]=="IAH")|(flights["dest"]=="HOU")]
# a(iii) Were operated by United or Delta
flights['flight'][(flights['carrier']=='UA')| (flights['carrier']=="DL")]
# (b) How many flights have a missing 'dep_time'? What other variables are missing? What might these rows represent?
flights['dep_time'].isnull().sum()
flights.isnull().any()
# (c) Sort flights to find
# (i) the most delayed flights. (use "dep_delay")
# (ii) the fastest flights. (you need to calculate "speed" = "distance"/ "air_time" )
#
#
flights.sort_values("dep_delay", ascending=False, inplace=True)
flights.iloc[0] # first row after the sort = most delayed flight
flights["speed"] = flights["distance"]/flights["air_time"]
flights.sort_values("speed", ascending=False, inplace=True)
flights.iloc[0] # fastest flight
flights["speed"].isnull().sum()
# (d) Currently dep_time is convenient to look at, but hard to compute with because they’re not really continuous numbers. Convert them to a more convenient representation of number of minutes since midnight.
# (E.g. for “dep_time”, 1504 represents 15:04 (or 3:04 PM). You should convert it to 904, which means 904 minutes after midnight. Be careful, midnight can be represented by 2400, which should be converted to 0”)
#
# Convert hhmm-style clock times to minutes after midnight:
# e.g. 1504 -> 15*60 + 4 = 904.
flights["dep_time_min"] = flights["dep_time"]//100*60 + flights['dep_time']%100
# BUG FIX: a day has 24*60 = 1440 minutes, not 1400.  Taking the result
# modulo 1440 maps midnight encoded as 2400 (-> 1440 minutes) back to 0;
# the original `% 1400` wrongly wrapped every time from 23:20 onward
# (e.g. 2400 became 40 instead of 0).
flights["dep_time_min"] = flights["dep_time_min"] % 1440
flights[["dep_time","dep_time_min"]]
# (e) Which carrier has the worst delays? (find the average "arr_delay" for each carrier)
# (f) Find all destinations that are flown by at least two carriers.
#
## group the flights by carrier and take the mean arrival delay per group
delay = flights.groupby("carrier")["arr_delay"].mean()
delay
## Find all destinations that are flown by at least two carriers.
# count the number of distinct carriers serving each destination
## group all the flights into different destinations
flights.groupby("dest")["carrier"].nunique()
# NOTE(review): (f) asks for destinations with >= 2 carriers; this only
# computes the counts — a final filter like `counts[counts >= 2]` is missing.
# 1. merging datasets
# 2. dataframe aggregation: mean, max, min, median
# 3. kaggle notebook
# (f) group the "basic.4y", "basic.9y", and "basic.6y" categories together and call them "basic". Then do the value count for "education" again.
# - recoding via DataFrame.replace with a nested {column: {old: new}} mapping
banking = pd.read_csv("Banking_Marketing.csv")
banking["education"].value_counts()
education_newgroup = {"education":{"basic.9y":"basic","basic.4y":"basic",
                                   "basic.6y":"basic", "Basic":"basic"}}
banking.replace(education_newgroup, inplace=True)
banking["education"].value_counts()
# ### merging datasets
# pd.concat stacks Series/DataFrames; indexes are kept as-is (duplicates allowed).
ser1 = pd.Series(["A","B","C"], index = [1,2,3])
ser2 = pd.Series(["E","F","G"], index = [4,5,6])
pd.concat([ser1, ser2]) # one series
ser1 = pd.Series(["A","B","C"], index = [1,2,3])
ser2 = pd.Series(["E","F","G"], index = [1,2,3])
pd.concat([ser1, ser2]) # one series with duplicated index labels, use pd.concat
df1 = pd.DataFrame(np.random.randint(0,11,size=(2,3)))
df1.columns=["A", "B","C"]
df1
df2 = pd.DataFrame(np.random.randint(0,11,size=(2,3)))
df2.columns=["B","C","D"]
df2
# NOTE(review): this concatenates df1 with itself; since df2 was just built
# with partially overlapping columns, pd.concat([df1, df2]) was probably
# intended — confirm.
pd.concat([df1,df1])
## pd.merge()
df3 = pd.DataFrame({"employee":["Bob","Jake","Lisa","Sue"],
                    'group':["Accounting","Engineering","Engineering","HR"]})
df3
df4 = pd.DataFrame({"name":["Jake","Lisa","Sue","Bob"],
                    "hire_date":[2004,2008,2012,2014]})
df4
## merge keys: "employee" on the left frame, "name" on the right frame
pd.merge(df3,df4, left_on="employee", right_on="name")
## inner merge (intersection) vs outer merge (union)
df6 = pd.DataFrame({"name":["Peter","Paul","Mary"],
                    "food":["fish","beans","bread"]})
df6
df7 = pd.DataFrame({"name":["Mary","Joseph"],
                    "drink":["wine","Coke"]})
df7
## inner merge (intersection): only names present in both frames
pd.merge(df6,df7, on="name", how="inner")
## outer merge (union): all names, NaN where a side has no data
pd.merge(df6,df7, on="name", how="outer")
# 5. Create the following 4 dataframes.
# df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
#                     'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
# df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
#                     'hire_date': [2004, 2008, 2012, 2014]})
# df3 = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],
#                     'supervisor': ['Carly', 'Guido', 'Steve']})
# df4 = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],
#                     'salary': [70000, 80000, 120000, 90000]})
#
# - merge the four dataframes
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
                    'hire_date': [2004, 2008, 2012, 2014]})
df3 = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],
                    'supervisor': ['Carly', 'Guido', 'Steve']})
df4 = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'salary': [70000, 80000, 120000, 90000]})
df1
df2
# Merge on the shared "employee" column (key inferred automatically).
df_merged1 = pd.merge(df1,df2)
df_merged1
df3
df_merged2 = df_merged1.merge(df3, on='group') # df_merged1 is the primary dataframe
df_merged2 # one to many merge
df4
df_final = df_merged2.merge(df4, left_on="employee", right_on="name")
# drop(...) without inplace returns a copy and leaves df_final unchanged;
# the `del` below is what actually removes the redundant column.
df_final.drop("name", axis=1)
del df_final["name"] # this change is permanent
df_final
# ### finally, we look at aggregation.
# - max, mean, median
import seaborn as sns # seaborn is for plotting graphs; it also ships example datasets
planets = sns.load_dataset("planets")
planets.info() # 6 columns, sample size= 1035
planets["method"].value_counts()
planets.head()
# Column-wise aggregates; mean skips NaN values by default.
planets["year"].max()
planets["year"].min()
planets["orbital_period"].mean()
planets.describe() ## we have missing values for orbital period
### we want to group the planets into "method",
## find the average value of "orbital period" for each method
planets.groupby("method")["orbital_period"].mean()
planets.head()
# I want to create a column "decade", it will show the decade, when the planet was found
planets["decade"] = planets["year"] - planets["year"]%10
## equivalent: planets["decade"] = planets["year"]//10 * 10
planets
## for each decade, how many planets were found by each method?
planets.groupby(["method","decade"])['number'].sum().unstack()
# https://www.kaggle.com/pawanbhandarkar/covid-19-eda-man-vs-disease
# https://www.kaggle.com/sayooj98/simple-eda-with-python
# https://www.kaggle.com/kasrasadeghianpoor/student-performance-in-exams-data-visualization
# https://www.kaggle.com/morchhlay22/nba-data
# https://www.kaggle.com/rikdifos/nba-players-salary-prediction
# ### Assignment 4B
# (a) read in the dataset with pd.read_csv. Show the first 8 records, what's wrong with the process of importing the dataset.
df = pd.read_csv("adult_income_data.csv", header=None)
df.head(8)
# the first row is not a header, the first row is a sample
# we don't have names of the columns
# +
names=[]
with open("adult_income_names.txt","r") as f: # "r", read only
    for line in f:
        # NOTE(review): this readline() discards every second line of the
        # file while the for-loop consumes the others; that only yields the
        # right column names if the file alternates name/other lines —
        # confirm against adult_income_names.txt.
        f.readline()
        var = line.split(":")[0]  # the column name precedes the colon
        names.append(var)
names
df.columns= names
df
# -
# (i) group by "occupation", show the median age of each group.
df.groupby("occupation")['age'].median()
| lectures/lecture Feb 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ml
# language: python
# name: ml
# ---
# +
#default_exp hybrid
# +
#export
import os
import pickle
import attr
import pandas as pd
import scipy
import numpy as np
from game_recommender import steam_data, content_based, user_based, evaluation
# -
# %cd ..
# Build the dataset and both base recommenders once; they are reused by the
# hybrid recommender below.
game_dataset = steam_data.get_steam_ratings_dataset()
user_based_recommender = user_based.UserBasedRecommender.make_from_steam_ratings('log_hours')
content_based_recommender = content_based.ContentBasedRecommender.make_from_steam_metadata()
steam_df = steam_data.load_steam_df()
chosen_games_substring = 'counter strike'
chosen_games_df = steam_data.get_games_by_name(steam_df, chosen_games_substring)
user_item_df = game_dataset.get_user_item_df()
# Pick one sample user and keep only the games they actually rated.
# NOTE(review): `i = 10` is unused below — row position 10 is hard-coded in
# the iloc calls; presumably they should use `i`.
i = 10
user_id = user_based_recommender.user_similarity_searcher.df.iloc[10,0]
user_ratings_raw = user_item_df.iloc[10]
user_ratings = user_ratings_raw[~user_ratings_raw.isna()]
# Sanity-check both base recommenders on the sample user.
content_based_recommender.recommend_similar_games(
    user_ratings,
    n_recommended=50,
    n_similar=10
)
user_based_recommender.recommend_games_from_similar_users(user_ratings=user_ratings, n_recommended=50, n_similar=10)
# +
#export
@attr.s
class HybridRecommender:
    """Blend user-based and content-based recommendations with linear weights."""

    content_based_recommender: content_based.ContentBasedRecommender = attr.ib()
    user_based_recommender: user_based.UserBasedRecommender = attr.ib()

    def get_weighted_recommendations(
            self,
            user_ratings,
            n_recommended,
            user_recommendation_weight,
            content_recommendation_weight,
            mean_content_recommendation_weight,
            n_similar_users=10,
            n_similar_items=10):
        """Return the top-n games as a weighted blend of the three sources."""
        (
            user_based_recommendations,
            content_based_recommendations,
            mean_content_based_recommendations
        ) = self.get_recommendation_groups(
            user_ratings,
            n_recommended,
            n_similar_users,
            n_similar_items
        )
        return self.weigh_recommendations(
            [
                user_recommendation_weight,
                content_recommendation_weight,
                mean_content_recommendation_weight
            ],
            [
                user_based_recommendations,
                content_based_recommendations,
                mean_content_based_recommendations
            ],
            n_recommended
        )

    @classmethod
    def weigh_recommendations(
            cls,
            weights,
            recommendation_groups,
            n_recommended):
        """Combine several score Series into a single top-n ranking.

        BUG FIX: the parameters were previously declared in the order
        (recommendation_groups, weights) while the only call site passes
        (weights, recommendation_groups); multiplication being commutative
        masked the swap, but the names were misleading and any keyword call
        would have broken.  The signature now matches the call.
        """
        all_recommendations = pd.concat([
            weight * recommendations
            for (weight, recommendations) in zip(weights, recommendation_groups)
        ])
        # Average the score of any game recommended by more than one source.
        deduplicated_recommendations = all_recommendations.groupby(all_recommendations.index).agg('mean')
        # Positions of scores sorted descending, then take the top n.
        best_recommendations_indices = deduplicated_recommendations.argsort()[::-1]
        return deduplicated_recommendations[best_recommendations_indices[:n_recommended]]

    def get_recommendation_groups(
            self,
            user_ratings,
            n_recommended,
            n_similar_users=10,
            n_similar_items=10):
        """Compute the three raw recommendation Series: user-based,
        content-based per-game, and content-based on the mean profile."""
        user_based_recommendations = self.user_based_recommender.recommend_games_from_similar_users(
            user_ratings=user_ratings,
            n_recommended=n_recommended,
            n_similar=n_similar_users
        )
        content_based_recommendations = self.content_based_recommender.recommend_similar_games(
            user_ratings=user_ratings,
            n_similar=n_similar_items,
            n_recommended=n_recommended
        )
        mean_content_based_recommendations = self.content_based_recommender.recommend_mean_similar_games(
            user_ratings=user_ratings,
            n_similar=n_similar_items,
            n_recommended=n_recommended
        )
        return (
            user_based_recommendations,
            content_based_recommendations,
            mean_content_based_recommendations
        )
# +
# Blend the three recommendation sources with equal weights and evaluate.
recommender = HybridRecommender(content_based_recommender, user_based_recommender)
recommendations = recommender.get_weighted_recommendations(user_ratings, 20, user_recommendation_weight=1, content_recommendation_weight=1, mean_content_recommendation_weight=1)
# -
evaluation.metrics.get_recall_at(user_ratings, recommendations)
evaluation.metrics.get_correlation(user_ratings, recommendations, method=scipy.stats.kendalltau)
| notebooks/Hybrid Recommendations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import matplotlib.pyplot as plt
import cv2
import numpy as np
import json
import glob
cocoGt = COCO("test.json")
from utils import binary_mask_to_rle
# Iterate the ground-truth image ids; currently only smoke-tests loading
# the first image (the loop breaks after one iteration).
coco_dt = []
count=0
for imgid in cocoGt.imgs:
    # BUG FIX: the loader was called on an undefined name `coco`; the
    # ground-truth COCO object is bound to `cocoGt`.  [:,:,::-1] converts
    # cv2's BGR channel order to RGB.
    image = cv2.imread("../vocdata/test/" + cocoGt.loadImgs(ids=imgid)[0]['file_name'])[:,:,::-1]
    count=count+1
    break
print(count)
| samples/coco/evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install --user virtualenv
# !pip install --upgrade pip
# !pip install -r binder/requirements.txt
import scipy
import numpy
import matplotlib
import pandas
from sklearn import model_selection
import pandas as pd
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import pyplot
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import seaborn as sns
iris = sns.load_dataset('iris')
iris.shape # grabs dimensions of the dataset
# peeking at the data
iris.head() # first few rows
#iris.tail(3) #last three rows, also allows for "peeking" at the data
iris.describe() #quick stat summary of data
# Frequency of each distinct value, one column at a time.
iris.groupby('sepal_length').size()
iris.groupby('sepal_width').size()
iris.groupby('petal_length').size()
iris.groupby('petal_width').size()
iris.groupby('species').size()
# Visual summaries: bar plots, box plots, and histograms per feature.
iris.plot(kind = 'bar',sharex=False, sharey=False, subplots=True, layout=(10,10))
iris.plot(kind='box', sharex=False, sharey=False, subplots= True, layout=(2,2) )
sns.boxplot(data=iris)
iris.hist()
# +
# splitting our data into training & testing data
array = iris.values #sets the dataset into a 2d array
X = array[:,0:4] #grabs the 4 feature columns
Y = array[:,4] #the data we are predicting, the last column, species
#validation_size = 0.20 #using 20% of the data set to validate
# splitting data into training and validation (random_state fixed for
# reproducibility)
#X's = the features for input for the model
#Y's = expected outcomes
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=0.2, random_state = 1)
print(X_train.shape, Y_train.shape)
print(X_validation.shape, Y_validation.shape)
# +
#LR: logistic regression, LDA: Linear Discriminant Analysis, KNN: K-nearest neighbors
#CART: classification and Regression Trees, NB: Gaussian Naive Bayes, SVM: Support vector Machines
# Spot-check six classifiers with 10-fold stratified cross-validation and
# report mean/std accuracy for each.
models = []
# BUG FIX: the original referenced sklearn.linear_model.LogisticRegression,
# but `sklearn` itself is never imported (only LogisticRegression is), which
# raises NameError at runtime. Use the directly imported class.
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
results = []
names = []
for name, model in models:
    # StratifiedKFold keeps class proportions in every fold; shuffle with a
    # fixed seed makes the folds reproducible.
    kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# -
# Compare the cross-validation accuracy distributions side by side.
pyplot.boxplot(results, labels=names)
pyplot.title('Algorithm Comparison')
pyplot.show()
# Fit the chosen model (SVM) on the full training set and score it on the
# held-out validation data.
model = SVC(gamma='auto')
model.fit(X_train, Y_train) #fit the training algs
predictions = model.predict(X_validation) #predict species for the validation features
print(accuracy_score(Y_validation, predictions)) #fraction of validation rows predicted correctly
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| Iriscasepractice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <hr>
# 第一章較基礎不再列出<br>
# 以下直接自第二章起始
# <hr>
# #### about PEP
#
# - fullname: *Python Enhancement Proposal*
#
# - The PEP process allows anyone to submit a PEP <br>with a technical specification of the feature and a rationale to defend its usefulness.
#
# - After a discussion on the Python mailing lists and possibly some improvements,<br> the BDFL <small>(*Guido van Rossum*)</small> will make a decision to accept or reject the proposal.
# #### Beautiful is better than ugly.
# +
# What do these functions do? They drop every item divisible by `modulo`:
# [10,20,30] | 20 --> 10,30
# [1,2,6,4] | 3 --> 1,2,4
def ftm(items,modulo):
    """Return the items NOT divisible by `modulo` (eager list version)."""
    output = []
    for item in items:
        # BUG FIX: the original tested `% modulo == 0`, which KEPT the
        # multiples and contradicted the documented examples above.
        if item % modulo != 0:
            output.append(item)
    return output
def ftm_gr(items,modulo):
    """Yield the items NOT divisible by `modulo` (lazy generator version)."""
    for item in items:
        if item % modulo != 0:
            yield item
ftm([1,2,3,4],2)
ftm_gr([1,2,3,4],2) # returns a generator object: list() is required to materialize it
# -
# <p></p>
# <hr>
#
# <p>If we need to store a bit of data, what options do we have?</p>
#
# | Type | Option |
# | --- | --- |
# | Full database Server | MySQL, PostgreSQL |
# | Simple file system database | SQLite, AnyDBM |
# | Flat file storage | CSV |
# | Structured storage | JSON, YAML, XML |
# | Serialized Python | Pickle, Marshal |
#
# <hr>
# <p></p>
# #### Errors should never pass silently.
# +
import logging
# FIX: the original `try` suite contained only a comment
# (`# value = int(input())`), and an empty suite is a SyntaxError -- a
# suite needs at least one real statement. Use a failing conversion as a
# stand-in for int(input()) so the cell runs unattended and still
# exercises the logging fallback.
try:
    value = int("not a number")  # was: value = int(input())
except Exception as e:
    logging.warning("Uncaught exception %r", e)
# -
# #### Comparisons between value and identity
# +
# CPython caches small integers (-5..256): equal values in that range are
# also the *same* object, so `is` returns True for them.
a = 200 + 56
b = 256
c = 200 + 57
d = 257
print("{!r} == {!r}: {!r}".format(a,b,a==b))
print("{!r} is {!r}: {!r}".format(a,b,a is b))
print()
# NOTE(review): modern CPython folds `200 + 57` to the constant 257 at
# compile time and may share equal constants within one code object, so
# `c is d` can print True here even though 257 is outside the small-int
# cache. Confirm on the interpreter version in use before relying on the
# classic "False" demo.
print("{!r} == {!r}: {!r}".format(c,d, c == d))
print("{!r} is {!r}: {!r}".format(c,d, c is d))
# +
# Two range objects with equal parameters are equal-valued but distinct
# objects, so both identity checks below evaluate False.
spam = range(100)
eggs = range(100)
# same operation
spam is eggs
id(spam) == id(eggs)
# -
# #### Scope
# +
import logging
# Only cases not shown in earlier chapters are listed here.
spam = 1
def eggs():
    # Assigning to `spam` anywhere in the function makes it a *local*
    # name for the whole function body, so reading it on the right-hand
    # side fails: the local does not exist yet.
    spam += 1
    print(spam)
try:
    eggs()
except Exception as err:
    logging.warning(err)
# Why does this fail?
# The assignment inside eggs() shadows the module-level ('Global' scope)
# spam with a function-local ('Local' scope) one, and Python refuses to
# read a local before it has been bound.
# The fix: declare the name `global` so the function reads and writes the
# module-level binding instead of creating a local.
def eggs_fix():
    global spam  # operate on the module-level 'spam'
    spam += 1
    print(spam)
eggs_fix()
# -
# #### Modifying while iterating
# +
_dict = {'name': 'alex'}
try:
    # Deleting from the dict we are iterating: the first delete succeeds,
    # but the next iteration step raises RuntimeError ("dictionary changed
    # size during iteration").
    for key in _dict:
        _dict.pop(key)
except RuntimeError:
    # Iterate over a snapshot of the keys instead -- list(_dict) copies
    # them, so mutating the real dict is then safe.
    for key in list(_dict):
        _dict.pop(key)
# -
# #### Late binding - be careful with closures
# +
# Late binding: each lambda closes over the *variable* i, not its value,
# so after the comprehension finishes every one of them sees i == 2.
eggs = [lambda a: i * a for i in range(3)]
for fn in eggs:
    print(fn(5))  # prints 10 three times (5*2 each)
# +
import functools
# functools.partial binds the *current* value of i at creation time, so
# each callable keeps its own multiplier.
eggs = [functools.partial(lambda i, a: i * a, i) for i in range(3)]
for fn in eggs:
    print(fn(5))  # 0, 5, 10
| Chapter_01/Part 01 - Pitfalls and Style Guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Descriptive Statistics
# 1. Descriptive Statistics and Graphs
# 2. Number of Tweets (Total)
# 3. Number of Tweets (Time Series)
# 4. Gender Distribution
# 5. Language Distribution
# 6. Follower Counts
# 7. Client Usage (Android, iPhone, web etc.)
# # Jupyter Notebook Style
# Let's make this thing look nice.
from IPython.core.display import HTML
# Read the notebook's custom stylesheet; a context manager guarantees the
# file handle is closed (the original left an unclosed file object to the
# garbage collector).
with open("../css/custom.css", "r") as css_file:
    styles = css_file.read()
HTML(styles)
import pandas as pd
import numpy as np
# # Read .tsv File with Tweets
# Path to the tsv file where previously fetched tweets are.
# Absolute path to the previously fetched tweets (tab-separated file).
tsvpath = '/Users/rcn/Desktop/twitter-analysis/data/tweets.tsv'
# Parse the TSV: treat 'NaN' and empty strings as missing values, and
# parse the second column (index 1) as datetimes.
twitterData=pd.read_table(tsvpath,
                          encoding='utf-8',
                          na_values=['NaN',''],
                          parse_dates=[1]
                         )
# Read in TSV and turn off NaN catching to leave in unrecognised genders
twitterData.head()
twitterData.dtypes
# ## All Tweets
# ### Number of Tweets
nTweets = len(twitterData.index)
# NOTE: Python 2 print statement -- matches this notebook's python2 kernel.
print "There are", nTweets, "tweets in the full dataset"
# ### Number of Tweets Over Time
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
# %matplotlib inline
twitterData.plot()
# ### Documents by Twitter Language
nLanguage = twitterData.Language.value_counts(sort=True, ascending=False, bins=None)
nLanguage[0:10]
nLanguage = twitterData.Language.value_counts(normalize=True, sort=True, ascending=False, bins=None)
nLanguage[0:10]
# ### Number of Documents by Location
nLocation = twitterData['User Location'].value_counts(normalize=False, sort=True, ascending=False, bins=None)
nLocation[0:15]
# ### Number of Documents by UNGP Location
# Getting Vincent ready
# FIX: vincent is used below (initialize_notebook, vincent.Bar) without
# ever being imported, which raises NameError; import it before first use.
import vincent
vincent.initialize_notebook()
# UNGP brand color palette used by the charts below.
gpBlue='#00aeef'
gpLightGray='#96999b'
gpDarkBlue='#00447c'
gpRed='#cf5c42'
gpBrown='#e1d8ad'
gpPink='#f4d5e3'
gpLightBlue='#e1f4fd'
# The three bar charts below (Followers, Friends, gender probability) were
# three near-identical ~18-line cells; factor the shared grouping, styling,
# and JSON export into one helper. (The unused mean_* intermediates from
# the original cells are dropped.)
import vincent  # defensive: vincent was never imported elsewhere in this notebook
from vincent.axes import AxisProperties
from vincent.properties import PropertySet
from vincent.values import ValueRef

location_grouped = twitterData.groupby('UNGPLocation')
mean_location_grouped = location_grouped.mean().dropna()

def make_location_bar(column, y_title, color, json_path):
    """Bar chart of the per-location mean of `column`, styled with the UNGP
    palette, exported to `json_path`, and returned for inline display."""
    series = mean_location_grouped.sort(column)[column]  # DataFrame.sort: old (pre-0.17) pandas API
    chart = vincent.Bar(series)
    chart.axis_titles(x='Country', y=y_title)
    # Re-style every axis: light-gray strokes, titles, and labels.
    for axis in chart.axes:
        axis.properties = AxisProperties()
        for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks']:
            setattr(axis.properties, prop, PropertySet(stroke=ValueRef(value=gpLightGray)))
        axis.properties.title = PropertySet(font_size=ValueRef(value=20),
                                            fill=ValueRef(value=gpLightGray))
        axis.properties.labels = PropertySet(fill=ValueRef(value=gpLightGray))
    # Horizontal, centred x-axis labels with the axis title nudged below them.
    chart.axes[0].properties.labels.angle = ValueRef(value=0)
    chart.axes[0].properties.labels.align = ValueRef(value='center')
    chart.axes[0].properties.title.dy = ValueRef(value=20)
    chart.scales[2].range = [color]
    chart.to_json(json_path)
    return chart

followersBar = make_location_bar('Followers', 'Followers', gpBlue, '../charts/followersBar.json')
followersBar
friendsBar = make_location_bar('Friends', 'Friends', gpDarkBlue, '../charts/friendsBar.json')
friendsBar
genderProb = make_location_bar('UNGPGenderProb', 'Average Gender Probablility', gpRed, '../charts/genderProbBar.json')
genderProb
# FIX: mpld3 was used without being imported (NameError); import before use.
import mpld3
mpld3.enable_notebook()
gatesCountry = twitterData.UNGPLocation.value_counts(normalize=False, sort=True, ascending=False, bins=None)
gatesCountryFig = gatesCountry.plot(kind='barh', color='#00aeef')
mpld3.display()
import ggplot as gg
# FIX: ggplot is imported under the alias `gg`, so every ggplot-API name
# must be qualified with it -- the original mixed bare names (ggplot, aes,
# geom_bar, ggtitle, labs) with qualified ones, raising NameError.
(gg.ggplot(gg.aes(x='UNGPLocation'), data=twitterData)
 + gg.geom_bar() + gg.ggtitle("Gates Tweets")
 + gg.labs("Country", "Number of tweets"))
languagePlot = gg.ggplot(gg.aes(x='DataSiftLanguage'), data=twitterData) + gg.geom_bar() + gg.ggtitle("Language Distribution") + gg.labs("Language", "Number of tweets")
languagePlot
from IPython.core.display import HTML
# Re-apply the notebook's custom CSS. Use a context manager so the file
# handle is closed promptly (the original left an unclosed file object).
with open("../css/custom.css", "r") as css_file:
    styles = css_file.read()
HTML(styles)
| ipynb/03b Descriptive Statistics - TSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reading and Writing Data with Spark
#
# This notebook contains the code from the previous screencast. The only difference is that instead of reading in a dataset from a remote cluster, the data set is read in from a local file. You can see the file by clicking on the "jupyter" icon and opening the folder titled "data".
#
# Run the code cell to see how everything works.
#
# First let's import SparkConf and SparkSession
import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
# Since we're using Spark locally we already have both a sparkcontext and a sparksession running. We can update some of the parameters, such our application's name. Let's just call it "Our first Python Spark SQL example"
# Get (or reuse) the shared SparkSession; builder.getOrCreate() updates the
# app name on the already-running local session rather than starting a new one.
spark = SparkSession \
    .builder \
    .appName("Our first Python Spark SQL example") \
    .getOrCreate()
# Let's check if the change went through
spark.sparkContext.getConf().getAll()  # list of (key, value) config pairs
spark
# As you can see the app name is exactly how we set it
#
# Let's create our first dataframe from a fairly small sample data set. Througout the course we'll work with a log file data set that describes user interactions with a music streaming service. The records describe events such as logging in to the site, visiting a page, listening to the next song, seeing an ad.
# Load the user-interaction log (JSON, one record per event) into a DataFrame.
path = "data/sparkify_log_small.json"
user_log = spark.read.json(path)
user_log.printSchema()
user_log.describe()  # returns a summary DataFrame (not shown unless displayed)
user_log.show(n=1)  # pretty-print the first record
user_log.take(5)  # first 5 rows as a list of Row objects
# Round-trip: write the DataFrame out as CSV, then read it back.
out_path = "data/sparkify_log_small.csv"
user_log.write.save(out_path, format="csv", header=True)
user_log_2 = spark.read.csv(out_path, header=True)
# NOTE(review): without a schema or inferSchema, CSV columns are read back
# as strings -- confirm dtypes via the printed schema below.
user_log_2.printSchema()
user_log_2.take(2)
user_log_2.select("userID","firstName","lastName").show(4)
user_log_2.take(1)
| DEND/DataLakes/practice/3_data_inputs_and_outputs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cleaning Data
#
# **Prerequisites**
#
# - [Intro](https://datascience.quantecon.org/intro.html)
# - [Boolean selection](https://datascience.quantecon.org/basics.html)
# - [Indexing](https://datascience.quantecon.org/the_index.html)
#
#
# **Outcomes**
#
# - Be able to use string methods to clean data that comes as a string
# - Be able to drop missing data
# - Use cleaning methods to prepare and analyze a real dataset
#
#
# **Data**
#
# - Item information from about 3,000 Chipotle meals from about 1,800
# Grubhub orders
# + hide-output=false
# Uncomment following line to install on colab
# ! pip install qeds
# + hide-output=false
import pandas as pd
import numpy as np
import qeds
# -
# ## Outline
#
# - [Cleaning Data](#Cleaning-Data)
# - [Cleaning Data](#Cleaning-Data)
# - [String Methods](#String-Methods)
# - [Type Conversions](#Type-Conversions)
# - [Missing Data](#Missing-Data)
# - [Case Study](#Case-Study)
# - [Appendix: Performance of `.str` Methods](#Appendix:-Performance-of-`.str`-Methods)
# - [Exercises](#Exercises)
# ## Cleaning Data
#
# For many data projects, a [significant proportion of
# time](https://www.forbes.com/sites/gilpress/2016/03/23/data-preparation-most-time-consuming-least-enjoyable-data-science-task-survey-says/#74d447456f63)
# is spent collecting and cleaning the data — not performing the analysis.
#
# This non-analysis work is often called “data cleaning”.
#
# pandas provides very powerful data cleaning tools, which we
# will demonstrate using the following dataset.
# + hide-output=false
# Toy dataset with deliberately messy columns: "numbers" has a leading '#'
# on every value, and "nums" mixes digit strings with a NaN and a
# non-numeric string ("XYZ") to exercise the cleaning tools below.
df = pd.DataFrame({"numbers": ["#23", "#24", "#18", "#14", "#12", "#10", "#35"],
                   "nums": ["23", "24", "18", "14", np.nan, "XYZ", "35"],
                   "colors": ["green", "red", "yellow", "orange", "purple", "blue", "pink"],
                   "other_column": [0, 1, 0, 2, 1, 0, 2]})
df
# -
# What would happen if we wanted to try and compute the mean of
# `numbers`?
# + [markdown] hide-output=false
# ```python
# df["numbers"].mean()
# ```
#
# -
# It throws an error!
#
# Can you figure out why?
#
# Hint: When looking at error messages, start at the very
# bottom.
#
# The final error says, `TypeError: Could not convert #23#24... to numeric`.
# ## String Methods
#
# Our solution to the previous exercise was to remove the `#` by using
# the `replace` string method: `int(c2n.replace("#", ""))`.
#
# One way to make this change to every element of a column would be to
# loop through all elements of the column and apply the desired string
# methods…
# + hide-output=false
# %%time
# Slow path: clean the column one row at a time (timed so it can be
# compared against the vectorized .str version in the next cell).
# Iterate over all rows
for row in df.iterrows():
    # `iterrows` method produces a tuple with two elements...
    # The first element is an index and the second is a Series with the data from that row
    index_value, column_values = row
    # Apply string method to strip the leading '#' and convert to int
    clean_number = int(column_values["numbers"].replace("#", ""))
    # The `at` method is very similar to the `loc` method, but it is specialized
    # for accessing single elements at a time... We wanted to use it here to give
    # the loop the best chance to beat a faster method which we show you next.
    df.at[index_value, "numbers_loop"] = clean_number
# -
# While this is fast for a small dataset like this, this method slows for larger datasets.
#
# One *significantly* faster (and easier) method is to apply a string
# method to an entire column of data.
#
# Most methods that are available to a Python string (we learned a
# few of them in the [strings lecture](https://datascience.quantecon.org/../python_fundamentals/basics.html)) are
# also available to a pandas Series that has `dtype` object.
#
# We access them by doing `s.str.method_name` where `method_name` is
# the name of the method.
#
# When we apply the method to a Series, it is applied to all rows in the
# Series in one shot!
#
# Let’s redo our previous example using a pandas `.str` method.
# + hide-output=false
# %%time
# ~2x faster than loop... However, speed gain increases with size of DataFrame. The
# speedup can be in the ballpark of ~100-500x faster for big DataFrames.
# See appendix at the end of the lecture for an application on a larger DataFrame
# Vectorized .str method: strips the leading "#" from every row in one shot.
df["numbers_str"] = df["numbers"].str.replace("#", "")
# -
# We can use `.str` to access almost any string method that works on
# normal strings. (See the [official
# documentation](https://pandas.pydata.org/pandas-docs/stable/text.html)
# for more information.)
# + hide-output=false
df["colors"].str.contains("p")  # boolean Series: rows whose color contains "p"
# + hide-output=false
df["colors"].str.capitalize()  # "green" -> "Green", etc.
# -
# ## Type Conversions
#
# In our example above, the `dtype` of the `numbers_str` column shows that pandas still treats
# it as a string even after we have removed the `"#"`.
#
# We need to convert this column to numbers.
#
# The best way to do this is using the `pd.to_numeric` function.
#
# This method attempts to convert whatever is stored in a Series into
# numeric values
#
# For example, after the `"#"` removed, the numbers of column
# `"numbers"` are ready to be converted to actual numbers.
# + hide-output=false
# Convert the cleaned strings to actual numbers (object dtype -> numeric).
df["numbers_numeric"] = pd.to_numeric(df["numbers_str"])
# + hide-output=false
df.dtypes  # verify the new column's dtype
# + hide-output=false
df.head()
# -
# We can convert to other types well.
#
# Using the `astype` method, we can convert to any of the supported
# pandas `dtypes` (recall the [intro lecture](https://datascience.quantecon.org/intro.html)).
#
# Below are some examples. (Pay attention to the reported `dtype`)
# + hide-output=false
df["numbers_numeric"].astype(str)  # back to object (string) dtype
# + hide-output=false
df["numbers_numeric"].astype(float)  # float64
# -
# ## Missing Data
#
# Many datasets have missing data.
#
# In our example, we are missing an element from the `"nums"` column.
# + hide-output=false
df
# -
# We can find missing data by using the `isnull` method.
# + hide-output=false
df.isnull()
# -
# We might want to know whether particular rows or columns have any
# missing data.
#
# To do this we can use the `.any` method on the boolean DataFrame
# `df.isnull()`.
# + hide-output=false
df.isnull().any(axis=0)  # per column: does the column contain any missing value?
# + hide-output=false
df.isnull().any(axis=1)  # per row: does the row contain any missing value?
# -
# Many approaches have been developed to deal with missing data, but the two most commonly used (and the corresponding DataFrame method) are:
#
# - Exclusion: Ignore any data that is missing (`.dropna`).
# - Imputation: Compute “predicted” values for the data that is missing
# (`.fillna`).
#
#
# For the advantages and disadvantages of these (and other) approaches,
# consider reading the [Wikipedia
# article](https://en.wikipedia.org/wiki/Missing_data).
#
# For now, let’s see some examples.
# + hide-output=false
# Each call below returns a NEW DataFrame; `df` itself is left unchanged
# (no inplace= argument is passed).
# drop all rows containing a missing observation
df.dropna()
# + hide-output=false
# fill the missing values with a specific value
df.fillna(value=100)
# + hide-output=false
# use the _next_ valid observation to fill the missing data
df.fillna(method="bfill")
# + hide-output=false
# use the _previous_ valid observation to fill missing data
df.fillna(method="ffill")
# -
# We will see more examples of dealing with missing data in future
# chapters.
# ## Case Study
#
# We will now use data from an
# [article](https://www.nytimes.com/interactive/2015/02/17/upshot/what-do-people-actually-order-at-chipotle.html)
# written by The Upshot at the NYTimes.
#
# This data has order information from almost 2,000 Chipotle orders and
# includes information on what was ordered and how much it cost.
# + hide-output=false
# Load the raw Chipotle order dataset (~2,000 Grubhub orders) bundled with qeds.
chipotle = qeds.data.load("chipotle_raw")
chipotle.head()
# -
# ## Appendix: Performance of `.str` Methods
#
# Let’s repeat the “remove the `#`” example from above, but this time on
# a much larger dataset.
# + hide-output=false
import numpy as np
# 100,000-row benchmark frame: random floats rendered as strings with a
# trailing "%" that both approaches below must strip.
test = pd.DataFrame({"floats": np.round(100*np.random.rand(100000), 2)})
test["strings"] = test["floats"].astype(str) + "%"
test.head()
# + hide-output=false
# %%time
# Slow path: row-by-row loop with .at assignment.
for row in test.iterrows():
    index_value, column_values = row
    clean_number = column_values["strings"].replace("%", "")
    test.at[index_value, "numbers_loop"] = clean_number
# + hide-output=false
# %%time
# Fast path: one vectorized .str.replace over the whole column.
test["numbers_str_method"] = test["strings"].str.replace("%", "")
# + hide-output=false
test["numbers_str_method"].equals(test["numbers_loop"])  # element-wise equality check
# -
# We got the exact same result in a fraction of the time!
| Session_7/5_data_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # TensorFlow Distributed Training & Inference
#
# For use cases involving large datasets, particularly those where the data is images, it often is necessary to perform distributed training on a cluster of multiple machines. Similarly, when it is time to set up an inference workflow, it also may be necessary to perform highly performant batch inference using a cluster. In this notebook, we'll examine distributed training and distributed inference with TensorFlow in Amazon SageMaker.
#
# The model used for this notebook is a basic Convolutional Neural Network (CNN) based on [the Keras examples](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). We'll train the CNN to classify images using the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), a well-known computer vision dataset. It consists of 60,000 32x32 images belonging to 10 different classes (6,000 images per class). Here is a graphic of the classes in the dataset, as well as 10 random images from each:
#
# 
#
# ## Setup
#
# We'll begin with some necessary imports, and get an Amazon SageMaker session to help perform certain tasks, as well as an IAM role with the necessary permissions.
# +
# %matplotlib inline
import numpy as np
import os
import sagemaker
from sagemaker import get_execution_role
# Session helper (S3 uploads, default bucket); the execution role grants
# the training jobs and endpoints their AWS permissions.
sagemaker_session = sagemaker.Session()
role = get_execution_role()
bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/DEMO-tf-horovod-inference'  # NOTE(review): defined but not used in the visible cells
print('Bucket:\n{}'.format(bucket))
# -
# Now we'll run a script that fetches the dataset and converts it to the TFRecord format, which provides several conveniences for training models in TensorFlow.
# !python generate_cifar10_tfrecords.py --data-dir ./data
# For Amazon SageMaker hosted training on a cluster separate from this notebook instance, training data must be stored in Amazon S3, so we'll upload the data to S3 now.
# Upload the local TFRecord files to the session's default S3 bucket;
# `inputs` is the resulting S3 URI prefix used by the training jobs below.
inputs = sagemaker_session.upload_data(path='data', key_prefix='data/DEMO-cifar10-tf')
display(inputs)
# ## Distributed training with Parameter Server
#
# A common pattern in distributed training is to use one or more dedicated processes to collect gradients computed by “worker” processes, then aggregate them and distribute the updated gradients back to the workers in an asynchronous manner. These processes are known as parameter servers. In general, they can be run either on their own machines or co-located on the same machines as the workers. In a parameter server cluster, each parameter server communicates with all workers (“all-to-all”). The Amazon SageMaker prebuilt TensorFlow container comes with a built-in option to use parameter servers for distributed training. The container runs a parameter server thread in each training instance.
#
# Once we have a training script, the next step is to set up an Amazon SageMaker TensorFlow Estimator object with the details of the training job. It is very similar to an Estimator for training on a single machine, except we specify a `distributions` parameter to enable starting of parameter server in each training instance.
# +
from sagemaker.tensorflow import TensorFlow
# Two multi-GPU instances; with 'parameter_server' enabled, the prebuilt TF
# container runs a parameter-server thread in each training instance
# (see the markdown cell above).
ps_instance_type = 'ml.p3.8xlarge'
ps_instance_count = 2
model_dir = "/opt/ml/model"
distributions = {'parameter_server': {
    'enabled': True}
}
hyperparameters = {'epochs': 60, 'batch-size' : 256}
estimator_ps = TensorFlow(base_job_name='dist-cifar10-tf',
                       source_dir='code',
                       entry_point='train_ps.py',
                       role=role,
                       framework_version='1.12.0',
                       py_version='py3',
                       hyperparameters=hyperparameters,
                       train_instance_count=ps_instance_count,
                       train_instance_type=ps_instance_type,
                       model_dir=model_dir,
                       tags = [{'Key' : 'Project', 'Value' : 'cifar10'},{'Key' : 'TensorBoard', 'Value' : 'dist'}],
                       distributions=distributions)
# -
# Now we can call the fit method of the Estimator object to start training. After training completes, the tf.keras model will be saved in the SavedModel .pb format so it can be served by a TensorFlow Serving container. Note that the model is only saved by the the master node (disregard any warnings about the model not being saved by all the processes).
# Channel name -> S3 prefix mapping; each channel is mounted for the job.
remote_inputs = {'train' : inputs+'/train', 'validation' : inputs+'/validation', 'eval' : inputs+'/eval'}
estimator_ps.fit(remote_inputs, wait=True)  # blocks until the training job finishes
# ## Distributed training with Horovod
#
# Sometimes it makes sense to perform training on a single machine. For large datasets, however, it may be necessary to perform distributed training on a cluster of multiple machines. In fact, it may be not only faster but cheaper to do distributed training on several machines rather than one machine. Fortunately, Amazon SageMaker makes it easy to run distributed training without having to manage cluster setup and tear down. Distributed training can be done on a cluster of multiple machines using either parameter servers or Ring-AllReduce with Horovod.
#
# Horovod is an open source distributed training framework for TensorFlow, Keras, PyTorch, and MXNet. It is an alternative to the more "traditional" parameter server method of performing distributed training. In Amazon SageMaker, Horovod is only available with TensorFlow version 1.12 or newer. Only a few lines of code are necessary to use Horovod for distributed training of a Keras model defined by the tf.keras API. For details, see the `train.py` script included with this notebook; the changes primarily relate to:
#
# - importing Horovod.
# - initializing Horovod.
# - configuring GPU options and setting a Keras/tf.session with those options.
#
# Once we have a training script, the next step is to set up an Amazon SageMaker TensorFlow Estimator object with the details of the training job. It is very similar to an Estimator for training on a single machine, except we specify a `distributions` parameter describing Horovod attributes such as the number of process per host, which is set here to the number of GPUs per machine. Beyond these few simple parameters and the few lines of code in the training script, there is nothing else you need to do to use distributed training with Horovod; Amazon SageMaker handles the heavy lifting for you and manages the underlying cluster setup.
# +
from sagemaker.tensorflow import TensorFlow
# Horovod (MPI) cluster: 2 instances x 4 processes per host -- one process
# per GPU on the instance type, as described in the markdown cell above.
hvd_instance_type = 'ml.p3.8xlarge'
hvd_processes_per_host = 4
hvd_instance_count = 2
distributions = {'mpi': {
    'enabled': True,
    'processes_per_host': hvd_processes_per_host
    }
}
hyperparameters = {'epochs': 60, 'batch-size' : 256}
estimator_hvd = TensorFlow(base_job_name='dist-cifar10-tf',
                        source_dir='code',
                        entry_point='train_hvd.py',
                        role=role,
                        framework_version='1.12.0',
                        py_version='py3',
                        hyperparameters=hyperparameters,
                        train_instance_count=hvd_instance_count,
                        train_instance_type=hvd_instance_type,
                        tags = [{'Key' : 'Project', 'Value' : 'cifar10'},{'Key' : 'TensorBoard', 'Value' : 'dist'}],
                        distributions=distributions)
# -
# Now we can call the `fit` method of the Estimator object to start training. After training completes, the tf.keras model will be saved in the SavedModel .pb format so it can be served by a TensorFlow Serving container. Note that the model is only saved by the the master, rank = 0 process (disregard any warnings about the model not being saved by all the processes).
# Same three channels as the parameter-server run, fed to the Horovod job.
remote_inputs = {'train' : inputs+'/train', 'validation' : inputs+'/validation', 'eval' : inputs+'/eval'}
estimator_hvd.fit(remote_inputs, wait=True)  # blocks until the training job finishes
# ## Model Deployment with Amazon Elastic Inference
#
# Amazon SageMaker provides both real time inference and batch inference. Although we will focus on batch inference below, let's start with a quick overview of setting up an Amazon SageMaker hosted endpoint for real time inference with TensorFlow Serving (TFS). The processes for setting up hosted endpoints and Batch Transform jobs have significant differences. Additionally, we will discuss why and how to use Amazon Elastic Inference with the hosted endpoint.
#
# ### Deploying the Model
#
# When considering the overall cost of a machine learning workload, inference often is the largest part, up to 90% of the total. If a GPU instance type is used for real time inference, it typically is not fully utilized because, unlike training, real time inference does not involve continuously inputting large batches of data to the model. Elastic Inference provides GPU acceleration suited for inference, allowing you to add inference acceleration to a hosted endpoint for a fraction of the cost of using a full GPU instance.
#
# The `deploy` method of the Estimator object creates an endpoint which serves prediction requests in near real time. To utilize Elastic Inference with the SageMaker TFS container, simply provide an `accelerator_type` parameter, which determines the type of accelerator that is attached to your endpoint. Refer to the **Inference Acceleration** section of the [instance types chart](https://aws.amazon.com/sagemaker/pricing/instance-types) for a listing of the supported types of accelerators.
#
# Here we'll use a general purpose CPU compute instance type along with an Elastic Inference accelerator: together they are much cheaper than the smallest P3 GPU instance type.
# General-purpose CPU instance plus an Elastic Inference accelerator --
# per the markdown above, cheaper than the smallest P3 GPU instance for serving.
predictor = estimator_hvd.deploy(initial_instance_count=1,
                                 instance_type='ml.m5.xlarge',
                                 accelerator_type='ml.eia1.medium')
# ### Real time inference
#
# Now that we have a Predictor object wrapping a real time Amazon SageMaker hosted enpoint, we'll define the label names and look at a sample of 10 images, one from each class.
# +
from IPython.display import Image, display
# CIFAR-10 class names, indexed by model output position.
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
# Collect every .png under the local sample-img/ directory and render the
# images inline.
images = []
for entry in os.scandir('sample-img'):
    if entry.is_file() and entry.name.endswith("png"):
        images.append('sample-img/' + entry.name)
for image in images:
    display(Image(image))
# -
# Next we'll set up the Predictor object created by the `deploy` method call above. The TFS container in Amazon SageMaker by default uses the TFS REST API, which requires requests in a specific JSON format. However, for many use cases involving image data it is more convenient to have the client application send the image data directly to a real time endpoint for predictions without converting and preprocessing it on the client side.
#
# Fortunately, the Amazon SageMaker TFS container provides a data pre/post-processing feature that allows you to simply supply a data transformation script to accomplish this. We'll discuss this feature more in the Batch Transform section of this notebook. For now, observe in the code cell below that with a preprocessing script in place, we just specify the Predictor's content type as `application/x-image` and override the default serializer, then we can simply provide the raw .png image bytes to the Predictor.
# +
# Send raw image bytes straight to the endpoint: the container's
# pre-processing script handles conversion, so no client-side serializer.
predictor.content_type = 'application/x-image'
predictor.serializer = None
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']

def get_prediction(file_path):
    """Return the predicted CIFAR-10 class label for the image at file_path."""
    with open(file_path, "rb") as image:
        payload = bytearray(image.read())
        scores = predictor.predict(payload)['predictions']
        return labels[np.argmax(scores, axis=1)[0]]
# -
# Classify every sample image via the hosted endpoint and show the labels.
predictions = []
for sample_path in images:
    predictions.append(get_prediction(sample_path))
print(predictions)
# ## Batch Transform with TFS pre/post-processing scripts
#
# If a use case does not require individual predictions in near real-time, an Amazon SageMaker Batch Transform job is likely a better alternative. Although hosted endpoints also can be used for pseudo-batch prediction, the process is more involved than using the alternative Batch Transform, which is designed for large-scale, asynchronous batch inference.
#
# A typical problem in working with batch inference is how to convert data into tensors that can be input to the model. For example, image data in .png or .jpg format cannot be directly input to a model, but rather must be converted first. Additionally, sometimes other preprocessing of the data must be performed, such as resizing. The Amazon SageMaker TFS container provides facilities for doing this efficiently.
# ### Pre/post-processing script
#
# As mentioned above, the TFS container in Amazon SageMaker by default uses the REST API to serve prediction requests. This requires the input data to be converted to JSON format. One way to do this is to create a Docker container to do the conversion, then create an overall Amazon SageMaker model that links the conversion container to the TensorFlow Serving container with the model. This is known as an Amazon SageMaker Inference Pipeline, as demonstrated in another [sample notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/working_with_tfrecords).
#
# However, as a more convenient alternative for many use cases, the Amazon SageMaker TFS container provides a data pre/post-processing script feature that allows you to simply supply a data transformation script. Using such a script, there is no need to build containers or directly work with Docker. The simplest form of a script must only implement an `input_handler` and `output_handler` interface, as shown in the code below, be named `inference.py`, and be placed in a `/code` directory.
# !cat ./code/inference.py
# On the input preprocessing side, the code takes an image read from Amazon S3 and converts it to the required TFS REST API input format. On the output postprocessing side, the script simply passes through the predictions in the standard TFS format without modifying them. Alternatively, we could have just returned a class label for the class with the highest score, or performed other postprocessing that would be helpful to the application consuming the predictions.
# ### Requirements.txt
#
# Besides an `inference.py` script implementing the handler interface, it also may be necessary to supply a `requirements.txt` file to ensure any necessary dependencies are installed in the container along with the script. For this script, in addition to the Python standard libraries we need the Pillow and Numpy libraries.
# !cat ./code/requirements.txt
# ### Create GPU Model
#
# When we deployed the model above to an Amazon SageMaker real time endpoint, we deployed to a CPU-based instance type, along with an attached Elastic Inference accelerator to which parts of the model computation graph are offloaded. Under the hood a CPU-based Amazon SageMaker Model object was created to wrap a CPU-based TFS container. However, for Batch Transform on a large dataset, we would prefer to use full GPU instances. To do this, we need to create another Model object that will utilize a GPU-based TFS container.
# +
import boto3
from sagemaker.tensorflow.serving import Model
from time import gmtime, strftime

# Low-level SageMaker client, needed for the explicit create_model call below.
client = boto3.client('sagemaker')
# Timestamped model name so repeated notebook runs do not collide.
model_name = "dist-cifar10-tf-gpu-{}".format(strftime("%d-%H-%M-%S", gmtime()))
estimator = estimator_hvd
# Wrap the trained model artifact in a TensorFlow Serving Model object.
tf_serving_model = Model(model_data=estimator.model_data,
                         role=sagemaker.get_execution_role(),
                         image=estimator.image_name,
                         framework_version=estimator.framework_version,
                         sagemaker_session=estimator.sagemaker_session)
# Passing a GPU instance type makes prepare_container_def resolve the
# GPU-based TFS container image for the Batch Transform job.
batch_instance_type = 'ml.p3.2xlarge'
tf_serving_container = tf_serving_model.prepare_container_def(batch_instance_type)
model_params = {
    "ModelName": model_name,
    "Containers": [
        tf_serving_container
    ],
    "ExecutionRoleArn": sagemaker.get_execution_role()
}
client.create_model(**model_params)
# -
# ### Run a Batch Transform job
#
# Next, we'll run a Batch Transform job using our data processing script and GPU-based Amazon SageMaker Model. More specifically, we'll perform distributed inference on a cluster of two instances. As an additional optimization, we'll set the `max_concurrent_transforms` parameter of the Transformer object, which controls the maximum number of parallel requests that can be sent to each instance in a transform job.
# +
# Public sample dataset of CIFAR-10 .png images in the notebook's region.
input_data_path = 's3://sagemaker-sample-data-{}/tensorflow/cifar10/images/png'.format(sagemaker_session.boto_region_name)
output_data_path = 's3://{}/{}/{}'.format(bucket, prefix, 'batch-predictions')
# Distribute inference over 2 GPU instances, up to 100 in-flight requests each.
batch_instance_count = 2
concurrency = 100
transformer = sagemaker.transformer.Transformer(
    model_name = model_name,
    instance_count = batch_instance_count,
    instance_type = batch_instance_type,
    max_concurrent_transforms = concurrency,
    strategy = 'MultiRecord',                 # batch several records per request
    output_path = output_data_path,
    assemble_with= 'Line',                    # one prediction result per output line
    base_transform_job_name='cifar-10-image-transform',
    sagemaker_session=sagemaker_session,
)
transformer.transform(data = input_data_path, content_type = 'application/x-image')
transformer.wait()                            # block until the batch job completes
# -
# ### Inspect Batch Transform output
#
# Finally, we can inspect the output files of our Batch Transform job to see the predictions. First we'll download the prediction files locally, then extract the predictions from them.
# !aws s3 cp --quiet --recursive $transformer.output_path ./batch_predictions
# +
import json
import re

total = 0        # number of prediction files processed
correct = 0      # number of correct predictions
predicted = []   # predicted class label per output file
actual = []      # actual class label parsed from the file name

for entry in os.scandir('batch_predictions'):
    try:
        if entry.is_file() and entry.name.endswith("out"):
            with open(entry, 'r') as f:
                jstr = json.load(f)
            # Flatten the TFS response and round the scores to 3 decimals.
            results = [float('%.3f' % (item)) for sublist in jstr['predictions'] for item in sublist]
            class_index = np.argmax(np.array(results))
            predicted_label = labels[class_index]
            predicted.append(predicted_label)
            # File names look like "<label><id>.png.out"; raw string with the
            # dots escaped so they are not treated as regex wildcards.
            actual_label = re.search(r'([a-zA-Z]+)\.png\.out', entry.name).group(1)
            actual.append(actual_label)
            # Compare the labels directly (the original substring test
            # `predicted_label in entry.name` could false-positive whenever one
            # label happens to appear inside another file name).
            if predicted_label == actual_label:
                correct += 1
            total += 1
    except Exception as e:
        # Best-effort: report and skip malformed output files, keep processing.
        print(e)
        continue
# -
# Let's calculate the accuracy of the predictions.
# Report overall batch-transform accuracy.
print(f'Out of {total} total images, accurate predictions were returned for {correct}')
accuracy = correct / total
print(f'Accuracy is {accuracy:.1%}')
# The accuracy from the batch transform job on 10000 test images never seen during training is fairly close to the accuracy achieved during training on the validation set. This is an indication that the model is not overfitting and should generalize fairly well to other unseen data.
#
# Next we'll plot a confusion matrix, which is a tool for visualizing the performance of a multiclass model. It has entries for all possible combinations of correct and incorrect predictions, and shows how often each one was made by our model. Ours will be row-normalized: each row sums to one, so that entries along the diagonal correspond to recall.
# +
import pandas as pd
import seaborn as sns
# Row-normalized confusion matrix: each row sums to 1, so the diagonal
# entries correspond to per-class recall.
confusion_matrix = pd.crosstab(pd.Series(actual), pd.Series(predicted), rownames=['Actuals'], colnames=['Predictions'], normalize='index')
sns.heatmap(confusion_matrix, annot=True, fmt='.2f', cmap="YlGnBu").set_title('Confusion Matrix')
# -
# If our model had 100% accuracy, and therefore 100% recall in every class, then all of the predictions would fall along the diagonal of the confusion matrix. Here our model definitely is not 100% accurate, but manages to achieve good recall for most of the classes, though it performs worse for some classes, such as cats.
# # Extensions
#
# Although we did not demonstrate them in this notebook, Amazon SageMaker provides additional ways to make distributed training more efficient for very large datasets:
# - **VPC training**: performing Horovod training inside a VPC improves the network latency between nodes, leading to higher performance and stability of Horovod training jobs.
#
# - **Pipe Mode**: using [Pipe Mode](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-inputdataconfig) reduces startup and training times. Pipe Mode streams training data from S3 as a Linux FIFO directly to the algorithm, without saving to disk. For a small dataset such as CIFAR-10, Pipe Mode does not provide any advantage, but for very large datasets where training is I/O bound rather than CPU/GPU bound, Pipe Mode can substantially reduce startup and training times.
# # Cleanup
#
# To avoid incurring charges due to a stray endpoint, delete the Amazon SageMaker endpoint if you no longer need it:
# Tear down the hosted endpoint so it stops incurring charges.
sagemaker_session.delete_endpoint(predictor.endpoint)
| tf-distribution-options/tf-distributed-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: drlnd
# language: python
# name: drlnd
# ---
# # Project 3: Collaboration and Competition. Tennis
#
# ---
#
# ### Preliminary actions
# +
import numpy as np
import torch
import matplotlib
import matplotlib.pyplot as plt
from collections import deque
from unityagents import UnityEnvironment
from maddpg_agent import MADDGP_Agent
# -
# **IMPORTANT**: introduce in the following cell, the path to place where you have stored the file of the TENNIS evironment.
# Launching the environment
# NOTE(review): file_name must point to your local Tennis environment build.
env = UnityEnvironment(file_name='Tennis.app')
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# +
# Get and select the default brain
# Get and select the default brain (the controller for the Python-side agents)
brain_names = env.brain_names
print('Number of available brains:',len(brain_names))
print('Name of the brains:',brain_names)
brain_name = brain_names[0] # Get the name of the first brain
brain = env.brains[brain_name] # Initialize the brain
print(brain)
# +
# Reset the environment
env_data = env.reset(train_mode=True)[brain_name]
# Number of agents in the environment
num_agents = len(env_data.agents)
print('Number of agents:', num_agents)
# size of the action space (per agent)
action_size = brain.vector_action_space_size
print('Size of action space (per agent):', action_size)
# Size fo the state space
states = env_data.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state of any of the agents is made of 8 dimensions x 3 stacked observations = 24 values,')
print('and it looks likes this', states[0])
# -
# ### Training the Agent with MADDPG algorithm
# Hyperparameters definition
hyperparameters = {
    'FC1': 190,               # Actor/Critic network: nodes at 1st hidden layer
    'FC2': 160,               # Actor/Critic network: nodes at 2nd hidden layer
    'BUFFER_SIZE': int(1e6),  # Replay buffer size
    'BATCH_SIZE': 256,        # Minibatch size
    'GAMMA': 0.99,            # Discount factor
    'TAU': 5e-3,              # For soft update of target parameters
    'LR_ACTOR': 1e-4,         # Learning rate of the actor
    'LR_CRITIC': 5e-4,        # Learning rate of the critic
    'WEIGHT_DECAY': 1e-6,     # Critic network L2 weight decay
    'UPDATE_EVERY': 1,        # Update rate
    'N_UPDATES': 1,           # Update passes
    'MU': 0.0,                # Parameter for Ornstein-Uhlenbeck noise
    'SIGMA': 0.20,            # Parameter for Ornstein-Uhlenbeck noise
    'THETA': 0.15,            # Parameter for Ornstein-Uhlenbeck noise
    'RANDOM_SEED': 4,         # Seed for random generation (for reproducibility)
    'EPS_INT': 800            # Interval of episodes to decay the noise epsilon (from 1 to 0)
}
# Initializing the MADDPG agent (from maddpg_agent.py file)
# Build the multi-agent DDPG wrapper with the hyperparameters above.
# NOTE(review): the class name is spelled "MADDGP_Agent" in maddpg_agent.py;
# it must match that definition, so the spelling is kept as-is here.
agent = MADDGP_Agent(num_agents=num_agents, state_size=state_size, action_size=action_size,
                     hyperparameters=hyperparameters)
# Defining the training process
def maddpg_training(agent, n_episodes=2000, print_every=100):
    """Train the MADDPG agent on the Tennis environment.

    Relies on the notebook-level globals ``env``, ``brain_name`` and
    ``num_agents``. The episode score is the max over the agents' scores;
    the problem is considered solved when the 100-episode average of that
    max reaches 0.5. Checkpoints are saved at every new best average, and
    training stops early after 200 episodes without improvement once solved.

    Returns a tuple ``(scores, scores_mean_log)``: per-episode max scores
    and the running 100-episode mean.
    """
    # Initialize:
    scores_mean_log = []               # list for the log of mean scores
    scores_window = deque(maxlen=100)  # list of last 100 scores
    scores = []                        # list containing score for each episode
    not_solved = True                  # not_solved control flag
    solved_episode = 0                 # episode for average score over threshold
    solved_av_score = 0                # average score when solved
    max_av_score = float("-inf")       # maximum average score
    max_av_score_episode = 0           # episode at maximum average score
    for i_episode in range(1, n_episodes+1):                  # Episode loop
        env_data = env.reset(train_mode=True)[brain_name]     # Reset environment in TRAINING MODE
        states = env_data.vector_observations                 # Get the first state
        agent.reset()                                         # Reset smart agent (= noise process)
        score = np.zeros(num_agents)                          # Initialize score counter
        t = 1                                                 # Initialize time step counter
        while True:                                           # Trajectory loop
            actions = agent.act(states, i_episode,
                                add_noise=True)               # Get actions from policy (one per agent)
            env_data = env.step(actions)[brain_name]          # Interaction with the environment
            next_states = env_data.vector_observations        # Get the next state (one per agent)
            rewards = env_data.rewards                        # Get the reward (one per agent)
            dones = env_data.local_done                       # Get the done code (one per agent)
            agent.step(states, actions, rewards, next_states, dones)  # Agents' process: gather experiences and learn
            score += rewards                                  # Add time step reward to total trajectory score
            t += 1                                            # Update of time step counter
            if np.any(dones):                                 # Break trajectory loop when episode finishes (done=True)
                break
            states = next_states                              # Roll over states for next iteration
        max_score = np.max(score)                       # Evaluate max score of the match from every agent's score
        scores.append(max_score)                        # Add max-agent score to score log
        scores_window.append(max_score)                 # Add max-agent score to last 100 scores log
        scores_mean_log.append(np.mean(scores_window))  # Add mean of the last 100 scores to the log of mean scores
        # Printing training log (carriage return keeps progress on one line)
        print('\rEpisode {}\tAverage Score: {:.3f}\tMax Score: {:.3f}\tScore: {}\tGame-steps: {:d} '
              .format(i_episode, np.mean(scores_window), max_score, score, t), end='')
        if i_episode % print_every == 0:
            print('\rEpisode {}\tAverage Score: {:.3f}\t \t \t \t '
                  .format(i_episode, np.mean(scores_window)))
        # Determination if problem solved (first time the average reaches 0.5)
        if (np.mean(scores_window) >= 0.5) and not_solved:
            solved_episode = i_episode
            solved_av_score = np.mean(scores_window)
            not_solved = False
        # Saving actor and critic checkpoint files with every new max 100-last average score
        if np.mean(scores_window) >= max_av_score:
            max_av_score = np.mean(scores_window)
            max_av_score_episode = i_episode
            for i in range(num_agents):
                torch.save(agent.ddpg_agents[i].actor_local.state_dict(), 'trained_actor_{:d}-maddpg.pth'.format(i))
                torch.save(agent.ddpg_agents[i].critic_local.state_dict(), 'trained_critic_{:d}-maddpg.pth'.format(i))
        # Stopping the learning process if no improvement in the last 200 episodes
        if not_solved == False and i_episode > (max_av_score_episode + 200) and np.mean(scores_window) < max_av_score:
            break
    # Printing summary
    print('\n')
    print('Problem solved in {:d} episodes when reached an average score of {:.3f}'.format(solved_episode, solved_av_score))
    print('Maximum average score {:.3f}, reached at episode {:d}'.format(max_av_score, max_av_score_episode))
    return scores, scores_mean_log
# +
# Launching the training process
scores, scores_mean_log = maddpg_training(agent, n_episodes=3000)
# Plotting the scores evolution during training.
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores, label='episode max s')
plt.plot(np.arange(1, len(scores)+1), scores_mean_log, c='r', label='100-last-score mean')
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# -
# ### Checking agent performance
# +
# Load actor and critic weights from file (if needed)
#agent = MADDGP_Agent(num_agents=num_agents, state_size=state_size, action_size=action_size,
# hyperparameters=hyperparameters)
#for i in range(num_agents):
# agent.ddpg_agents[i].actor_local.load_state_dict(torch.load('trained_actor_{:d}-maddpg.pth'.format(i)))
# agent.ddpg_agents[i].critic_local.load_state_dict(torch.load('trained_critic_{:d}-maddpg.pth'.format(i)))
# +
# Checking the performance of the trained agent
scores_log = []
test_episodes = 5
for i in range(0, test_episodes):                        # play game for a number of episodes
    scores = np.zeros(num_agents)                        # initialize the score (for each agent)
    env_info = env.reset(train_mode=False)[brain_name]   # reset the environment
    states = env_info.vector_observations                # get the current state (for each agent)
    while True:                                          # play loop
        actions = agent.act(states, i, add_noise=False)  # get actions from trained agent (no exploration noise)
        env_info = env.step(actions)[brain_name]         # interact with the environment (was env_data; renamed for consistency)
        next_states = env_info.vector_observations       # get next state (for each agent)
        rewards = env_info.rewards                       # get reward (for each agent)
        dones = env_info.local_done                      # see if episode finished
        scores += rewards                                # update the score (reuse the rewards already fetched above)
        states = next_states                             # roll over states to next time step
        if np.any(dones):                                # exit loop if episode finished
            break
    scores_log.append(np.max(scores))
    # {:.2f} (not the original {:2f}, which printed 6 decimal places)
    print('Score (max over agents) at episode {:d}: {:.2f}'.format(i+1, np.max(scores)))
print('\nSmart Agent average score after {:d} episodes: {:.2f}'.format(test_episodes, np.mean(scores_log)))
# +
# Checking the performance of the random agent (baseline for comparison)
scores_log = []
test_episodes = 5
for i in range(0, test_episodes):                        # play game for a number of episodes
    scores = np.zeros(num_agents)                        # initialize the score (for each agent)
    env_info = env.reset(train_mode=False)[brain_name]   # reset the environment
    states = env_info.vector_observations                # get the current state (for each agent)
    while True:                                          # play loop
        actions = np.random.randn(num_agents, action_size)  # get random actions (for each agent)
        actions = np.clip(actions, -1, 1)                # clip all actions between -1 and 1
        env_info = env.step(actions)[brain_name]         # interact with the environment
        next_states = env_info.vector_observations       # get next state (for each agent)
        rewards = env_info.rewards                       # get reward (for each agent)
        dones = env_info.local_done                      # see if episode finished
        scores += rewards                                # update the score (reuse the rewards already fetched above)
        states = next_states                             # roll over states to next time step
        if np.any(dones):                                # exit loop if episode finished
            break
    scores_log.append(np.max(scores))
    # {:.2f} (not the original {:2f}, which printed 6 decimal places)
    print('Score (max over all agents) at episode {:d}: {:.2f}'.format(i+1, np.max(scores)))
print('\nAverage score after {:d} episodes: {:.2f}'.format(test_episodes, np.mean(scores_log)))
# -
# ### Closing the environment
# Close the environment
env.close()
| P3-Competition_and_Collaboration/My-Tennis-maddpg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bolt
# language: python
# name: bolt
# ---
# O jamaicano Usain Bolt é considerado por muitos o melhor velocista de todos os tempos. Mesmo tendo se aposentado das pistas em 2017, seu nome é ainda um dos primeiros a vir à mente do grande público quando o assunto é atletismo.
#
# Nesse artigo, veremos como podemos utilizar a linguagem Python para saber a velocidade e aceleração atingidas pelo jamaicano nas provas de 100 m da Olimpíada de Pequim de 2008, onde marcou o recorde mundial da prova, e no Mundial de Atletismo de 2009 em Berlin, onde quebrou seu próprio recorde. Aproveitaremos para discutir um pouco sobre modelos matemáticos e cuidados na seleção e interpretação de seus resultados.
# O código do artigo se encontra no formato Jupyter Notebook no [repositório do Ciência Programada no GitHub](https://github.com/Ciencia-Programada/bolt-vs-python). Dê uma olhada e pratique baixando o arquivo e alterando o código.
# # Obtendo os dados
# O primeiro passo é, obviamente, obter os dados e passá-los para uma forma que pode ser tratada pelos pacotes que utilizaremos. O tempo total de cada prova é relativamente fácil de achar na internet mas, para que possamos ter uma noção melhor do desempenho do atleta na prova, quanto mais detalhes obtivermos, melhor.
#
# O site [Speed Endurance](https://speedendurance.com/2009/08/19/usain-bolt-10-meter-splits-fastest-top-speed-2008-vs-2009/) compilou os dados dos [relatórios](https://www.worldathletics.org/about-iaaf/documents/research-centre) da Associação Internacional de Federações de Atletismo - IAAF (*International Association of Athletics Federations*) dos dois eventos. Segue uma tabela adaptada com os dados compilados:
# 
#
# Na tabela, TR significa "tempo de reação", ou seja, o tempo que passou do disparo de início da prova até o atleta efetivamente se mover. Não estava ventando na prova de 2008 e havia um vento favorável 0,9 m/s na prova de 2009 de acordo com os dados dos relatórios.
# O interessante é que temos os tempos de Bolt a cada 10 m, o que nos será útil mais adiante. Podemos pegar os tempos acumulados, colocar em uma planilha e exportar para o formato csv ([*comma separeted values*](https://en.wikipedia.org/wiki/Comma-separated_values), valores separados por vírgula), formato usualmente reconhecido por qualquer pacote matemático.
#
# Vamos dar uma olhada em nosso arquivo:
# !cat bolt.csv
# Split é o termo que se usa para cada divisão da prova, cada marcação de 10 metros. É tão comum que preferi manter o termo em inglês. Repare que coloquei uma linha com tempo 0 no início para facilitar a construção de gráficos futuramente. Como tudo em programação, o idioma padrão é inglês, então o formato dos números está no formato inglês, pontos para separar a parte decimal ao invés de vírgulas como estamos acostumados a usar. Recomendo fortemente que mude nas opções de seus programas para deixar ponto como padrão, nem todo pacote terá a opção de receber arquivos com vírgula. Na realidade, para se acostumar com o idioma, recomendo usar todos os seus programas e o sistema operacional em inglês, mas é apenas minha opinião.
# Vamos armazenar o caminho do arquivo em uma variável para ficar mais fácil de se referir durante o programa:
arquivo_dados = 'bolt.csv'
# Um dos pacotes mais utilizados para lidar com dados numéricos e computação científica em Python é o [NumPy](https://numpy.org/). Vamos importá-lo juntamente com o [Matplotlib](https://matplotlib.org/), para gráficos:
import numpy as np
import matplotlib.pyplot as plt
# Para importar dados do arquivo csv, usaremos o método [genfromtxt](https://numpy.org/doc/stable/reference/generated/numpy.genfromtxt.html). Para entender o funcionamento deste método, vamos olhar o output passando apenas o arquivo e o delimitador (vírgula no caso de arquivos csv):
np.genfromtxt(arquivo_dados, delimiter=',')
# Repare que há `nan` em várias posições. Mais especificamente, nas posições onde havia texto. Isso porque o método, por padrão, espera tipo `float` em todas as posições. Quando encontra algo inesperado, de outro tipo, substitui por `nan` ([*not a number*](https://en.wikipedia.org/wiki/NaN)). Assim, precisamos informar que a primeira coluna é de texto (tipo `string`) e, além disso, que a primeira linha contém os nomes de cada coluna. Para isso, usamos os seguintes parâmetros do método:
np.genfromtxt(arquivo_dados, delimiter=',', dtype=(str, float, float), names=True)
# Repare que a parte dos nomes funcionou, agora o output lista os nomes ao final. No entanto, os textos aparecem como strings vazias. [De acordo com a documentação](https://numpy.org/doc/stable/reference/arrays.dtypes.html#specifying-and-constructing-data-types) é necessário passar o tipo de codificação e a quantidade de caracteres esperada. No nosso caso, [unicode](https://en.wikipedia.org/wiki/Unicode) e 10 caracteres:
np.genfromtxt(arquivo_dados, delimiter=',', dtype=('U10', float, float), names=True)
# Agora sim, mas repare que não armazenamos os dados em nenhuma variável. Podemos utilizar um pequeno macete do Notebook. Quando "esquecemos" de armazenar o resultado da última célula executada em uma variável, podemos criar a variável e atribuir a ela `_`:
dados = _
# Vamos verificar:
dados
# Podemos acessar cada coluna dos dados pelos seus nomes:
dados['Splits']
dados['2008_Beijing']
dados['2009_Berlin']
# Agora já temos o que precisamos para começar a explorar esses dados. Vamos começar com alguns gráficos simples.
# # Primeiros gráficos
# Vamos começar fazendo um simples gráfico de posição *versus* tempo. Para os dados do Mundial de Berlin, temos:
plt.scatter(dados['2009_Berlin'], dados['Splits'])
# Por mais que na tabela a informação na forma de intervalo fique de mais fácil interpretação para o leitor, na minha opinião no gráfico o melhor seria o valor inteiro do final do intervalo. Ou seja, 0, 10, 20... Assim, vamos criar uma variável `marcas` que irá armazenar essas marcações a cada 10 metros para podemos utilizar como eixo vertical do gráfico. Para isso, usaremos o método [arange](https://numpy.org/doc/stable/reference/generated/numpy.arange.html) que, de acordo com a documentação, gera valores igualmente espaçados na forma `[início, fim)`. Repare, intervalo fechado no início e aberto no fim. Por isso, no código abaixo está 101, para que o valor 100 esteja incluso:
# Distance marks every 10 m from the start (0) up to the finish line (100 m).
marcas = np.arange(11) * 10
marcas
# Fazendo o gráfico novamente:
plt.scatter(dados['2009_Berlin'], marcas)
plt.show()
# Fica bem mais fácil de entender, na minha opinião. Mas apenas números não significam coisa alguma. Seria melhor colocar título no gráfico e informar do que se trata cada eixo. Além disso, linhas de grade (*grid*) facilitariam a interpretação do gráfico por parte do leitor. Vamos resolver cada caso.
#
# O Matplotlib possui alguns estilos de gráfico pré-definidos que podem ser [vistos aqui](https://matplotlib.org/3.1.0/gallery/style_sheets/style_sheets_reference.html). Vamos escolher um que apresente linhas de grade:
plt.style.use('ggplot')
# Esse estilo irá valer para todo o Notebook a partir do momento da execução da célula.
# Agora, um pouco de Matplotlib básico para identificar o gráfico e os eixos:
plt.scatter(dados['2009_Berlin'], marcas)
plt.xlabel('Tempo / s')
plt.ylabel('Posição / m')
plt.title('Usain Bolt - Mundial de Berlin - 2009')
plt.show()
# Podemos comparar os dados das duas competições no mesmo gráfico:
plt.scatter(dados['2008_Beijing'], marcas, label='2008 - Beijing')
plt.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin')
plt.legend()
plt.xlabel('Tempo / s')
plt.ylabel('Posição / m')
plt.title('Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# Repare que no gráfico fica mais fácil de perceber que Bolt começou pior na competição de 2009 (trecho entre 0 e 3 segundos), mas recuperou na metade final para quebrar seu próprio recorde.
# Podemos também apresentar os gráficos lado a lado. Caso tenha dificuldade de entender o código a seguir, [leia esse artigo](https://cienciaprogramada.com.br/2020/09/graficos-python-pint-matplotlib/) onde também apresento um pouco de Matplotlib e explico um pouco como trabalhar com eixos.
# +
# útil para mudar a frequência dos ticks (marcações, traços) nos eixos
import matplotlib.ticker as plticker
# criando o grid
fig1, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
# associando eixos a cada posição do grid
ax1 = axarr[0]
ax2 = axarr[1]
# marcações no eixo de posição de 10 em 10 e no de tempo de 1 em 1
ticks_posicao = plticker.MultipleLocator(base=10)
ticks_tempo = plticker.MultipleLocator(base=1)
# os gráficos em si
ax1.scatter(dados['2008_Beijing'], marcas, label='2008 - Beijing', color='red')
ax2.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin', color='blue')
# nomeando os eixos e colocando as marcações no intervalo desejado
for ax in axarr:
ax.set_xlabel('Tempo / s')
ax.xaxis.set_major_locator(ticks_tempo)
ax.set_ylabel('Posição / m')
ax.yaxis.set_major_locator(ticks_posicao)
ax.legend()
# título principal
fig1.suptitle('Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Como eixo de posição é igual nos dois gráficos, poderiam ser unificados:
# +
fig2, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), sharey=True, facecolor=(1,1,1))
# removendo espaço horizontal entre os plots
fig2.subplots_adjust(wspace=0)
ax1 = axarr[0]
ax2 = axarr[1]
ticks_posicao = plticker.MultipleLocator(base=10)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.scatter(dados['2008_Beijing'], marcas, label='2008 - Beijing', color='red')
ax2.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin', color='blue')
ax1.set_ylabel('Posição / m')
ax1.yaxis.set_major_locator(ticks_posicao)
for ax in axarr:
ax.set_xlabel('Tempo / s')
ax.xaxis.set_major_locator(ticks_tempo)
ax.legend()
fig2.suptitle('Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Não gosto muito dessa forma, mas deixo aí para conhecimento. Se for colocar gráfico em páginas web, lembre-se que muitos veem pelo celular, então é melhor deixar os eixos separados para facilitar aqueles que usam zoom para conseguir enxergar os gráficos separadamente. Unificar eixos costuma ser uma opção melhor em trabalhos feitos para ler em computador ou em papel.
# # Como "ligar os pontos"?
# Repare que, por enquanto, apenas plotamos os pontos referentes a cada posição de Bolt no tempo em cada prova. Mas, como ligar esses pontos? Ou, aqueles mais conhecedores de Matplotlib devem estar se perguntando por que não usei o método `plot`:
# +
# ERRADO!!
plt.xlabel('Tempo / s')
plt.ylabel('Posição / m')
plt.title('Usain Bolt - Mundial de Berlin - 2009')
plt.plot(dados['2009_Berlin'], marcas, marker='o')
plt.show()
# -
# Ora, é simples saber o problema com o gráfico acima. Reparou no que o método fez? Ele simplesmente ligou os pontos, bem parecido com o que você fazia bem criancinha naqueles livros de desenho com pontos para ligar e formar desenhos. Mas, depois, quando ficou uma criancinha um pouco mais esperta, você começou a perceber que, para o desenho ficar esteticamente mais aceitável, nem sempre era para ligar com retas, né?
#
# Então, aqui não é um desenho de ligar pontos mas a ideia é mesma. Será que é para ligar com retas? E aqui o parâmetro obviamente não é estético e, sim, uma análise lógica e física da situação.
# Logicamente que Bolt não correu com velocidade constante durante toda a prova. Especialmente na arrancada inicial, onde ele saiu do repouso (velocidade zero), certamente acelerou utilizando toda a explosão muscular possível até atingir um máximo de velocidade e tentou manter esse máximo até o final da prova, mas pode não ter conseguido. Aliás vamos ver as provas.
#
# No vídeo da prova de 2008, fica perceptível que ele já comemorava nos metros finais:
# +
from IPython.display import YouTubeVideo
YouTubeVideo('93dC0o2aHto', width=600, height=300)
# -
# Em 2009 não comemorou com antecedência, apenas olhou para o cronômetro nas passadas finais:
YouTubeVideo('3nbjhpcZ9_g', width=600, height=300)
# É de se esperar que a comemoração em 2008 tenha diminuído um pouco a velocidade.
#
# E o que velocidade tem a ver com nosso gráfico de posição? Ora, velocidade é a variação da posição no tempo. Logo, mais pontos de posição seriam úteis para um melhor estudo da velocidade. Aqui entra o conceito de interpolação.
# ## Interpolação
# Precisamos estimar pontos intermediários aos pontos experimentais que temos. O nome formal disso é interpolação. Não confunda com regressão. Em uma regressão, tentamos verificar qual função matemática melhor descreve um determinado conjunto de dados, não necessariamente passando por pontos desses dados. Na interpolação você passa pelos pontos.
#
# Para os fins desse artigo, essa definição de interpolação basta. Caso queira mais rigor, veja [esse artigo sobre interpolação](https://en.wikipedia.org/wiki/Interpolation), [esse sobre regressão](https://en.wikipedia.org/wiki/Regression_analysis) e [essa discussão](https://stats.stackexchange.com/questions/33659/how-is-interpolation-related-to-the-concept-of-regression/) sobre a diferença entre os dois conceitos.
# Para fazer a interpolação utilizaremos a biblioteca [SciPy](https://www.scipy.org/scipylib/index.html) que possui rotinas numéricas para estatística, álgebra linear, problemas de otimização, regressão e interpolação.
# O assunto é tão extenso que uma [olhada na documentação](https://docs.scipy.org/doc/scipy/reference/interpolate.html) mostra a existência de diversos métodos de interpolação. Mas vamos por partes.
# Primeiro vamos mostrar que realmente o método `plot` do Matplotlib realiza uma interpolação linear entre cada par de pontos. Vamos utilizar o pacote [interp1d](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html#scipy.interpolate.interp1d) do SciPy.
from scipy.interpolate import interp1d
# Vamos então interpolar linearmente nossos pontos. Para tornar o artigo menos longo, farei apenas para os dados de 2009:
# Linear interpolator of position (marcas) as a function of time for the 2009 race;
# fill_value='extrapolate' allows evaluation outside the measured time range.
linear_2009_berlin = interp1d(dados['2009_Berlin'], marcas, kind='linear', fill_value='extrapolate')
# O `kind='linear'` é auto-explicativo. O parâmetro `fill_value` resolve o que fazer caso sejam passados valores para a função resultante que estejam fora do intervalo de interpolação. No caso, solicitei que faça extrapolação.
# Agora que temos nossa função interpoladora, precisamos passar valores de x, nosso caso tempo, para ela. Vamos criar um array (vetor) de tempos espaçados de 0,1 s:
# Evaluation grid: 0 s up to (but excluding) 9.7 s, in 0.1 s steps.
tempo = np.arange(0, 9.7, 0.1)
# Fazendo o gráfico:
# +
# Scatter the measured 10 m splits and overlay the linear interpolation,
# showing it matches what plt.plot draws between points.
plt.xlabel('Tempo / s')
plt.ylabel('Posição / m')
# Fix: the title contained a '<NAME>' placeholder; use the athlete's name,
# consistent with every other figure title in this notebook.
plt.title('Usain Bolt - Mundial de Berlin - 2009')
plt.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin')
plt.plot(tempo, linear_2009_berlin(tempo), label='Linear')
plt.legend()
plt.show()
# -
# É o mesmo gráfico gerado pelo método `plot`.
# Tudo bem, mostramos que foi feito realmente uma interpolação linear entre cada par de pontos. Mas o que devemos fazer então?
# Aqui cabe destacar que há várias formas de abordagem quando se trata de interpolação. Uma forma de abordagem é procurar um [polinômio](https://en.wikipedia.org/wiki/Polynomial_interpolation) que passe por todos os pontos. Dados *n* pontos, há um polinômio de grau *n-1* que interpola tais pontos. Essa abordagem é computacionalmente cara e pode exibir alguns artefatos oscilatórios nos pontos extremos, o que é conhecido como [fenômeno de Runge](https://en.wikipedia.org/wiki/Runge%27s_phenomenon).
#
# [Essa animação interativa](https://www.geogebra.org/m/ddxsk6k8) ilustra bem tal fenômeno para aqueles que tiveram dificuldade de visualizar o escrito acima.
#
# Uma outra abordagem é fazer uma interpolação por partes (também chamada de interpolação por polinômios seccionados), ou seja, ao invés de buscar uma função única que passe por todos os pontos, divide-se os pontos em intervalos. Cada intervalo é interpolado e anexado ao intervalo seguinte de maneira a obter uma curva contínua.
# ## Splines
# Dentre as formas de interpolação por partes, destaca-se a chamada de [spline](https://en.wikipedia.org/wiki/Spline_interpolation). A vantagem é que boas interpolações são conseguidas com polinômios de baixa ordem, evitando o fenômeno de Runge.
# Um bom ponto de partida é começar com splines cúbicos, ou seja, utilizando polinômios de grau 3 em cada intervalo. É um grau baixo e garante continuidade da função até sua derivada de segunda ordem, algo que será importante para gente mais adiante. Cabe destacar que é um ponto de partida comum quando se tem pouco conhecimento do comportamento dos dados, mas não é uma bala de prata, sempre busque conhecer ao máximo o contexto de seus dados.
# Mesmo dentre os splines cúbicos há [diferentes tipos](https://en.wikipedia.org/wiki/Natural_cubic_spline), a depender das condições de contorno, mas que não entrarei no mérito aqui nesse artigo. Vamos utilizar um método específico do SciPy para spline cúbicos o [CubicSpline](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html). Como é possível ver na documentação linkada, há como alterar o tipo (condições de contorno), mas irei manter o padrão aqui nesse artigo.
from scipy.interpolate import CubicSpline
# Vamos então obter os splines para nossos dados:
# Cubic-spline interpolants of position vs. time for each race
# (CubicSpline default boundary condition: 'not-a-knot').
cs_2009_berlin = CubicSpline(dados['2009_Berlin'], marcas)
cs_2008_beijing = CubicSpline(dados['2008_Beijing'], marcas)
# +
# Side-by-side comparison of the two races: measured splits (scatter)
# plus the cubic-spline interpolation (line).
fig3, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
# Tick spacing: 10 m on the position axis, 1 s on the time axis.
ticks_posicao = plticker.MultipleLocator(base=10)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.scatter(dados['2008_Beijing'], marcas, label='2008 - Beijing', color='red')
ax1.plot(tempo, cs_2008_beijing(tempo), label='Spline Cúbico', color='red')
ax2.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin', color='blue')
ax2.plot(tempo, cs_2009_berlin(tempo), label='Spline Cúbico', color='blue')
# label the axes and place the tick marks at the desired interval
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Posição / m')
    ax.yaxis.set_major_locator(ticks_posicao)
    ax.legend()
# main title
fig3.suptitle('Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Finalmente, parece que agora temos um gráfico que nos permite continuar nossas explorações
# # Obtendo a velocidade e a aceleração de Bolt
# ## Velocidade média
# Agora que temos o que parece um modelo mais aceitável de como varia a posição de Bolt no tempo podemos calcular sua velocidade. Mas precisamos esclarecer que tipo de velocidade estamos querendo determinar. Afinal, se for apenas a velocidade média, isso já poderia ter sido calculado posto que sabemos o total de deslocamento, 100 metros, e o tempo que durou o deslocamento, o tempo final da prova, que é a última posição de cada array de tempos:
# +
# Mean velocity = total displacement (100 m) / finishing time,
# where the finishing time is the last entry of each time array.
velocidade_media_beijing = 100 / dados['2008_Beijing'][-1]
print(f'Velocidade média em Beijing (2008): {velocidade_media_beijing:.2f} m/s')
velocidade_media_berlin = 100 / dados['2009_Berlin'][-1]
print(f'Velocidade média em Berlin (2009): {velocidade_media_berlin:.2f} m/s')
# -
# Já escrevi [aqui sobre o pacote pint](https://cienciaprogramada.com.br/2020/09/python-unidades-cerveja-pint/) que permite lidar com unidades e fazer conversões. Vamos usar esse pacote para converter os valores para quilômetros por hora, unidade que estamos mais acostumados:
# +
import pint
# Unit registry; fmt_locale formats quantities using the pt_BR locale.
ureg = pint.UnitRegistry(fmt_locale='pt_BR')
Q_ = ureg.Quantity
# Wrap the plain floats as quantities in m/s so they can be unit-converted below.
velocidade_media_beijing = Q_(velocidade_media_beijing, 'm/s')
velocidade_media_berlin = Q_(velocidade_media_berlin, 'm/s')
# -
print(f"Velocidade média em Beijing (2008): {velocidade_media_beijing.to('km/hour'):.2f}")
print(f"Velocidade média em Berlin (2009): {velocidade_media_berlin.to('km/hour'):.2f}")
# Impressionante, não?
#
# ## Velocidade instantânea
#
# Mas o que seria mais interessante é estimar a velocidade em cada momento da prova, a chamada velocidade instantânea. Afinal, como já discutimos, a velocidade de Bolt durante as provas não é constante. Para obter tais médias, certamente em alguns momentos Bolt correu *acima* desses valores. Como podemos obter velocidades instantâneas?
#
# Utilizaremos um pouco de cálculo aqui e o fato de que agora temos uma curva unindo os pontos experimentais. Para a velocidade média, pegamos dois pontos distantes, o início e o fim da prova. Agora, imagine pegar pontos cada vez mais próximos, ou seja, diminuindo cada vez mais o intervalo de tempo entre os pontos. No limite em que esse intervalo tender a zero, teremos a velocidade instantânea.
#
# Fica mais fácil visualizar com uma imagem. Observe a animação abaixo. Nela temos uma curva de uma função qualquer, a marcação de dois pontos, **A** e **B**, e a visualização do triângulo formado para obter a variação da quantidade do eixo *y* e a do eixo *x* com auxílio de um terceiro ponto **C**. No caso de nossos gráficos, variação da distância e do tempo, respectivamente. Na animação, os pontos vão se aproximando cada vez mais. Observe que, no limite em que os pontos se tornam mais próximos, a hipotenusa do triângulo, inicialmente uma [reta secante](https://en.wikipedia.org/wiki/Secant_line) da curva, torna-se uma reta tangente no ponto de virtual encontro **A**. A reta azul é a tangente no ponto **A**, apresentada justamente para mostrar como a secante se aproxima cada vez mais de uma tangente.
# 
# Assim, matematicamente, a velocidade instantânea em um dado momento é a inclinação da reta tangente à curva do gráfico posição vs tempo no momento desejado. Em [cálculo](https://en.wikipedia.org/wiki/Calculus), isso é o mesmo que dizer que a velocidade instantânea é a [derivada](https://en.wikipedia.org/wiki/Derivative) da posição com relação ao tempo em um dado ponto do gráfico.
#
# Eu havia citado anteriormente que o fato dos splines cúbicos garantirem que a curva obtida possui derivadas primeira e segunda era importante. Agora sabemos o porquê.
# O bom do `CubicSpline` do SciPy é que é muito simples solicitar a primeira derivada, bastando passar o parâmetro `1` para o spline obtido. Veja o código abaixo:
# +
# Instantaneous velocity: first derivative of each position spline
# (CubicSpline evaluates the nth derivative via its second argument).
fig4, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
ticks_velocidade = plticker.MultipleLocator(base=2)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.plot(tempo, cs_2008_beijing(tempo, 1), label='Spline Cúbico - 1ª derivada', color='red')
ax2.plot(tempo, cs_2009_berlin(tempo, 1), label='Spline Cúbico - 1ª derivada', color='blue')
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Velocidade / m/s')
    ax.yaxis.set_major_locator(ticks_velocidade)
    ax.legend()
fig4.suptitle('Velocidade - Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Perceba que obtivemos perfis condizentes com o que esperávamos. Na prova de 2008, a velocidade cai próximo ao segundo final de prova, quando Bolt já visivelmente comemorava. Na de 2009, o jamaicano é mais consistente na velocidade.
#
# Podemos verificar as velocidades máximas que nosso modelo fornece para cada caso:
# +
# Maximum of the spline-derived velocity, sampled only on the 0.1 s grid —
# the spline's true maximum may fall between grid points.
velocidade_max_2008_cs = Q_(max(cs_2008_beijing(tempo, 1)), 'm/s')
velocidade_max_2009_cs = Q_(max(cs_2009_berlin(tempo, 1)), 'm/s')
print(f'{velocidade_max_2008_cs:.2f}')
print(f'{velocidade_max_2009_cs:.2f}')
# -
# Em quilômetros por hora:
print(f"{velocidade_max_2008_cs.to('km/hour'):.2f}")
print(f"{velocidade_max_2009_cs.to('km/hour'):.2f}")
# Já dá para multá-lo em perímetros urbanos. Anotaram a placa?
#
# Seguindo a lógica de tudo que mostramos até agora, podemos obter uma curva para aceleração. A aceleração é a variação da velocidade no tempo, o que é o mesmo que dizer que a aceleração é a derivada primeira da velocidade em relação ao tempo. Ou, a derivada segunda da posição em relação ao tempo. Daí:
# +
# Acceleration: second derivative of the position spline with respect to time.
fig5, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
ticks_velocidade = plticker.MultipleLocator(base=1)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.plot(tempo, cs_2008_beijing(tempo, 2), label='Spline Cúbico - 2ª derivada', color='red')
ax2.plot(tempo, cs_2009_berlin(tempo, 2), label='Spline Cúbico - 2ª derivada', color='blue')
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Aceleração / m/s²')
    ax.yaxis.set_major_locator(ticks_velocidade)
    ax.legend()
fig5.suptitle('Aceleração - Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Os gráficos também estão relativamente bem coerentes, uma grande aceleração inicial e depois mantendo uma aceleração positiva buscando aumentar a velocidade. Em 2008, uma aceleração negativa bem próximo ao final, indicando desaceleração perto da linha de chegada.
# Embora estejam coerentes, precisamos ter cuidado com nossos modelos. Nos gráficos de aceleração observamos alterações bruscas nos perfis. Isso é um pouco estranho, lembre-se que é um humano, não há um pedal de acelerador ou freio! Talvez fosse de se esperar variações menos bruscas.
#
# # Melhorando nosso modelo
#
# Obviamente que diversas pessoas e empresas fazem estudos sobre o desempenho dos atletas. Inclusive há todo um ramo de estudos chamado [biomecânica esportiva](https://en.wikipedia.org/wiki/Sports_biomechanics) voltado para isso. Analisando estudos da área, vemos que usualmente se utilizam splines, mas só que de grau 5 e não 3. Em inglês, o termo é *quintic splines*, sendo *quintic* relativo ao grau 5. Não por outro motivo que há uma [empresa chamada Quintic](https://www.quinticsports.com/about/) que justamente trabalha com softwares de biomecânica esportiva. Alguns livros de biomecânica, [como esse](https://www.amazon.com/Introduction-Sports-Biomechanics-Analysing-Movement-ebook-dp-B001ROAJR2/dp/B001ROAJR2/ref=mt_other?_encoding=UTF8&me=&qid=), possuem trechos dedicados ao estudo de splines de grau 5 dada a importância na área.
# Logo, podemos verificar como ficaria uma interpolação com splines de quinto grau. O SciPy possui o `UnivariateSpline` onde podemos passar um parâmetro para indicar o grau do spline desejado:
from scipy.interpolate import UnivariateSpline
# Degree-5 (quintic) splines of position vs. time.
# NOTE(review): UnivariateSpline uses a nonzero default smoothing factor `s`,
# so these are smoothing splines that need not pass exactly through the data
# points — that smoothing is what yields the gentler curves discussed below.
# Pass s=0 if exact interpolation is desired.
s_grau5_2008_beijing = UnivariateSpline(dados['2008_Beijing'], marcas, k=5)
s_grau5_2009_berlin = UnivariateSpline(dados['2009_Berlin'], marcas, k=5)
# Vamos fazer os gráficos de posição, velocidade e aceleração *versus* tempo:
# +
# Position vs. time with the degree-5 splines, same layout as fig3.
fig6, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
ticks_posicao = plticker.MultipleLocator(base=10)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.scatter(dados['2008_Beijing'], marcas, label='2008 - Beijing', color='red')
ax1.plot(tempo, s_grau5_2008_beijing(tempo), label='Spline grau 5', color='red')
ax2.scatter(dados['2009_Berlin'], marcas, label='2009 - Berlin', color='blue')
ax2.plot(tempo, s_grau5_2009_berlin(tempo), label='Spline grau 5', color='blue')
# label the axes and place the tick marks at the desired interval
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Posição / m')
    ax.yaxis.set_major_locator(ticks_posicao)
    ax.legend()
# main title
fig6.suptitle('Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# +
# Velocity from the degree-5 splines (first derivative).
fig7, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
ticks_velocidade = plticker.MultipleLocator(base=2)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.plot(tempo, s_grau5_2008_beijing(tempo, 1), label='Spline grau 5 - 1ª derivada', color='red')
ax2.plot(tempo, s_grau5_2009_berlin(tempo, 1), label='Spline grau 5 - 1ª derivada', color='blue')
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Velocidade / m/s')
    ax.yaxis.set_major_locator(ticks_velocidade)
    ax.legend()
fig7.suptitle('Velocidade - Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# +
# Acceleration from the degree-5 splines (second derivative).
fig8, axarr = plt.subplots(nrows=1, ncols=2, figsize=(10,5), constrained_layout=True, facecolor=(1,1,1))
ax1 = axarr[0]
ax2 = axarr[1]
ticks_velocidade = plticker.MultipleLocator(base=1)
ticks_tempo = plticker.MultipleLocator(base=1)
ax1.plot(tempo, s_grau5_2008_beijing(tempo, 2), label='Spline grau 5 - 2ª derivada', color='red')
ax2.plot(tempo, s_grau5_2009_berlin(tempo, 2), label='Spline grau 5 - 2ª derivada', color='blue')
for ax in axarr:
    ax.set_xlabel('Tempo / s')
    ax.xaxis.set_major_locator(ticks_tempo)
    ax.set_ylabel('Aceleração / m/s²')
    ax.yaxis.set_major_locator(ticks_velocidade)
    ax.legend()
fig8.suptitle('Aceleração - Usain Bolt - Beijing (2008) vs Berlin (2009)')
plt.show()
# -
# Repare que agora as curvas de aceleração são mais suaves. Mais coerente com uma variação gradual do atleta. Inclusive, contrariamente ao spline cúbico anterior, o gráfico de 2009 agora mostra uma leve desaceleração ao final.
#
# Vamos ver como a mudança de interpolação mudou a estimativa de velocidade máxima:
# +
# Maximum velocity under the quintic smoothing splines, again sampled
# only on the 0.1 s grid.
velocidade_max_2008_s5grau = Q_(max(s_grau5_2008_beijing(tempo, 1)), 'm/s')
velocidade_max_2009_s5grau = Q_(max(s_grau5_2009_berlin(tempo, 1)), 'm/s')
print(f'{velocidade_max_2008_s5grau:.2f}')
print(f'{velocidade_max_2009_s5grau:.2f}')
# -
# Como as curvas foram "suavizadas", não mais tantos máximos e mínimos no gráfico. Será mesmo que Bolt atingiu uma menor velocidade máxima em 2009 quando comparado a 2008?
# # Será que nossos modelos estão certos?
# Não. Modelos tentam descrever a realidade, não são a realidade. Não faz muito sentido dizer "certo" ou "errado", podemos avaliar com base nos dados que temos se estão coerentes, se fazem predições que tenham sentido. Ou seja, as discussões que estamos fazendo a cada etapa aqui nesse artigo. Eles podem ser, no máximo, tão bons quanto os dados de entrada — o famoso [GIGO - *garbage in, garbage out*](https://en.wikipedia.org/wiki/Garbage_in,_garbage_out), em português menos agressivo, dados de entradas ruins, saídas ruins. Como só temos dados de posição a cada 10 metros, tudo que vier dessa análise será limitado por esses dados de entrada.
#
# No caso da velocidade máxima, por exemplo, se ela foi atingida em algum ponto intermediário, obviamente que a forma de interpolação para obter esses pontos terá grande efeito no resultado.
# ## Obtendo dados experimentais de velocidade
# O desempenho de Bolt nas Olimpíadas de 2008, quando quebrou o recorde mundial dos 100 m pela primeira vez, chamou tanta a atenção que mudou a forma de preparo da comunidade científica para o Mundial de 2009. Muitos [pensavam qual seria o tempo](https://aapt.scitation.org/doi/pdf/10.1119/1.3033168) se ele não tivesse desacelerado para comemorar nos metros finais e havia muito interesse em entender o desempenho do jamaicano.
# Assim, foram utilizadas câmeras especiais, medidores de velocidade e muitos outros equipamentos para estudar a biomecânica dos atletas da competição. O [artigo gerado](http://www.meathathletics.ie/devathletes/pdf/Biomechanics%20of%20Sprints.pdf) vale muito a leitura. Os detalhes são sensacionais.
#
# Um ponto interessante do artigo é que, com os medidores de velocidade utilizados, puderam estimar as velocidades médias dos atletas a cada 10 metros. Vamos importar esses dados e comparar com nossos modelos:
# !cat bolt_berlin_vel.csv
arquivo_berlin_velocidade = 'bolt_berlin_vel.csv'
# Structured array with two fields per row (a 10-char string and a float);
# names=True takes the field names from the CSV header line.
dados_berlin_velocidade = np.genfromtxt(arquivo_berlin_velocidade, delimiter=',', dtype=['U10', float], names=True)
dados_berlin_velocidade
# A velocidade máxima obtida foi de:
Q_(max(dados_berlin_velocidade['2009_Berlin']), 'm/s')
# Mas lembre-se, essa velocidade é resultado de uma média feita a cada 10 metros. Já vou explicar mais sobre isso adiante. A real velocidade máxima de Bolt na prova, de acordo com o artigo citado, foi de 12,34 m/s quando já havia percorrido 67,90 m.
# Vamos comparar esses dados com nosso modelo feito com spline cúbico:
# +
# Compare the cubic-spline velocity model against the per-10 m mean
# velocities reported in the biomechanics study.
plt.xlabel('Tempo / s')
plt.ylabel('Velocidade / m/s')
plt.title('Usain Bolt - Mundial de Berlin - 2009')
plt.scatter(dados['2009_Berlin'], dados_berlin_velocidade['2009_Berlin'], label='Vel. média experimental')
plt.plot(tempo, cs_2009_berlin(tempo, 1), label='Spline cúbico')
plt.legend()
plt.show()
# -
# E agora com o spline de quinto grau:
# +
# Same comparison, now with the degree-5 spline velocity model.
plt.xlabel('Tempo / s')
plt.ylabel('Velocidade / m/s')
plt.title('Usain Bolt - Mundial de Berlin - 2009')
plt.scatter(dados['2009_Berlin'], dados_berlin_velocidade['2009_Berlin'], label='Vel. média experimental')
plt.plot(tempo, s_grau5_2009_berlin(tempo, 1), label='spline grau 5')
plt.legend()
plt.show()
# -
# E aí? Alguma conclusão? Difícil dizer. Ambos falham nos tempos iniciais. O modelo de quinto grau apresenta melhor a queda de velocidade no trecho final. Mas devemos ficar atentos se a comparação faz sentido.
#
# O modelo de interpolação gera pontos intermediários nos dados de posição, cria o spline de posição, que é então derivado para obter a curva de velocidade. A velocidade tabelada no artigo é, como já dito, uma média a cada intervalo de 10 metros feita com base nas medições de velocidade. Vamos ver um dos gráficos do artigo para entender:
# 
# Infelizmente a resolução da figura disponível não é das melhores. Mas temos a velocidade no eixo vertical e a posição no horizontal. A linha azul é resultado dos dados experimentais dos medidores e, a vermelha, uma linha média desses dados.
# Consegue entender por que utilizaram uma média? A velocidade oscila muito, afinal, ao correr ocorre o impacto rítmico de cada pé, em cada passada. Inclusive o próprio artigo e outras referências [como esta](http://www.johk.pl/files/15mackala.pdf) e [essa](https://hrcak.srce.hr/ojs/index.php/kinesiology/article/view/5579#:~:text=THE%20KINEMATICS%20OF%20USAIN%20BOLT'S%20MAXIMAL%20SPRINT%20VELOCITY,-Milan%20%C4%8Coh%20University&text=Despite%20a%20relatively%20slow%20reaction,and%204.36%20strides%2Fs%20frequency) mostram que o grande diferencial do Bolt é na sua passada. A leitura dos artigos também deixa clara as dificuldades experimentais de medir velocidade, afinal se trata de corpos e não de pontos, precisando se determinar a posição do corpo que vai ser monitorada, a influência do tempo de reação do atleta nos resultados, especialmente nos primeiros metros, dentre outros fatores.
# ## O que poderia ser melhorado?
# Espero que você tenha entendido que há muitos fatores envolvidos e que isso aqui é apenas um exercício para demonstrar a linguagem Python, ferramentas matemáticas disponíveis e um pouco de cálculo e física. Mas vamos terminar o artigo pensando em como poderíamos melhorar nossa análise.
#
# Analise novamente os gráficos de velocidade obtidos. Há algo estranho? Bom, não sei se reparou, mas os gráficos não começam em zero. Isso pode ser resolvido forçando uma condição de contorno onde a primeira derivada deve ter valor zero no ponto inicial. Isso é possível de ser feito e já havia citado que há diferentes tipos de spline a depender das condições de contorno. Uma leitura na documentação dos métodos de spline mostra como isso poderia ser feito. Escreva na seção de comentários caso queira um artigo onde isso é feito ou caso você tenha feito e queira mostrar.
#
# Mas não é tão simples assim, afinal há o tempo de reação do atleta. A velocidade é nula até que o atleta comece a se movimentar. Cabe a você considerar se isso irá afetar muito a análise ou não. E como seria implementar isso. Ah, vai tentar descontar a influência do vento ou não?
#
# Outra abordagem é partir de modelos matemáticos previamente definidos na literatura. Recomendo a leitura [deste artigo](https://jeb.biologists.org/content/jexbio/208/14/2809.full.pdf) e também [desse aqui](https://staff.fnwi.uva.nl/a.j.p.heck/Research/art/ModelingSprinting.pdf). Assim, as condições de contorno poderiam ser as equações propostas nesses artigos e caberia verificar se representam bem as situações ou não.
#
# Por fim, lembra mais acima que escrevi sobre o método de interpolação polinomial? Quando se busca um polinômio que passa por todos os pontos? Outras pessoas já fizeram estudos nesse sentido: como [aqui](https://web.archive.org/web/20141006173930/http://rcuksportscience.wikispaces.com/file/view/Analysing+men+100m+Nspire.pdf) e [aqui](https://education.ti.com/sites/UK/downloads/pdf/TI-Nspire_STEM_MathsMotion.pdf). Veja os resultados e compare com a abordagem que utilizamos. Busque também reproduzir os estudos e tire suas conclusões.
# # Conclusão
#
# Ufa, não foi fácil fazer esse artigo. Muita leitura, matemática, esporte, física e Python. Mas o objetivo era mostrar ferramentas matemáticas disponíveis para a linguagem utilizando um problema real. Acredito ser muito mais produtivo que mostrar exemplos batidos que todo livro e site tem.
#
# E usar um problema real permite também discutir as limitações de métodos e modelos e como analisá-los criticamente. Infelizmente, enxergo que há alguns problemas na forma que são ensinados e utilizados hoje. A saber:
#
# - muitos confundem modelo com realidade;
# - há aplicação de ferramentas em contextos onde não se aplicam;
# - relacionado ao item anterior, há muitos usuários de métodos e modelos, mas poucos analistas e entendedores dos mesmos;
# - são pouco utilizados casos reais no ensino, de forma que não se exercita análise crítica. Dados de livros são sempre bonitinhos, né?
# - a divulgação/interpretação da mídia de estudos científicos e dos resultados de modelos é lastimável.
#
# A consequência disso tudo se relaciona ao que vivemos nesse ano de 2020, quando escrevo esse artigo. Afinal, boa parte do que vivemos esse ano foi consequência de predições feitas a partir de modelos lá no início do ano. [Lembra](https://g1.globo.com/bemestar/coronavirus/noticia/2020/03/27/sem-isolamento-e-acoes-contra-a-covid-19-brasil-pode-ter-ate-1-milhao-de-mortes-na-pandemia-diz-estudo.ghtml)? Obviamente que a doença é séria e não deve ser negligenciada, não é esse o ponto. Mas será que todos os cuidados foram tomados no modelo e no software do mesmo? [Parece que não](https://www.akitaonrails.com/2020/05/07/o-modelo-do-imperial-college-sobre-a-covid-19-pode-estar-errado). Será que pensaram nas consequências políticas e na vida e liberdade das pessoas? As previsões se confirmaram? Fica a reflexão.
#
# Caso queira receber notificação quando novos artigos e vídeos sobre ciência e programação forem lançados, nos acompanhe nas redes sociais linkadas no cabeçalho e no rodapé da página. Comente o que achou abaixo e compartilhe com mais gente interessada em ciência.
#
# Até a próxima.
| bolt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Determine a integral dada</b>
# $1. \int xe^{-x}dx$
# <b>Escolhendo $v$ e $du$</b>
# $u = x$
# $du = 1$
# $dv = e^{-x}$
# $v = -e^{-x}$
# <b>Substituindo na fórmula</b>
# $\int xe^{-x}dx \rightarrow \int udv$
# <b>$ \int udv = uv - \int vdu$</b>
# $\int udv = -e^{-x}x - \int -e^{-x}$
# <b>Integrando $\int -e^{-x}$</b>
# $\int -e^{-x} = e^{-x}$
# $\int udv = -e^{-x}x - e^{-x} + C$
| Problemas 6.1/01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use DICOMweb™ Standard APIs with Python
# This tutorial uses Python to demonstrate working with the Medical Imaging Server for DICOM.
#
# For the tutorial we will use the DICOM files here: [Sample DICOM files](../dcms). The file name, studyUID, seriesUID and instanceUID of the sample DICOM files is as follows:
#
# | File | StudyUID | SeriesUID | InstanceUID |
# | --- | --- | --- | ---|
# |green-square.dcm|1.2.826.0.1.3680043.8.498.13230779778012324449356534479549187420|1.2.826.0.1.3680043.8.498.45787841905473114233124723359129632652|1.2.826.0.1.3680043.8.498.12714725698140337137334606354172323212|
# |red-triangle.dcm|1.2.826.0.1.3680043.8.498.13230779778012324449356534479549187420|1.2.826.0.1.3680043.8.498.45787841905473114233124723359129632652|1.2.826.0.1.3680043.8.498.47359123102728459884412887463296905395|
# |blue-circle.dcm|1.2.826.0.1.3680043.8.498.13230779778012324449356534479549187420|1.2.826.0.1.3680043.8.498.77033797676425927098669402985243398207|1.2.826.0.1.3680043.8.498.13273713909719068980354078852867170114|
#
# > NOTE: Each of these files represent a single instance and are part of the same study. Also green-square and red-triangle are part of the same series, while blue-circle is in a separate series.
#
# ## Prerequisites
#
# In order to use the DICOMWeb™ Standard APIs, you must have an instance of the Medical Imaging Server for DICOM deployed. If you have not already deployed the Medical Imaging Server, [Deploy the Medical Imaging Server to Azure](../quickstarts/deploy-via-azure.md).
#
# Once you have deployed an instance of the Medical Imaging Server for DICOM, retrieve the URL for your App Service:
#
# 1. Sign into the [Azure Portal](https://portal.azure.com/).
# 1. Search for **App Services** and select your Medical Imaging Server for DICOM App Service.
# 1. Copy the **URL** of your App Service.
#
# For this code, we'll be accessing an unsecured dev/test service. Please don't upload any private health information (PHI).
#
# ## Working with the Medical Imaging Server for DICOM
# The DICOMweb™ standard makes heavy use of `multipart/related` HTTP requests combined with DICOM specific accept headers. Developers familiar with other REST-based APIs often find working with the DICOMweb™ standard awkward. However, once you have it up and running, it's easy to use. It just takes a little finagling to get started.
# ### Import the appropriate Python libraries
#
# First, import the necessary Python libraries.
#
# We've chosen to implement this example using the synchronous `requests` library. For asynchronous support, consider using `httpx` or another async library. Additionally, we're importing two supporting functions from `urllib3` to support working with `multipart/related` requests.
import requests
import pydicom
from pathlib import Path
from urllib3.filepost import encode_multipart_formdata, choose_boundary
# ### Configure user-defined variables to be used throughout
# Replace all variable values wrapped in { } with your own values. Additionally, validate that any constructed variables are correct. For instance, `base_url` is constructed using the default URL for Azure App Service. If you're using a custom URL, you'll need to override that value with your own.
# +
# Placeholder configuration — replace the {…} values with your own deployment's.
dicom_server_name = "{server-name}"
path_to_dicoms_dir = "{path to the folder that includes green-square.dcm and other dcm files}"
version = "{version of REST API}"
# Default Azure App Service URL pattern; override base_url if you use a custom domain.
base_url = f"https://{dicom_server_name}.azurewebsites.net/v{version}"
base_url
# +
# NOTE(review): this cell overrides the placeholders above with the author's
# own server name and local Windows path — edit these values (or delete the
# cell) before reusing the notebook.
dicom_server_name = "sjbdicomtest"
path_to_dicoms_dir = "c:\\githealth\\dicom-server\\docs\\dcms\\"
version = "1"
base_url = f"https://{dicom_server_name}.azurewebsites.net/v{version}"
base_url
# -
# Identifiers of the bundled sample DICOM files (see the table at the top of the notebook).
study_uid = "1.2.826.0.1.3680043.8.498.13230779778012324449356534479549187420"; #StudyInstanceUID for all 3 examples
series_uid = "1.2.826.0.1.3680043.8.498.45787841905473114233124723359129632652"; #SeriesInstanceUID for green-square and red-triangle
instance_uid = "1.2.826.0.1.3680043.8.498.47359123102728459884412887463296905395"; #SOPInstanceUID for red-triangle
# ### Create supporting methods to support `multipart\related`
# The `Requests` library (and most Python libraries) do not work with `multipart\related` in a way that supports DICOMweb™. Because of this, we need to add a few methods to support working with DICOM files.
#
# `encode_multipart_related` takes a set of fields (in the DICOM case, these are generally Part 10 dcm files) and an optional user defined boundary. It returns both the full body, along with the content_type, which can be used
#
def encode_multipart_related(fields, boundary=None):
    """Encode *fields* as a multipart/related HTTP request body.

    Returns a ``(body, content_type)`` tuple, where *content_type* is a
    ``multipart/related`` header value carrying the boundary actually used
    (a fresh one from ``choose_boundary()`` when none is supplied).
    """
    chosen = boundary if boundary is not None else choose_boundary()
    encoded_body, _ = encode_multipart_formdata(fields, chosen)
    return encoded_body, str('multipart/related; boundary=%s' % chosen)
# ### Create a `requests` session
# Create a `requests` session, called `client`, that will be used to communicate with the Medical Imaging Server for DICOM.
# Shared HTTP session (connection pooling) used for all requests below.
# requests.Session() is the documented spelling; requests.session() is a
# legacy lowercase alias that returns the same object.
client = requests.Session()
# ## Store DICOM Instances (STOW)
#
# The following examples highlight persisting DICOM files.
# ### Store-instances-using-multipart/related
#
# This demonstrates how to upload a single DICOM file. This uses a bit of a Python hack to pre-load the DICOM file (as bytes) into memory. By passing an array of files to the fields parameter of `encode_multipart_related`, multiple files can be uploaded in a single POST. This is sometimes used to upload a complete Series or Study.
#
# _Details:_
#
# * Path: ../studies
# * Method: POST
# * Headers:
# * `Accept: application/dicom+json`
# * `Content-Type: multipart/related; type="application/dicom"`
# * Body:
# * `Content-Type: application/dicom` for each file uploaded, separated by a boundary value
#
# > Some programming languages and tools behave differently. For instance, some require you to define your own boundary. For those, you may need to use a slightly modified Content-Type header. The following have been used successfully.
# > * `Content-Type: multipart/related; type="application/dicom"; boundary=ABCD1234`
# > * `Content-Type: multipart/related; boundary=ABCD1234`
# > * `Content-Type: multipart/related`
#
#
# +
#upload blue-circle.dcm
filepath = Path(path_to_dicoms_dir).joinpath('blue-circle.dcm')
# Hack. Need to open up and read through file and load bytes into memory
with open(filepath,'rb') as reader:
    rawfile = reader.read()
files = {'file': ('dicomfile', rawfile, 'application/dicom')}
#encode as multipart_related
body, content_type = encode_multipart_related(fields = files)
headers = {'Accept':'application/dicom+json', "Content-Type":content_type}
url = f'{base_url}/studies'
# NOTE(review): verify=False disables TLS certificate verification — acceptable
# only against a dev/test server; remove it (or supply a CA bundle) in production.
response = client.post(url, body, headers=headers, verify=False)
response
# -
# ### Store-instances-for-a-specific-study
#
# This demonstrates how to upload a multiple DICOM files into the specified study. This uses a bit of a Python hack to pre-load the DICOM file (as bytes) into memory.
#
# By passing an array of files to the fields parameter of `encode_multipart_related`, multiple files can be uploaded in a single POST. This is sometimes used to upload a complete Series or Study.
#
# _Details:_
# * Path: ../studies/{study}
# * Method: POST
# * Headers:
# * `Accept: application/dicom+json`
# * `Content-Type: multipart/related; type="application/dicom"`
# * Body:
# * `Content-Type: application/dicom` for each file uploaded, separated by a boundary value
#
# +
filepath_red = Path(path_to_dicoms_dir).joinpath('red-triangle.dcm')
filepath_green = Path(path_to_dicoms_dir).joinpath('green-square.dcm')
# Hack. Need to open up and read through file and load bytes into memory
with open(filepath_red,'rb') as reader:
rawfile_red = reader.read()
with open(filepath_green,'rb') as reader:
rawfile_green = reader.read()
files = {'file_red': ('dicomfile', rawfile_red, 'application/dicom'),
'file_green': ('dicomfile', rawfile_green, 'application/dicom')}
#encode as multipart_related
body, content_type = encode_multipart_related(fields = files)
headers = {'Accept':'application/dicom+json', "Content-Type":content_type}
url = f'{base_url}/studies'
response = client.post(url, body, headers=headers, verify=False)
response
# -
# ### Store single instance (non-standard)
#
# This demonstrates how to upload a single DICOM file. This non-standard API endpoint simplifies uploading a single file as a byte array stored in the body of a request.
#
# _Details:_
# * Path: ../studies
# * Method: POST
# * Headers:
# * `Accept: application/dicom+json`
# * `Content-Type: application/dicom`
# * Body:
# * Contains a single DICOM file as binary bytes.
# +
#upload blue-circle.dcm
filepath = Path(path_to_dicoms_dir).joinpath('blue-circle.dcm')
# Hack. Need to open up and read through file and load bytes into memory
with open(filepath,'rb') as reader:
body = reader.read()
headers = {'Accept':'application/dicom+json', 'Content-Type':'application/dicom'}
url = f'{base_url}/studies'
response = client.post(url, body, headers=headers, verify=False)
response # response should be a 409 Conflict if the file was already uploaded abovin the above request
# -
# ## Retrieve DICOM Instances (WADO)
#
# The following examples highlight retrieving DICOM instances.
# ### Retrieve all instances within a study
#
# This retrieves all instances within a single study.
#
# _Details:_
# * Path: ../studies/{study}
# * Method: GET
# * Headers:
# * `Accept: multipart/related; type="application/dicom"; transfer-syntax=*`
#
# All three of the dcm files that we uploaded previously are part of the same study so the response should return all 3 instances. Validate that the response has a status code of OK and that all three instances are returned.
#
# +
url = f'{base_url}/studies/{study_uid}'
headers = {'Accept':'multipart/related; type="application/dicom"; transfer-syntax=*'}
response = client.get(url, headers=headers) #, verify=False)
response
# -
# ### Use the retrieved instances
# The instances are retrieved as binary bytes. You can loop through the returned items and convert the bytes into a file-like structure which can be read by `pydicom`.
# +
import requests_toolbelt as tb
from io import BytesIO
# Split the multipart/related response body into its individual parts.
mpd = tb.MultipartDecoder.from_response(response)
for part in mpd.parts:
    # Note that the headers are returned as binary (bytes keys/values)!
    print(part.headers[b'content-type'])
    # You can convert the binary body (of each part) into a pydicom DataSet
    # And get direct access to the various underlying fields
    dcm = pydicom.dcmread(BytesIO(part.content))
    print(dcm.PatientName)
    print(dcm.SOPInstanceUID)
# -
# ### Retrieve metadata of all instances in study
#
# This request retrieves the metadata for all instances within a single study.
#
# _Details:_
# * Path: ../studies/{study}/metadata
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# All three of the dcm files that we uploaded previously are part of the same study so the response should return the metadata for all 3 instances. Validate that the response has a status code of OK and that all the metadata is returned.
# +
url = f'{base_url}/studies/{study_uid}/metadata'
headers = {'Accept':'application/dicom+json'}
response = client.get(url, headers=headers) #, verify=False)
print(response)
response.json()
# -
# ### Retrieve all instances within a series
#
# This retrieves all instances within a single series.
#
# _Details:_
# * Path: ../studies/{study}/series/{series}
# * Method: GET
# * Headers:
# * `Accept: multipart/related; type="application/dicom"; transfer-syntax=*`
#
# This series has 2 instances (green-square and red-triangle), so the response should return both instances. Validate that the response has a status code of OK and that both instances are returned.
#
#
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}'
headers = {'Accept':'multipart/related; type="application/dicom"; transfer-syntax=*'}
response = client.get(url, headers=headers) #, verify=False)
response
# -
# ### Retrieve metadata of all instances in series
#
# This request retrieves the metadata for all instances within a single series.
#
# _Details:_
# * Path: ../studies/{study}/series/{series}/metadata
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# This series has 2 instances (green-square and red-triangle), so the response should return metadata for both instances. Validate that the response has a status code of OK and that both instances' metadata are returned.
#
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/metadata'
headers = {'Accept':'application/dicom+json'}
response = client.get(url, headers=headers) #, verify=False)
print(response)
response.json()
# -
# ### Retrieve a single instance within a series of a study
#
# This request retrieves a single instance.
#
# _Details:_
# * Path: ../studies/{study}/series{series}/instances/{instance}
# * Method: GET
# * Headers:
# * `Accept: application/dicom; transfer-syntax=*`
#
# This should only return the instance red-triangle. Validate that the response has a status code of OK and that the instance is returned.
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/instances/{instance_uid}'
headers = {'Accept':'application/dicom; transfer-syntax=*'}
response = client.get(url, headers=headers) #, verify=False)
response
# -
# ### Retrieve metadata of a single instance within a series of a study
#
# This request retrieves the metadata for a single instances within a single study and series.
#
# _Details:_
# * Path: ../studies/{study}/series{series}/instances/{instance}
# * Method: GET
# * Headers:
# * `Accept: application/dicom; transfer-syntax=*`
#
# This should only return the metadata for the instance red-triangle. Validate that the response has a status code of OK and that the metadata is returned.
#
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/metadata'
headers = {'Accept':'application/dicom+json'}
response = client.get(url, headers=headers) #, verify=False)
print(response)
response.json()
# -
# ### Retrieve one or more frames from a single instance
#
# This request retrieves one or more frames from a single instance.
#
# _Details:_
# * Path: ../studies/{study}/series{series}/instances/{instance}/frames/1,2,3
# * Method: GET
# * Headers:
# * `Accept: multipart/related; type="application/octet-stream"; transfer-syntax=1.2.840.10008.1.2.1` (Default) or
# * `Accept: multipart/related; type="application/octet-stream"; transfer-syntax=*` or
# * `Accept: multipart/related; type="application/octet-stream";`
#
# This should return the only frame from the red-triangle. Validate that the response has a status code of OK and that the frame is returned.
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/1'
headers = {'Accept':'multipart/related; type="application/octet-stream"; transfer-syntax=*'}
response = client.get(url, headers=headers) #, verify=False)
response
# -
# ## Query DICOM (QIDO)
#
# In the following examples, we search for items using their unique identifiers. You can also search for other attributes, such as PatientName and the like.
#
# > NOTE: Please see the [Conformance Statement](../resources/conformance-statement.md#supported-search-parameters) file for supported DICOM attributes.
# ### Search for studies
#
# This request searches for one or more studies by DICOM attributes.
#
# _Details:_
# * Path: ../studies?StudyInstanceUID={study}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 study and that response code is OK.
# +
url = f'{base_url}/studies'
headers = {'Accept':'application/dicom+json'}
params = {'StudyInstanceUID':study_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ### Search for series
#
# This request searches for one or more series by DICOM attributes.
#
# _Details:_
# * Path: ../series?SeriesInstanceUID={series}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 series and that response code is OK.
# +
url = f'{base_url}/series'
headers = {'Accept':'application/dicom+json'}
params = {'SeriesInstanceUID':series_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ### Search for series within a study
#
# This request searches for one or more series within a single study by DICOM attributes.
#
# _Details:_
# * Path: ../studies/{study}/series?SeriesInstanceUID={series}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 series and that response code is OK.
#
# +
url = f'{base_url}/studies/{study_uid}/series'
headers = {'Accept':'application/dicom+json'}
params = {'SeriesInstanceUID':series_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ### Search for instances
#
# This request searches for one or more instances by DICOM attributes.
#
# _Details:_
# * Path: ../instances?SOPInstanceUID={instance}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 instance and that response code is OK.
# +
url = f'{base_url}/instances'
headers = {'Accept':'application/dicom+json'}
params = {'SOPInstanceUID':instance_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ### Search for instances within a study
#
# This request searches for one or more instances within a single study by DICOM attributes.
#
# _Details:_
# * Path: ../studies/{study}/instances?SOPInstanceUID={instance}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 instance and that response code is OK.
#
# +
url = f'{base_url}/studies/{study_uid}/instances'
headers = {'Accept':'application/dicom+json'}
params = {'SOPInstanceUID':instance_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ### Search for instances within a study and series
#
# This request searches for one or more instances within a single study and single series by DICOM attributes.
#
# _Details:_
# * Path: ../studies/{study}/series/{series}/instances?SOPInstanceUID={instance}
# * Method: GET
# * Headers:
# * `Accept: application/dicom+json`
#
# Validate that response includes 1 instance and that response code is OK.
#
# +
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/instances'
headers = {'Accept':'application/dicom+json'}
params = {'SOPInstanceUID':instance_uid}
response = client.get(url, headers=headers, params=params) #, verify=False)
response
# -
# ## Delete DICOM
#
# > NOTE: Delete is not part of the DICOM standard, but has been added for convenience.
#
# A 204 response code is returned when the deletion is successful. A 404 response code is returned if the item(s) have never existed or have already been deleted.
# ### Delete a specific instance within a study and series
#
# This request deletes a single instance within a single study and single series.
#
# _Details:_
# * Path: ../studies/{study}/series/{series}/instances/{instance}
# * Method: DELETE
# * Headers: No special headers needed
#
# This deletes the red-triangle instance from the server. If it is successful the response status code contains no content.
#headers = {'Accept':'anything/at+all'}
url = f'{base_url}/studies/{study_uid}/series/{series_uid}/instances/{instance_uid}'
response = client.delete(url)
response
# ### Delete a specific series within a study
#
# This request deletes a single series (and all child instances) within a single study.
#
# _Details:_
# * Path: ../studies/{study}/series/{series}
# * Method: DELETE
# * Headers: No special headers needed
#
#
# This deletes the green-square instance (it is the only element left in the series) from the server. If it is successful the response status code contains no content.
#headers = {'Accept':'anything/at+all'}
url = f'{base_url}/studies/{study_uid}/series/{series_uid}'
response = client.delete(url)
response
# ### Delete a specific study
#
# This request deletes a single study (and all child series and instances).
#
# _Details:_
# * Path: ../studies/{study}
# * Method: DELETE
# * Headers: No special headers needed
#
#headers = {'Accept':'anything/at+all'}
url = f'{base_url}/studies/{study_uid}'
response = client.delete(url)
response
| docs/resources/use-dicom-web-standard-apis-with-python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="13zUoDymWjoj"
# # Data Wrangling
# + [markdown] id="P-GOTpNaWjok"
# ## Contents:
#
# * String formatting (f-strings)
# * Regular expressions (regex)
# * Pandas (Reading/writing CSV)
# + [markdown] id="r3kPJNMIWjol"
# ## String formatting
# + [markdown] id="IlzCXlG5Wjol"
# Instead of writing a series of `print()` statements with multiple arguments, or concatenating (by `+`) strings, you can also use a Python string formatting method, called `f-strings`. More information can be read in PEP 498: https://www.python.org/dev/peps/pep-0498/
# + [markdown] id="2WFp8i4pWjom"
# You can define a string as a template by inserting `{ }` characters with a variable name or expression in between. For this to work, you have to type an `f` in front of the `'`, `"` or `"""` start of the string definition. When defined, the string will read with the string value of the variable or the expression filled in.
#
# ```python
# name = "Joe"
# text = f"My name is {name}."
# ```
#
# Again, if you need a `'` or `"` in your expression, use the other variant in the Python source code to declare the string. Writing:
#
# ```python
# f'This is my {example}.'
# ```
#
# is equivalent to:
#
# ```python
# f"This is my {example}."
# ```
# + id="ZsWlOdzCWjom"
name = "Joe"
text = f"My name is {name}."
print(text)
# + id="XtKqnCWRWjoo"
day = "Monday"
weather = "Sunny"
n_messages = 8
test_dict = {'test': 'test_value'}
# Multi-line f-string: expressions (method calls, slicing, even the `=`
# debug specifier, Python 3.8+) are evaluated inside the braces.
text = f"""
Today is {day}.
The weather is {weather.lower()} and you have {n_messages} unread messages.
The first three letters of the weekday: {day[:3]}
An example expression is: {15 ** 2 = }
"""
# Bug fix: this string used to be overwritten immediately below and was
# never displayed -- print it before reusing the variable.
print(text)
# Double quotes inside an f-string defined with single quotes are fine.
text = f'Test by selecting key: {test_dict["test"]}'
print(text)
# + [markdown] id="rcz2ZzX8Wjoo"
# ---
# + [markdown] id="6iczszohWjoo"
# ## Regular expressions
# + [markdown] id="R0KFKUT9Wjoo"
# Using regular expressions can be very useful when working with texts. It is a powerful search mechanism by which you can search on patterns, instead of 'exact matches'. But, they can be difficult to grasp, at first sight.
#
# A **regular expression**, for instance, allows you to substitute all digits in a text, following another text sequence, or to find all urls, phone numbers, or email addresses. Or any text, that meets a particular condition.
#
# See the Python manual for the `re` module for more info: https://docs.python.org/3/library/re.html
#
# You can/should use a cheatsheet when writing a regular expression. A nice website to write and test them is: https://regex101.com/.
#
# Some examples of commonly used expressions:
#
# * `\d` for all digits 0-9
# * `\w` for any word character
# * `[abc]` for a set of characters (here: a, b, c)
# * `.` any character
# * `?` the preceding character/pattern 0 or 1 times
# * `*` the preceding character/pattern 0 or multiple times
# * `+` the preceding character/pattern 1 or multiple times
# * `{1,2}` 1 or 2 times
# * `^` the start of the string
# * `$` the end of the string
# * `|` or
# * `()` capture group (only return this part)
#
# In many text editors (e.g. VSCode) there is also an option to search (and replace) with the help of regular expressions.
# + [markdown] id="UHXRd47uWjop"
# Python has a regex module built in. When working with a regular expression, you have to import it first:
# + id="FySvVyNZWjop"
import re
# + [markdown] id="SVPG0dw8Wjop"
# You can use a regular expression for **finding** occurences in a text. Let's say we want to filter out all web urls in a text:
# + id="E4ETED8GWjoq"
text = """
There are various search engines on the web.
There is https://www.google.com/, but also https://www.bing.com/.
A more privacy friendly alternative is https://duckduckgo.com/.
And who remembers http://www.altavista.com/?
"""
re.findall(r'https?://.+?/', text)
# + id="nljkoZR6Wjoq"
# Copied from https://www.imdb.com/search/title/?groups=top_250&sort=user_rating
text = """
1. The Shawshank Redemption (1994)
12 | 142 min | Drama
9,3 Rate this 80 Metascore
Two imprisoned men bond over a number of years, finding solace and eventual redemption through acts of common decency.
Director: <NAME> | Stars: <NAME>, <NAME>, <NAME>, <NAME>
Votes: 2.355.643 | Gross: $28.34M
2. The Godfather (1972)
16 | 175 min | Crime, Drama
9,2 Rate this 100 Metascore
An organized crime dynasty's aging patriarch transfers control of his clandestine empire to his reluctant son.
Director: <NAME> | Stars: <NAME>, <NAME>, <NAME>, <NAME>
Votes: 1.630.157 | Gross: $134.97M
3. The Dark Knight (2008)
16 | 152 min | Action, Crime, Drama
9,0 Rate this 84 Metascore
When the menace known as the Joker wreaks havoc and chaos on the people of Gotham, Batman must accept one of the greatest psychological and physical tests of his ability to fight injustice.
Director: <NAME> | Stars: <NAME>, <NAME>, <NAME>, <NAME>
Votes: 2.315.134 | Gross: $534.86M
"""
titles = re.findall(r'\d{1,2}\. (.+)', text)
titles
# + [markdown] id="ojLPhJ-0Wjor"
# ### Quiz
# Try to get a list of all directors. And the gross income.
# + id="wvFlrH_BWjor"
# All directors
# + id="0-uZoKO2Wjos"
# Gross income
# + [markdown] id="-x_xCtNQWjos"
# Or, you can use a regular expression to **replace** a character sequence. This is an equivalent to the `.replace()` function, but allows more variance in the string matching.
# + id="zsNVQetBWjos"
text = """
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
"""
# Hint: test this with https://regex101.com/
new_text = re.sub(r"(?:(\w)\w+) (\w+)", r"\1. \2", text)
print(new_text)
# + [markdown] id="ZdYs70QQWjot"
# ---
# + [markdown] id="-gWLRatDWjot"
# # Data wrangling with Pandas
# + [markdown] id="0DIIgvmlWjot"
# ## CSV (in Pandas)
#
# The other often used file type is CSV (Comma Separated Values), or variants, such as TSV (Tab Separated Values). Python includes another built-in module to deal with these files: the `csv` module. But, we will be using the `Pandas` module, the go-to package for data analysis, that you already imported and updated in Notebook 0.
#
# A CSV file is similar to an Excel or Google Docs spreadsheet, but more limited in markup and functionality (e.g. you cannot store Excel functions). It is just a text file in which individual entries correspond to lines, and columns are separated by a comma. You can always open a CSV file with a text editor, and this also makes it so easy to store and share data with.
#
# For the rest of the notebook we will see how to work with the two main data types in `pandas`: the `DataFrame` and a `Series`.
#
# Information on functions and modules of Pandas cannot be found in the Python manual online, as it is an external package. Instead, you can refer to https://pandas.pydata.org/pandas-docs/stable/index.html .
# + [markdown] id="rfESw9J_Wjou"
# ### `DataFrame`
#
#
# What is a `pandas.DataFrame`?
#
# A `DataFrame` is a collection of `Series` having the same length and whose indexes are in sync. A *collection* means that each column of a dataframe is a series. You can also see it as a spreadsheet in memory, which also allows for the inclusion of Python objects.
# + [markdown] id="6OAUHYSaWjou"
# We first have to import the package. It's a convention to do this like so with Pandas, which makes the elements from this package (classes, functions, methods) available under its abbreviation `pd`:
# + id="N8fYArBwWjou"
import pandas as pd
# + [markdown] id="nkT3_k-KWjov"
# Next is loading the data. The following data comes from Wikipedia and was [automatically](https://query.wikidata.org/#%0ASELECT%20DISTINCT%20%3FmovieLabel%20%3Fimdb%20%28MIN%28%3FpublicationYear%29%20as%20%3Fyear%29%20%28year%28%3Fdate%29%20as%20%3Faward_year%29%20%28group_concat%28DISTINCT%20%3FdirectorLabel%3Bseparator%3D%22%2C%20%22%29%20as%20%3Fdirectors%20%29%20%28group_concat%28DISTINCT%20%3FcompanyLabel%3Bseparator%3D%22%2C%20%22%29%20as%20%3Fcompanies%29%20%3Fmale_cast%20%3Ffemale_cast%20WHERE%20%7B%0A%20%20%0A%20%20%7B%0A%20%20%3Fmovie%20p%3AP166%20%3Fawardstatement%20%3B%0A%20%20%20%20%20%20%20%20%20wdt%3AP345%20%3Fimdb%20%3B%0A%20%20%20%20%20%20%20%20%20wdt%3AP577%20%3Fpublication%20%3B%0A%20%20%20%20%20%20%20%20%20wdt%3AP57%20%3Fdirector%20%3B%0A%20%20%20%20%20%20%20%20%20wdt%3AP272%20%3Fcompany%20%3B%0A%20%20%20%20%20%20%20%20%20wdt%3AP31%20wd%3AQ11424%20.%0A%20%20%0A%20%20%3Fawardstatement%20ps%3AP166%20wd%3AQ102427%20%3B%20%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20pq%3AP585%20%3Fdate%20.%0A%20%20%7D%0A%20%20%0A%20%20BIND%28year%28%3Fpublication%29%20as%20%3FpublicationYear%29%0A%20%20%0A%20%20%7B%0A%20%20%20%20%20SELECT%20%3Fmovie%20%28COUNT%28%3Fcast_member%29%20AS%20%3Fmale_cast%29%20WHERE%20%7B%0A%20%20%20%20%20%20%3Fmovie%20wdt%3AP161%20%3Fcast_member%20.%0A%20%20%20%20%20%20%3Fcast_member%20wdt%3AP21%20wd%3AQ6581097%20.%0A%20%20%20%20%7D%20GROUP%20BY%20%3Fmovie%0A%7D%20%7B%0A%20%20%20%20SELECT%20%3Fmovie%20%28COUNT%28%3Fcast_member%29%20AS%20%3Ffemale_cast%29%20WHERE%20%7B%0A%20%20%20%20%20%20%3Fmovie%20wdt%3AP161%20%3Fcast_member%20.%0A%20%20%20%20%20%20%3Fcast_member%20wdt%3AP21%20wd%3AQ6581072%20.%0A%20%20%20%20%7D%20GROUP%20BY%20%3Fmovie%0A%20%20%7D%0A%20%20%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20%0A%20%20%20%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20.%0A%20%20%20%20%3Fmovie%20rdfs%3Alabel%20%3FmovieLabel%20.%0A%20%20%20%20%3Fdirector%20rdfs%3Alabel%20%3FdirectorLabel%20.%0A%20%20%20%20%3Fcompany%20rdfs%3Alab
el%20%3FcompanyLabel%20.%20%0A%20%20%7D%0A%7D%20%0A%0AGROUP%20BY%20%3FmovieLabel%20%3Fimdb%20%3Fdate%20%3Fmale_cast%20%3Ffemale_cast%0AORDER%20BY%20%3Fyear%20) retreived. It is an overview of all movies that have won an Academy Award for Best Picture, including some extra data for the movie: a link to the IMDB, the publication and award year, the director(s), production company and the number of male and female actors in the cast. It can be that this data is incorrect, because this information is not entered in Wikipedia.
#
# You can find this file in `data/academyawards.csv`. Download it from the repository and save it in the data folder if you don't have it.
# + [markdown] id="TYq-OoIiWjov"
# Reading in a csv with pandas is easy. We call the `pd.read_csv()` function with the file path as argument. Pandas takes care of opening and closing the file, so a `with` statement is not needed. The contents of the csv file are then read in a Pandas DataFrame object. We can store this in the variable `df`.
#
# Calling this variable in a Jupyter Notebook gives back a nicely formatted table with the first and last 5 rows of the file.
# + id="UmPotvtEWjov"
# Read the CSV into a DataFrame; pandas opens and closes the file itself,
# so no `with` statement is needed here.
df = pd.read_csv('data/academyawards.csv', encoding='utf-8')
df  # in a notebook, displays the first and last 5 rows
# + [markdown] id="-HYWyDLjWjov"
# Think of a `DataFrame` as an in-memory spreadsheet that you can analyse and manipulate programmatically. Or, think of it as a table in which every line is a data entry, and every column holds specific information on this data.
#
# These columns can also be seen as lists of values. They are ordered and the index of an element corresponds with the index of the data entry. The collection of all such columns is what makes the DataFrame. One column in a table is represented by a Pandas `Series`, which collects observations about a given variable. Multiple columns are a `DataFrame`. A DataFrame therefore is a collection of lists (=columns), or `Series`.
#
# If you look for other methods on `pd` you can call, you'll also see that there is an `pd.read_excel()` option to read spreadsheets in `.xls` or `.xlsx`. You can also use this, if you have these kind of files.
# + [markdown] id="VYFYCw1IWjow"
# ### Statistics
# Now that we loaded our DataFrame, we can make pandas print some statistics on the file.
# + id="3L5rU-RMWjow"
df.head(1) # First 5 rows
# + id="rGg7zsP9Wjow"
df.tail() # Last 5 rows
# + id="8DuLyUR9Wjow"
df.describe() # Descriptive statistics
# + [markdown] id="n-1bGEcMWjox"
# As you can see by what they return, these methods return another DataFrame with some descriptive statistics on the file, such as the number of entries (count), the mean of the numerical values, the standard deviation, minimum and maximum values, and the 25th, 50th, and 75th percentiles.
# + [markdown] id="8ZYj1kD7Wjox"
# The `.info()` method can also be informative. It gives you information about a dataframe:
# - how much space does it take in memory?
# - what is the datatype of each column?
# - how many records are there?
# - how many `null` values does each column contain (!)?
# + id="4uRZBDUSWjox"
df.info()
# + [markdown] id="b4KMJ_iRWjox"
# Pandas automatically interprets which datatypes are used in the file, but this is not always correct. Especially if you have empty fields in the DataFrame, any other integers get interpreted as float. Every column has one datatype. You can check them separately by requesting the `.dtypes` argument on the `df`.
#
# The 'object' type is a string in this file, 'int64' is an integer.
# + id="zhqOi1OWWjoy"
df.dtypes
# + [markdown] id="-efRk_jcWjoy"
# We expect different datatypes for the description-dataframe:
# + id="saBMJ_S4Wjoy"
description_df = df.describe()
description_df.dtypes
# + [markdown] id="7Sxekrz5Wjoy"
# ### Slicing and selecting
# + [markdown] id="EdvQxN2rWjoy"
# #### `df['column1']`
# You can select a single column by calling this column name as if the DataFrame was a dictionary. A single column from a DataFrame returns a `Series` object.
# + id="sjl04WKLWjoy"
df
# + id="gdJ1qooIWjoz"
print(type(df['movie']))
df['movie']
# + [markdown] id="86VDt-g1Wjoz"
# The `Series` object is very similar to a `list`:
# + id="PjmysaDdWjoz"
movies = df['movie']
print("Length:", len(movies))
print()
for n, movie in enumerate(movies[:10], 1):
print(n, movie, sep='\t')
# + [markdown] id="TSSri8hWWjoz"
# #### `df[['column1', 'column2']]`
# We can also slice a DataFrame by calling multiple column names as one list:
# + id="eX05cSQ_Wjoz"
df[['movie', 'imdb']]
# + [markdown] id="hT-DeTVZWjo0"
# ### Looping over DataFrames
# + [markdown] id="zgq83jp3fE1W"
# You might expect that if you loop through a DataFrame, you get all the rows. Sadly, it is not that simple, because we now have data in two dimensions. Iterating over a DataFrame instead yields the column names:
# + id="zd_rZaWbekwm"
for r in df:
print(r)
# + [markdown] id="fGKJuYDTWjo0"
# #### `zip(df['column1'], df['column2'])`
# Going over these items in a `for` loop needs a different approach. The built-in `zip()` function ([manual](https://docs.python.org/3/library/functions.html#zip)) takes two iterables of even length and creates a new iterable of tuples. The number of arguments/iterables that you give to `zip()` determines the length of the tuples.
# + id="9NdOQEuXWjo0"
list1 = ['a', 'b', 'c']
list2 = [1, 2, 3]
list(zip(list1, list2))
# + id="QA3u6-_oWjo0"
n = 0
for movie, imdb in zip(df['movie'], df['imdb']):
if n > 9:
break # stop flooding the Notebook
print(movie, "http://www.imdb.com/title/" + imdb, sep='\t')
n += 1
# + [markdown] id="25eEafp-Wjo0"
# #### `.to_dict(orient='records')`
# Or, accessing all entries in a convenient way, as a python dictionary for instance, can be done with the `.to_dict(orient='records')` method:
# + id="T6dq7r6wWjo0"
df.head(1)
# + id="oaeltbGNWjo0"
for r in df.to_dict(orient='records'):
print(r)
print()
name = r['movie']
year = r['year']
won = r['award_year']
print("The movie " + name + " was produced in " + str(year) + " and won in " + str(won) + ".")
print()
break # To not flood the notebook, only print the first
# + [markdown] id="0NieFABdWjo1"
# #### `.iterrows()`
# Or you can use the `.iterrows()` method, which gives you tuples of the index of the row, and the row itself as `Series` object:
# + id="r0hAAQxIWjo1"
for n, r in df.iterrows():
name = r.movie # You can use a dot notation here
year = r.year
won = r.award_year
print(f"The movie {name} was produced in {year} and won in {won}.")
print()
break # To not flood the notebook, only print the first
# + id="8yStX5fpWjo1"
# + [markdown] id="uABpa0I8Wjo1"
# ---
# + [markdown] id="X1e4rchVWjo1"
# ### Analysis
# + id="cuUKTCx7Wjo1"
df
# + id="8FKHCM7OWjo1"
df.mean()
# + [markdown] id="1a0kPAJ7Wjo2"
# You already saw above that you could get statistics by calling `.describe()` on a DataFrame. You can also get these metrics for individual columns. Let's ask the maximum number of male and female actors in the cast of a movie:
# + id="NRjrTge4Wjo2"
df['female_cast'].max()
# + id="fzcZp6p7Wjo2"
df['male_cast'].max()
# + [markdown] id="ouz_x1srWjo2"
# You can also apply these operations to multiple columns at once. You get a `Series` object back.
# + id="HDttAFhqWjo2"
df.max()
# + id="T0A80PIaWjo2"
df[['male_cast', 'female_cast']]
# + id="DUhUsvZgWjo2"
slice_df = df[['male_cast', 'female_cast']]
slice_df.max()
# + [markdown] id="V78YM6E_Wjo2"
# To find the corresponding movie title, we can ask Pandas to give us the record in which these maxima occur. This is done through `df.loc`. This works by asking: "Give me all the locations (=rows) for which a value in a specified column is equal to this value".
# + id="0VQhT1pSWjo3"
df
# + id="rkpB8cUnWjo3"
df[['male_cast', 'female_cast']].max()
# + id="CMg5pvGXWjo3"
df[df['female_cast'] > 10]
# + id="yD04bSNMWjo3"
for column_name, value in df[['male_cast', 'female_cast']].max().items():
print("Movie with maximum for", column_name, value)
row = df.loc[df[column_name] == value]
print(row.movie)
print()
# + [markdown] id="_AXzYkRKWjo3"
# Other functions that can be used are for instance `.mean()`, `.median()`, `.std()` and `.sum()`.
# + id="QUkVzbSxWjo3"
df['female_cast'].mean()
# + id="L9CJ1tH-Wjo3"
df['male_cast'].mean()
# + id="Q_zuwCwOWjo4"
df['female_cast'].sum()
# + id="QUpm-WCSWjo4"
df['male_cast'].sum()
# + id="VMzPbSj3Wjo4"
df
# + [markdown] id="lQvUj6q-Wjo4"
# Pandas also understands dates, but you have to tell it to interpret a column as such. We can change the `year` column in-place so that it is not interpreted as integer, but as a date object.
#
# In this case, since we only have the year available, and not a full date such as `2021-02-22` (YYYY-mm-dd), we have to specify the format. Typing `%Y` as string is shorthand for `YYYY`. It returns a full date, so every month and day are set to January first.
# + id="-WRqzRSHWjo4"
df['year'] = pd.to_datetime(df['year'], format='%Y')
df['award_year'] = pd.to_datetime(df['award_year'], format='%Y')
df['year']
# + id="kItgHCU7Wjo5"
df
# + [markdown] id="hXWb2goUWjo5"
# ### Plotting
#
# Let's try to make some graphs from our data, for instance the number of male/female actors over time.
#
# We now have a year column that is interpreted as time by Pandas. These values can figure as values on a x-axis in a graph. The y-axis would then give info on the number of male and female actors in the movie.
# + [markdown] id="x0j1CQZOWjo5"
# First, we set an **index** for the DataFrame. This determines how the data can be accessed. Normally, this is a range of 0 untill the number of rows. But, you can change this, so that we can analyse the dataframe on a time index.
# + id="CX6uGkiKWjo5"
# Select only what we need
df_actors = df[['award_year', 'male_cast', 'female_cast']]
df_actors
# + id="svmSERhbWjo5"
df_actors = df_actors.set_index('award_year')
df_actors
# + [markdown] id="NUEZBlGVWjo5"
# Then simply call `.plot()` on your newly created DataFrame!
# + id="RRotJMOQWjo5"
df_actors.plot(figsize=(15,10))
# + [markdown] id="oP7plsOsWjo6"
# There are tons of parameters, functions, methods, transformations you can use on DataFrames and also on this plotting function. Luckily, plenty of guides and examples can be found on the internet.
# + [markdown] id="nR5U4KjaWjo6"
# ### Grouping
# + id="zLuRmVx1Wjo6"
df
# + [markdown] id="EygkWZGPWjo6"
# Some directors have won multiple Oscars. To find out which, we have to count the number of rows in the DataFrame that include the same director. There is a Pandas function for this: `.count()`. Calling this on the DataFrame itself would give us the total number of rows only, per column. Therefore, we have to tell Pandas that we want to group by a particular column, say 'directors'.
# + id="hH0vYKNiWjo6"
df.groupby('directors')
# + [markdown] id="4Dz-4n6gWjo6"
# It does not give back something nicely formatted or interpretable. It's just another Python object. The object returned by `groupby` is a `DataFrameGroupBy` **not** a normal `DataFrame`.
#
# However, some methods of the latter work also on the former, e.g. `.head()` and `.tail()`. Let's call the `.count()` on this object:
# + id="cI99agV2Wjo6"
df.groupby('directors').count()
# + [markdown] id="qJkhj3qEWjo6"
# Remember that this counts the numer of rows. As we know that each row is one movie, we can trim this down to:
# + id="nUtawB2sWjo6"
director_counts = df.groupby('directors').count()['movie']
director_counts
# + [markdown] id="KjZpua8TWjo7"
# Now, get all directors that have won an Oscar more than once by specifying a conditional operator:
# + id="SXHox2feWjo7"
director_counts[director_counts > 1]
# + id="HlxIajX3Wjo7"
list(director_counts.items())
# + id="QPhqAJSsWjo7"
for i, value in director_counts.items():
print(i, value)
# + [markdown] id="Wrkro7EtWjo7"
# ### Adding a column
# + [markdown] id="PVihLONeWjo8"
# If we want to get the total number of actors per movie, we have to sum the values from the `male_cast` and `female_cast` columns.
#
# You can do this in a for loop, by going over every row (like we saw above), but you can also sum the individual columns. Pandas will then add up the values with the same index and will return a new Series of the same length with the values summed.
# + id="7CGYHLtdWjo8"
df
# + id="kgdYTcMkWjo8"
df['male_cast'] + df['female_cast']
# + id="iZu8HM80Wjo8"
total_cast = df['male_cast'] + df['female_cast']
total_cast
# + [markdown] id="LHBoYnnJWjo8"
# Then, we add it as a column in our original dataframe. The only requirement for adding a column to a DataFrame is that the length of the Series or list is the same as that of the DataFrame.
# + id="6to7wyDKWjo8"
df['total_cast'] = total_cast
df
# + [markdown] id="vH981_n7Wjo9"
# Optionally, we can sort the DataFrame by column. For instance, from high to low (`ascending=False`) for the newly created `total_cast` column.
# + id="3hS0W5q5Wjo9"
df_sorted = df.sort_values('total_cast', ascending=False)
df_sorted
# + [markdown] id="NBkSgHX4Wjo9"
# ### Saving back the file
# + [markdown] id="zhuYIfQOWjo9"
# Use one of the `.to_csv()` or `.to_excel` functions to save the DataFrame. Again, no `with` statement needed, just a file path (and an encoding).
# + id="XhUVyeZnWjo9"
df_sorted.to_csv('stuff/academyawards_sum.csv', encoding='utf-8')
# + id="IAXnSFHkWjo9"
df_sorted.to_excel('stuff/academyawards_sum.xlsx')
# + [markdown] id="FP13dqU7Wjo-"
# You need to specify `index=False` if you want to prevent a standard index (0,1,2,3...) to be saved in the file as well.
# + id="-eZWRlh7Wjo-"
df_sorted.to_csv('stuff/academyawards_sum.csv', encoding='utf-8', index=False)
# + [markdown] id="IR914S1_Wjo-"
# Open the contents in Excel, LibreOffice Calc, or another program to read spreadsheets!
# + [markdown] id="QoyJmpb3Wjo-"
# ---
# + [markdown] id="rOUj79AmWjo-"
# # Data wrangling (example)
#
# We can take a look at another example. We consider a dataset of tweets from Elon Musk, SpaceX and Tesla founder, and ask the following questions:
# * When is Elon most actively tweeting?
#
# While this question is a bit trivial, it will allow us to learn how to wrangle data.
# + id="jnR8cCCuWjo-"
import pandas as pd
# + [markdown] id="anjFRsrJWjo-"
# ### Load dataset
# + [markdown] id="AEkE4d1OWjo_"
# Let's read in a CSV file containing an export of [Elon Musk's tweets](https://twitter.com/elonmusk), exported from Twitter's API.
# + id="zRnIjOf3Wjo_"
dataset_path = 'data/elonmusk_tweets.csv'
df = pd.read_csv(dataset_path, encoding='utf-8')
# + id="yFVsux4FWjo_"
df
# + id="fqXEcft-WjpA"
df.info()
# + [markdown] id="phSczVrXWjpA"
# Let's give this dataset a bit more structure:
# - The `id` column can be transformed into the dataframe's index, thus enabling us e.g. to select a tweet by id;
# - The column `created_at` contains a timestamp, thus it can easily be converted into a `datetime` value
# + id="EQgtlrIVWjpA"
df.set_index('id', drop=True, inplace=True)
# + id="ItrFglkNWjpB"
df
# + id="oUmUplYpWjpB"
df.created_at = pd.to_datetime(df.created_at)
# + id="DjlGsSCKWjpB"
df.info()
# + id="0DATHvnUWjpB"
df
# + [markdown] id="95sY7ZPfWjpB"
# ---
# + [markdown] id="fmw2eKvzWjpB"
# ### Selection
# + [markdown] id="8UfnizmJWjpC"
# #### Renaming columns
# + [markdown] id="EZBwFCwuWjpC"
# An operation on dataframes that you'll find yourself doing very often is to rename the columns. The first way of renaming columns is by manipulating directly the dataframe's index via the `columns` property.
# + id="WNyjLCyWWjpC"
df.columns
# + [markdown] id="difr2WuLWjpC"
# We can change the column names by assigning to `columns` a list having as values the new column names.
#
# **NB**: the size of the list and new number of colums must match!
# + id="U9rk9GMAWjpC"
# here we renamed the column `text` => `tweet`
df.columns = ['created_at', 'tweet']
# + id="0N8Tp9lVWjpC"
# let's check that the change did take place
df.head()
# + [markdown] id="bvo42ui6WjpC"
# The second way of renaming colums is to use the method `rename()` of a dataframe. The `columns` parameter takes a dictionary of mappings between old and new column names.
#
# ```python
# mapping_dict = {
# "old_column_name": "new_column_name"
# }
# ```
# + id="uWwroMmuWjpD"
# let's change column `tweet` => `text`
df = df.rename(columns={"tweet": "text"})
# + id="V9du3B6LWjpD"
df.head()
# + [markdown] id="kX1Clp-LWjpD"
# **Question**: in which cases is it more convenient to use the second method over the first?
# + [markdown] id="SSyzgI-KWjpD"
# #### Selecting columns
# + id="xgADy6B_WjpD"
# this selects one single column and returns as a Series
df["created_at"].head()
# + id="2Ts8-demWjpD"
type(df["created_at"])
# + id="HA8veWZYWjpD"
# whereas this syntax selects one single column
# but returns a Dataframe
df[["created_at"]].head()
# + id="VPanFWGVWjpE"
type(df[["created_at"]])
# + [markdown] id="5JVkvqrMWjpF"
# #### Selecting rows
#
# Filtering rows in `pandas` is done by means of `[ ]`, which can contain the row number as well as a condition for the selection.
# + id="kHmk-0MNWjpF"
df[0:2]
# + [markdown] id="y5WF65O6WjpF"
# ### Transformation
#
#
# The two main functions used to manipulate and transform values in a dataframe are:
# - `.map()` (on Series only!)
# - `.apply()`
#
# In this section we'll be using both to enrich our datasets with useful information (useful for exploration, for later visualizations, etc.).
# + [markdown] id="S0vPCd_IWjpF"
# #### Add link to original tweet
# + [markdown] id="BOeYNz3uWjpF"
# The `map()` method can be called on a column, as well as on the dataframe's index.
#
# When passed as a parameter to `map`, an 'anonymous' lambda function `lambda` can be used to transform any value from that column into another one.
# + id="WUNAxlZ3WjpF"
df['tweet_link'] = df.index.map(lambda x: f'https://twitter.com/i/web/status/{x}')
# + [markdown] id="DMWT4GB3WjpF"
# Or, maybe it is easier with a list comprehension:
# + id="dFZtLGXPWjpF"
df['tweet_link'] = [f'https://twitter.com/i/web/status/{x}' for x in df.index]
# + id="qzr3BfiZWjpG"
df
# + [markdown] id="4DGRRkkGWjpG"
# #### Add colums with mentions
# + id="Cn4Hwl_dWjpG"
import re
def find_mentions(tweet_text):
    """Return every @-mention found in *tweet_text*, in order, as a list.

    A mention is '@' followed by 1-15 word characters (Twitter's handle
    rules); the leading '@' is kept in each returned string.
    """
    mention_pattern = r'@[a-zA-Z0-9_]{1,15}'
    return re.findall(mention_pattern, tweet_text)
# + id="i7zFdmncWjpG"
df['tweet_mentions'] = df.text.apply(find_mentions)
# + id="6S0dTdeCWjpG"
df['n_mentions'] = df.tweet_mentions.apply(len)
# + id="1n8B7SbfWjpG"
df.head()
# + [markdown] id="SxVP2smVWjpG"
# #### Add column with week day and hour
# + id="YkzyAnI7WjpH"
def day_of_week(t):
    """
    Return the English week-day name for a weekday integer.

    Parameters
    ----------
    t : int
        Weekday number as produced by pandas' ``Series.dt.weekday``
        (0 = Monday ... 6 = Sunday).

    Returns
    -------
    str or None
        The day name, or None when ``t`` is outside 0-6 (the original
        if/elif chain fell through and implicitly returned None there;
        that behavior is preserved).
    """
    names = ("Monday", "Tuesday", "Wednesday", "Thursday",
             "Friday", "Saturday", "Sunday")
    # Tuple lookup replaces the seven-branch if/elif chain; the isinstance
    # guard keeps non-integer input returning None instead of raising.
    if isinstance(t, int) and 0 <= t <= 6:
        return names[t]
    return None
# + id="sWoQi-cTWjpH"
df["week_day"] = df.created_at.dt.weekday
# + id="zuaqzrMMWjpH"
df["week_day_name"] = df["week_day"].apply(day_of_week)
# + [markdown] id="69U8rYV9WjpH"
# Or, there is a built-in function in Pandas that gives back the day name:
# + id="atR-yXkJWjpH"
df["week_day_name"] = df.created_at.dt.day_name()
# + id="wEpNKfF_WjpH"
df.head(3)
# + [markdown] id="dNyL9_17WjpH"
# #### Add column with day hour
# + id="9-5fndLAWjpI"
# df.created_at.dt?
# + id="du8-hTt5WjpI"
df.created_at.dt.hour.head()
# + id="SEa9CxUZWjpI"
df["day_hour"] = df.created_at.dt.hour
# + id="_d43pcy7WjpI"
display_cols = ['created_at', 'week_day', 'day_hour']
df[display_cols].head(4)
# + [markdown] id="TkyG8sbsWjpI"
# ##### Multiple conditions
# + id="tVNBk98-WjpJ"
# AND condition with `&`
df[
(df.week_day_name == 'Saturday') & (df.n_mentions == 0)
].shape
# + id="P0zJi5oEWjpJ"
# Equivalent expression with `query()`
df.query("week_day_name == 'Saturday' and n_mentions == 0").shape
# + id="CTO1hlNhWjpJ"
# OR condition with `|`
df[
(df.week_day_name == 'Saturday') | (df.n_mentions == 0)
].shape
# + [markdown] id="kGqH23K5WjpJ"
# ### Aggregation
# + id="2JoiqWqnWjpJ"
df.agg({'n_mentions': ['min', 'max', 'sum']})
# + [markdown] id="247FXVzhWjpJ"
# #### Grouping
# + id="qf8jfF1mWjpJ"
group_by_day = df.groupby('week_day')
# + id="sVDAv-D5WjpK"
# The head of a DataFrameGroupBy consists of the first
# n records for each group (see `help(grp_by_day.head)`)
group_by_day.head(1)
# + [markdown] id="tWNCOjXtWjpK"
# `agg` is used to pass an aggregation function to be applied to each group resulting from `groupby`.
#
# Here we are interested in how many tweets there are for each group, so we pass `len()` to an 'aggregate'. This is similar to the `.count()` method.
# + id="P1ATt8AiWjpK"
group_by_day.agg(len)
# + [markdown] id="wOiN3zPuWjpK"
# However, we are not interested in having the count for all columns. Rather we want to create a new dataframe with renamed column names.
# + id="t3avzNHOWjpK"
group_by_day.agg({'text': len}).rename({'text': 'tweet_count'}, axis='columns')
# + [markdown] id="VGArxHedWjpK"
# ##### By label (column)
# + [markdown] id="HHa_A2Q2WjpK"
# Previously we've added a column indicating on which day of the week a given tweet appeared.
# + id="8jZ__JQSWjpL"
groupby_result_as_series = df.groupby('day_hour')['text'].count()
# + id="BHogxsKsWjpL"
groupby_result_as_series
# + id="PurAA8tWWjpL"
groupby_result_as_df = df.groupby('day_hour')[['text']]\
.count()\
.rename({'text': 'count'}, axis='columns')
# + id="hHmsRRGrWjpL"
groupby_result_as_df.head()
# + [markdown] id="UUiyZfLJWjpL"
# ##### By series or dict
# + id="nl9sj2AHWjpL"
# df.groupby?
# + id="6qh4CJlxWjpL"
# here we pass the groups as a series
df.groupby(df.created_at.dt.day).agg({'text':len}).head()
# + id="_A4snuKTWjpM"
# here we pass the groups as a series
df.groupby(df.created_at.dt.day)[['text']].count().head()
# + id="-0znvZrHWjpM"
df.groupby(df.created_at.dt.hour)[['text']].count().head()
# + [markdown] id="AAeD5HadWjpM"
# ##### By multiple labels (columns)
# + id="fT3UrwA3WjpM"
# Here we group based on the values of two columns
# instead of one
x = df.groupby(['week_day', 'day_hour'])[['text']].count()
# + id="KpHvHWgWWjpM"
x.head()
# + [markdown] id="z5zgvSBMWjpN"
# #### Aggregation methods
#
# **Summary**:
#
# - `count`: Number of non-NA values
# - `sum`: Sum of non-NA values
# - `mean`: Mean of non-NA values
# - `median`: Arithmetic median of non-NA values
# - `std`, `var`: standard deviation and variance
# - `min`, `max`: Minimum and maximum of non-NA values
# + [markdown] id="McaM09oBWjpN"
# You can also use these in an aggregation functions within a groupby:
# + id="Av4JTDaGWjpN"
df.groupby('week_day').agg(
{
# each key in this dict specifies
# a given column
'n_mentions':[
# the list contains aggregation functions
# to be applied to this column
'count',
'mean',
'min',
'max',
'std',
'var'
]
}
)
# + [markdown] id="S03JNmiMWjpN"
# #### Sorting
# + [markdown] id="9DS3Y1I_WjpN"
# To sort the values of a dataframe we use its `sort_values` method:
# - `by`: specifies the name of the column to be used for sorting
# - `ascending` (default = `True`): specifies whether the sorting should be *ascending* (A-Z, 0-9) or `descending` (Z-A, 9-0)
# + id="W2xx3qjhWjpN"
df.sort_values(by='created_at', ascending=True).head()
# + id="Kv9H2D6bWjpO"
df.sort_values(by='n_mentions', ascending=False).head()
# + [markdown] id="kF2g6hKRWjpO"
# ### Save
#
# Before continuing with the plotting, let's save our enhanced dataframe, so that we can come back to it without having to redo the same manipulations on it.
#
# `pandas` provides a number of handy functions to export dataframes in a variety of formats.
# + [markdown] id="CAXx_eFQWjpO"
# Here we use `.to_pickle()` to serialize the dataframe into a binary format, by using behind the scenes Python's `pickle` library.
# + id="DJcFasz0WjpO"
df.to_pickle("stuff/musk_tweets_enhanced.pickle")
# + [markdown] id="XP15yKiGWjpO"
# ## Part 2
# + id="InFDIq1aWjpO"
df = pd.read_pickle("stuff/musk_tweets_enhanced.pickle")
# + [markdown] id="_NV235sBWjpO"
# ### `describe()`
# + [markdown] id="W8GCeXlrWjpO"
# The default behavior is to include only column with numerical values
# + id="do_rgIRFWjpP"
df.describe()
# + [markdown] id="imeVXNXEWjpP"
# A trick to include more values is to exclude the datatype on which it breaks, which in our case is `list`.
# + id="lWJ-qEuuWjpP"
df.describe(exclude=[list])
# + id="HVmtrWgAWjpP"
df.created_at.describe(datetime_is_numeric=True)
# + id="kf56VOrmWjpP"
df['week_day_name'] = df['week_day_name'].astype('category')
# + id="q9C-ZeKYWjpP"
df.describe(exclude=['object'])
# + [markdown] id="qtuFoYDKWjpP"
# ### Plotting
# + id="Ept3KEdqWjpP"
# Not needed in newest Pandas version
# %matplotlib inline
import matplotlib.pyplot as plt
# + [markdown] id="a6a3V4JnWjpQ"
# #### Histograms
#
# They are useful to see the distribution of a certain variable in your dataset.
# + id="WSIvsnS2WjpQ"
df.groupby(['n_mentions'])[['text']].count()
# + id="jf8tZ8zeWjpQ"
plt.figure(figsize=(10, 6))
plt.hist(df.n_mentions, bins='auto', rwidth=1.0)
plt.title('Distribution of the number of mentions per tweet')
plt.ylabel("Tweets")
plt.xlabel("Mentions (per tweet)")
plt.show()
# + id="Mw3FT2I1WjpQ"
plt.figure(figsize=(10, 6))
plt.hist(df.day_hour, bins='auto', rwidth=0.6)
plt.title('Distribution of the number of mentions per tweet')
plt.ylabel("Tweets")
plt.xlabel("Hour of the day")
plt.show()
# + id="1nhigOF3WjpQ"
df_2017 = df[df.created_at.dt.year == 2017]
# + id="DL5IrIngWjpQ"
plt.figure(figsize=(10, 6))
plt.hist(df_2017.day_hour, bins='auto', rwidth=0.6)
plt.title('Year 2017')
plt.ylabel("Tweets")
plt.xlabel("Hour of the day")
plt.show()
# + [markdown] id="lY2JtrzhWjpQ"
# So far we have used directly `matplotlib` to generate our plots.
#
# `pandas`'s dataframes provide some methods that directly call `matplotlib`'s API behind the scenes:
# - `hist()` for histograms
# - `boxplot()` for boxplots
# - `plot()` for other types of plots (specified with e.g. `any='scatter'`)
# + [markdown] id="p_ctcaYrWjpQ"
# By passing the `by` parameter to e.g. `hist()` it is possible to produce one histogram plot of a given variable for each value in another column.
# + [markdown] id="h35FvUBrWjpR"
# Let's see how we can plot the number of mentions by year:
# + id="fwDZHZOyWjpR"
df['year'] = df.created_at.dt.year
# + id="qabk8SHKWjpR"
axes = df.hist(column='day_hour', by='year', figsize=(10,10))
# + [markdown] id="KxZG0diqWjpR"
# #### Bar charts
#
# They are useful to plot categorical data.
# + id="nazqFakwWjpR"
# plt.bar?
# + id="nROj0KQGWjpR"
tweets_by_weekday = df.groupby(df.created_at.dt.weekday)[['text']].count()
# + id="pHrTiv6GWjpR"
week_days = [
"Mon",
"Tue",
"Wed",
"Thur",
"Fri",
"Sat",
"Sun"
]
# + id="u16bx22pWjpS"
plt.figure(figsize=(8, 6))
# specify the type of plot and the labels
# for the y axis (the bars)
plt.bar(
tweets_by_weekday.index,
tweets_by_weekday.text,
tick_label=week_days,
width=0.5
)
# give a title to the plot
plt.title('<NAME>\'s week on Twitter')
# give a label to the axes
plt.ylabel("Number of tweets")
plt.xlabel("Week day")
plt.show()
# + [markdown] id="or-3iZHkWjpS"
# #### Box plots
#
# 
# + [markdown] id="8diAjB28WjpS"
# ### Outliers, missing values
#
# An *outlier* is an observation far from the center of mass of the distribution. It might be an error or a genuine observation: this distinction requires domain knowledge. Outliers infuence the outcomes of several statistics and machine learning methods: it is important to decide how to deal with them.
#
# A *missing value* is an observation without a value. There can be many reasons for a missing value: the value might not exist (hence its absence is informative and it should be left empty) or might not be known (hence the value is existing but missing in the dataset and it should be marked as NA).
#
# *One way to think about the difference is with this Zen-like koan: An explicit missing value is the presence of an absence; an implicit missing value is the absence of a presence.*
# + id="fqQ9M64yWjpS"
tweets_by_weekday
# + id="MxF7BC66WjpS"
tweets_by_weekday.describe()
# + id="TMk9HWgKWjpS"
tweets_by_weekday.boxplot()
# + id="mQynlW_qWjpS"
# plt.bar?
# + id="pWog_GvdWjpS"
df.head(3)
# + id="qi9qNbwYWjpS"
df[['day_hour']].describe()
# + id="Abyiaf0RWjpS"
df[['day_hour']].quantile(.25)
# + id="gWT70MgBWjpT"
# df.boxplot?
# + id="N4DqfJyuWjpT"
df[['day_hour', 'week_day_name']].boxplot(
by='week_day_name',
grid=False,
figsize=(8,6),
fontsize=10
)
# give a title to the plot
plt.title('')
# give a label to the axes
plt.xlabel("Day of the week")
plt.show()
# + id="OljB9txHWjpT"
df[['day_hour', 'week_day']].boxplot(
by='week_day',
grid=True, # just to show the difference with/without
figsize=(8,6),
fontsize=10
)
# give a title to the plot
plt.title('')
# give a label to the axes
plt.xlabel("Day of the week")
plt.show()
# + [markdown] id="sybT6sNaWjpT"
# ### Exercise 1.
#
# * Create a function that calculates the frequency of hashtags in tweets.
# * Test it on toy examples, to make sure it works.
# * Apply it to Elon Musk's tweets.
# * List the top 10 hashtags in the dataset.
# + id="mkfqkxeSWjpT"
# Your code here.
# + [markdown] id="-vE_Pm5rWjpT"
# ### Exercise 2.
#
# Read the file `data/adams-hhgttg.txt` and:
#
# - Count the number of occurrences per distinct word in the text.
#
# - Create a data frame with two columns: word and counts.
#
# - Plot the histogram of the word frequencies and think about what is happening.
# + id="LuDfR5K4WjpT"
# Your code here.
# + [markdown] id="XOOk4v2xWjpT"
# ---
| notebooks/4_PandasDataWrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Convert plotTS() Function to Use netCDF4 Parameters ##
import sys
# !{sys.executable} -m pip install netCDF4
# !{sys.executable} -m pip install xarray
import opedia
import netCDF4
import os
import json
import xarray as xr
from datetime import datetime
from netCDF4 import num2date, date2num
import numpy as np
import pandas as pd
import db
import common as com
import timeSeries as TS
from datetime import datetime, timedelta
import time
from math import pi
from bokeh.io import output_notebook
from bokeh.plotting import figure, show
from bokeh.layouts import column
from bokeh.models import DatetimeTickFormatter
from bokeh.palettes import all_palettes
from bokeh.models import HoverTool
from bokeh.embed import components
import jupyterInline as jup
if jup.jupytered():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
# ### Original Function ###
def plotTS(tables, variables, startDate, endDate, lat1, lat2, lon1, lon2, depth1, depth2, fname, exportDataFlag, marker='-', msize=20, clr='purple'):
    """
    Retrieve a time series for each (table, variable) pair via the project's
    TS/db/com modules and render each one as an interactive Bokeh figure
    (hoverable circles plus a connecting line), all shown in one column.

    Parameters
    ----------
    tables, variables : list
        Parallel lists; variables[i] is fetched from tables[i].
    startDate, endDate : str
        Time window, formatted '%Y-%m-%d'.
    lat1, lat2, lon1, lon2, depth1, depth2
        Spatial bounding box and depth range of the query.
    fname : str
        Output file name stem (only used by the commented-out output_file
        branch at the bottom).
    exportDataFlag : bool
        When True, each retrieved series is also passed to exportData()
        (defined elsewhere in the project).
    marker : str
        Currently unused by this implementation.
    msize : int
        Scatter marker size.
    clr : str
        Line color.
    """
    p = []      # one Bokeh figure per successfully retrieved variable
    lw = 2      # line width
    w = 800     # figure width (px)
    h = 400     # figure height (px)
    TOOLS = 'pan,wheel_zoom,zoom_in,zoom_out,box_zoom, undo,redo,reset,tap,save,box_select,poly_select,lasso_select'
    for i in tqdm(range(len(tables)), desc='overall'):
        dt = com.temporalRes(tables[i])
        # NOTE(review): dt*24*60 suggests temporalRes() returns days while
        # timeSeries() expects minutes — confirm against the com/TS modules.
        t, y, yErr = TS.timeSeries(tables[i], variables[i], startDate, endDate, lat1, lat2, lon1, lon2, depth1, depth2, fmt='%Y-%m-%d', dt=dt*24*60)
        # Skip variables for which the query returned no finite values.
        if len(y[~np.isnan(y)]) < 1:
            com.printTQDM('%d: No matching entry found: Table: %s, Variable: %s ' % (i+1, tables[i], variables[i]), err=True )
            continue
        com.printTQDM('%d: %s retrieved (%s).' % (i+1, variables[i], tables[i]), err=False)
        if exportDataFlag:
            exportData(t, y, yErr, tables[i], variables[i], lat1, lat2, lon1, lon2, depth1, depth2)
        output_notebook()
        p1 = figure(tools=TOOLS, toolbar_location="above", plot_width=w, plot_height=h)
        # p1.xaxis.axis_label = 'Time'
        # y-axis label: variable name plus its unit looked up from the database.
        p1.yaxis.axis_label = variables[i] + ' [' + db.getVar(tables[i], variables[i]).iloc[0]['Unit'] + ']'
        leg = variables[i]
        fill_alpha = 0.3
        # Hoverable scatter glyphs drawn together with the connecting line.
        cr = p1.circle(t, y, fill_color="grey", hover_fill_color="firebrick", fill_alpha=fill_alpha, hover_alpha=0.3, line_color=None, hover_line_color="white", legend=leg, size=msize)
        p1.line(t, y, line_color=clr, line_width=lw, legend=leg)
        p1.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
        if not db.isClimatology(tables[i]):
            # Real timestamps: format the x-axis as dates at every zoom level.
            p1.xaxis.formatter=DatetimeTickFormatter(
                hours=["%d %B %Y"],
                days=["%d %B %Y"],
                months=["%d %B %Y"],
                years=["%d %B %Y"],
            )
            p1.xaxis.major_label_orientation = pi/4
        elif db.hasField(tables[i], 'month'):
            # Climatology tables are indexed by month rather than by date.
            p1.xaxis.axis_label = 'Month'
        p.append(p1)
    dirPath = 'embed/'
    if not os.path.exists(dirPath):
        os.makedirs(dirPath)
    #if not inline: ## if jupyter is not the caller
    #    output_file(dirPath + fname + ".html", title="TimeSeries")
    show(column(p))
    return
# ### Xarray Implementation (yippee) ###
def plotTSX(tables, variables, startDate, endDate, lat1, lat2, lon1, lon2, depth1, depth2, fname, exportDataFlag, marker='-', msize=20, clr='purple'):
    """
    xarray-based counterpart of plotTS(): instead of querying the opedia
    database, each entry of `tables` is an xarray Dataset that is sliced
    in time/lat/lon/depth and plotted as an interactive Bokeh figure.

    Parameters
    ----------
    tables : list of xarray.Dataset
        Datasets holding the variables to plot; each must expose TIME,
        LAT_C, LON_C and DEP_C coordinates.
    variables : list of str
        variables[i] is read from tables[i].
    startDate, endDate : str
        Time window ('YYYY-MM-DD') used to slice the TIME axis.
    lat1, lat2, lon1, lon2, depth1, depth2
        Spatial bounding box and depth range for the slice.
    fname : str
        Output file name stem (only used by the commented-out output_file
        branch at the bottom).
    exportDataFlag : bool
        When True, the sliced series is also passed to exportData()
        (defined elsewhere in the project).
    marker : str
        Currently unused by this implementation.
    msize : int
        Scatter marker size.
    clr : str
        Line color.
    """
    p = []      # one Bokeh figure per requested variable
    lw = 2      # line width
    w = 800     # figure width (px)
    h = 400     # figure height (px)
    TOOLS = 'pan,wheel_zoom,zoom_in,zoom_out,box_zoom, undo,redo,reset,tap,save,box_select,poly_select,lasso_select'
    for i in tqdm(range(len(tables)), desc='overall'):
        # Unit string attached to the variable in the netCDF metadata.
        unit = tables[i].variables[variables[i]].attrs['units']
        # Convert the (possibly cftime-based) TIME index to a DatetimeIndex so
        # that string-based slicing and Bokeh's datetime axis both work.
        toDateTime = tables[i].indexes['TIME'].to_datetimeindex()
        tables[i]['TIME'] = toDateTime
        subset = tables[i].sel(TIME = slice(startDate, endDate), LAT_C = slice(lat1, lat2), LON_C = slice(lon1, lon2), DEP_C = slice(depth1, depth2))
        t = subset.variables['TIME'].values
        # Take the first lat/lon/depth cell of the selection as the series.
        y = subset.variables[variables[i]][:,0,0,0].values.tolist()
        if exportDataFlag:
            # Bug fix: yErr was previously referenced here without ever being
            # assigned (NameError as soon as exportDataFlag was True).  The
            # netCDF subset carries no error estimate, so export NaNs of
            # matching length instead.
            yErr = np.full(len(y), np.nan)
            exportData(t, y, yErr, tables[i], variables[i], lat1, lat2, lon1, lon2, depth1, depth2)
        output_notebook()
        p1 = figure(tools=TOOLS, toolbar_location="above", plot_width=w, plot_height=h)
        p1.xaxis.axis_label = 'Time'
        p1.yaxis.axis_label = variables[i] + str(unit)
        leg = variables[i]
        fill_alpha = 0.3
        # Hoverable scatter glyphs drawn together with the connecting line.
        cr = p1.circle(t, y, fill_color="grey", hover_fill_color="firebrick", fill_alpha=fill_alpha, hover_alpha=0.3, line_color=None, hover_line_color="white", legend=leg, size=msize)
        p1.line(t, y, line_color=clr, line_width=lw, legend=leg)
        p1.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
        p1.xaxis.formatter=DatetimeTickFormatter(
            hours=["%d %B %Y"],
            days=["%d %B %Y"],
            months=["%d %B %Y"],
            years=["%d %B %Y"],
        )
        p1.xaxis.major_label_orientation = pi/4
        p.append(p1)
    dirPath = 'embed/'
    if not os.path.exists(dirPath):
        os.makedirs(dirPath)
    #if not inline: ## if jupyter is not the caller
    #    output_file(dirPath + fname + ".html", title="TimeSeries")
    show(column(p))
    return
# ### Testing Space ###
# +
# Smoke test for the xarray implementation: open a Darwin nutrients dataset
# from a THREDDS/OPeNDAP endpoint and plot one year of O2 at one location.
# NOTE(review): the server address is a private LAN IP — this cell only
# works from inside that network.
xFile = xr.open_dataset('http://192.168.3.11:80/thredds/dodsC/las/id-a1d60eba44/data_usr_local_tomcat_content_cbiomes_20190510_20_darwin_v0.2_cs510_darwin_v0.2_cs510_nutrients.nc.jnl')
tables = [xFile] # see catalog.csv for the complete list of tables and variable names
variables = ['O2'] # see catalog.csv for the complete list of tables and variable names
startDate = '2000-12-31'
endDate = '2001-12-31'
lat1, lat2 = 25, 30           # latitude band (degrees north)
lon1, lon2 = -160, -155       # longitude band (degrees east)
depth1, depth2 = 0, 10        # surface layer depth range
fname = 'TS'
exportDataFlag = False # True if you want to download data
plotTSX(tables, variables, startDate, endDate, lat1, lat2, lon1, lon2, depth1, depth2, fname, exportDataFlag)
| content/features/.ipynb_checkpoints/timeseries-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Chicago Crime Dataset
from sklearn.model_selection import train_test_split
import pandas as pd
import data_prep_utils as utils
import numpy as np
# Load the raw Chicago crime export and keep only records from 2008 onward.
chicago_crime = pd.read_csv('data/chicago.csv')
chicago_crime.info()
chicago_crime.Date = pd.to_datetime(chicago_crime.Date)
chicago_crime = chicago_crime[chicago_crime['Date'].dt.year >= 2008]
chicago_crime.shape
chicago_crime['Primary Type'].unique()
# ### Theft crimes
# Binarize the target: 1 for THEFT, 0 for every other crime type.
chicago_theft_crimes = chicago_crime.copy()
chicago_theft = chicago_theft_crimes['Primary Type'].apply(lambda x: 1 if x == 'THEFT' else 0)
chicago_theft
chicago_theft_crimes['Primary Type'] = chicago_theft
# Bin coordinates into a spatial grid and keep only the model-relevant
# columns (helpers come from the project's data_prep_utils module).
chicago_theft_crimes = utils.coord_to_grid(chicago_theft_crimes, lat_col='Latitude', lon_col='Longitude')
chicago_theft_crimes = utils.feature_reduce(
    chicago_theft_crimes,
    features=['Date', 'binned_latitude', 'binned_longitude', 'Primary Type'],
    sort_by='Primary Type')
chicago_theft_crimes.head()
# NOTE(review): only the first 1000 rows are converted to image-style data
# here — presumably to bound memory/runtime; confirm this is intended.
chicago_theft_crimes = utils.convert_to_image_data(chicago_theft_crimes.head(1000), crime_col='Primary Type')
chicago_theft_crimes['batches']
# Train/test split of the image-style inputs/outputs (67/33, fixed seed).
chicago_theft_X_train, chicago_theft_X_test, chicago_theft_y_train, chicago_theft_y_test = train_test_split(
    chicago_theft_crimes['inputs'],
    chicago_theft_crimes['outputs'],
    test_size=0.33,
    random_state=42)
chicago_theft_X_train.ndim
# +
# Persist the split arrays for the binary-classification experiments.
with open('data/binary-classification/chicago/chicago_theft_X_train.npy', 'wb') as f:
    np.save(f, chicago_theft_X_train)
with open('data/binary-classification/chicago/chicago_theft_X_test.npy', 'wb') as f:
    np.save(f, chicago_theft_X_test)
with open('data/binary-classification/chicago/chicago_theft_y_train.npy', 'wb') as f:
    np.save(f, chicago_theft_y_train)
with open('data/binary-classification/chicago/chicago_theft_y_test.npy', 'wb') as f:
    np.save(f, chicago_theft_y_test)
# -
# ### Battery crimes
chicago_battery_crimes = chicago_crime['Primary Type'].apply(lambda x: 1 if x == 'BATTERY' else 0)
# ### Criminal damage crimes
chicago_criminal_damage_crimes = chicago_crime['Primary Type'].apply(lambda x: 1 if x == 'CRIMINAL DAMAGE' else 0)
chicago_theft_crimes.info()
# ### Seattle Crime Dataset
# Load the Seattle crime export and parse the report timestamps.
seattle_crime = pd.read_csv('seattle.csv')
seattle_crime.info()
# Bug fix: `seattle_crime.Date = ...` sets a plain Python attribute when no
# 'Date' column already exists, so the parsed timestamps never became a real
# DataFrame column (pandas emits a UserWarning for this).  Item assignment
# always creates/overwrites the column.
seattle_crime['Date'] = pd.to_datetime(seattle_crime['Report DateTime'])
# Determine the reporting date range (list is newest-first after the sort).
seattle_crime_report_dates = seattle_crime['Report DateTime'].tolist()
seattle_crime_report_dates.sort(reverse=True)
seattle_crime_report_range = [seattle_crime_report_dates[0], seattle_crime_report_dates[-1]]
seattle_crime_report_range
len(seattle_crime_report_dates)
# +
# !echo *.csv >> .gitignore
# -
# !cat .gitignore
# !git status
# !git status
# !git config --global user.name "Lloyd"
# !git config --global user.email <EMAIL>
| Binary Classification Data Preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
x=r"hello i am \n bibek"
print(x)#with the help of r will print as it is.
x="C:\\Users\\msi\\Desktop\\SEM 5\\CG"
print(x)
x=r"C:\\Users\\msi\\Desktop\\SEM 5\\CG"
print(x)
import re
x=["bibek","nanda","shubham","shyrans","sovit","suman","santosh","tauhid","peace","kelvin"]
x=list(filter(lambda y: re.match(r"s",y),x))#return whose name start with s:
print(x)
import re
x=["bibek","nanda","shubham","shyrans","sovit","suman","santosh","tauhid","peace","kelvin"]
x=list(filter(lambda y:re.match(r".*s$",y),x))#keep the values that end with s.
print(x)
import re
x=["bibek","nanda","shubham","shyrans","sovit","suman","santosh","tauhid","peace","kelvin"]
x=list(filter(lambda y:re.match(r".*h.*",y),x))#it will return all the value that contain h.
print(x)
import re
x=["bibek","nanda","shubham","shyrans","sovit","suman","santosh","tauhid","peace","kelvin"]
x=list(filter(lambda y:re.match(r"(h|d|s)",y),x))#return value if any name contain h,d,s.
print(x)
x="hello i am bibek"
y=x.split(" ")
search=input("enter what you want to search?")
for i in y:
if search==i:
print(y.index(search),i)
# Regular expression to validate an email address entered by the user.
import re
for i in range(2):
    x = input("enter email id ?")
    # local part '@' domain '.' 2-6 letter TLD; raw string avoids the
    # invalid-escape warning for '\w' and '\.' in a plain string literal.
    regex = r"[\w.]+@[\w.]+\.[a-zA-Z]{2,6}"
    # Bug fix: re.search() accepted any input merely *containing* an email
    # (e.g. 'junk a@b.com junk'); fullmatch requires the whole input to be
    # a single email address.
    if re.fullmatch(regex, x):
        print("valid email:")
    else:
        print("invalid email")
import re
x="hello 5 bibek i ma alexa 678 i love python."
y=re.findall("\d+",x)
print(y)
import re
x="hello 2 i am carlos78"
x=re.findall("\d",x)
print(x)
# +
#match() function--->if word is present at begining of a string
#search function----->if word is present any place in string
#findall function()--->if word is present in string and return all the frequency of that word.
# -
import re
x=r"hello python lover i am your future."
#it print a word that is begining at a string.
y=re.match("hello",x)
print(y)
print(y.group())#printing a exact word.
import re
x="hello gyes i am from nepal i love nepal"
x=re.search("nepal",x)#it print a word that is in string.
print(x.group())
import re
x="hello gyes i am from nepal i love nepal"
x=re.findall("(nepal|from|love)",x)#it will print all the value that we want to search
print(x)
#use of IGNORECASE
import re
x="hello gyes i am from Nepal i love nepal,NEPAL12"
x=re.findall("(nepal|from|love)",x,re.IGNORECASE)#it will print all the value and use upper and lowercase as well.
print(x)
# \A matches only at the very beginning of the string.
import re
x = r"hello gyes i am from Nepal i love nepal,NEPAL12"
y = r"Hey gyes i am from Nepal i love nepal,NEPAL12"
# Raw patterns: "\A" is not a valid Python string escape, so the non-raw
# originals triggered invalid-escape warnings on modern Python.
x = re.findall(r"\Ahello", x)
y = re.findall(r"\AHey", y)
print(x)
print(y)
# \b is a word boundary: r"al\b" matches "al" only at the END of a word
# (swap in r"\bNe" to anchor at the start of a word instead).
import re
x = r"hello gyes i am from Nepal i love nepal,NEPAL12"
x = re.findall(r"al\b", x)
print(x)
# \B matches where the pattern is present but NOT at a word boundary —
# here: runs of "l" that do not begin a word.
import re
x = "hello gyes i am from Nepal i love nepal,NEPAL12"
x = re.findall(r"\Bl+", x)
print(x)
# \d matches any single digit (0-9); r"\d+" would return whole numbers instead.
import re
x = "2 million monthly visits since Jan'19."
# Raw string avoids the invalid "\d" escape warning of the non-raw original.
x = re.findall(r"\d", x)
if x:  # a non-empty result list means at least one digit was found
    print("yes:")
else:
    print("no")
print(x)
# \w+ extracts runs of alphanumeric characters (a-z, A-Z, 0-9, _),
# skipping spaces and other punctuation.
import re
x = "2 million monthly visits since Jan'19."
# Raw string avoids the invalid "\w" escape warning of the non-raw original.
x = re.findall(r"\w+", x)
print(x)
# "." matches any single character except a newline.
import re
x = "2 million monthly visits since Jan'19."
x1 = re.findall("m.", x)    # "m" plus one following character
x2 = re.findall("m...", x)  # "m" plus three following characters
print(x1)
print(x2)
# "^" anchors the pattern to the very beginning of the string.
import re
x = "2 million monthly visits since Jan'19. nepal"
pattern = "^2"  # matches only because the text starts with "2"
x1 = re.findall(pattern, x)
print(x1)
# "$" anchors the pattern to the very end of the string.
import re
x = "2 million monthly visits since Jan'19. nepal"
pattern = "nepal$"  # matches only because the text ends with "nepal"
x1 = re.findall(pattern, x)
print(x1)
# "*" means zero or more repeats of the token to its left:
# "ssa*a" = "ss", then any number of "a"s, then one final "a".
# (Compare "ssa+a", which would require at least two trailing "a"s.)
import re
x = "ssa,ssssaaa,ssaa,sssaa,sssa"
x = re.findall("ssa*a", x)
print(x)
# "?" means zero or one occurrence of the token to its left.
import re
x = "easy easssy eay ey"
x = re.findall("eas?y", x)  # "e", "a", an optional single "s", "y"
print(x)
# A character set [...] matches any ONE character listed inside the brackets.
import re
x = "Nepal has a diverse geography, including fertile plains, subalpine forested hills,"
x1 = re.findall("[abc]+", x)  # runs made up only of a, b or c
x2 = re.findall("[a-z]+", x)  # runs of lowercase letters (capitals excluded)
print(x1)
print(x2)
# [^...] is a NEGATED set: match any character NOT listed after the ^.
# Excluding "(", ")", lowercase letters, commas and spaces leaves only
# the capital "N" in this sentence.
x = "Nepal has a diverse geography, including fertile plains, subalpine forested hills,"
x1 = re.findall("[^(a-z), ]", x)
print(x1)
import re
x = "hello my email is <EMAIL>"
# One character outside a-z/0-9/space followed by word characters; the raw
# string avoids the invalid "\w" escape warning of the non-raw original.
x = re.findall(r"[^a-z0-9 ]\w+", x)
print(x)
# Real-life style example: pull "user@domain.com" addresses out of free text.
import re
x = "Send a mail to <EMAIL>, <EMAIL> and <EMAIL> about the <EMAIL>@<EMAIL>.in"
# Raw string so "\w" and "\." reach the regex engine intact instead of
# triggering invalid-escape warnings.
x = re.findall(r"[a-zA-Z._0-9]+@\w+\.com", x)
print(x)
# Extract dates written as YYYY-MM-DD or YYYY/MM/DD.
import re
x = "i have two date 2012-06-23 and 2020/11/23"
# "." matches the single separator character (hyphen or slash); the raw
# string avoids the invalid "\d" escape warning of the non-raw original.
x = re.findall(r"\d{4}.\d{2}.\d{2}", x)
print(x)
# Extract dates written as "DD Month YYYY".
import re
x = "London Olympic 2012 was held from 27 July 2012 to 12 August 2012."
# Two digits, one separator, a 2-8 character month name, one separator,
# four digits; raw string avoids invalid-escape warnings.
x = re.findall(r"\d{2}.\w{2,8}.\d{4}", x)
print(x)
| regular expression.ipynb |