code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Plot multinomial and One-vs-Rest Logistic Regression
#
#
# Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
# The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
# are represented by the dashed lines.
#
#
# +
print(__doc__)

# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

# Make a 3-class blob dataset, then shear it with a linear map so the
# classes are anisotropic (not axis-aligned).
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)

for multi_class in ('multinomial', 'ovr'):
    clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
                             multi_class=multi_class).fit(X, y)

    # print the training scores
    print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))

    # create a mesh to plot in
    h = .02  # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')

    # Plot also the training points
    colors = "bry"
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
                    edgecolor='black', s=20)

    # Plot the three one-against-all classifiers (dashed lines)
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    coef = clf.coef_
    intercept = clf.intercept_

    def plot_hyperplane(c, color):
        """Draw the line where coef[c] . x + intercept[c] == 0."""
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([xmin, xmax], [line(xmin), line(xmax)],
                 ls="--", color=color)

    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)

plt.show()
| scikit-learn-official-examples/linear_model/plot_logistic_multinomial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # Identity operators
#
# `is`     -> True if the operands are identical (refer to the same object)
# `is not` -> True if the operands are not identical (different objects)
#
# NOTE: `is` compares object identity, never value equality. Comparing
# against a literal (e.g. `names is not "String"`) raises a SyntaxWarning
# in CPython and its result is implementation-dependent, so we only ever
# compare against named variables here.
# -
names = ["ABC", "XYZ", "PQR"]
num = 10
string = "ABC"

names is string      # False: a list and a str are distinct objects

names_2 = names      # aliases the SAME list object, no copy is made
names is names_2     # True: both names refer to one object

names is not string  # True: distinct objects (literal pitfall avoided)
names is not num     # True
| 04. Python Operators and Operands/4. Identity Operators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 공격수 데이터 분석 - Follower 제거 전
# + [markdown] slideshow={"slide_type": "skip"}
# # 작업 데이터 받기
# + slideshow={"slide_type": "skip"}
import pandas as pd  # pandas is used throughout this notebook but was never imported

# Position data produced by the preceding preprocessing step.
# NOTE(review): absolute local path -- will only resolve on the author's machine.
df_pos = pd.read_csv(r'C:\Users\Gk\Documents\dev\data\LinearRegression_Football_data\df_pos.csv',
                     encoding='utf-8-sig', index_col=0)
# -

# # Position Rounding
# Positions are float-encoded; round them to integer position codes.
df_pos.position = df_pos.position.round()
df_pos.position.unique()

# Attackers are position codes 4 and 2.
# DataFrame.append was removed in pandas 2.0 -- pd.concat preserves the same
# row order (all code-4 rows followed by all code-2 rows).
df_atk = pd.concat([df_pos[df_pos.position == 4], df_pos[df_pos.position == 2]])
# NOTE(review): the reset result is displayed but never assigned back to
# df_atk -- confirm that keeping the original index is intentional.
df_atk.reset_index(drop=True)

# # 상관관계 확인 (correlation check)
# df_atk.corr()[df_atk.corr() > 0.7].to_csv('df_atk_corr.csv', encoding='utf-8-sig')
df_atk.corr()[df_atk.corr() > 0.6]
# NOTE(review): filename "df_atk_corrc.csv" differs from the one written
# above ("df_atk_corr.csv") -- possible typo, verify.
pd.read_csv("df_atk_corrc.csv", encoding='utf-8', index_col=0)
# # 적합한 n_component 확인 (find a suitable number of PCA components)

import numpy as np  # np is used below but was never imported in this notebook
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA

# The 18 performance-related feature columns that go through PCA.
df_for_pca = df_atk[['position', 'shots_total', 'shots_on', 'goals_total', 'goals_conceded', 'goals_assists', 'passes_key',
                     'tackles_total', 'tackles_blocks', 'tackles_interceptions', 'duels_total', 'duels_won', 'dribbles_attempts',
                     'dribbles_success', 'penalty_saved', 'games_appearences', 'substitutes_in', 'substitutes_bench']]
len(df_for_pca.columns)

# Rescale every feature to [0, 1] so no single feature dominates the PCA.
scaler = MinMaxScaler()
data_rescaled = scaler.fit_transform(df_for_pca)

# +
# Fit a full-rank PCA and plot the cumulative explained-variance ("scree")
# curve to pick the number of components at the 95% threshold.
pca = PCA().fit(data_rescaled)

plt.rcParams["figure.figsize"] = (12, 6)
fig, ax = plt.subplots()
xi = np.arange(1, 19, step=1)
y = np.cumsum(pca.explained_variance_ratio_)

plt.ylim(0.0, 1.1)
plt.plot(xi, y, marker='o', linestyle='--', color='b')
plt.xlabel('Number of Components')
plt.xticks(np.arange(0, 19, step=1))  # change from 0-based array index to 1-based human-readable label
plt.ylabel('Cumulative variance (%)')
plt.title('The number of components needed to explain variance')

plt.axhline(y=0.95, color='r', linestyle='-')
plt.text(0.5, 0.85, '95% cut-off threshold', color='red', fontsize=16)
ax.grid(axis='x')
plt.show()
# -
# # 위 결과를 토대로 1차 PCA - 전체 데이터에서 8개 주성분 추출
# (First PCA pass based on the scree plot: extract 8 principal components.)
# NOTE(review): this fits PCA on the *unscaled* df_for_pca, whereas the scree
# analysis above used MinMax-scaled data -- confirm that is intentional.
data = PCA(n_components=8).fit_transform(df_for_pca)
data
# Wrap the components in a DataFrame with placeholder names a..h.
df_pca_1 = pd.DataFrame(data, columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
df_pca_1
# Components should be near-uncorrelated; show anything above 0.7.
df_pca_1.corr()[df_pca_1.corr()>0.7]
# Split the attacker columns into PCA'd features vs untouched ("npca") ones.
pca_cols = list(df_for_pca.columns)
npca_cols = df_atk.columns.tolist()
npca_features = [item for item in npca_cols if item not in pca_cols]
npca_features
len(npca_features)
# OLS design frame: untouched features + the 8 components; drop the name key.
df_ols = pd.concat([df_atk[npca_features].reset_index(drop=True), df_pca_1.reset_index(drop=True)], axis=1)
df_ols = df_ols.drop('player_name', axis=1)
df_ols
# Reference formula listing every candidate regressor. This is a bare string
# expression: it is display-only in the notebook and has no runtime effect.
"value ~ scale(age) + \
scale(height) + \
scale(weight) + \
scale(rating) + \
scale(follower) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(cards_yellowred) + \
scale(cards_red) + \
scale(penalty_won) + \
scale(penalty_commited) + \
scale(penalty_success) + \
scale(penalty_missed) + \
scale(games_lineups) + \
scale(substitutes_out) + \
scale(games_played) + \
scale(a) + \
scale(b) + \
scale(c) + \
scale(d) + \
scale(e) + \
scale(f) + \
scale(g) + \
scale(h)"
# # 1차 PCA OLS - result.pvalues 기준 feature 제거
# (First OLS on the PCA features; regressors pruned by result.pvalues.)
# +
import statsmodels.api as sm  # sm is used throughout but was never imported
from sklearn.model_selection import train_test_split

# 70/30 hold-out split for the summary fit (the CV loop below re-splits
# df_ols on its own).
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)

# Reduced formula kept after p-value pruning.
formula = "value ~ scale(age) + \
scale(passes_accuracy) + \
scale(penalty_won) + \
scale(games_played) + \
scale(b) + \
scale(e)"

model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())

##############################################################################
# 10-fold cross-validation of the same formula.
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score

z = 10
scores_rm = np.zeros([2, z])  # row 0: per-fold R2, row 1: per-fold MSE
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
    df_train = df_ols.iloc[idx_train]
    df_test = df_ols.iloc[idx_test]
    model = sm.OLS.from_formula(formula, data=df_train)
    result = model.fit()
    pred = result.predict(df_test)
    rsquared = r2_score(df_test.value, pred)
    mse = mean_squared_error(df_test.value, pred)
    scores_rm[0, i] = rsquared
    scores_rm[1, i] = mse
    print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared))
    # Label fixed: this line reports MSE, not R2.
    # NOTE(review): mse_total is the total (variance-based) mean square, not
    # the residual MSE -- result.mse_resid may be what was intended.
    print("학습 mse = {:.8f}, 검증 mse = {:.8f}".format(result.mse_total, mse))
    print("------------------------------------------------------------------")

print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
# -

# Highest p-values first -- candidates for removal in the next pass.
result.pvalues.sort_values(ascending=False)
# NOTE(review): throughout this section the KFold loop bodies lost their
# indentation during notebook conversion -- the lines from "df_train = ..."
# through the per-fold print() calls originally ran INSIDE each loop.
# Restore the indentation before executing any of these cells.
# +
# Variant of the first-pass model with scale(follower) swapped in for scale(e).
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_accuracy) + \
scale(penalty_won) + \
scale(games_played) + \
scale(b) + \
scale(follower)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
pred = result.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared))
print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
print("------------------------------------------------------------------")
print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
# -
result.pvalues.sort_values(ascending=False)
# # result에 아래 검증 중 예측된 모델의 P값 출력으로 인한 오류 - 1차 OLS는 오류
# (Translation: in the first pass, `result` was overwritten by the last
#  CV-fold fit, so the p-values printed above came from that fold rather
#  than the hold-out model -- the first-pass pruning was therefore flawed.)
# # 2차 PCA OLS - summary안의 p값 기준 feature 제거
# (Second OLS pass: regressors pruned using the p-values from the summary;
#  the CV fit is kept in a separate name, result_p, to avoid the clobbering.)
# #####################################################################################################################
# +
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
# df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(penalty_won) + \
scale(games_played) + \
scale(b) + \
scale(e)"
model = sm.OLS.from_formula(formula, data=df_ols)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result_p = model.fit()
pred = result_p.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result_p.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result_p.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
result.pvalues.sort_values(ascending=False)
# +
# Same second-pass model plus scale(follower).
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
# df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(penalty_won) + \
scale(games_played) + \
scale(b) + \
scale(e) + \
scale(follower)"
model = sm.OLS.from_formula(formula, data=df_ols)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result_p = model.fit()
pred = result_p.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result_p.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result_p.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
# #####################################################################################################################
# +
# Wider feature set tried again with a 70/30 hold-out split.
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(height) + \
scale(passes_accuracy) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(a) + \
scale(b) + \
scale(e)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
pred = result.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
# print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
# +
# Follower variant of the pruned model.
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_accuracy) + \
scale(penalty_won) + \
scale(games_played) + \
scale(b) + \
scale(follower)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result_p = model.fit()
pred = result_p.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result_p.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
# print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result_p.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result_p.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
result.pvalues.sort_values(ascending=False)
# +
# F-test: does adding scale(follower) significantly improve the fit?
model_full = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(height) + \
scale(passes_accuracy) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(a) + \
scale(b) + \
scale(e) + \
scale(follower)", data=df_ols)
model_reduced = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(height) + \
scale(passes_accuracy) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(a) + \
scale(b) + \
scale(e)", data=df_ols)
sm.stats.anova_lm(model_reduced.fit(), model_full.fit())
# +
# Type-II ANOVA of the full model (per-regressor significance).
model_full = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(height) + \
scale(passes_accuracy) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(a) + \
scale(b) + \
scale(e) + \
scale(follower)", data=df_ols)
result = model_full.fit()
sm.stats.anova_lm(result, typ=2)
# -
# # 3차 PCA - corr 기준 domain base 주성분 추출
# (Third PCA pass: collapse each group of highly correlated features into a
#  single, domain-named principal component.)
from sklearn.preprocessing import StandardScaler  # unused here; kept for parity
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler

df_for_pca

# # 높은 상관관계를 보이는 feature들 (highly correlated feature groups)
# 1. position, goals_total
# 2. shots_total, shots_on, goals_total
# 3. goals_conceded, penalty_saved
# 4. goals_assists, passes_key
# 5. tackles_total, tackles_blocks, tackles_interceptions
# 6. duels_total, duels_won
# 7. dribbles_attempts, dribbles_success
# 8. games_appearences, substitutes_in, substitutes_bench


def _pca1(cols, name):
    """Collapse the given df_for_pca columns into one MinMax-scaled PCA component."""
    rescaled = MinMaxScaler().fit_transform(df_for_pca[cols])
    return pd.DataFrame(data=PCA(n_components=1).fit_transform(rescaled),
                        columns=[name])


# One component per correlated group (names mirror the source columns).
df_pg = _pca1(['position', 'goals_total'], 'position_goalsTotal')
df_psg = _pca1(['position', 'shots_total', 'shots_on', 'goals_total'],
               'position_shotsOnTotal_goalsTotal')
df_sg = _pca1(['shots_total', 'shots_on', 'goals_total'], 'shotsOnTotal_goalsTotal')
df_gpe = _pca1(['goals_conceded', 'penalty_saved'], 'goalsConceded_penaltySaved')
df_gpa = _pca1(['goals_assists', 'passes_key'], 'goalsAssists_passesKey')
df_t = _pca1(['tackles_total', 'tackles_blocks', 'tackles_interceptions'], 'tackles')
df_du = _pca1(['duels_total', 'duels_won'], 'duels')
df_dr = _pca1(['dribbles_attempts', 'dribbles_success'], 'dribbles')
df_gs = _pca1(['games_appearences', 'substitutes_in', 'substitutes_bench'],
              'gamesAppearences_substitutes')

# First assembly double-counts goals_total (in both df_pg and df_sg), which
# shows up as a high correlation between those two components.
df_pca_2 = pd.concat([df_pg, df_sg, df_gpe, df_gpa, df_t, df_du, df_dr, df_gs], axis=1)
df_pca_2
df_pca_2.corr()[df_pca_2.corr() > 0.7]

# # PCA를 통해 주성분 추출을 했으나, goals_total의 중복 추출로 높은 상관관계 확인
# # 일단 OLS 진행 후, 추후에 다시 한번 둘을 한 component로 만들어 OLS 재진행
# (goals_total was extracted twice, causing the high correlation; df_pca_3
#  merges the overlapping groups via df_psg instead.)
df_pca_3 = pd.concat([df_psg, df_gpe, df_gpa, df_t, df_du, df_dr, df_gs], axis=1)
df_pca_3
df_pca_3.corr()[df_pca_3.corr() > 0.7]

# Columns NOT consumed by PCA stay as plain regressors.
pca_cols = list(df_for_pca.columns)
npca_cols = df_pos.columns.tolist()
npca_features = [item for item in npca_cols if item not in pca_cols]
len(pca_cols), len(npca_cols), len(npca_features)

# Rebuild the OLS design frame from untouched features + domain components.
df_ols = pd.concat([df_atk[npca_features].reset_index(drop=True), df_pca_3.reset_index(drop=True)], axis=1)
df_ols = df_ols.drop('player_name', axis=1)
df_ols
df_ols.columns
"value ~ scale(age) + \
scale(height) + \
scale(weight) + \
scale(rating) + \
scale(follower) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(cards_yellowred) + \
scale(cards_red) + \
scale(penalty_won) + \
scale(penalty_commited) + \
scale(penalty_success) + \
scale(penalty_missed) + \
scale(games_lineups) + \
scale(substitutes_out) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsConceded_penaltySaved) + \
scale(goalsAssists_passesKey) + \
scale(tackles) + \
scale(duels) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes)"
# +
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
# df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsAssists_passesKey) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes)"
model = sm.OLS.from_formula(formula, data=df_ols)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result_p = model.fit()
pred = result_p.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result_p.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result_p.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result_p.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
model5 = [[-0.05065965, 0.56461972, -0.21390691, -0.38217239, 0.39463493, 0.40347619, 0.54433083, -0.04739948, 0.24228798, 0.02932993],
[0.00849597, 0.63221316, 0.01726316, -0.16063964, 0.52578786, 0.57285031, -0.39533457, 0.01245289, 0.26820445, 0.14063162]]
result.pvalues.sort_values(ascending=False)
# +
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsAssists_passesKey) + \
scale(gamesAppearences_substitutes) + \
scale(follower)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result_p = model.fit()
pred = result_p.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result_p.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result_p.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result_p.mse_total, mse))
# print("------------------------------------------------------------------")
# print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
print("모델 성능 : {}".format(scores_rm[0].mean()))
# -
result.pvalues.sort_values(ascending=False)
# +
model_full = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsAssists_passesKey) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes) + \
scale(follower)", data=df_ols)
model_reduced = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsAssists_passesKey) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes)", data=df_ols)
sm.stats.anova_lm(model_reduced.fit(), model_full.fit())
# +
model_full = sm.OLS.from_formula(
"value ~ scale(age) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(fouls_committed) + \
scale(cards_yellow) + \
scale(penalty_won) + \
scale(games_played) + \
scale(position_shotsOnTotal_goalsTotal) + \
scale(goalsAssists_passesKey) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes) + \
scale(follower)", data=df_ols)
result = model_full.fit()
sm.stats.anova_lm(result, typ=2)
# -
# # 위에 PCA 1차와 같이 예측된 p값으로 OLS 진행
# +
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(penalty_won) + \
scale(games_played) + \
scale(shotsOnTotal_goalsTotal) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
pred = result.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
# print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
# print("------------------------------------------------------------------")
print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
# -
result.pvalues.sort_values(ascending=False)
# +
from sklearn.model_selection import train_test_split
dfX = df_ols.drop(['value'], axis=1)
dfy = df_ols['value']
df = pd.concat([dfX, dfy], axis=1)
df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
formula = "value ~ scale(age) + \
scale(passes_total) + \
scale(passes_accuracy) + \
scale(fouls_drawn) + \
scale(penalty_won) + \
scale(games_played) + \
scale(shotsOnTotal_goalsTotal) + \
scale(dribbles) + \
scale(gamesAppearences_substitutes) + \
scale(follower)"
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
print(result.summary())
##############################################################################
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
z = 10
scores_rm = np.zeros([2, z])
cv = KFold(z, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df_ols)):
df_train = df_ols.iloc[idx_train]
df_test = df_ols.iloc[idx_test]
model = sm.OLS.from_formula(formula, data=df_train)
result = model.fit()
pred = result.predict(df_test)
rsquared = r2_score(df_test.value, pred)
mse = mean_squared_error(df_test.value, pred)
# pred = result.predict(df_test)
# rss = ((df_test.value - pred) ** 2).sum()
# tss = ((df_test.value - df_test.value.mean())** 2).sum()
# rsquared = 1 - rss / tss
scores_rm[0, i] = rsquared
scores_rm[1, i] = mse
# print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared))
# print("학습 mse = {:.8f}, 검증 R2 = {:.8f}".format(result.mse_total, mse))
# print("------------------------------------------------------------------")
print("모델 성능 : {}, 모델 mse : {}".format(scores_rm[0].mean(), scores_rm[1].mean()))
# -
# # P-value를 predicted result에서 가지고 와 모델 feature 정리를 2번 진행
# # 각각 상향과 하향 1회씩
| Analysis/6.7_DataAnalysis_AtkMdf_PCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Mask R-CNN
# language: python
# name: mask_r_cnn
# ---
# # Mask R-CNN - Inspect Training Data
#
# Inspect and visualize data loading and pre-processing code.
# +
import os
import sys
import itertools
import math
import logging
import json
import re
import random
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
# Root directory of the project
ROOT_DIR = os.path.abspath("./")  # assumes the notebook is launched from the repo root
# Import Mask RCNN
sys.path.append("Mask_RCNN")  # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
# %matplotlib inline
# -
# ## Configurations
#
# Run one of the code blocks below to import and load the configurations to use.
# +
# Run one of the code blocks
# Shapes toy dataset
# import shapes
# config = shapes.ShapesConfig()
# Utility-pole dataset (project-local module)
import utility_poles as up
config = up.UtilityPoleConfig()
UP_DIR = os.path.join(ROOT_DIR, 'data_dataset')  # dataset directory under the project root
# -
# ## Dataset
# +
# Load dataset
if config.NAME == "poles":
    dataset = up.UtilityPoleDataset()
    dataset.load_pole(UP_DIR, "train")

# Must call before using the dataset
# NOTE(review): `dataset` is only bound when config.NAME == "poles"; the lines
# below would raise NameError for any other config — confirm intended.
dataset.prepare()

print("Image Count: {}".format(len(dataset.image_ids)))
print("Class Count: {}".format(dataset.num_classes))
for i, info in enumerate(dataset.class_info):
    print("{:3}. {:50}".format(i, info['name']))
# -
# ## Display Samples
#
# Load and display images and masks.
# Load and display random samples
# Pick 4 random image ids; for each, show the image alongside its top class masks.
image_ids = np.random.choice(dataset.image_ids, 4)
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset.class_names, limit=2)
# ## Bounding Boxes
#
# Rather than using bounding box coordinates provided by the source datasets, we compute the bounding boxes from masks instead. This allows us to handle bounding boxes consistently regardless of the source dataset, and it also makes it easier to resize, rotate, or crop images because we simply generate the bounding boxes from the updated masks rather than computing bounding box transformation for each type of image transformation.
# +
# Load random image and mask.
image_id = random.choice(dataset.image_ids)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Compute Bounding box
# Boxes are derived from the masks rather than taken from the source annotations.
bbox = utils.extract_bboxes(mask)

# Display image and additional stats (log prints name, shape, dtype, min/max)
print("image_id ", image_id, dataset.image_reference(image_id))
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
# -
| inspect_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/saurabhsingh1411/100-Days-Of-ML-Code/blob/master/NLP/Poem_generation_with_Bi_directional_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="F2ZkWveRfyU_"
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
import tensorflow.keras.utils as ku
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="8M5MK1uGf3tL" outputId="96bcee1c-e68f-4a69-c2bf-6f5a6609ee34"
# !wget --no-check-certificate \
# https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sonnets.txt \
# -O /tmp/sonnets.txt
# + id="SoXJcXWof4IO"
# Read the whole sonnets corpus; a context manager guarantees the file handle
# is closed (the original `open(...).read()` leaked it).
with open('/tmp/sonnets.txt') as f:
    data = f.read()
# + colab={"base_uri": "https://localhost:8080/", "height": 171} id="TjB0qH5df4NN" outputId="83224667-3468-46a9-d347-3db9438826ed"
data
# + colab={"base_uri": "https://localhost:8080/"} id="uADjL1D6f4Pz" outputId="035c1e6e-6b3b-4db9-a68c-f7b4e7df13ad"
# One lowercase string per line of the corpus.
corpus=data.lower().split('\n')
corpus
# + id="tV97wPcAf4Sn"
# Build the vocabulary from the corpus; +1 reserves index 0 for padding.
tokenizer=Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words=len(tokenizer.word_index)+1
# + colab={"base_uri": "https://localhost:8080/"} id="3MOMDvfWf4Vg" outputId="e3d01dfe-2525-46a2-9e18-4c72859c8b08"
total_words
# + id="1m6O2_lCin7d"
# create input sequences using list of tokens
# For each line, emit every n-gram prefix of length >= 2; the last token of
# each prefix will later become the prediction target.
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        n_gram_sequence = token_list[:i+1]
        input_sequences.append(n_gram_sequence)
# + id="HHGW4ik1itPN"
# pad sequences
# Left-pad every sequence to the length of the longest one so they stack
# into a rectangular array.
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# + id="PA5wOz7mitUz"
# create predictors and label
# Predictors: all tokens but the last; label: the final token, one-hot encoded.
predictors, label = input_sequences[:,:-1],input_sequences[:,-1]
label = ku.to_categorical(label, num_classes=total_words)
# + colab={"base_uri": "https://localhost:8080/"} id="lWvTQklRitZs" outputId="5b02e58e-a150-42f1-cfc3-c77d612da9ad"
predictors
# + colab={"base_uri": "https://localhost:8080/"} id="10EuwYOKitdJ" outputId="ee4b6588-94ec-4fe4-86b6-d5bbf124a1cb"
# Bidirectional-LSTM language model: embedding -> BiLSTM -> LSTM -> dense head.
model = Sequential()
model.add(Embedding(total_words, 128, input_length=max_sequence_len - 1))
model.add(Bidirectional(LSTM(120, return_sequences=True)))
model.add(Dropout(0.2))
model.add(LSTM(96))
# FIX: Dense expects an integer unit count — use floor division, not `/`
# (which yields a float).
model.add(Dense(total_words // 2, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(total_words, activation='softmax'))
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
print(model.summary())
# + colab={"base_uri": "https://localhost:8080/"} id="iTmudV2BjFIr" outputId="21ae256f-c21f-4652-862e-ab2eb06dc544"
# Train on the full n-gram dataset; `history` records per-epoch loss/accuracy.
history = model.fit(predictors, label, epochs=100, verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="ntS7A4JZjFL9" outputId="02ceff3a-cc47-43fc-af57-fcb3e490e0e2"
import matplotlib.pyplot as plt
acc = history.history['accuracy']
loss = history.history['loss']
epochs = range(len(acc))
# FIX: the original drew both curves in blue (indistinguishable) and the
# second title() call overwrote the first — use distinct colours, one title.
plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.title('Training accuracy and loss')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="kuNrj-C0jFSh" outputId="c99df36a-5239-421a-e933-16a00f5db452"
# Greedy text generation: repeatedly predict the most likely next word and
# append it to the seed.
seed_text = "The true purpose of deep learning is making memes because"
next_words = 100

for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    # FIX: Sequential.predict_classes() was removed in TF 2.6 — take the
    # argmax of the class probabilities instead.
    predicted = np.argmax(model.predict(token_list, verbose=0), axis=-1)[0]
    output_word = ""
    # Reverse-lookup the predicted index in the tokenizer vocabulary.
    for word, index in tokenizer.word_index.items():
        if index == predicted:
            output_word = word
            break
    seed_text += " " + output_word
print(seed_text)
| NLP/Poem_generation_with_Bi_directional_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Project: Create a neural network class
#
# ---
#
# Based on previous code examples, develop a neural network class that is able to classify any dataset provided. The class should create objects based on the desired network architecture:
#
# 1. Number of inputs
# 2. Number of hidden layers
# 3. Number of neurons per layer
# 4. Number of outputs
# 5. Learning rate
#
# The class must have the train, and predict functions.
#
# Test the neural network class on the datasets provided below: Use the input data to train the network, and then pass new inputs to predict on. Print the expected label and the predicted label for the input you used. Print the accuracy of the training after predicting on different inputs.
#
# Use matplotlib to plot the error that the train method generates.
#
# **Don't forget to install Keras and tensorflow in your environment!**
#
# ---
# ### Import the needed Packages
# +
import numpy as np
import matplotlib.pyplot as plt
# Needed for the mnist data
from keras.datasets import mnist
from keras.utils import to_categorical
# -
# ### Define the class
class NeuralNetwork:
    """A fully-connected feed-forward network trained with full-batch
    gradient descent.

    The output layer uses softmax when there is more than one output
    (multi-class classification) and a sigmoid otherwise.
    """

    def __init__(self, architecture, alpha):
        '''
        architecture: dict with keys
            'inputs'  - number of input features,
            'nodes'   - list of neuron counts, one per hidden layer,
            'outputs' - number of output neurons.
        alpha: Learning rate.
        '''
        self.alpha = alpha
        self.layers = architecture
        # One weight matrix and one bias vector per layer (hidden + output).
        self.weights = []
        self.bs = []
        for i in range(len(architecture['nodes'])):
            self.bs.append(np.random.randn(architecture['nodes'][i]))
            if i == 0:
                self.weights.append(np.random.randn(architecture['inputs'], architecture['nodes'][i]))
            else:
                self.weights.append(np.random.randn(architecture['nodes'][i-1], architecture['nodes'][i]))
        self.weights.append(np.random.randn(architecture['nodes'][len(architecture['nodes'])-1], architecture['outputs']))
        self.bs.append(np.random.randn(architecture['outputs']))
        self.outputs = architecture['outputs']

    def __repr__(self):
        # String representation of the network architecture.
        return "NeuralNetwork: {}".format( "-".join(str(l) for l in self.layers))

    def softmax(self, X):
        # Numerically stable softmax: subtracting the row-wise max leaves the
        # result unchanged but prevents overflow in np.exp for large inputs.
        expX = np.exp(X - X.max(axis=1, keepdims=True))
        return expX / expX.sum(axis=1, keepdims=True)

    def sigmoid(self, x):
        # Logistic sigmoid.
        return 1.0 / (1.0 + np.exp(-x))

    def sigmoid_deriv(self, x):
        # Derivative of the sigmoid expressed in terms of its *output* value x.
        return x * (1 - x)

    def predict(self, inputs):
        """Forward pass.

        Returns the output-layer activations and caches every layer's
        activations in self.digits for use by the backward pass.
        """
        self.digits = [inputs]
        for i in range(len(self.weights)):
            z = np.dot(self.digits[i], self.weights[i]) + self.bs[i]
            if i == len(self.weights) - 1 and self.outputs > 1:
                self.digits.append(self.softmax(z))  # multi-class output layer
            else:
                self.digits.append(self.sigmoid(z))
        return self.digits[len(self.digits)-1]

    def train(self, inputs, labels, epochs = 1000, displayUpdate = 100):
        """Train with full-batch gradient descent.

        Prints the mean absolute error every `displayUpdate` epochs and plots
        the error curve with matplotlib when done.
        """
        fig, ax = plt.subplots(1,1)
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Error')
        errors = []
        for i in range(epochs):
            prediction = self.predict(inputs)
            error = labels - prediction
            errors.append(np.mean(np.abs(error)))
            if i%displayUpdate == 0:
                print("Error:", np.mean(np.abs(error)))
            # Backward pass: one delta per layer, computed output-first.
            deltas = []
            j = len(self.digits) - 1
            while j > 0:
                if j != len(self.digits) - 1:
                    # Propagate the layer-above delta back through its weights.
                    error = np.dot(delta, self.weights[j].T)
                delta = error * self.sigmoid_deriv(self.digits[j])
                deltas.append(delta)
                j-=1
            deltas = deltas[::-1]
            # FIX: the bias gradient must be summed over the batch *per neuron*.
            # The previous np.sum(d) collapsed each layer's gradient to a single
            # scalar, giving every bias in a layer the same update.
            bs_deltas = [d.sum(axis=0) for d in deltas]
            for j in range(len(deltas)):
                self.weights[j] += np.dot(self.digits[j].T, deltas[j]) * self.alpha
            for j in range(len(bs_deltas)):
                self.bs[j] += bs_deltas[j] * self.alpha
        ax.plot(errors)
# ### Test datasets
# #### XOR
# +
# input dataset
# The four XOR input combinations.
XOR_inputs = np.array([
    [0,0],
    [0,1],
    [1,0],
    [1,1]
])

# labels dataset
# Column vector of XOR outputs, one per input row.
XOR_labels = np.array([[0,1,1,0]]).T
# +
#TODO: Test the class with the XOR data
# One hidden layer of 4 neurons and a single sigmoid output for the binary label.
arch = {
    'inputs': XOR_inputs.shape[1],
    'nodes': [4],
    'outputs': 1
}

nn = NeuralNetwork(arch, 1)
nn.train(XOR_inputs, XOR_labels, 5000, 100)
# -
# #### Multiple classes
# +
# Creates the data points for each class
# Three Gaussian blobs (700 points each) centred at (0,-3), (3,3) and (-3,3).
class_1 = np.random.randn(700, 2) + np.array([0, -3])
class_2 = np.random.randn(700, 2) + np.array([3, 3])
class_3 = np.random.randn(700, 2) + np.array([-3, 3])

feature_set = np.vstack([class_1, class_2, class_3])
labels = np.array([0]*700 + [1]*700 + [2]*700)

# One-hot encode the integer labels for the softmax output layer.
one_hot_labels = np.zeros((2100, 3))
for i in range(2100):
    one_hot_labels[i, labels[i]] = 1

plt.figure(figsize=(10,10))
plt.scatter(feature_set[:,0], feature_set[:,1], c=labels, s=30, alpha=0.5)
plt.show()
# +
#TODO: Test the class with the multiple classes data
# One hidden layer of 3 neurons; 3 softmax outputs (one per class).
arch = {
    'inputs': feature_set.shape[1],
    'nodes': [3],
    'outputs': 3
}

nn = NeuralNetwork(arch, 0.01)
nn.train(feature_set, one_hot_labels, 5000, 100)
# -
# #### On the mnist data set
#
# ---
# Train the network to classify hand drawn digits.
#
# For this data set, if the training step is taking too long, you can try to adjust the architecture of the network to have fewer layers, or you could try to train it with fewer inputs. The data has already been loaded and preprocessed so that it can be used with the network.
#
# ---
# +
# Load the train and test data from the mnist data set
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Plot a sample data point
plt.title("Label: " + str(train_labels[0]))
plt.imshow(train_images[0], cmap="gray")
# +
# Standardize the data
# Flatten the images: each 28x28 image becomes a 784-element vector.
train_images = train_images.reshape((60000, 28 * 28))
# turn values from 0-255 to 0-1
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255

# Create one hot encoding for the labels (10 classes, one per digit)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# +
# TODO: Test the class with the mnist data. Test the training of the network with the test_images data, and
# record the accuracy of the classification.
# Two hidden layers (64, 32) and 10 softmax outputs; only the first 1000
# training images are used to keep the pure-numpy training loop fast.
arch = {
    'inputs': train_images.shape[1],
    'nodes': [64, 32],
    'outputs': 10
}

nn = NeuralNetwork(arch, 0.001)
nn.train(train_images[0:1000], train_labels[0:1000], 1000, 100)
# +
# Show 12 predictions in a 4x3 grid of subplots.
f, plots = plt.subplots((12+3-1)//3, 3, figsize=(15,15))
plots = [plot for sublist in plots for plot in sublist]
res = nn.predict(train_images[0:1000])
# Threshold the network outputs into hard 0/1 values.
res[res > 0.5] = 1
res[res < 0.5] = 0
# FIX: predictions were computed on train_images, but the original loop
# displayed test_images next to them — show the images that were predicted.
for im_data, plot, r in zip(train_images[0:12], plots, res):
    plot.set_title(r)
    plot.imshow(im_data.reshape((28,28)), cmap="gray")
# -
# After predicting on the *test_images*, use matplotlib to display some of the images that were not correctly classified. Then, answer the following questions:
#
# 1. **Why do you think those were incorrectly classified?**
# Not enough training was performed, or a different method is needed.
#
# 2. **What could you try doing to improve the classification accuracy?**
# Execute more iterations.
#
| Neural Network Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/smlra-kjsce/PyTorch-101/blob/main/Sine%20Function%20Approximation%20in%20PyTorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dR0RKxShTEiT"
# ## Import Libraries
# + id="pNssNOferi7B"
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")  # prefer GPU when available
# + [markdown] id="MAxg-qX-TIEO"
# ## Define some hyperparameters
# + id="Dw_J3We1XOl5"
LR = 1e-6          # learning rate for Adam
MAX_EPOCH = 10     # number of training epochs
BATCH_SIZE = 512   # samples per mini-batch
# + [markdown] id="1Hrjs1vgTKf2"
# ## Defining our model to approximate sine curve
# + id="uOf7aGyEXSc5"
class SineApproximator(nn.Module):
    """Small MLP (1 -> 1024 -> 1024 -> 1) used to regress y = sin(x)."""

    def __init__(self):
        super(SineApproximator, self).__init__()
        # Two hidden layers of 1024 units with in-place ReLU activations.
        stack = [
            nn.Linear(1, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 1),
        ]
        self.regressor = nn.Sequential(*stack)

    def forward(self, x):
        # Map a batch of scalars to a batch of scalar predictions.
        return self.regressor(x)

## Since nn.Module is inherited, backward() comes from autograd and need not be defined here.
# + id="KXgTdqUDXWu6"
# 1M samples drawn uniformly from [0, 2*pi) with their sine values as targets.
X = np.random.rand(10**6) * 2 * np.pi
y = np.sin(X)
# 80/20 train/validation split; map() converts the four numpy arrays to tensors.
X_train, X_val, y_train, y_val = map(torch.tensor, train_test_split(X, y, test_size=0.2))
# unsqueeze(1) gives each sample shape (1,) to match the model's Linear(1, ...) input.
train_dataloader = DataLoader(TensorDataset(X_train.unsqueeze(1), y_train.unsqueeze(1)), batch_size=BATCH_SIZE,
                              pin_memory=True, shuffle=True)
val_dataloader = DataLoader(TensorDataset(X_val.unsqueeze(1), y_val.unsqueeze(1)), batch_size=BATCH_SIZE,
                            pin_memory=True, shuffle=True)
model = SineApproximator().to(device)
optimizer = optim.Adam(model.parameters(), lr=LR)
criterion = nn.MSELoss(reduction="mean")
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="_BjPn291YbNL" outputId="2c65a674-4a37-4408-9b51-e7f12fe66c66"
# Sanity-check scatter plots of the full dataset and of each split.
plt.scatter(X,y)
plt.title('Total Dataset for Sine Approximation');
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="M9Yil3aeYpdK" outputId="749eda5d-479b-42ef-9791-9fdba314500f"
plt.scatter(X_train.cpu(),y_train.cpu())
plt.title('train Dataset for Sine Approximation');
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="dV15m7-jYrEh" outputId="4d974912-ff68-4dd0-cb9e-cdab6967be62"
plt.scatter(X_val.cpu(),y_val.cpu())
plt.title('val Dataset for Sine Approximation');
# + [markdown] id="Pbhp7wxOTQH8"
# ## Training our model
# + colab={"base_uri": "https://localhost:8080/"} id="vAwzbZ3-Xkid" outputId="8c322c26-8a54-4fcd-e3e3-2c6b7eb08449"
train_loss_list = list()
val_loss_list = list()
for epoch in range(MAX_EPOCH):
    print("epoch %d / %d" % (epoch+1, MAX_EPOCH))

    model.train()
    # training loop
    temp_loss_list = list()
    for X_train, y_train in train_dataloader:
        X_train = X_train.type(torch.float32).to(device)
        y_train = y_train.type(torch.float32).to(device)

        optimizer.zero_grad()
        score = model(X_train)
        loss = criterion(input=score, target=y_train)
        loss.backward()
        optimizer.step()
        temp_loss_list.append(loss.detach().cpu().numpy())

    # Re-evaluate the training loss with the weights fixed after the epoch.
    # FIX: wrapped in no_grad() — these passes only report losses, so there
    # is no reason to build autograd graphs (saves time and memory).
    temp_loss_list = list()
    with torch.no_grad():
        for X_train, y_train in train_dataloader:
            X_train = X_train.type(torch.float32).to(device)
            y_train = y_train.type(torch.float32).to(device)
            score = model(X_train)
            loss = criterion(input=score, target=y_train)
            temp_loss_list.append(loss.detach().cpu().numpy())
    train_loss_list.append(np.average(temp_loss_list))

    # validation
    model.eval()
    temp_loss_list = list()
    with torch.no_grad():
        for X_val, y_val in val_dataloader:
            X_val = X_val.type(torch.float32).to(device)
            y_val = y_val.type(torch.float32).to(device)
            score = model(X_val)
            loss = criterion(input=score, target=y_val)
            temp_loss_list.append(loss.detach().cpu().numpy())
    val_loss_list.append(np.average(temp_loss_list))

    print("\ttrain loss: %.5f" % train_loss_list[-1])
    print("\tval loss: %.5f" % val_loss_list[-1])
# + [markdown] id="Y60Ym2BsTWhr"
# ## Plotting Loss
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="G--C0PBRsNMx" outputId="f7c773e9-9f2d-4362-9e18-97c6227e8042"
train_plot = plt.plot(train_loss_list, label="Training Loss")
val_plot = plt.plot(val_loss_list, label="Validation Loss")
plt.legend(loc="upper right");
# + [markdown] id="2l227HiFTZWF"
# ## Time for prediction!
# + id="VP5giYZ1tkWg"
model.eval()
# NOTE(review): X_val here is whatever batch the validation loop left bound —
# confirm that predicting on a single batch is intended.
# FIX: inference only — no_grad() avoids building the autograd graph.
with torch.no_grad():
    prediction = model(X_val)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="81ZuTiguaWTN" outputId="aceb8bef-a300-4527-d84c-b117082294b8"
predicted = plt.scatter(X_val.cpu(), prediction.detach().cpu())
original = plt.scatter(X_train.cpu(), y_train.cpu())
plt.legend((predicted, original), ("Predicted", "Original"));
# + [markdown] id="Qb1A0VT_8af8"
# ### So, here we can see that our neural network was able to approximate sine function very well.
# + [markdown] id="Bj0RhPCZTfc5"
# ## State_dict of model and optimizer
# + colab={"base_uri": "https://localhost:8080/"} id="oQfpMzNHL2F3" outputId="b34d98df-371e-469d-8152-a165ad6bbe98"
# Inspect the learnable parameters registered on the model.
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())

# Print optimizer's state_dict (hyperparameters plus per-parameter state)
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
# + [markdown] id="70xKgSTyTlgh"
# ## Saving model using save() and state_dict()
# + id="L_zde1DiNSue"
# Persist only the parameters (state_dict), not the full pickled module.
PATH = '/content/sine.pt'
torch.save(model.state_dict(), PATH)
# + [markdown] id="3FYvrYEYTppk"
# ## Loading model using load() and load_state_dict()
# + colab={"base_uri": "https://localhost:8080/"} id="bwVb4kb_N7Q2" outputId="a78f572c-a62a-429e-8238-fb0fd900a8e9"
device = torch.device("cuda")  # NOTE(review): assumes a CUDA device is available — confirm
model = SineApproximator()
# NOTE(review): same-session reload; pass map_location= to torch.load when
# restoring on a machine with a different device.
model.load_state_dict(torch.load(PATH))
model.to(device)
model.eval()
# + [markdown] id="3DHdz13eTtl8"
# ## Trying pre-trained model for inference/training
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="jpBrFM3qOKzC" outputId="d5feedde-0f0c-45cb-a5c9-7fafec335a73"
new_pred = model(X_val)
pred = plt.scatter(X_val.cpu(), new_pred.detach().cpu())
original = plt.scatter(X_train.cpu(), y_train.cpu())
plt.legend((pred, original), ("Predicted", "Original"));
| Sine Function Approximation in PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# `ApJdataFrames` Erickson2011
# ---
# `Title`: THE INITIAL MASS FUNCTION AND DISK FREQUENCY OF THE Rho OPHIUCHI CLOUD: AN EXTINCTION-LIMITED SAMPLE
# `Authors`: Erickson, E. M., et al.
#
# Data is from this paper:
# http://iopscience.iop.org/1538-3881/142/4/140/
# +
# %pylab inline
import seaborn as sns
sns.set_context("notebook", font_scale=1.5)  # larger fonts for notebook display
#import warnings
#warnings.filterwarnings("ignore")
# -
import pandas as pd
# ## Table 2- Optical Properties of Candidate Young Stellar Objects
# Tab-separated ASCII table from the journal; skiprows/skipfooter trim its
# header decoration and footnote lines, and " ... " marks missing values.
addr = "http://iopscience.iop.org/1538-3881/142/4/140/suppdata/aj403656t2_ascii.txt"
names = ['F', 'Ap', 'Alt_Names', 'X-Ray ID', 'RA', 'DEC', 'Li', 'EW_Ha', 'I', 'R-I',
         'SpT_Lit', 'Spectral_Type', 'Adopt', 'Notes', 'blank']
# usecols drops the trailing 'blank' column created by the final tab.
tbl2 = pd.read_csv(addr, sep='\t', skiprows=[0,1,2,3,4], skipfooter=7, engine='python', na_values=" ... ",
                   index_col=False, names = names, usecols=range(len(names)-1))
tbl2.head()
# ## Table 3 - Association Members with Optical Spectra
# Same layout as Table 2, but with 9 footnote lines to strip at the end.
addr = "http://iopscience.iop.org/1538-3881/142/4/140/suppdata/aj403656t3_ascii.txt"
names = ['F', 'Ap', 'Alt_Names', 'WMR', 'Spectral_Type', 'A_v', 'M_I',
         'log_T_eff', 'log_L_bol', 'Mass', 'log_age', 'Criteria', 'Notes', 'blank']
tbl3 = pd.read_csv(addr, sep='\t', skiprows=[0,1,2,3,4], skipfooter=9, engine='python', na_values=" ... ",
                   index_col=False, names = names, usecols=range(len(names)-1))
tbl3.head()
# ! mkdir ../data/Erickson2011
# ###The code to merge the tables isn't working
# ```python
# on_F_ap = ["F", "Ap"]
# on_name = "Alt_Names"
# erickson2011 = pd.merge(tbl2, tbl3, on=on_F_ap, how="right")
# erickson2011 = pd.merge(tbl2, erickson2011, on="Alt_Names", how="right")
# message = "Table 2: {} entries \nTable 3: {} entries \nMerge: {} entries"
# print message.format(len(tbl2), len(tbl3), len(erickson2011))
# ```
# HR diagram: both quantities are stored as log10, hence the 10** un-log.
plt.plot(10**tbl3.log_T_eff, 10**tbl3.log_L_bol, '.')
plt.yscale("log")
plt.xlim(5000, 2000)  # reversed x-axis: hotter stars on the left, per HR-diagram convention
plt.ylim(1.0E-4, 1.0E1)
plt.xlabel(r"$T_{eff}$")
plt.ylabel(r"$L/L_{sun}$")
plt.title("Erickson et al. 2011 Table 3 HR Diagram")
# Another thing to do would be to filter out the "Possible dwarfs", etc...
# Save the data tables locally.
tbl2.to_csv("../data/Erickson2011/tbl2.csv", sep="\t", index=False)
tbl3.to_csv("../data/Erickson2011/tbl3.csv", sep="\t", index=False)
# *Script finished.*
| notebooks/Erickson2011.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## import modules
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
# -
# ## define model architecture
class ConvNet(nn.Module):
    """Two-conv-layer CNN for 28x28 single-channel MNIST digits.

    Pipeline: conv(1->16) -> conv(16->32) -> 2x2 max-pool -> dropout ->
    fc(4608->64) -> dropout -> fc(64->10) -> log-softmax.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.cn1 = nn.Conv2d(1, 16, 3, 1)
        self.cn2 = nn.Conv2d(16, 32, 3, 1)
        self.dp1 = nn.Dropout2d(0.10)
        self.dp2 = nn.Dropout2d(0.25)
        self.fc1 = nn.Linear(4608, 64)  # 4608 = 12 x 12 x 32 after pooling
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        # Feature extraction: two ReLU convolutions, 2x2 max pooling, dropout.
        features = F.relu(self.cn2(F.relu(self.cn1(x))))
        features = self.dp1(F.max_pool2d(features, 2))
        # Classifier head over the flattened feature map.
        hidden = F.relu(self.fc1(torch.flatten(features, 1)))
        logits = self.fc2(self.dp2(hidden))
        # Log-probabilities, matching the F.nll_loss used during training.
        return F.log_softmax(logits, dim=1)
# ## define training and inference routines
def train(model, device, train_dataloader, optim, epoch):
    """Run one training epoch over `train_dataloader`, logging every 10 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_dataloader):
        data, target = data.to(device), target.to(device)
        # Standard step: clear grads, forward, NLL loss, backward, update.
        optim.zero_grad()
        log_probs = model(data)
        batch_loss = F.nll_loss(log_probs, target)  # negative log-likelihood loss
        batch_loss.backward()
        optim.step()
        if batch_idx % 10 == 0:
            print('epoch: {} [{}/{} ({:.0f}%)]\t training loss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_dataloader.dataset),
                100. * batch_idx / len(train_dataloader), batch_loss.item()))
def test(model, device, test_dataloader):
    """Evaluate `model` on the test set, printing average NLL loss and accuracy."""
    model.eval()
    loss = 0
    success = 0
    with torch.no_grad():  # inference only — no gradient bookkeeping
        for X, y in test_dataloader:
            X, y = X.to(device), y.to(device)
            pred_prob = model(X)
            loss += F.nll_loss(pred_prob, y, reduction='sum').item() # loss summed across the batch
            pred = pred_prob.argmax(dim=1, keepdim=True) # use argmax to get the most likely prediction
            success += pred.eq(y.view_as(pred)).sum().item()
    # Average the summed loss over the whole dataset.
    loss /= len(test_dataloader.dataset)
    print('\nTest dataset: Overall Loss: {:.4f}, Overall Accuracy: {}/{} ({:.0f}%)\n'.format(
        loss, success, len(test_dataloader.dataset),
        100. * success / len(test_dataloader.dataset)))
# ## create data loaders
# +
# The mean and standard deviation values are calculated as the mean of all pixel values of all images in the training dataset
# (0.1302, 0.3069) normalise pixel values already scaled to [0, 1] by ToTensor.
train_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1302,), (0.3069,))])),  # train_X.mean()/256. and train_X.std()/256.
    batch_size=32, shuffle=True)

test_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1302,), (0.3069,))
                   ])),
    batch_size=500, shuffle=True)
# -
# ## define optimizer and run training epochs
# +
torch.manual_seed(0)  # reproducible weight init and data shuffling
device = torch.device("cpu")

model = ConvNet()
optimizer = optim.Adadelta(model.parameters(), lr=0.5)
# -
# ## model training
# Two epochs: train, then evaluate on the held-out test set after each.
for epoch in range(1, 3):
    train(model, device, train_dataloader, optimizer, epoch)
    test(model, device, test_dataloader)
# ## run inference on trained model
# +
# Grab one test batch and visualize its first sample.
test_samples = enumerate(test_dataloader)
b_i, (sample_data, sample_targets) = next(test_samples)

plt.imshow(sample_data[0][0], cmap='gray', interpolation='none')
plt.show()
# -
# Predicted class = argmax of the log-probabilities for the first sample.
print(f"Model prediction is : {model(sample_data).data.max(1)[1][0]}")
print(f"Ground truth is : {sample_targets[0]}")

# Save the trained weights (state_dict only) for later reuse.
PATH_TO_MODEL = "./convnet.pth"
torch.save(model.state_dict(), PATH_TO_MODEL)
| Chapter10/mnist_pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Inner join
# PostgreSQL was mentioned in the slides but you'll find that these joins and the material here applies to different forms of SQL as well.
#
# Throughout this course, you'll be working with the countries database containing information about the most populous world cities as well as country-level economic data, population data, and geographic data. This countries database also contains information on languages spoken in each country.
#
# You can see the different tables in this database by clicking on the tabs on the bottom right below query.sql. Click through them to get a sense for the types of data that each table contains before you continue with the course! Take note of the fields that appear to be shared across the tables.
#
# Recall from the video the basic syntax for an INNER JOIN, here including all columns in both tables:
# Template for an INNER JOIN returning all columns from both tables:
SELECT *
FROM left_table
INNER JOIN right_table
ON left_table.id = right_table.id;
# You'll start off with a SELECT statement and then build up to an inner join with the cities and countries tables. Let's get to it!
# Begin by selecting all columns from the cities table.
SELECT *
FROM cities
# Inner join the cities table on the left to the countries table on the right, keeping all of the fields in both tables.
# You should match the tables on the country_code field in cities and the code field in countries.
# Do not alias your tables here or in the next step. Using cities and countries is fine for now.
# Join key: cities.country_code references countries.code.
SELECT *
FROM cities
INNER JOIN countries
ON cities.country_code = countries.code
# Modify the SELECT statement to keep only the name of the city, the name of the country, and the name of the region the country resides in.
#
# Recall from our Intro to SQL for Data Science course that you can alias fields using AS. Alias the name of the city AS city and the name of the country AS country.
# NOTE(review): the instructions ask for alias "city" (lowercase) but the
# query uses "City" — confirm which is intended.
SELECT cities.name AS City, countries.name AS country, region
FROM cities
INNER JOIN countries
ON cities.country_code = countries.code
| Joining Data in SQL/Chapter 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + active=""
# .. _grid_tutorial:
#
# .. currentmodule:: seaborn
# -
# # Plotting on data-aware grids
# + active=""
# When exploring medium-dimensional data, a useful approach is to draw multiple instances of the same plot on different subsets of your dataset. This technique is sometimes called either "lattice", or "trellis" plotting, and it is related to the idea of `"small multiples" <http://en.wikipedia.org/wiki/Small_multiple>`_. It allows a viewer to quickly extract a large amount of information about complex data. Matplotlib offers good support for making figures with multiple axes; seaborn builds on top of this to directly link the structure of the plot to the structure of your dataset.
#
# To use these features, your data has to be in a Pandas DataFrame and it must take the form of what Hadley Wickham calls `"tidy" data <http://vita.had.co.nz/papers/tidy-data.pdf>`_. In brief, that means your dataframe should be structured such that each column is a variable and each row is an observation.
#
# For advanced use, you can use the objects discussed in this part of the tutorial directly, which will provide maximum flexibility. Some seaborn functions (such as :func:`lmplot`, :func:`factorplot`, and :func:`pairplot`) also use them behind the scenes. Unlike other seaborn functions that are "Axes-level" and draw onto specific (possibly already-existing) matplotlib ``Axes`` without otherwise manipulating the figure, these higher-level functions create a figure when called and are generally more strict about how it gets set up. In some cases, arguments either to those functions or to the constructor of the class they rely on will provide a different interface attributes like the figure size, as in the case of :func:`lmplot` where you can set the height and aspect ratio for each facet rather than the overall size of the figure. Any function that uses one of these objects will always return it after plotting, though, and most of these objects have convenience methods for changing how the plot is drawn, often in a more abstract and easy way.
# -
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
sns.set(style="ticks")  # white background with axis ticks
np.random.seed(sum(map(ord, "axis_grids")))  # deterministic seed derived from the chapter name
# + active=""
# .. _facet_grid:
#
# Plotting small multiples of data subsets
# ----------------------------------------
#
# The :class:`FacetGrid` class is useful when you want to visualize the distribution of a variable or the relationship between multiple variables separately within subsets of your dataset. A :class:`FacetGrid` can be drawn with up to three dimensions: ``row``, ``col``, and ``hue``. The first two have obvious correspondence with the resulting array of axes; think of the hue variable as a third dimension along a depth axis, where different levels are plotted with different colors.
#
# The class is used by initializing a :class:`FacetGrid` object with a dataframe and the names of the variables that will form the row, column, or hue dimensions of the grid. These variables should be categorical or discrete, and then the data at each level of the variable will be used for a facet along that axis. For example, say we wanted to examine differences between lunch and dinner in the ``tips`` dataset.
#
# Additionally, both :func:`lmplot` and :func:`factorplot` use this object internally, and they return the object when they are finished so that it can be used for further tweaking.
# -
tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="time")
# + active=""
# Initializing the grid like this sets up the matplotlib figure and axes, but doesn't draw anything on them.
#
# The main approach for visualizing data on this grid is with the :meth:`FacetGrid.map` method. Provide it with a plotting function and the name(s) of variable(s) in the dataframe to plot. Let's look at the distribution of tips in each of these subsets, using a histogram.
# -
g = sns.FacetGrid(tips, col="time")
g.map(plt.hist, "tip");
# + active=""
# This function will draw the figure and annotate the axes, hopefully producing a finished plot in one step. To make a relational plot, just pass multiple variable names. You can also provide keyword arguments, which will be passed to the plotting function:
# -
g = sns.FacetGrid(tips, col="sex", hue="smoker")
g.map(plt.scatter, "total_bill", "tip", alpha=.7)
g.add_legend();
# + active=""
# There are several options for controlling the look of the grid that can be passed to the class constructor.
# -
g = sns.FacetGrid(tips, row="smoker", col="time", margin_titles=True)
g.map(sns.regplot, "size", "total_bill", color=".3", fit_reg=False, x_jitter=.1);
# + active=""
# Note that ``margin_titles`` isn't formally supported by the matplotlib API, and may not work well in all cases. In particular, it currently can't be used with a legend that lies outside of the plot.
#
# The size of the figure is set by providing the height of *each* facet, along with the aspect ratio:
# -
g = sns.FacetGrid(tips, col="day", size=4, aspect=.5)
g.map(sns.barplot, "sex", "total_bill");
# + active=""
# With versions of matplotlib > 1.4, you can pass parameters to be used in the `gridspec` module. These can be used to draw attention to a particular facet by increasing its size. It's particularly useful when visualizing distributions of datasets with unequal numbers of groups in each facet.
# -
titanic = sns.load_dataset("titanic")
titanic = titanic.assign(deck=titanic.deck.astype(object)).sort_values("deck")
g = sns.FacetGrid(titanic, col="class", sharex=False,
gridspec_kws={"width_ratios": [5, 3, 3]})
g.map(sns.boxplot, "deck", "age");
# + active=""
# The default ordering of the facets is derived from the information in the DataFrame. If the variable used to define facets has a categorical type, then the order of the categories is used. Otherwise, the facets will be in the order of appearance of the category levels. It is possible, however, to specify an ordering of any facet dimension with the appropriate ``*_order`` parameter:
# -
ordered_days = tips.day.value_counts().index
g = sns.FacetGrid(tips, row="day", row_order=ordered_days,
size=1.7, aspect=4,)
g.map(sns.distplot, "total_bill", hist=False, rug=True);
# + active=""
# Any seaborn color palette (i.e., something that can be passed to :func:`color_palette()`) can be provided. You can also use a dictionary that maps the names of values in the ``hue`` variable to valid matplotlib colors:
# -
pal = dict(Lunch="seagreen", Dinner="gray")
g = sns.FacetGrid(tips, hue="time", palette=pal, size=5)
g.map(plt.scatter, "total_bill", "tip", s=50, alpha=.7, linewidth=.5, edgecolor="white")
g.add_legend();
# + active=""
# You can also let other aspects of the plot vary across levels of the hue variable, which can be helpful for making plots that will be more comprehensible when printed in black-and-white. To do this, pass a dictionary to ``hue_kws`` where keys are the names of plotting function keyword arguments and values are lists of keyword values, one for each level of the hue variable.
# -
g = sns.FacetGrid(tips, hue="sex", palette="Set1", size=5, hue_kws={"marker": ["^", "v"]})
g.map(plt.scatter, "total_bill", "tip", s=100, linewidth=.5, edgecolor="white")
g.add_legend();
# + active=""
# If you have many levels of one variable, you can plot it along the columns but "wrap" them so that they span multiple rows. When doing this, you cannot use a ``row`` variable.
# -
attend = sns.load_dataset("attention").query("subject <= 12")
g = sns.FacetGrid(attend, col="subject", col_wrap=4, size=2, ylim=(0, 10))
g.map(sns.pointplot, "solutions", "score", color=".3", ci=None);
# + active=""
# Once you've drawn a plot using :meth:`FacetGrid.map` (which can be called multiple times), you may want to adjust some aspects of the plot. There are also a number of methods on the :class:`FacetGrid` object for manipulating the figure at a higher level of abstraction. The most general is :meth:`FacetGrid.set`, and there are other more specialized methods like :meth:`FacetGrid.set_axis_labels`, which respects the fact that interior facets do not have axis labels. For example:
# -
with sns.axes_style("white"):
g = sns.FacetGrid(tips, row="sex", col="smoker", margin_titles=True, size=2.5)
g.map(plt.scatter, "total_bill", "tip", color="#334488", edgecolor="white", lw=.5);
g.set_axis_labels("Total bill (US Dollars)", "Tip");
g.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);
g.fig.subplots_adjust(wspace=.02, hspace=.02);
# + active=""
# For even more customization, you can work directly with the underlying matplotlib ``Figure`` and ``Axes`` objects, which are stored as member attributes at ``fig`` and ``axes`` (a two-dimensional array), respectively. When making a figure without row or column faceting, you can also use the ``ax`` attribute to directly access the single axes.
# -
g = sns.FacetGrid(tips, col="smoker", margin_titles=True, size=4)
g.map(plt.scatter, "total_bill", "tip", color="#338844", edgecolor="white", s=50, lw=1)
for ax in g.axes.flat:
ax.plot((0, 50), (0, .2 * 50), c=".2", ls="--")
g.set(xlim=(0, 60), ylim=(0, 14));
# + active=""
# .. _custom_map_func:
#
# Mapping custom functions onto the grid
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You're not limited to existing matplotlib and seaborn functions when using :class:`FacetGrid`. However, to work properly, any function you use must follow a few rules:
#
# 1. It must plot onto the "currently active" matplotlib ``Axes``. This will be true of functions in the ``matplotlib.pyplot`` namespace, and you can call ``plt.gca`` to get a reference to the current ``Axes`` if you want to work directly with its methods.
# 2. It must accept the data that it plots in positional arguments. Internally, :class:`FacetGrid` will pass a ``Series`` of data for each of the named positional arguments passed to :meth:`FacetGrid.map`.
# 3. It must be able to accept ``color`` and ``label`` keyword arguments, and, ideally, it will do something useful with them. In most cases, it's easiest to catch a generic dictionary of ``**kwargs`` and pass it along to the underlying plotting function.
#
# Let's look at minimal example of a function you can plot with. This function will just take a single vector of data for each facet:
# +
def quantile_plot(x, **kwargs):
    """Scatter the ordered sample values of *x* against their theoretical quantiles."""
    theoretical, ordered = stats.probplot(x, fit=False)
    plt.scatter(ordered, theoretical, **kwargs)
g = sns.FacetGrid(tips, col="sex", size=4)
g.map(quantile_plot, "total_bill");
# + active=""
# If we want to make a bivariate plot, you should write the function so that it accepts the x-axis variable first and the y-axis variable second:
# +
def qqplot(x, y, **kwargs):
    """Quantile-quantile scatter: ordered values of *x* against ordered values of *y*."""
    _, x_ord = stats.probplot(x, fit=False)
    _, y_ord = stats.probplot(y, fit=False)
    plt.scatter(x_ord, y_ord, **kwargs)
g = sns.FacetGrid(tips, col="smoker", size=4)
g.map(qqplot, "total_bill", "tip");
# + active=""
# Because ``plt.scatter`` accepts ``color`` and ``label`` keyword arguments and does the right thing with them, we can add a hue facet without any difficulty:
# -
g = sns.FacetGrid(tips, hue="time", col="sex", size=4)
g.map(qqplot, "total_bill", "tip")
g.add_legend();
# + active=""
# This approach also lets us use additional aesthetics to distinguish the levels of the hue variable, along with keyword arguments that won't be dependent on the faceting variables:
# -
g = sns.FacetGrid(tips, hue="time", col="sex", size=4,
hue_kws={"marker": ["s", "D"]})
g.map(qqplot, "total_bill", "tip", s=40, edgecolor="w")
g.add_legend();
# + active=""
# Sometimes, though, you'll want to map a function that doesn't work the way you expect with the ``color`` and ``label`` keyword arguments. In this case, you'll want to explicitly catch them and handle them in the logic of your custom function. For example, this approach will allow us to map ``plt.hexbin``, which otherwise does not play well with the :class:`FacetGrid` API:
# +
def hexbin(x, y, color, **kwargs):
    """Hexbin plot using a light sequential palette built from *color*.

    Catching ``color`` explicitly keeps FacetGrid's hue machinery from
    forwarding it straight to ``plt.hexbin``, which would not accept it.
    """
    palette = sns.light_palette(color, as_cmap=True)
    plt.hexbin(x, y, gridsize=15, cmap=palette, **kwargs)
with sns.axes_style("dark"):
g = sns.FacetGrid(tips, hue="time", col="time", size=4)
g.map(hexbin, "total_bill", "tip", extent=[0, 50, 0, 10]);
# + active=""
# .. _pair_grid:
#
# Plotting pairwise relationships in a dataset
# --------------------------------------------
#
# :class:`PairGrid` also allows you to quickly draw a grid of small subplots using the same plot type to visualize data in each. In a :class:`PairGrid`, each row and column is assigned to a different variable, so the resulting plot shows each pairwise relationship in the dataset. This style of plot is sometimes called a "scatterplot matrix", as this is the most common way to show each relationship, but :class:`PairGrid` is not limited to scatterplots.
#
# It's important to understand the differences between a :class:`FacetGrid` and a :class:`PairGrid`. In the former, each facet shows the same relationship conditioned on different levels of other variables. In the latter, each plot shows a different relationship (although the upper and lower triangles will have mirrored plots). Using :class:`PairGrid` can give you a very quick, very high-level summary of interesting relationships in your dataset.
#
# The basic usage of the class is very similar to :class:`FacetGrid`. First you initialize the grid, then you pass plotting function to a ``map`` method and it will be called on each subplot. There is also a companion function, :func:`pairplot` that trades off some flexibility for faster plotting.
#
# -
iris = sns.load_dataset("iris")
g = sns.PairGrid(iris)
g.map(plt.scatter);
# + active=""
# It's possible to plot a different function on the diagonal to show the univariate distribution of the variable in each column. Note that the axis ticks won't correspond to the count or density axis of this plot, though.
# -
g = sns.PairGrid(iris)
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter);
# + active=""
# A very common way to use this plot colors the observations by a separate categorical variable. For example, the iris dataset has four measurements for each of three different species of iris flowers so you can see how they differ.
# -
g = sns.PairGrid(iris, hue="species")
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
g.add_legend();
# + active=""
# By default every numeric column in the dataset is used, but you can focus on particular relationships if you want.
# -
g = sns.PairGrid(iris, vars=["sepal_length", "sepal_width"], hue="species")
g.map(plt.scatter);
# + active=""
# It's also possible to use a different function in the upper and lower triangles to emphasize different aspects of the relationship.
# -
g = sns.PairGrid(iris)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_diag(sns.kdeplot, lw=3, legend=False);
# + active=""
# The square grid with identity relationships on the diagonal is actually just a special case, and you can plot with different variables in the rows and columns.
# -
g = sns.PairGrid(tips, y_vars=["tip"], x_vars=["total_bill", "size"], size=4)
g.map(sns.regplot, color=".3")
g.set(ylim=(-1, 11), yticks=[0, 5, 10]);
# + active=""
# Of course, the aesthetic attributes are configurable. For instance, you can use a different palette (say, to show an ordering of the ``hue`` variable) and pass keyword arguments into the plotting functions.
# -
g = sns.PairGrid(tips, hue="size", palette="GnBu_d")
g.map(plt.scatter, s=50, edgecolor="white")
g.add_legend();
# + active=""
# :class:`PairGrid` is flexible, but to take a quick look at a dataset, it can be easier to use :func:`pairplot`. This function uses scatterplots and histograms by default, although a few other kinds will be added (currently, you can also plot regression plots on the off-diagonals and KDEs on the diagonal).
# -
sns.pairplot(iris, hue="species", size=2.5);
# + active=""
# You can also control the aesthetics of the plot with keyword arguments, and it returns the :class:`PairGrid` instance for further tweaking.
# -
g = sns.pairplot(iris, hue="species", palette="Set2", diag_kind="kde", size=2.5)
| doc/tutorial/axis_grids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
# Slicing {#slice_example}
# =======
#
# Extract thin planar slices from a volume.
#
# +
import matplotlib.pyplot as plt
import numpy as np
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
# -
# PyVista meshes have several slicing filters bound directly to all
# datasets. These filters allow you to slice through a volumetric dataset
# to extract and view sections through the volume of data.
#
# One of the most common slicing filters used in PyVista is the
# `pyvista.DataSetFilters.slice_orthogonal`{.interpreted-text role="func"}
# filter which creates three orthogonal slices through the dataset
# parallel to the three Cartesian planes. For example, let\'s slice
# through the sample geostatistical training image volume. First, load up
# the volume and preview it:
#
# +
mesh = examples.load_channels()
# define a categorical colormap
cmap = plt.cm.get_cmap("viridis", 4)
mesh.plot(cmap=cmap)
# -
# Note that this dataset is a 3D volume and there might be regions within
# this volume that we would like to inspect. We can create slices through
# the mesh to gain further insight about the internals of the volume.
#
# +
slices = mesh.slice_orthogonal()
slices.plot(cmap=cmap)
# -
# The orthogonal slices can be easily translated throughout the volume:
#
slices = mesh.slice_orthogonal(x=20, y=20, z=30)
slices.plot(cmap=cmap)
# We can also add just a single slice of the volume by specifying the
# origin and normal of the slicing plane with the
# `pyvista.DataSetFilters.slice`{.interpreted-text role="func"} filter:
#
# +
# Single slice - origin defaults to the center of the mesh
single_slice = mesh.slice(normal=[1, 1, 0])
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(single_slice, cmap=cmap)
p.show()
# -
# Adding slicing planes uniformly across an axial direction can also be
# automated with the
# `pyvista.DataSetFilters.slice_along_axis`{.interpreted-text role="func"}
# filter:
#
# +
slices = mesh.slice_along_axis(n=7, axis="y")
slices.plot(cmap=cmap)
# -
# Slice Along Line
# ================
#
# We can also slice a dataset along a `pyvista.Spline`{.interpreted-text
# role="func"} or `pyvista.Line`{.interpreted-text role="func"} using the
# `DataSetFilters.slice_along_line`{.interpreted-text role="func"} filter.
#
# First, define a line source through the dataset of interest. Please note
# that this type of slicing is computationally expensive and might take a
# while if there are a lot of points in the line - try to keep the
# resolution of the line low.
#
# +
model = examples.load_channels()
def path(y):
    """Parabolic path following x = a*(y - h)**2 + k with h = 0, k = 0."""
    coeff = 110.0 / 160.0 ** 2
    return coeff * y ** 2 + 0.0, y
x, y = path(np.arange(model.bounds[2], model.bounds[3], 15.0))
zo = np.linspace(9.0, 11.0, num=len(y))
points = np.c_[x, y, zo]
spline = pv.Spline(points, 15)
spline
# -
# Then run the filter
#
slc = model.slice_along_line(spline)
slc
p = pv.Plotter()
p.add_mesh(slc, cmap=cmap)
p.add_mesh(model.outline())
p.show(cpos=[1, -1, 1])
# Multiple Slices in Vector Direction
# ===================================
#
# Slice a mesh along a vector direction perpendicularly.
#
# +
mesh = examples.download_brain()
# Create vector
vec = np.random.rand(3)
# Normalize the vector
normal = vec / np.linalg.norm(vec)
# Make points along that vector for the extent of your slices
a = mesh.center + normal * mesh.length / 3.0
b = mesh.center - normal * mesh.length / 3.0
# Define the line/points for the slices
n_slices = 5
line = pv.Line(a, b, n_slices)
# Generate all of the slices
slices = pv.MultiBlock()
for point in line.points:
slices.append(mesh.slice(normal=normal, origin=point))
# -
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(slices, opacity=0.75)
p.add_mesh(line, color="red", line_width=5)
p.show()
# Slice At Different Bearings
# ===========================
#
# From
# [pyvista-support\#23](https://github.com/pyvista/pyvista-support/issues/23)
#
# An example of how to get many slices at different bearings all centered
# around a user-chosen location.
#
# Create a point to orient slices around
#
ranges = np.array(model.bounds).reshape(-1, 2).ptp(axis=1)
point = np.array(model.center) - ranges*0.25
# Now generate a few normal vectors to rotate a slice around the z-axis.
# Use equation for circle since its about the Z-axis.
#
increment = np.pi/6.
# use a container to hold all the slices
slices = pv.MultiBlock() # treat like a dictionary/list
for theta in np.arange(0, np.pi, increment):
normal = np.array([np.cos(theta), np.sin(theta), 0.0]).dot(np.pi/2.)
name = f'Bearing: {np.rad2deg(theta):.2f}'
slices[name] = model.slice(origin=point, normal=normal)
slices
# And now display it!
#
p = pv.Plotter()
p.add_mesh(slices, cmap=cmap)
p.add_mesh(model.outline())
p.show()
| slicing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=gpu'
import json
import subprocess
import cPickle
import sys
import binascii
import multiprocessing as mp
from itertools import chain
from collections import OrderedDict
import logging
import numpy as np
import random
from copy import copy
import blocks
from blocks.bricks import Linear, Softmax, Softplus, NDimensionalSoftmax,\
BatchNormalizedMLP,Rectifier, Logistic, Tanh, MLP
from blocks.bricks.recurrent import GatedRecurrent, Fork, LSTM
from blocks.initialization import Constant, IsotropicGaussian, Identity, Uniform
from blocks.bricks.cost import BinaryCrossEntropy, CategoricalCrossEntropy
from blocks.filter import VariableFilter
from blocks.roles import PARAMETER
from blocks.graph import ComputationGraph
import theano
from theano import tensor as T
sys.setrecursionlimit(100000)
# +
def parse_header(line):
    """Parse one tcpdump header line into src/dest IP and port strings.

    IPv4 endpoints look like 0.0.0.0.port (port after the last dot);
    IPv6 endpoints look like 0:0:0:0:0:0.port. Missing ports yield ''.
    """
    fields = line.split()
    src, dst = fields[3], fields[5]
    parsed = {}
    if fields[2] == 'IP6':
        # IPv6: the only dot separates address from port.
        parsed['src_port'] = src.split('.')[-1]
        parsed['src_ip'] = src.split('.')[0]
        parsed['dest_port'] = dst.split('.')[-1].split(':')[0]
        parsed['dest_ip'] = dst.split('.')[0]
    else:
        src_parts = src.split('.')
        if len(src_parts) > 4:
            # Five dotted parts: address plus trailing port.
            parsed['src_port'] = src_parts[-1]
            parsed['src_ip'] = '.'.join(src_parts[:-1])
        else:
            parsed['src_ip'] = src
            parsed['src_port'] = ''
        dst_parts = dst.split('.')
        if len(dst_parts) > 4:
            parsed['dest_port'] = dst_parts[-1].split(':')[0]
            parsed['dest_ip'] = '.'.join(dst_parts[:-1])
        else:
            # No port; strip the trailing colon tcpdump appends.
            parsed['dest_ip'] = dst.split(':')[0]
            parsed['dest_port'] = ''
    return parsed
def parse_data(line):
    """Return the hex payload of a tcpdump '0x....:' dump line, whitespace removed."""
    _, payload = line.split(':', 1)
    return payload.strip().replace(' ', '')
def process_packet(output):
    """
    Generator yielding parsed packet dicts from tcpdump's stdout lines.

    tcpdump alternates a header line with '0x...' hex-dump lines; a packet's
    payload is accumulated until the next header line flushes it.

    BUG FIX: the original only flushed a packet when the *next* header line
    appeared, so the final packet of the stream was silently dropped (the old
    TODO comment misattributed this to the first packet). We now flush the
    trailing packet after the loop.

    NOTE: the same dict object is yielded and then cleared on resume, so the
    consumer must copy out anything it needs before advancing the generator
    (read_pcap does: it stores the payload string, not the dict).
    """
    ret_header = {}
    ret_dict = {}
    ret_data = ''
    hasHeader = False
    for line in output:
        line = line.strip()
        if not line:
            continue
        if not line.startswith('0x'):
            # Header line: flush the previously accumulated packet first.
            if ret_dict and ret_data:
                ret_dict['data'] = ret_data
                yield ret_dict
                ret_dict.clear()
                ret_header.clear()
                ret_data = ''
                hasHeader = False
            # Parse the new header; on failure, discard any partial state.
            try:
                ret_header = parse_header(line)
                ret_dict.update(ret_header)
                hasHeader = True
            except:
                ret_header.clear()
                ret_dict.clear()
                ret_data = ''
                hasHeader = False
        else:
            # Hex-dump line: append payload only if a valid header preceded it.
            if hasHeader:
                ret_data = ret_data + parse_data(line)
    # Flush the last packet, which has no following header line.
    if ret_dict and ret_data:
        ret_dict['data'] = ret_data
        yield ret_dict
def is_clean_packet(packet):
    """
    Validate a parsed packet dict.

    Both ports must be all-digit strings, neither IP may be purely
    alphabetic, and the payload (when present) must parse as hex.
    Returns True when every check passes, False otherwise.
    """
    for port_key in ('src_port', 'dest_port'):
        if not packet[port_key].isdigit():
            return False
    for ip_key in ('src_ip', 'dest_ip'):
        if packet[ip_key].isalpha():
            return False
    if 'data' in packet:
        try:
            int(packet['data'], 16)
        except:
            return False
    return True
def order_keys(hexSessionDict):
    """Return session keys sorted by insertion order (the second tuple element)."""
    # sorted() already produces the list the original built with a manual
    # append loop.
    return sorted(hexSessionDict.keys(), key=lambda k: hexSessionDict[k][1])
def read_pcap(path):
    """
    Run tcpdump over the pcap at `path` and group packet hex payloads into
    sessions keyed by their (src_ip:port, dest_ip:port) endpoint pair.

    Returns a dict mapping session key -> ([hex payload strings], insert_num),
    where insert_num records the order in which sessions were first seen.
    """
    hex_sessions = {}
    # NOTE(review): shell=True with string concatenation is shell-injection
    # prone if `path` is untrusted; a list argv with shell=False would be safer.
    proc = subprocess.Popen('tcpdump -nn -tttt -xx -r '+path,
                            shell=True,
                            stdout=subprocess.PIPE)
    insert_num = 0 # keeps track of insertion order into dict
    for packet in process_packet(proc.stdout):
        if not is_clean_packet(packet):
            continue
        if 'data' in packet:
            key = (packet['src_ip']+":"+packet['src_port'], packet['dest_ip']+":"+packet['dest_port'])
            rev_key = (key[1], key[0])
            # Traffic in either direction belongs to the same session.
            if key in hex_sessions:
                hex_sessions[key][0].append(packet['data'])
            elif rev_key in hex_sessions:
                hex_sessions[rev_key][0].append(packet['data'])
            else:
                hex_sessions[key] = ([packet['data']], insert_num)
                insert_num += 1
    return hex_sessions
def pickleFile(thing2save, file2save2 = None, filePath='/work/notebooks/drawModels/', fileName = 'myModels'):
    """
    Pickle `thing2save` under `filePath`, either as `file2save2` (when given)
    or as `fileName` + '.pickle'.

    Uses a `with` block so the handle is closed even if dump() raises; the
    original used the Python-2-only file() builtin with a manual close().
    """
    if file2save2 is None:
        target = filePath + fileName + '.pickle'
    else:
        target = filePath + file2save2
    with open(target, 'wb') as f:
        cPickle.dump(thing2save, f, protocol=cPickle.HIGHEST_PROTOCOL)
def loadFile(filePath):
    """Load and return one pickled object from `filePath` (handle always closed)."""
    # `with` replaces the Python-2-only file() builtin and manual close(),
    # guaranteeing cleanup even if load() raises.
    with open(filePath, 'rb') as file2open:
        return cPickle.load(file2open)
def removeBadSessionizer(hexSessionDict, saveFile=False, dataPath=None, fileName=None):
    """
    Remove sessions containing any packet shorter than 80 hex characters,
    mutating and returning `hexSessionDict`.

    Fixes vs. original: iterate over a snapshot of the keys (deleting while
    iterating the live key view breaks on Python 3), and drop empty sessions
    instead of crashing on np.min of an empty list.
    """
    for ses in list(hexSessionDict.keys()):
        paclens = [len(pac) for pac in hexSessionDict[ses][0]]
        if not paclens or min(paclens) < 80:
            del hexSessionDict[ses]
    if saveFile:
        print('pickling sessions dictionary... mmm')
        pickleFile(hexSessionDict, filePath=dataPath, fileName=fileName)
    return hexSessionDict
# -
# dataPath = '/data/fs4/datasets/pcaps/gregPcaps/'
# dirList = os.listdir('/data/fs4/datasets/pcaps/gregPcaps/')
# dirList
# complicated = {}
#
# for capture in dirList:
# dictName = capture.split('.')[0]
#
# start = time.time()
#
# hexSessions = read_pcap(dataPath+capture)
# hexSessions = removeBadSessionizer(hexSessions)
# complicated[dictName] = hexSessions
#
# end = time.time()
#
# print dictName + ' is done'
# print 'time to run (secs): ', (end - start)
#
#
# pickleFile(complicated, filePath='/data/fs4/home/bradh/', fileName='complicated')
#big file
with open('complicated.pickle', 'rb') as unhandle:
compDict= cPickle.load(unhandle)
# +
sess = 0
for di in compDict.keys():
sess += len(compDict[di].keys())
print di, " ", len(compDict[di].keys())
print sess
# +
# #%matplotlib inline
maxPackets = 2
packetTimeSteps = 28
loadPrepedData = True
dataPath = '/data/fs4/home/bradh/bigFlows.pickle'
packetReverse = False
padOldTimeSteps = True
runname = 'bakeoff_nomac_noip_noport'
rnnType = 'gru' #gru or lstm
wtstd = 0.2
dimIn = 257 #hex has 256 characters + the <EOP> character
dim = 100 #dimension reduction size
batch_size = 20
numClasses = 6
clippings = 1
epochs = 5000
lr = 0.0001
decay = 0.9
trainPercent = 0.8
module_logger = logging.getLogger(__name__)
import ast
import json
import subprocess
import sys
def pickleFile(thing2save, file2save2 = None, filePath='/work/notebooks/drawModels/', fileName = 'myModels'):
    """
    Pickle `thing2save` under `filePath`, either as `file2save2` (when given)
    or as `fileName` + '.pickle'.

    Uses a `with` block so the handle is closed even if dump() raises; the
    original used the Python-2-only file() builtin with a manual close().
    """
    if file2save2 is None:
        target = filePath + fileName + '.pickle'
    else:
        target = filePath + file2save2
    with open(target, 'wb') as f:
        cPickle.dump(thing2save, f, protocol=cPickle.HIGHEST_PROTOCOL)
def loadFile(filePath):
    """Load and return one pickled object from `filePath` (handle always closed)."""
    # `with` replaces the Python-2-only file() builtin and manual close(),
    # guaranteeing cleanup even if load() raises.
    with open(filePath, 'rb') as file2open:
        return cPickle.load(file2open)
def removeBadSessionizer(hexSessionDict, saveFile=False, dataPath=None, fileName=None):
    """
    Remove sessions containing any packet shorter than 80 hex characters,
    mutating and returning `hexSessionDict`.

    Fixes vs. original: iterate over a snapshot of the keys (deleting while
    iterating the live key view breaks on Python 3), and drop empty sessions
    instead of crashing on np.min of an empty list.
    """
    for ses in list(hexSessionDict.keys()):
        paclens = [len(pac) for pac in hexSessionDict[ses][0]]
        if not paclens or min(paclens) < 80:
            del hexSessionDict[ses]
    if saveFile:
        print('pickling sessions')
        pickleFile(hexSessionDict, filePath=dataPath, fileName=fileName)
    return hexSessionDict
#Making the hex dictionary
#def dstPortSwapOneOut(hexSessionList):
#THINK THROUGH
def oneHot(index, granular = 'hex'):
    """
    Build a one-hot float vector with a 1.0 at `index`.

    Length is 257 for hex-level tokens (256 byte values + <EOP>) and 17
    for any other granularity (character level).
    """
    vecLen = 257 if granular == 'hex' else 17
    vec = np.zeros(vecLen)
    vec[index] = 1.0
    return vec
def oneSessionEncoder(sessionPackets, hexDict, maxPackets = 2, packetTimeSteps = 100,
                      packetReverse = False, charLevel = False, padOldTimeSteps = True):
    """
    One-hot encode a session (list of hex packet strings) into a 3-D array.

    Each packet is cropped to hex chars 24:52 (presumably skipping the MAC
    header -- TODO confirm against the capture format), mapped through
    `hexDict` to integer tokens, terminated with the <EOP> token (256),
    one-hot encoded, then zero-padded/cropped to `packetTimeSteps` rows; the
    session itself is cropped/zero-padded to `maxPackets` packets.

    Returns (sessionCollect, packetCollect): the padded
    (maxPackets, packetTimeSteps, vecLen) float array and the raw token lists.

    NOTE(review): oneHot() is called without `granular`, so it always returns
    length-257 vectors; with charLevel=True (vecLen=17) the padding vstack
    would raise a shape mismatch -- confirm charLevel is never used.
    """
    sessionCollect = []
    packetCollect = []
    if charLevel:
        vecLen = 17
    else:
        vecLen = 257
    if len(sessionPackets) > maxPackets: #crop the number of sessions to maxPackets
        sessionList = copy(sessionPackets[:maxPackets])
    else:
        sessionList = copy(sessionPackets)
    for rawpacket in sessionList:
        packet = copy(rawpacket)
        packet = packet[24:52]
        #packet = packet[32:36]+packet[44:46]+packet[46:48]+packet[52:60]+packet[60:68]\
        #+packet[68:70]+packet[70:72]+packet[72:74]
        # Map each hex byte (two chars) to its integer token.
        packet = [hexDict[packet[i:i+2]] for i in xrange(0,len(packet)-2+1,2)]
        if len(packet) >= packetTimeSteps: #crop packet to length packetTimeSteps
            packet = packet[:packetTimeSteps]
            packet = packet+[256] #add <EOP> end of packet token
        else:
            packet = packet+[256] #add <EOP> end of packet token
        packetCollect.append(packet)
        pacMat = np.array([oneHot(x) for x in packet]) #one hot encoding of packet into a matrix
        pacMatLen = len(pacMat)
        #padding packet
        if packetReverse:
            pacMat = pacMat[::-1]
        if pacMatLen < packetTimeSteps:
            #pad by stacking zeros on top of data so that earlier timesteps do not have information
            #padding the packet such that zeros are after the actual info for better translation
            if padOldTimeSteps:
                pacMat = np.vstack( ( np.zeros((packetTimeSteps-pacMatLen,vecLen)), pacMat) )
            else:
                pacMat = np.vstack( (pacMat, np.zeros((packetTimeSteps-pacMatLen,vecLen))) )
        if pacMatLen > packetTimeSteps:
            pacMat = pacMat[:packetTimeSteps, :]
        sessionCollect.append(pacMat)
    #padding session
    sessionCollect = np.asarray(sessionCollect, dtype=theano.config.floatX)
    numPacketsInSession = sessionCollect.shape[0]
    if numPacketsInSession < maxPackets:
        # Pad sessions with all-zero packets up to maxPackets.
        sessionCollect = np.vstack( (sessionCollect,np.zeros((maxPackets-numPacketsInSession,
                                     packetTimeSteps, vecLen))) )
    return sessionCollect, packetCollect
# # Learning functions
# In[14]:
def floatX(X):
    """Cast X to a numpy array with Theano's configured float dtype (floatX)."""
    return np.asarray(X, dtype=theano.config.floatX)
def dropout(X, p=0.):
    """
    Inverted dropout: zero units with probability `p` and rescale the
    survivors by 1/(1-p) so the expected activation is unchanged.

    NOTE(review): `srng` (presumably a Theano RandomStreams) is not defined
    anywhere in this file's visible scope -- confirm where it is created.
    """
    if p != 0:
        retain_prob = 1 - p
        X = X / retain_prob * srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
    return X
# Gradient clipping
def clip_norm(g, c, n):
    '''Rescale gradient g to norm c when the total norm n exceeds the
    threshold c; a non-positive c disables clipping.'''
    if c > 0:
        g = T.switch(T.ge(n, c), g*c/n, g)
    return g
def clip_norms(gs, c):
    """Clip a list of gradients jointly by their global L2 norm threshold `c`."""
    norm = T.sqrt(sum([T.sum(g**2) for g in gs]))
    return [clip_norm(g, c, norm) for g in gs]
# Regularizers
def max_norm(p, maxnorm = 0.):
    """Constrain each column of p to an L2 norm of at most `maxnorm`
    (0 disables the constraint)."""
    if maxnorm > 0:
        norms = T.sqrt(T.sum(T.sqr(p), axis=0))
        desired = T.clip(norms, 0, maxnorm)
        # 1e-7 guards against division by zero for all-zero columns.
        p = p * (desired/ (1e-7 + norms))
    return p
def gradient_regularize(p, g, l1 = 0., l2 = 0.):
    """Add L2 (weight-decay) and L1 (sign) penalty terms to gradient g for parameter p."""
    g += p * l2
    g += T.sgn(p) * l1
    return g
def weight_regularize(p, maxnorm = 0.):
    """Apply the max-norm constraint to parameter p after an update step."""
    p = max_norm(p, maxnorm)
    return p
def Adam(params, cost, lr=0.0002, b1=0.1, b2=0.001, e=1e-8, l1 = 0., l2 = 0., maxnorm = 0., c = 8):
    """
    Build Theano (shared, update) pairs implementing the Adam optimizer with
    global gradient-norm clipping (`c`), optional L1/L2 gradient penalties,
    and an optional max-norm weight constraint.

    NOTE: b1/b2 follow the "1 - beta" convention of the reference Theano
    implementation -- b1=0.1 corresponds to beta1=0.9 in the Adam paper.
    """
    updates = []
    grads = T.grad(cost, params)
    grads = clip_norms(grads, c)
    i = theano.shared(floatX(0.))   # shared timestep counter
    i_t = i + 1.
    # Bias-correction factors for the first/second moment estimates.
    fix1 = 1. - b1**(i_t)
    fix2 = 1. - b2**(i_t)
    lr_t = lr * (T.sqrt(fix2) / fix1)
    for p, g in zip(params, grads):
        m = theano.shared(p.get_value() * 0.)  # first moment (running mean of grads)
        v = theano.shared(p.get_value() * 0.)  # second moment (running uncentered variance)
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (T.sqrt(v_t) + e)
        g_t = gradient_regularize(p, g_t, l1=l1, l2=l2)
        p_t = p - (lr_t * g_t)
        p_t = weight_regularize(p_t, maxnorm=maxnorm)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # Single counter update shared by all parameters.
    updates.append((i, i_t))
    return updates
def RMSprop(cost, params, lr = 0.001, l1 = 0., l2 = 0., maxnorm = 0., rho=0.9, epsilon=1e-6, c = 8):
    """
    Build Theano (shared, update) pairs implementing RMSprop with global
    gradient-norm clipping (`c`), optional L1/L2 gradient penalties, and an
    optional max-norm weight constraint.
    """
    grads = T.grad(cost, params)
    grads = clip_norms(grads, c)
    updates = []
    for p, g in zip(params, grads):
        g = gradient_regularize(p, g, l1 = l1, l2 = l2)
        # Running average of squared gradients.
        acc = theano.shared(p.get_value() * 0.)
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((acc, acc_new))
        updated_p = p - lr * (g / T.sqrt(acc_new + epsilon))
        updated_p = weight_regularize(updated_p, maxnorm = maxnorm)
        updates.append((p, updated_p))
    return updates
# # Training functions
def predictClass(predictFun, sampleList, compDict, hexDict,
                 numClasses = 6, trainPercent = 0.8, dimIn=257, maxPackets=2,
                 packetTimeSteps = 16, padOldTimeSteps=True):
    """
    Evaluate `predictFun` (a compiled Theano prediction function) on the last
    400 sessions of each class in `sampleList`, printing and returning
    (actual labels, predicted labels, mean accuracy).

    NOTE(review): `trainPercent` is accepted but never used here, and
    `packetReverse` is read from enclosing/global scope rather than taken as
    a parameter -- confirm both are intentional.
    """
    testCollect = []
    predtargets = []
    actualtargets = []
    trainingSessions = []
    trainingTargets = []
    for d in range(len(sampleList)):
        sampleLen = len(compDict[sampleList[d]].keys()) #num sessions in a dictionary
        # Held-out evaluation slice: the last 400 sessions of this class.
        sampleKeys = compDict[sampleList[d]].keys()[-400:]
        for key in sampleKeys:
            oneEncoded = oneSessionEncoder(compDict[sampleList[d]][key][0],
                                           hexDict = hexDict,
                                           packetReverse=packetReverse,
                                           padOldTimeSteps = padOldTimeSteps,
                                           maxPackets = maxPackets,
                                           packetTimeSteps = packetTimeSteps)
            trainingSessions.append(oneEncoded[0])
            # One-hot class target: class index d for every session of this sample.
            trainIndex = [0]*numClasses
            trainIndex[d] = 1
            trainingTargets.append(trainIndex)
    # Stack all sessions into one minibatch of shape (N, timesteps, 1, dimIn).
    sessionsMinibatch = np.asarray(trainingSessions, dtype=theano.config.floatX).reshape((-1, packetTimeSteps, 1, dimIn))
    targetsMinibatch = np.asarray(trainingTargets, dtype=theano.config.floatX)
    predcostfun = predictFun(sessionsMinibatch)
    # Accuracy: fraction of argmax predictions matching the argmax targets.
    testCollect.append(np.mean(np.argmax(predcostfun,axis=1) == np.argmax(targetsMinibatch, axis=1)))
    predtargets = np.argmax(predcostfun,axis=1)
    actualtargets = np.argmax(targetsMinibatch, axis=1)
    print "TEST accuracy: ", np.mean(testCollect)
    print
    return actualtargets, predtargets, np.mean(testCollect)
def binaryPrecisionRecall(predictions, targets, numClasses = 6):
for cla in range(numClasses):
confustop = np.array([])
confusbottom = np.array([])
predictions = np.asarray(predictions).flatten()
targets = np.asarray(targets).flatten()
pred1 = np.where(predictions == cla)
pred0 = np.where(predictions != cla)
target1 = np.where(targets == cla)
target0 = np.where(targets != cla)
truePos = np.intersect1d(pred1[0],target1[0]).shape[0]
trueNeg = np.intersect1d(pred0[0],target0[0]).shape[0]
falsePos = np.intersect1d(pred1[0],target0[0]).shape[0]
falseNeg = np.intersect1d(pred0[0],target1[0]).shape[0]
top = np.append(confustop, (truePos, falsePos))
bottom = np.append(confusbottom, (falseNeg, trueNeg))
confusionMatrix = np.vstack((top, bottom))
precision = float(truePos)/(truePos + falsePos + 0.00001) #1 - (how much junk did we give user)
recall = float(truePos)/(truePos + falseNeg + 0.00001) #1 - (how much good stuff did we miss)
f1 = 2*((precision*recall)/(precision+recall+0.00001))
print 'class '+str(cla)+' precision: ', precision
print 'class '+str(cla)+' recall: ', recall
print 'class '+str(cla)+' f1: ', f1
print
# -
# Dataset names, one per device class; the list index doubles as the class
# label (used as keys into compDict throughout this notebook).
sampleList = ['NESTthermostat-nf-10days-96bytes',
              'a-printers-24hrs-96bytes-E-VA-SRV-FW1A-2016-08-09_14-17-vlan34',
              'SonySmartTV-nf-10days-96bytes',
              'a-fs-24hrs-96bytes-E-ASH-SRV-FW1A-2016-08-09_18-17-vlan40',
              'TiVoSeries4-nf-10days-96bytes',
              'b-dc-24hrs-96bytes-E-QD-SRV-FW1A-2016-08-09_18-16-vlan210']
# hexDict = hexTokenizer()
# trainingTargets = []
# trainingSessions = []
# for d in range(len(sampleList)):
# sampleLen = len(compDict[sampleList[d]].keys())
# sampleKeys = random.sample(compDict[sampleList[d]].keys()[:sampleLen], 5)
#
# for key in sampleKeys:
# oneEncoded = oneSessionEncoder(compDict[sampleList[d]][key][0],
# hexDict = hexDict,
# packetReverse=packetReverse,
# padOldTimeSteps = padOldTimeSteps,
# maxPackets = maxPackets,
# packetTimeSteps = packetTimeSteps)
# trainIndex = [0]*numClasses
# trainIndex[d] = 1
# trainingTargets.append(trainIndex)
# trainingSessions.append(oneEncoded[0])
#
#Making the hex dictionary
def hexTokenizer():
    """Build the byte-token vocabulary.

    Returns a dict mapping every two-character lowercase hex byte string
    ('00'..'ff') to its integer value (0..255), plus the end-of-packet
    sentinel '<EOP>' -> 256 (257 tokens total).

    Replaces the original hand-enumerated 256-entry string literal (easy to
    typo, hard to verify) with programmatic generation; the resulting dict
    is identical, including the zero-padding of single-digit values.
    """
    hexDict = {format(i, '02x'): i for i in range(256)}
    hexDict['<EOP>'] = 256  # End Of Packet token
    return hexDict
def training(runname, rnnType, maxPackets, packetTimeSteps, packetReverse, padOldTimeSteps, wtstd,
             lr, decay, clippings, dimIn, dim, numClasses, batch_size, epochs,
             trainPercent):
    """Build and train the hierarchical RNN session classifier (Theano/Blocks, Python 2).

    Architecture: a packet-level encoder RNN (GRU or LSTM, per `rnnType`)
    encodes each packet; a session-level "context" RNN consumes the
    per-packet encodings; its final state feeds a batch-normalized MLP with
    a softmax over `numClasses` device classes.

    Returns (classifierPredict, classifierTrain) -- NOTE: the prediction
    function comes FIRST in the return tuple.

    Relies on module-level globals: sampleList, compDict, oneSessionEncoder,
    pickleFile, random, and the Theano/Blocks imports.
    """
    print locals()
    print
    # Symbolic inputs: X is a 4-tensor of encoded packets, Y one-hot targets.
    X = T.tensor4('inputs')
    Y = T.matrix('targets')
    # Weight/bias initializers shared across the bricks.
    linewt_init = IsotropicGaussian(wtstd)
    line_bias = Constant(1.0)
    rnnwt_init = IsotropicGaussian(wtstd)
    rnnbias_init = Constant(0.0)
    classifierWts = IsotropicGaussian(wtstd)
    learning_rateClass = theano.shared(np.array(lr, dtype=theano.config.floatX))
    # NOTE(review): learning_decay is computed but never applied below.
    learning_decay = np.array(decay, dtype=theano.config.floatX)
    hexDict = hexTokenizer()
    ###DATA PREP
    print 'initializing network graph'
    ###ENCODER
    if rnnType == 'gru':
        rnn = GatedRecurrent(dim=dim, weights_init = rnnwt_init, biases_init = rnnbias_init, name = 'gru')
        dimMultiplier = 2  # GRU forks need 2*dim gate pre-activations
    else:
        rnn = LSTM(dim=dim, weights_init = rnnwt_init, biases_init = rnnbias_init, name = 'lstm')
        dimMultiplier = 4  # LSTM needs 4*dim
    fork = Fork(output_names=['linear', 'gates'],
                name='fork', input_dim=dimIn, output_dims=[dim, dim * dimMultiplier],
                weights_init = linewt_init, biases_init = line_bias)
    ###CONTEXT
    if rnnType == 'gru':
        rnnContext = GatedRecurrent(dim=dim, weights_init = rnnwt_init,
                                    biases_init = rnnbias_init, name = 'gruContext')
    else:
        rnnContext = LSTM(dim=dim, weights_init = rnnwt_init, biases_init = rnnbias_init,
                          name = 'lstmContext')
    forkContext = Fork(output_names=['linearContext', 'gatesContext'],
                       name='forkContext', input_dim=dim, output_dims=[dim, dim * dimMultiplier],
                       weights_init = linewt_init, biases_init = line_bias)
    forkDec = Fork(output_names=['linear', 'gates'],
                   name='forkDec', input_dim=dim, output_dims=[dim, dim*dimMultiplier],
                   weights_init = linewt_init, biases_init = line_bias)
    #CLASSIFIER
    bmlp = BatchNormalizedMLP( activations=[Logistic(),Logistic()],
                               dims=[dim, dim, numClasses],
                               weights_init=classifierWts,
                               biases_init=Constant(0.0001) )
    #initialize the weights in all the functions
    fork.initialize()
    rnn.initialize()
    forkContext.initialize()
    rnnContext.initialize()
    forkDec.initialize()
    bmlp.initialize()

    def onestepEnc(X):
        # Encode one packet: fork projects the raw input, RNN consumes it.
        data1, data2 = fork.apply(X)
        if rnnType == 'gru':
            hEnc = rnn.apply(data1, data2)
        else:
            hEnc, _ = rnn.apply(data2)
        return hEnc

    hEnc, _ = theano.scan(onestepEnc, X) #(mini*numPackets, packetLen, 1, hexdictLen)
    hEncReshape = T.reshape(hEnc[:,-1], (-1, maxPackets, 1, dim)) #[:,-1] takes the last rep for each packet
    #(mini, numPackets, 1, dimReduced)

    def onestepContext(hEncReshape):
        # Session-level RNN over the sequence of per-packet encodings.
        data3, data4 = forkContext.apply(hEncReshape)
        if rnnType == 'gru':
            hContext = rnnContext.apply(data3, data4)
        else:
            hContext, _ = rnnContext.apply(data4)
        return hContext

    hContext, _ = theano.scan(onestepContext, hEncReshape)
    hContextReshape = T.reshape(hContext[:,-1], (-1,dim))
    data5, _ = forkDec.apply(hContextReshape)  # only the 'linear' output is used
    pyx = bmlp.apply(data5)
    softmax = Softmax()
    softoutClass = softmax.apply(pyx)
    costClass = T.mean(CategoricalCrossEntropy().apply(Y, softoutClass))
    #CREATE GRAPH
    cgClass = ComputationGraph([costClass])
    paramsClass = VariableFilter(roles = [PARAMETER])(cgClass.variables)
    updatesClass = Adam(paramsClass, costClass, learning_rateClass, c=clippings)
    #updatesClass = RMSprop(costClass, paramsClass, learning_rateClass, c=clippings)
    #print 'grad compiling'
    #gradients = T.grad(costClass, paramsClass)
    #gradients = clip_norms(gradients, clippings)
    #gradientFun = theano.function([X,Y], gradients, allow_input_downcast=True)
    #print 'finish with grads'
    print 'compiling graph you talented soul'
    classifierTrain = theano.function([X,Y], [costClass, hEnc, hContext, pyx, softoutClass],
                                      updates=updatesClass, allow_input_downcast=True)
    classifierPredict = theano.function([X], softoutClass, allow_input_downcast=True)
    print 'finished compiling'
    # Bookkeeping collections (epochCost/gradNorms/trainAcc only used by
    # the commented-out epoch summary at the bottom).
    epochCost = []
    gradNorms = []
    trainAcc = []
    testAcc = []
    costCollect = []
    trainCollect = []
    print 'training begins'
    iteration = 0
    #epoch
    for epoch in xrange(epochs):
        #iteration/minibatch
        #for start, end in zip(range(0, trainIndex,batch_size),
        #                      range(batch_size, trainIndex, batch_size)):
        trainingTargets = []
        trainingSessions = []
        # One batch per epoch: 5 randomly sampled sessions per class.
        for d in range(len(sampleList)):
            sampleLen = len(compDict[sampleList[d]].keys())
            sampleKeys = random.sample(compDict[sampleList[d]].keys()[:sampleLen], 5)
            for key in sampleKeys:
                oneEncoded = oneSessionEncoder(compDict[sampleList[d]][key][0],
                                               hexDict = hexDict,
                                               packetReverse=packetReverse,
                                               padOldTimeSteps = padOldTimeSteps,
                                               maxPackets = maxPackets,
                                               packetTimeSteps = packetTimeSteps)
                trainIndex = [0]*numClasses   # one-hot target for class d
                trainIndex[d] = 1
                trainingTargets.append(trainIndex)
                trainingSessions.append(oneEncoded[0])
        sessionsMinibatch = np.asarray(trainingSessions).reshape((-1, packetTimeSteps, 1, dimIn))
        targetsMinibatch = np.asarray(trainingTargets)
        costfun = classifierTrain(sessionsMinibatch, targetsMinibatch)
        costCollect.append(costfun[0])
        # costfun[-1] is the softmax output; compare argmax to the targets.
        trainCollect.append(np.mean(np.argmax(costfun[-1],axis=1) == np.argmax(targetsMinibatch, axis=1)))
        iteration+=1
        if iteration == 1:
            print 'you are amazing'
        if iteration%200 == 0:
            print
            print ' Iteration: ', iteration
            print ' Cost: ', np.mean(costCollect[-20:])
            print ' TRAIN accuracy: ', np.mean(trainCollect[-20:])
            print
            #grads = gradientFun(sessionsMinibatch, targetsMinibatch)
            #for gra in grads:
            #    print ' gradient norms: ', np.linalg.norm(gra)
            np.savetxt('/data/fs4/home/bradh/outputs/'+runname+"_TRAIN.csv", trainCollect[::50], delimiter=",")
            np.savetxt('/data/fs4/home/bradh/outputs/'+runname+"_COST.csv", costCollect[::50], delimiter=",")
        #testing accuracy
        if iteration%500 == 0:
            predtar, acttar, testCollect = predictClass(classifierPredict, sampleList, compDict, hexDict,
                                                        numClasses, trainPercent, dimIn,
                                                        maxPackets,packetTimeSteps, padOldTimeSteps)
            binaryPrecisionRecall(predtar, acttar)
            testAcc.append(testCollect)
            np.savetxt('/data/fs4/home/bradh/outputs/'+runname+"_TEST.csv", testAcc, delimiter=",")
        #save the models
        if iteration%500 == 0:
            #pickleFile(classifierTrain, filePath='/data/fs4/home/bradh/outputs/',
            #           fileName=runname+'TRAIN'+str(iteration))
            pickleFile(classifierPredict, filePath='/data/fs4/home/bradh/outputs/',
                       fileName=runname+'PREDICT'+str(iteration))
    #epochCost.append(np.mean(costCollect[-50:]))
    #trainAcc.append(np.mean(trainCollect[-50:]))
    #print 'Epoch: ', epoch
    #module_logger.debug('Epoch:%r',epoch)
    #print 'Epoch cost average: ', epochCost[-1]
    #print 'Epoch TRAIN accuracy: ', trainAcc[-1]
    return classifierPredict, classifierTrain
#TODO: expose classifier dim
# NOTE(review): training() returns (classifierPredict, classifierTrain), so
# the names below are swapped: `train` is actually the compiled PREDICTION
# function and `predict` the training function. The later calls are
# consistent with that (train(...) is used for inference), but the naming
# should be fixed.
train, predict = training(runname, rnnType, maxPackets, packetTimeSteps, packetReverse, padOldTimeSteps, wtstd,
                          lr, decay, clippings, dimIn, dim, numClasses, batch_size, epochs,
                          trainPercent)
hexDict = hexTokenizer()
# NOTE(review): predictClass returns (actual, predicted, accuracy); the
# unpack order below is swapped relative to that -- confirm before trusting
# the per-class precision/recall printout.
predtar, acttar, testCollect = predictClass(train,sampleList, compDict, hexDict,
                                            numClasses, trainPercent, dimIn,
                                            maxPackets,packetTimeSteps, padOldTimeSteps)
binaryPrecisionRecall(predtar, acttar)
# Spot-check: run inference on 10 random sessions of class 5.
d=5
sampleLen = len(compDict[sampleList[d]].keys())
sampleKeys = random.sample(compDict[sampleList[d]].keys()[:sampleLen], 10)
trainingTargets = []
trainingSessions = []
for key in sampleKeys:
    oneEncoded = oneSessionEncoder(compDict[sampleList[d]][key][0],
                                   hexDict = hexDict,
                                   packetReverse=packetReverse,
                                   padOldTimeSteps = padOldTimeSteps,
                                   maxPackets = maxPackets,
                                   packetTimeSteps = packetTimeSteps)
    trainIndex = [0]*numClasses
    trainIndex[d] = 1
    trainingTargets.append(trainIndex)
    trainingSessions.append(oneEncoded[0])
# Hard-coded (packetTimeSteps=28, dimIn=257) reshape for this spot-check.
sessionsMinibatch = np.asarray(trainingSessions).reshape((-1, 28, 1, 257))
np.argmax(train(sessionsMinibatch), axis = 1)
# Encode the last session once more (notebook scratch cell; result unused).
oneSessionEncoder(compDict[sampleList[d]][key][0],
                  hexDict = hexDict,
                  packetReverse=packetReverse,
                  padOldTimeSteps = padOldTimeSteps,
                  maxPackets = maxPackets,
                  packetTimeSteps = packetTimeSteps)
| notebooks/advesarial/bakeoff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# ## Defining a minimal Medium
# + [markdown] tags=[]
# ### Import and methods
# +
import cobra
import pandas as pd
# Gap-filled Finegoldia magna model (relative path; run from the notebook dir).
model_gf = cobra.io.read_sbml_model("2.2/finegoldia_magna_ATCC_29328_2.2.fo.ch.mp.mcb.lt.re.ar.gpr.pw.gf1.gfmm.gf2.circ.xml")
# Exchange reactions to exclude from any medium (O2 -- see notes further down).
unwanted_metabolites = ["EX_o2_e"]
# import medium
snm3 = pd.read_csv("SNM3.csv", sep="\t")
# Map each SNM3 BiGG metabolite id to its exchange reaction with a 10.0 uptake bound.
snm3_dict = {f"EX_{met['BiGG']}_e" : 10.0 for i,met in snm3.iterrows()}
snm3_dict = {k: v for k,v in snm3_dict.items() if k not in unwanted_metabolites}
# For all exchanges open
for reac in model_gf.exchanges:
    if reac.id in unwanted_metabolites: # eliminates unwanted metabolites (O2)
        reac.lower_bound = 0.0
    else:
        reac.lower_bound = -1000.0
# Define SNM3 medium
#for reac in model_gf.exchanges:
#    if reac.id in snm3_dict:
#        reac.lower_bound = -10.0
#    else:
#        reac.lower_bound = 0.0
# +
def find_conpro(model, metabolite, conpro): # finds the metabolites that are produced/consumed from this metabolite
    """Return the unique metabolites produced from / consumed alongside `metabolite`.

    conpro == "produced": for every reaction that consumes `metabolite`
    (negative coefficient), collect the reaction's products.
    conpro == "consumed": for every reaction that produces `metabolite`
    (positive coefficient), collect the reaction's reactants.

    Bug fix: the original validated `conpro` inside the reaction loop with
    `elif conpro != "produced" or conpro == "consumed"`, which printed the
    "wrong argument" error for perfectly valid "consumed" calls whenever a
    reaction did not match the coefficient test. The argument is now
    checked once, up front.
    """
    if conpro not in ("produced", "consumed"):
        print(f"Wrong conpro argument {conpro}, use one of: produced, consumed")
        return []
    r_query = []
    for r in model.metabolites.get_by_id(metabolite).reactions:
        coeff = r.get_coefficient(metabolite)
        if conpro == "produced" and coeff < 0:
            r_query += [m for m,stoi in r.metabolites.items() if stoi > 0]
        elif conpro == "consumed" and coeff > 0:
            r_query += [m for m,stoi in r.metabolites.items() if stoi < 0]
    return list(set(r_query))
def tree_metabolite(model, metabolite, conpro, depth, reac_thresh): # makes a tree out of produced/consumed metabolites { {}, {} }
    """Recursively expand `metabolite` into a nested dict of connected
    metabolite ids, keeping only metabolites that participate in at most
    `reac_thresh` reactions. Recursion stops at depth 0 (leaves are None).
    """
    if depth == 0:
        return None
    branches = {}
    for met in find_conpro(model, metabolite, conpro):
        if len(met.reactions) <= reac_thresh:
            branches[met.id] = tree_metabolite(model, met.id, conpro, depth - 1, reac_thresh)
    return branches
def tree_str(nested_tree, direction = ">", delimiter = "|--", depth = 0): # makes a tree to a string
    """Render a nested {name: subtree} dict as an indented text diagram.

    Each node is printed on its own line, prefixed by `delimiter` repeated
    once per nesting level and then the `direction` marker.
    """
    rendered = []
    for node, subtree in nested_tree.items():
        rendered.append(f"{delimiter * depth}{direction}{node}\n")
        if subtree:
            rendered.append(tree_str(subtree, direction, delimiter, depth + 1))
    return "".join(rendered)
# + [markdown] tags=[]
# ### 1. Defining a big medium with different growth-rates
# + tags=[]
# Compute a component-minimized medium for each growth rate 0.1 .. 2.1.
growth_rates = [gr / 10 for gr in range(1, 22)]
minmeds_dict = {gr: cobra.medium.minimal_medium(model_gf, gr, minimize_components = True) for gr in growth_rates}
minmeds_df = pd.concat(minmeds_dict, axis=1)
# + tags=[]
minmeds_df
# -
# 26dap__M_e | LalaDgluMdapDala_e
# ( fe3_e |fe3pyovd_kt_e ) & ( peamn_e & | fe2_e )
# tyr__L_e | 4hphac_e
#
# EX_peamn_e, EX_fe3pyovd_kt_e intermittedly there, both involved in iron
# EX_o2_e biologically not in FM -> take out
#
# --- found out via tree making ---
# Inspect what can produce one of the interchangeable iron carriers.
metab_id = "fe3pyovd_kt_e"
tm = { metab_id: tree_metabolite(model_gf, metab_id, "produced", 4, 10) }
print(tree_str(tm, ">"))
# => minimal medium: "EX_4hphac_e", "EX_LalaDgluMdapDala_e", "EX_peamn_e", "EX_fe3pyovd_kt_e" removed,
# since simple metals, amino acids and simpler structures were preferred
minimal_medium_all = list(minmeds_df.index)
for x in ["EX_4hphac_e", "EX_LalaDgluMdapDala_e", "EX_peamn_e", "EX_fe3pyovd_kt_e"]:
    minimal_medium_all.remove(x)
# Fixed 10.0 uptake bound for every remaining exchange.
minimal_medium_all = {r: 10.0 for r in minimal_medium_all}
print(len(minimal_medium_all))
model_gf.medium = minimal_medium_all
model_gf.slim_optimize()
# adding SNM3 medium to minimal medium
snm3_dict = {k: v for k,v in snm3_dict.items() if k in model_gf.reactions}
minimal_and_snm3_medium = minimal_medium_all.copy()
minimal_and_snm3_medium.update(snm3_dict)
model_gf.medium = minimal_and_snm3_medium
model_gf.slim_optimize()
# ### 2. Taking one out a a time until no more growth
# + [markdown] tags=[]
# #### Helping methods
# +
def littlest_growth_diff(model, medium:dict, threshold):
    """Find the medium component whose removal reduces growth the least.

    Works on a copy of `model`. Returns that component's key, or None
    (after printing "No more growth") if removing even the least-impactful
    component drops growth to <= `threshold`. Ties between equal growth
    drops are resolved in favor of the later component.
    """
    sandbox = model.copy()
    base_medium = medium.copy()
    sandbox.medium = base_medium
    baseline = sandbox.slim_optimize()
    # growth drop -> component removed (later entries overwrite equal drops)
    drop_to_component = {}
    for component in base_medium:
        trial = {k: v for k, v in base_medium.items() if k != component}
        sandbox.medium = trial
        drop_to_component[baseline - sandbox.slim_optimize()] = component
    best = drop_to_component[min(drop_to_component)]
    reduced = {k: v for k, v in base_medium.items() if k != best}
    sandbox.medium = reduced
    if sandbox.slim_optimize() <= threshold:
        print("No more growth")
        return None
    return best
def eliminate_until(model, medium:dict, threshold):
    """Greedily shrink `medium`, removing the least-impactful component each
    round, until any further removal would drop growth to <= `threshold`.

    Returns the reduced medium dict. `medium` itself is not mutated.

    Fix: the original made an unused `model.copy()` on every recursion
    level (littlest_growth_diff already works on its own copy); the dead
    copy is removed.
    """
    elim_metab = littlest_growth_diff(model, medium, threshold)
    if elim_metab is None:
        return medium
    new_medium = medium.copy()
    new_medium.pop(elim_metab)
    return eliminate_until(model, new_medium, threshold)
# -
# #### 2.1 Only for minimal medium
print(f"Before minimization: {len(minimal_medium_all)}")
mini_mini_all = eliminate_until(model_gf, minimal_medium_all, 0.01)
print(f"After minimization: {len(mini_mini_all)}")
print(mini_mini_all.keys())
# #### 2.2 For minimal medium + SNM3
print(f"Before minimization: {len(minimal_and_snm3_medium)}")
mini_mini_snm3 = eliminate_until(model_gf, minimal_and_snm3_medium, 0.01)
print(f"After minimization: {len(mini_mini_snm3)}")
# ### 3. Grow & report final media
# Normal minimal medium
model_gf.medium = mini_mini_all
model_gf.slim_optimize()
# SNM3 minimal medium
model_gf.medium = mini_mini_snm3
model_gf.slim_optimize()
# #### 3.1 Difference of media
# The final media differ in two ways:
# Difference between two media:
combined_set = set(mini_mini_snm3.keys()) | set(mini_mini_all.keys())
print(combined_set - set(mini_mini_all.keys()))
print(combined_set - set(mini_mini_snm3.keys()))
# #### 3.2 Missing in SNM3
# The final difference is, that Reduced glutathione (gthrd_e) is in the standard minimal medium, while L-Glutamate (glu__L_e), L-Cysteine (cys__L_e) remain in the snm3 combined minimal medium
# Difference between minmed and snm3:
print(set(mini_mini_all.keys() - set(snm3_dict.keys())))
# Difference between snm3 minmed and snm3:
print(set(mini_mini_snm3.keys() - set(snm3_dict.keys())))
# The 10 missing metabolites in the SNM3 medium are:
# - Meso-2,6-Diaminoheptanedioate 26dap__M_e
# - Octadecanoate (n-C18:0) ocdca_e
# - Reduced glutathione gthrd_e
# - L-Tyrosine tyr__L_e
# - L-Isoleucine ile__L_e
# - Maltohexaose malthx_e
# - Benzoate bz_e
# - NMN C11H14N2O8P nmn_e
# - Hexadecanoate (n-C16:0) hdca_e
# - O-Phospho-L-serine pser__L_e
#
# which can be decreased through the snm3 minimal medium to 9 because Reduced glutathione (gthrd_e) can be avoided
# +
# Reload a fresh copy of the model before exporting the result tables.
model_gf = cobra.io.read_sbml_model("2.2/finegoldia_magna_ATCC_29328_2.2.fo.ch.mp.mcb.lt.re.ar.gpr.pw.gf1.gfmm.gf2.circ.xml")
# Export as table
exp_df = pd.DataFrame(mini_mini_all.items(), columns=['reaction', 'flux'])
# Human-readable metabolite name of each exchange reaction's (sole) metabolite.
names = [list(model_gf.reactions.get_by_id(row[1]['reaction']).metabolites.keys())[0].name for row in exp_df.iterrows()]
exp_df["names"] = names
exp_df.to_csv("../Tables/minimal_medium_all.csv")
exp_df = pd.DataFrame(mini_mini_snm3.items(), columns=['reaction', 'flux'])
names = [list(model_gf.reactions.get_by_id(row[1]['reaction']).metabolites.keys())[0].name for row in exp_df.iterrows()]
exp_df["names"] = names
exp_df.to_csv("../Tables/minimal_medium_snm3.csv")
# -
for row in exp_df.iterrows():
    print(row[1]["reaction"])
| min_medium_search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pickle
import matplotlib.pyplot as plt
import pprint
# Shorthand pretty-printer for exploring the loaded structures.
pp = pprint.PrettyPrinter(indent=4).pprint
# -
# Load the precomputed star-rating counts. Fix: use a context manager so
# the pickle file handle is closed (the original open() was never closed).
with open('/home/tlappas/data_science/Yelp-Ratings/data/eda/star_counts.pkl', 'rb') as pkl:
    counts = pickle.load(pkl)
# Overall star-rating distribution across all reviews.
# counts[2] appears to hold (star, frequency) pairs -- TODO confirm upstream.
plt.bar(x=[1,2,3,4,5], height=list(zip(*counts[2]))[1])
plt.title('All Reviews\n', fontsize=20)
plt.xlabel('\nStar Rating', fontsize=16)
plt.ylabel('Frequencies\n', fontsize=16)
# +
# Per-category distributions on a 5x5 subplot grid.
data = counts[1]
fig, ax = plt.subplots(5, 5, sharey='row', figsize=(20,25))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)
fig.suptitle('Star Ratings by Categories', fontsize=22)
ax = ax.reshape(-1)  # flatten the axes grid for linear indexing
for i, (key, val) in enumerate(data.items()):
    if i % 5 == 0:  # y-label only on the first column
        ax[i].set_ylabel('Frequency\n', fontsize=14)
    ax[i].set_title(key+'\n', fontsize=16)
    ax[i].set_xlabel('Star Rating', fontsize=14)
    ax[i].set_ylim([0,1800000])
    ax[i].set_xticks([1,2,3,4,5])
    ax[i].bar(x=[1,2,3,4,5], height=val)
# Drop the three unused axes (22 categories in a 25-cell grid).
fig.delaxes(ax[22])
fig.delaxes(ax[23])
fig.delaxes(ax[24])
# -
| notebooks/tsl-star-ratings-by-category.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from copy import deepcopy
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
import gym
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import pandas as pd
class ReplayBuffer():
    """Fixed-size circular experience-replay buffer for DQN training.

    Stores (state, action, reward, next_state, done) transitions in
    pre-allocated numpy arrays and serves uniform random batches.
    """
    def __init__(self, mem_size, batch_size, input_dims):
        self.mem_size = mem_size
        self.mem_centr = 0            # total transitions ever stored
        self.batch_size = batch_size
        obs_shape = (self.mem_size, *input_dims)
        self.state_memory = np.zeros(obs_shape, dtype=np.float32)
        self.new_state_memory = np.zeros(obs_shape, dtype=np.float32)
        self.action_memory = np.zeros(self.mem_size, dtype=np.int32)
        self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)
        self.terminal_memory = np.zeros(self.mem_size, dtype=np.int32)

    def store_transitions(self, state, action, reward, new_state, done):
        """Write one transition, overwriting the oldest slot when full."""
        slot = self.mem_centr % self.mem_size
        self.state_memory[slot] = state
        self.new_state_memory[slot] = new_state
        self.action_memory[slot] = action
        self.reward_memory[slot] = reward
        self.terminal_memory[slot] = 1 - int(done)   # 0 marks a terminal state
        self.mem_centr = self.mem_centr + 1

    def is_sampleable(self):
        """True once at least one full batch has been stored."""
        return self.mem_centr >= self.batch_size

    def sample_buffer(self):
        """Return a uniformly sampled batch (no replacement), or [] if not ready."""
        if not self.is_sampleable():
            return []
        upper = min(self.mem_size, self.mem_centr)
        picks = np.random.choice(upper, self.batch_size, replace=False)
        return (self.state_memory[picks],
                self.new_state_memory[picks],
                self.action_memory[picks],
                self.reward_memory[picks],
                self.terminal_memory[picks])
def build_dqn(lr, n_actions):
    """Build a (100, 100) ReLU MLP Q-network with a linear head of
    `n_actions` Q-values, compiled with Adam + MSE loss."""
    net = keras.Sequential()
    net.add(keras.layers.Dense(100, activation='relu'))
    net.add(keras.layers.Dense(100, activation='relu'))
    net.add(keras.layers.Dense(n_actions, activation=None))
    net.compile(optimizer=Adam(learning_rate=lr), loss='mean_squared_error')
    return net
# +
class Agent():
    """Epsilon-greedy DQN agent with a periodically synced target network.

    NOTE(review): despite the notebook's "ddqn" naming, train() uses the
    plain target-network DQN update (max over target-network Q-values),
    not the Double-DQN action selection -- confirm intent.
    """
    def __init__(self, n_actions, input_dims,
                 lr=1e-4, gamma=0.9, mem_size=128, batch_size=64,
                 epsilon_decay=0.995, target_update_frequency=256):
        self.n_actions = n_actions
        self.gamma = gamma                      # reward discount factor
        self.epsilon_decay = epsilon_decay      # multiplicative per-train-step decay
        self.batch_size = batch_size
        self.target_update_freq = target_update_frequency
        self.policy_network = build_dqn(lr=lr, n_actions=n_actions)
        # Target network starts as an exact copy of the policy network.
        self.target_network = deepcopy(self.policy_network)
        self.replay_mem = ReplayBuffer(
            mem_size=mem_size, batch_size=batch_size, input_dims=input_dims)
        self.epsilon = 1    # start fully exploratory

    def choose_action(self, obs):
        """Epsilon-greedy action: random with prob epsilon, else argmax Q."""
        if np.random.random() < self.epsilon:
            action = np.random.randint(self.n_actions)
        else:
            obs = np.array([obs])   # add batch dimension for predict()
            policy_values = self.policy_network.predict(obs)
            action = np.argmax(policy_values)
        return action

    def store_memory(self, state, action, reward, new_state, done):
        """Forward one transition into the replay buffer."""
        self.replay_mem.store_transitions(state, action, reward, new_state, done)

    def train(self):
        """One gradient step on a replay batch; returns the batch loss
        (or 0 if the buffer cannot supply a full batch yet)."""
        if not(self.replay_mem.is_sampleable()):
            return 0
        states, new_states, actions, rewards, dones = self.replay_mem.sample_buffer()
        q_eval = self.policy_network.predict(states)
        q_next = self.target_network.predict(new_states)
        batch_index = np.arange(self.batch_size)
        # TD target only replaces the Q-value of the action actually taken;
        # `dones` is 0 for terminal transitions, zeroing the bootstrap term.
        q_target = deepcopy(q_eval)
        q_target[batch_index, actions] = rewards + \
            self.gamma * np.max(q_next, axis=1) * dones
        loss = self.policy_network.train_on_batch(states, q_target)
        # Decay exploration, floored at 0.1.
        self.epsilon = max(self.epsilon * self.epsilon_decay, 0.1)
        # Hard-sync the target network every target_update_freq stored steps.
        if(self.replay_mem.mem_centr % self.target_update_freq == 0):
            self.target_network.set_weights(self.policy_network.get_weights())
        return loss

    def save_model(self, file_path='./model/tf_ddqn_model.model'):
        """Persist the policy network to disk."""
        self.policy_network.save(file_path)

    def load_model(self, file_path='./model/tf_ddqn_model.model'):
        """Load a saved model into both policy and target networks."""
        self.policy_network = load_model(file_path)
        self.target_network = load_model(file_path)
# -
# Graph mode (predict/train_on_batch in a loop) and quieter TF logging.
tf.compat.v1.disable_eager_execution()
tf.get_logger().setLevel('ERROR')
# +
# Learning hyperparameters.
lr = 3e-4
gamma = 0.99
epsilon_decay = 1 - (2e-5)   # very slow per-step exploration decay
episodes = 600
# +
# Replay-buffer / target-network hyperparameters.
mem_size = 1024
batch_size = 32
target_update_frequency = 300
# -
env = gym.make('LunarLander-v2')
agent = Agent(n_actions=env.action_space.n, input_dims=env.observation_space.shape,
              lr=lr, gamma=gamma, mem_size=mem_size, batch_size=batch_size,
              epsilon_decay=epsilon_decay, target_update_frequency=target_update_frequency)
# Per-episode bookkeeping for the plots below.
scores = []
eps = []
losses = []
# +
# Main training loop: one gradient step per environment step.
pbar = tqdm(range(episodes))
for i in pbar:
    done = False
    score = 0
    obs = env.reset()
    ep_loss = []
    while not(done):
        action = agent.choose_action(obs)
        new_obs, reward, done, _ = env.step(action)   # gym < 0.26 4-tuple API
        #env.render()
        score = score + reward
        agent.store_memory(state=obs, action=action, reward=reward, new_state=new_obs, done=done)
        obs = deepcopy(new_obs)
        loss = agent.train()
        ep_loss.append(loss)
    scores.append(score)
    eps.append(agent.epsilon)
    losses.append(ep_loss)
    pbar.set_description("Current_score = %s" % score)
# -
agent.save_model()
env.close()
# Exploration-rate trajectory over training.
plt.plot(eps, label="epsilon")
plt.legend()
plt.savefig('./plots/tf/ddqn/epsilon.png')
plt.show()
# Mean training loss per episode.
losses_array = []
for x in losses:
    losses_array.append(np.mean(np.array(x)))
plt.plot(losses_array, label="loss")
plt.legend()
plt.savefig('./plots/tf/ddqn/losses.png')
plt.show()
# +
# Rolling-mean smoothing of the loss curve (window of 50 episodes).
resolution = 50
cumsum_losses = np.array(pd.Series(np.array(losses_array)).rolling(window=resolution).mean() )
plt.plot(cumsum_losses, label="loss")
plt.legend()
plt.savefig('./plots/tf/ddqn/losses_trend.png')
plt.show()
# -
# Raw episode returns.
plt.plot(scores, label="rewards")
plt.legend()
plt.savefig('./plots/tf/ddqn/rewards.png')
plt.show()
# +
# Rolling-mean smoothing of the return curve.
resolution = 50
cumsum_rewards = np.array(pd.Series(np.array(scores)).rolling(window=resolution).mean() )
plt.plot(cumsum_rewards, label="rewards")
plt.legend()
plt.savefig('./plots/tf/ddqn/rewards_trend.png')
plt.show()
# +
# Evaluation: reload the saved model and run a pure-greedy policy.
test_env = gym.make('LunarLander-v2')
test_agent = Agent(n_actions=test_env.action_space.n, input_dims=test_env.observation_space.shape)
test_agent.epsilon = 0.0   # disable exploration for evaluation
test_agent.load_model()
# +
test_episodes = 10
pbar = tqdm(range(test_episodes))
for i in pbar:
    done = False
    score = 0
    obs = test_env.reset()
    test_env.render()
    while not(done):
        action = test_agent.choose_action(obs)
        new_obs, reward, done, _ = test_env.step(action)
        test_env.render()
        score = score + reward
        obs = deepcopy(new_obs)
    pbar.set_description("Current_score = %s" % score)
    print("score in episode ", (i+1) ," : ",score)
test_env.close()
| Week5/2_ddqn_tf_lunar_lander.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 2.413514, "end_time": "2021-01-28T15:59:34.938111", "exception": false, "start_time": "2021-01-28T15:59:32.524597", "status": "completed"} tags=[]
# !pip install tensorflow==2.1.1 import_ipynb==0.1.3
# + papermill={"duration": 3.004399, "end_time": "2021-01-28T15:59:37.950270", "exception": false, "start_time": "2021-01-28T15:59:34.945871", "status": "completed"} tags=[]
import tensorflow as tf
from tensorflow.keras.applications import ResNet50V2, MobileNetV3Small
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras import Model
import import_ipynb
from codait_utils import *
# + papermill={"duration": 0.029222, "end_time": "2021-01-28T15:59:37.986639", "exception": false, "start_time": "2021-01-28T15:59:37.957417", "status": "completed"} tags=[]
unzip('.','data_small.zip')
# + papermill={"duration": 0.822596, "end_time": "2021-01-28T15:59:38.817009", "exception": false, "start_time": "2021-01-28T15:59:37.994413", "status": "completed"} tags=[]
# !mv data_small data
# + papermill={"duration": 0.127534, "end_time": "2021-01-28T15:59:38.952236", "exception": false, "start_time": "2021-01-28T15:59:38.824702", "status": "completed"} tags=[]
# Input geometry and class count for the binary classifier.
batch_size = 32
img_height = 244   # NOTE(review): 244, not the conventional 224 -- confirm intended
img_width = 244
input_shape = (img_width,img_height)
num_classes=2
# Train/validation datasets from directory structure (one subfolder per class).
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    'data/train',
    validation_split=None,
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    'data/val',
    validation_split=None,
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# One-hot encode the integer labels for categorical_crossentropy.
train_ds = train_ds.map(lambda x, y: (x, tf.one_hot(y, depth=num_classes)))
val_ds = val_ds.map(lambda x, y: (x, tf.one_hot(y, depth=num_classes)))
# + papermill={"duration": 0.014987, "end_time": "2021-01-28T15:59:38.975558", "exception": false, "start_time": "2021-01-28T15:59:38.960571", "status": "completed"} tags=[]
def my_net(model,freeze_layers=10,full_freeze='N'):
    """Append a (512, 512, 2) dropout-regularized classification head to a
    backbone model and return the combined Model.

    If `full_freeze` is anything other than 'N', the first `freeze_layers`
    backbone layers are frozen (transfer-learning mode).

    NOTE(review): the 2-unit output uses 'sigmoid' while training below uses
    categorical_crossentropy on one-hot targets; 'softmax' would be the
    conventional pairing -- confirm intent.
    """
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(2,activation='sigmoid')(x)
    model_final = Model(model.input,out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# + papermill={"duration": 0.015812, "end_time": "2021-01-28T15:59:38.999942", "exception": false, "start_time": "2021-01-28T15:59:38.984130", "status": "completed"} tags=[]
#model = ResNet50V2(weights='imagenet',include_top=False)
#model = my_net(model)
# + papermill={"duration": 5.130591, "end_time": "2021-01-28T15:59:44.139230", "exception": false, "start_time": "2021-01-28T15:59:39.008639", "status": "completed"} tags=[]
# MobileNetV2 backbone without its top classifier; my_net attaches the
# custom 2-class head. NOTE(review): no `weights=` argument, so this uses
# the Keras default rather than explicitly requesting pretrained weights.
model = tf.keras.applications.MobileNetV2(
    input_shape=(244,244,3), alpha=1.0, include_top=False,
    input_tensor=None, pooling=None, classes=2,
    classifier_activation='softmax'
)
model = my_net(model)
# + papermill={"duration": 0.031121, "end_time": "2021-01-28T15:59:44.197588", "exception": false, "start_time": "2021-01-28T15:59:44.166467", "status": "completed"} tags=[]
#model = tf.keras.applications.VGG16(
# include_top=True, weights=None, input_tensor=None,
# input_shape=(244, 244, 3), pooling=None, classes=2,
# classifier_activation='softmax'
#)
#model = my_net(model)
# + papermill={"duration": 0.040951, "end_time": "2021-01-28T15:59:44.261989", "exception": false, "start_time": "2021-01-28T15:59:44.221038", "status": "completed"} tags=[]
# Compile for one-hot 2-class targets (see NOTE on my_net about the
# sigmoid output pairing).
model.compile(
    optimizer="adam",
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
# + papermill={"duration": 21.224432, "end_time": "2021-01-28T16:00:05.511392", "exception": false, "start_time": "2021-01-28T15:59:44.286960", "status": "completed"} tags=[]
# Short 2-epoch training run on the small demo dataset.
model.fit(
    train_ds,
    batch_size=batch_size,
    epochs=2,
    validation_data=val_ds
)
# + papermill={"duration": 0.869714, "end_time": "2021-01-28T16:00:06.410787", "exception": false, "start_time": "2021-01-28T16:00:05.541073", "status": "completed"} tags=[]
# !rm -Rf model
# + papermill={"duration": 16.300656, "end_time": "2021-01-28T16:00:22.737170", "exception": false, "start_time": "2021-01-28T16:00:06.436514", "status": "completed"} tags=[]
# Export as TF SavedModel, then zip it for download.
model.save('model')
# + papermill={"duration": 1.743317, "end_time": "2021-01-28T16:00:24.524372", "exception": false, "start_time": "2021-01-28T16:00:22.781055", "status": "completed"} tags=[]
zipdir('model.zip', 'model')
# -
# !rm -Rf model
| train-trusted-ai.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Basic NumPy
#
# NumPy ('Numerical Python') is the defacto standard module for doing numerical work in Python. Its main feature is its array data type which allows very compact and efficient storage of homogenous (of the same type) data.
#
# NumPy ('Numerical Python') is the de facto standard module for doing numerical work in Python. Its main feature is its array data type, which allows very compact and efficient storage of homogeneous (same-type) data.
#
# As you go through this material, you'll likely find it useful to refer to the [NumPy documentation](https://docs.scipy.org/doc/numpy/), particularly the [array objects](https://docs.scipy.org/doc/numpy/reference/arrays.html) section.
#
# As with `pandas` there is a standard convention for importing `numpy`, and that is as `np`:
import numpy as np
# Now that we have access to the `numpy` package we can start using its features.
#
# ## Creating arrays
#
# In many ways a NumPy array can be treated like a standard Python `list` and much of the way you interact with it is identical. Given a list, you can create an array as follows:
# A NumPy array can be built directly from a Python list.
python_list = [1, 2, 3, 4, 5, 6, 7, 8]
numpy_array = np.array(python_list)
print(numpy_array)
# ndim gives the number of dimensions
numpy_array.ndim
# the shape of an array is a tuple of its length in each dimension. In this case it is only 1-dimensional
numpy_array.shape
# as in standard Python, len() gives a sensible answer
len(numpy_array)
# A nested list becomes a 2-dimensional array (2 rows x 3 columns here).
nested_list = [[1, 2, 3], [4, 5, 6]]
two_dim_array = np.array(nested_list)
print(two_dim_array)
# ndim is now 2 ...
two_dim_array.ndim
# ... and shape reports both dimensions.
two_dim_array.shape
# It's very common when working with data to not have it already in a Python list but rather to want to create some data from scratch. `numpy` comes with a whole suite of functions for creating arrays. We will now run through some of the most commonly used.
# The first is `np.arange` (meaning "array range") which works in a very similar fashion to the standard Python `range()` function, including how it defaults to starting from zero, doesn't include the number at the top of the range and how it allows you to specify a 'step':
np.arange(10)  #0 .. n-1 (!)
np.arange(1, 9, 2)  # start, end (exclusive), step
# Next up is the `np.linspace` (meaning "linear space") which generates evenly spaced floating point numbers starting from the first argument up to the second argument. The third argument defines how many numbers to create:
np.linspace(0, 1, 6)  # start, end, num-points
# Note how it included the end point unlike `arange()`. You can change this feature by using the `endpoint` argument:
np.linspace(0, 1, 5, endpoint=False)
# `np.ones` creates an n-dimensional array filled with the value `1.0`. The argument you give to the function defines the shape of the array:
np.ones((3, 3))  # reminder: (3, 3) is a tuple
# Likewise, you can create an array of any size filled with zeros:
np.zeros((2, 2))
# The `np.eye` (referring to the mathematical identity matrix, commonly labelled as `I`) creates a square matrix of a given size with `1.0` on the diagonal and `0.0` elsewhere:
np.eye(3)
# The `np.diag` creates a square matrix with the given values on the diagonal and `0.0` elsewhere:
np.diag([1, 2, 3, 4])
# Finally, you can fill an array with random numbers:
np.random.rand(4)  # uniform in [0, 1]
np.random.randn(4)  # Gaussian (mean 0, variance 1)
# ### Exercise 5
#
# - Experiment with `arange`, `linspace`, `ones`, `zeros`, `eye` and `diag`.
# - Create different kinds of arrays with random numbers.
# - Look at the function `np.empty`. What does it do? When might this be useful?
# ## Reshaping arrays
#
# Behind the scenes, a multi-dimensional NumPy `array` is just stored as a linear segment of memory. The fact that it is presented as having more than one dimension is simply a layer on top of that (sometimes called a *view*). This means that we can simply change that interpretive layer and change the shape of an array very quickly (i.e without NumPy having to copy any data around).
#
# This is mostly done with the `reshape()` method on the array object:
# 16 elements so the array can be reshaped to (2, 8) or (4, 4).
my_array = np.arange(16)
my_array
my_array.shape
# reshape() returns a new view with the requested shape; the original array is untouched.
my_array.reshape((2, 8))
my_array.reshape((4, 4))
# Note that if you check, `my_array.shape` will still return `(16,)` as `reshaped` is simply a *view* on the original data, it hasn't actually *changed* it. If you want to edit the original object in-place then you can use the `resize()` method.
#
# You can also transpose an array using the `transpose()` method which mirrors the array along its diagonal:
my_array.reshape((2, 8)).transpose()
my_array.reshape((4,4)).transpose()
# ### Exercise 6
#
# Using the NumPy documentation at https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html, to create, **in one line** a NumPy array which looks like:
#
# ```python
# [10, 60, 20, 70, 30, 80, 40, 90, 50, 100]
# ```
#
# Hint: you will need to use `transpose()`, `reshape()` and `arange()` as well as one new function from the "Shape manipulation" section of the documentation. Can you find a method which uses less than 4 function calls?
# There is [still more numpy to learn](14_more_numpy.ipynb)
# ^ end of 13_basic_numpy.ipynb (file-concatenation boundary)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # G2Engine Reference
#
# More information:
#
# 1. [GitHub repository](https://github.com/Senzing/docker-jupyter)
# 1. [Senzing documentation](http://docs.senzing.com/?python#g2config)
# ## Table of contents
#
# 1. [Prepare environment](#Prepare-environment)
# 1. [Initialize Senzing configuration](#Initialize-Senzing-configuration)
# 1. [Initialize python environment](#Initialize-python-environment)
# 1. [Helper class for JSON rendering](#Helper-class-for-JSON-rendering)
# 1. [System path](#System-path)
# 1. [Initialize variables](#Initialize-variables)
# 1. [G2Engine](#G2Engine)
# 1. [G2Engine initialization](#G2Engine-initialization)
# 1. [initWithConfigIDV2](#initWithConfigIDV2)
# 1. [reinitV2](#reinitV2)
# 1. [primeEngine](#primeEngine)
# 1. [getActiveConfigID](#getActiveConfigID)
# 1. [exportConfig](#exportConfig)
# 1. [stats](#stats)
# 1. [getRepositoryLastModifiedTime](#getRepositoryLastModifiedTime)
# 1. [Insert](#Insert)
# 1. [Insert parameters](#Insert-parameters)
# 1. [addRecord](#addRecord)
# 1. [addRecordWithReturnedRecordID](#addRecordWithReturnedRecordID)
# 1. [addRecordWithInfo](#addRecordWithInfo)
# 1. [Search](#Search)
# 1. [Record search](#Record-search)
# 1. [getRecordV2](#getRecordV2)
# 1. [Entity Search](#Entity-Search)
# 1. [getEntityByRecordIDV2](#getEntityByRecordIDV2)
# 1. [getEntityByEntityIDV2](#getEntityByEntityIDV2)
# 1. [Search By Attributes](#Search-By-Attributes)
# 1. [searchByAttributes](#searchByAttributes)
# 1. [searchByAttributesV2](#searchByAttributesV2)
# 1. [Finding Paths](#Finding-Paths)
# 1. [findPathByEntityID](#findPathByEntityID)
# 1. [findPathByEntityIDV2](#findPathByEntityIDV2)
# 1. [findPathByRecordID](#findPathByRecordID)
# 1. [findPathByRecordIDV2](#findPathByRecordIDV2)
# 1. [Finding Paths with Exclusions](#Finding-Paths-with-Exclusions)
# 1. [findPathExcludingByEntityID](#findPathExcludingByEntityID)
# 1. [findPathExcludingByRecordID](#findPathExcludingByRecordID)
# 1. [Finding Paths with Required Sources](#Finding-Paths-with-Required-Sources)
# 1. [findPathIncludingSourceByEntityID](#findPathIncludingSourceByEntityID)
# 1. [findPathIncludingSourceByRecordID](#findPathIncludingSourceByRecordID)
# 1. [Finding Networks](#Finding-Networks)
# 1. [findNetworkByEntityID](#findNetworkByEntityID)
# 1. [findNetworkByEntityIDV2](#findNetworkByEntityIDV2)
# 1. [findNetworkByRecordID](#findNetworkByRecordID)
# 1. [findNetworkByRecordIDV2](#findNetworkByRecordIDV2)
# 1. [Connection Details](#Connection-details)
# 1. [whyEntityByRecordID](#whyEntityByRecordID)
# 1. [whyEntityByRecordIDV2](#whyEntityByRecordIDV2)
# 1. [whyEntityByEntityID](#whyEntityByEntityID)
# 1. [whyEntityByEntityIDV2](#whyEntityByEntityIDV2)
# 1. [Replace](#Replace)
# 1. [replaceRecord](#replaceRecord)
# 1. [replaceRecordWithInfo](#replaceRecordWithInfo)
# 1. [Re-evaluate](#Re-evaluate)
# 1. [reevaluateRecord](#reevaluateRecord)
# 1. [reevaluateRecordWithInfo](#reevaluateRecordWithInfo)
# 1. [reevaluateEntity](#reevaluateEntity)
# 1. [reevaluateEntityWithInfo](#reevaluateEntityWithInfo)
# 1. [Reporting](#Reporting)
# 1. [exportJSONEntityReport](#exportJSONEntityReport)
# 1. [fetchNext](#fetchNext)
# 1. [closeExport](#closeExport)
# 1. [exportCSVEntityReport](#exportCSVEntityReport)
# 1. [Redo Processing](#Redo-Processing)
# 1. [countRedoRecords](#countRedoRecords)
# 1. [getRedoRecord](#getRedoRecord)
# 1. [process](#process)
# 1. [processWithInfo](#processWithInfo)
# 1. [processRedoRecord](#processRedoRecord)
# 1. [processRedoRecordWithInfo](#processRedoRecordWithInfo)
# 1. [Delete](#Delete)
# 1. [deleteRecord](#deleteRecord)
# 1. [deleteRecordWithInfo](#deleteRecordWithInfo)
# 1. [Cleanup](#Cleanup)
# 1. [purgeRepository](#purgeRepository)
# 1. [destroy](#destroy)
# ## Prepare environment
# ### Initialize Senzing configuration
#
# Run [senzing-G2ConfigMgr-reference.ipynb](senzing-G2ConfigMgr-reference.ipynb)
# to install a Senzing Engine configuration in the database.
# ### Initialize python environment
# +
import os
import sys
import json
# For RenderJSON
import uuid
from IPython.display import display_javascript, display_html, display
# -
# ### Helper class for JSON rendering
#
# A class for pretty-printing JSON.
# Not required by Senzing,
# but helps visualize JSON.
class RenderJSON(object):
    """Pretty-print JSON as a collapsible HTML widget in Jupyter.

    Not required by Senzing, but helps visualize JSON responses.
    Rendering relies on the notebook front-end loading renderjson.js,
    so `_ipython_display_` only works inside IPython/Jupyter.
    """
    def __init__(self, json_data):
        # Normalize the input to a JSON string. Accepts a dict, raw
        # bytes/bytearray (generalized from bytearray-only so plain
        # `bytes` buffers also work), or an already-serialized string.
        if isinstance(json_data, dict):
            self.json_str = json.dumps(json_data)
        elif isinstance(json_data, (bytes, bytearray)):
            self.json_str = json_data.decode()
        else:
            self.json_str = json_data
        # Unique DOM element id so multiple renderings don't collide.
        self.uuid = str(uuid.uuid4())
    def _ipython_display_(self):
        # Emit a placeholder <div>, then JS that renders the JSON into it.
        display_html('<div id="{}" style="height:100%; width:100%; background-color: LightCyan"></div>'.format(self.uuid), raw=True)
        display_javascript("""
        require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() {
        document.getElementById('%s').appendChild(renderjson(%s))
        });
        """ % (self.uuid, self.json_str), raw=True)
# ### System path
#
# Update system path.
# Make the Senzing Python SDK importable: the SDK modules live under
# <SENZING_G2_DIR>/python, with /opt/senzing/g2 as the default root.
senzing_g2_dir = os.environ.get("SENZING_G2_DIR", "/opt/senzing/g2")
python_path = senzing_g2_dir + "/python"
sys.path.append(python_path)
# ### Initialize variables
#
# Create variables used for G2Engine.
# +
# Name reported for this G2Engine instance.
module_name = 'pyG2EngineForAddRecord'

# Resolve installation paths, falling back to the standard Senzing layout.
_env = os.environ.get
config_path = _env("SENZING_ETC_DIR", "/etc/opt/senzing")
support_path = _env("SENZING_DATA_VERSION_DIR", "/opt/senzing/data")
resource_path = "{0}/resources".format(_env("SENZING_G2_DIR", "/opt/senzing/g2"))
sql_connection = _env(
    "SENZING_SQL_CONNECTION", "sqlite3://na:na@/var/opt/senzing/sqlite/G2C.db")
verbose_logging = False

# Assemble the engine configuration and serialize it to the JSON string
# expected by G2Engine.initV2().
senzing_config_dictionary = {
    "PIPELINE": {
        "CONFIGPATH": config_path,
        "SUPPORTPATH": support_path,
        "RESOURCEPATH": resource_path,
    },
    "SQL": {
        "CONNECTION": sql_connection,
    },
}
senzing_config_json = json.dumps(senzing_config_dictionary)
# -
# ## G2Engine
from G2Engine import G2Engine
import G2Exception
# ### G2Engine initialization
#
# To start using Senzing G2Engine, create and initialize an instance.
# This should be done once per process.
# The `initV2()` method accepts the following parameters:
#
# - **module_name:** A short name given to this instance of the G2Engine
# object.
# - **senzing_config_json:** A JSON string containing configuration parameters.
# - **verbose_logging:** A boolean which enables diagnostic logging.
# - **config_id:** (optional) The identifier value for the engine configuration
# can be returned here.
#
# Calling this function will return "0" upon success.
# +
# Create and initialize the engine once per process; initV2 returns 0
# on success.
g2_engine = G2Engine()
return_code = g2_engine.initV2(
    module_name,
    senzing_config_json,
    verbose_logging)
print("Return Code: {0}".format(return_code))
# -
# ### initWithConfigIDV2
#
# Alternatively `initWithConfigIDV2()` can be used to specify a configuration.
# For more information, see
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#engine).
# ### reinitV2
#
# The `reinitV2()` function may be used to reinitialize the engine
# using a specified initConfigID. See
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#engine).
# ### primeEngine
#
# The `primeEngine()` method may optionally be called to pre-initialize
# some of the heavier weight internal resources of the G2 engine.
# Optionally pre-initialize heavyweight engine resources; returns 0 on success.
return_code = g2_engine.primeEngine()
print("Return Code: {0}".format(return_code))
# ### getActiveConfigID
#
# Call `getActiveConfigID()` to return an identifier for the loaded
# Senzing engine configuration.
# The call will assign a long integer to a user-designated variable
# -- the function itself will return "0" upon success.
# The `getActiveConfigID()` method accepts one parameter as input:
#
# - **configuration_id:** The identifier value for the engine configuration.
# The result of function call is returned here
# +
# The engine writes the active configuration identifier into the bytearray.
configuration_id = bytearray()
return_code = g2_engine.getActiveConfigID(configuration_id)
print("Return code: {0}\nConfiguration id: {1}".format(
    return_code,
    configuration_id.decode()))
# -
# ### exportConfig
#
# Call `exportConfig()` to retrieve your Senzing engine's configuration.
# The call will assign a JSON document to a user-designated buffer,
# containing all relevant configuration information
# -- the function itself will return "0" upon success.
# The exportConfig function accepts the following parameters as input:
#
# - **response_bytearray:** The memory buffer to retrieve the JSON
# configuration document
# - **config_id_bytearray:** The identifier value for the engine configuration
# can be returned here.
# +
# Retrieve the engine's JSON configuration and its identifier.
response_bytearray = bytearray()
config_id_bytearray = bytearray()
return_code = g2_engine.exportConfig(response_bytearray, config_id_bytearray)
print("Return Code: {0}\nConfiguration ID: {1}".format(
    return_code,
    config_id_bytearray.decode()))
RenderJSON(response_bytearray)
# -
# ### stats
#
# Call `stats()` to retrieve workload statistics for the current process.
# These statistics will automatically reset after retrieval.
#
# - **response_bytearray:** A memory buffer for returning the response
# document. If an error occurred, an error response is stored here.
# +
# Workload statistics for this process; counters reset after retrieval.
response_bytearray = bytearray()
return_code = g2_engine.stats(response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### getRepositoryLastModifiedTime
#
# Call `getRepositoryLastModifiedTime()` to obtain the last modified time of
# the Senzing repository, measured in the number of seconds between the last
# modified time and January 1, 1970 12:00am GMT (epoch time).
# The call will assign a long integer to a user-designated buffer
# -- the function itself will return "0" upon success.
# The getRepositoryLastModifiedTime() method accepts one parameter as input:
#
# - **last_modified_unixtime:** The last modified time. The result of function
# call is returned here
#
# +
last_modified_timestamp = bytearray()
return_code = g2_engine.getRepositoryLastModifiedTime(last_modified_timestamp)
# Human readable output.
from datetime import datetime
# The engine reports milliseconds since the epoch; convert to whole seconds
# for datetime.fromtimestamp.
last_modified_unixtime = int(int(last_modified_timestamp.decode()) / 1000)
last_modified_datetime = datetime.fromtimestamp(last_modified_unixtime)
print("Return Code: {0}\nLast modified timestamp: {1}\nLast modified time: {2}"
      .format(
          return_code,
          last_modified_timestamp.decode(),
          last_modified_datetime))
# -
# ## Insert
# ### Insert parameters
#
# The following variables are used as parameters to the Senzing API.
# Documentation for `g2_engine_flags` values is at
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#engine-control-flags)
# +
# Seven test records, all in the "TEST" data source, with record IDs "1"-"7".
datasource_code_1 = "TEST"
record_id_1 = "1"
datasource_code_2 = "TEST"
record_id_2 = "2"
datasource_code_3 = "TEST"
record_id_3 = "3"
datasource_code_4 = "TEST"
record_id_4 = "4"
datasource_code_5 = "TEST"
record_id_5 = "5"
datasource_code_6 = "TEST"
record_id_6 = "6"
datasource_code_7 = "TEST"
record_id_7 = "7"
# load_id of None makes the engine default it to the data source name.
load_id = None
g2_engine_flags = G2Engine.G2_EXPORT_DEFAULT_FLAGS
# -
# Initial data.
data = {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Smith",
        "NAME_FIRST": "John",
        "NAME_MIDDLE": "M"
    }],
    "PASSPORT_NUMBER": "PP11111",
    "PASSPORT_COUNTRY": "US",
    "DRIVERS_LICENSE_NUMBER": "DL11111",
    "SSN_NUMBER": "111-11-1111"
}
data_as_json = json.dumps(data)
# ### addRecord
#
# Once the Senzing engine is initialized, use `addRecord()` to load a record
# into the Senzing repository
# -- `addRecord()` can be called as many times as desired and from multiple
# threads at the same time.
# The `addRecord()` function returns "0" upon success, and accepts four
# parameters as input:
#
# - **datasource_code:** The name of the data source the record
# is associated with.
# This value is configurable to the system
# - **record_id:** The record ID, used to identify distinct records
# - **data_as_json:** A JSON document with the attribute data for the record
# - **load_id:** The observation load ID for the record;
# value can be null and will default to data_source
#
# +
# Load record "1" into the repository; returns 0 on success.
return_code = g2_engine.addRecord(
    datasource_code_1,
    record_id_1,
    data_as_json,
    load_id)
print("Return Code: {0}".format(return_code))
# -
# ### addRecordWithReturnedRecordID
#
# Alternatively `addRecordWithReturnedRecordID()` can be used to add a record.
# For more information, see
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#data-manipulation).
# ### addRecordWithInfo
#
# Use if you would like to know what resolved entities were modified when
# adding the new record.
# It behaves identically to addRecord(),
# but returns a json document containing the IDs of the affected entities.
# It accepts the following parameters:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The record ID, used to identify distinct records
# - **data_as_json:** A JSON document with the attribute data for the record
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here
# - **load_id:** The observation load ID for the record;
# value can be null and will default to data_source
# - **g2_engine_flags:** Control flags for specifying what data about the
# entity to retrieve
#
# +
# Same as addRecord, but also returns the IDs of affected entities.
response_bytearray = bytearray()
return_code = g2_engine.addRecordWithInfo(
    datasource_code_1,
    record_id_1,
    data_as_json,
    response_bytearray,
    load_id,
    g2_engine_flags)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ## Search
# ### Record search
# #### getRecordV2
#
# Use `getRecordV2()` to retrieve a single record from the data repository;
# the record is assigned in JSON form to a user-designated buffer,
# and the function itself returns "0" upon success.
# Once the Senzing engine is initialized,
# `getRecordV2()` can be called as many times as desired and from multiple
# threads at the same time.
# The `getRecordV2()` function accepts the following parameters as input:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The record ID, used to identify the record for retrieval
# - **g2_engine_flags:** Control flags for specifying what data about the
# record to retrieve.
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# +
# Fetch the raw record back as JSON, filtered by the control flags.
response_bytearray = bytearray()
return_code = g2_engine.getRecordV2(
    datasource_code_1,
    record_id_1,
    g2_engine_flags,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# The function `getRecordV2()` is an improved version of `getRecord()`
# that also allows you to use control flags.
# The `getRecord()` function has been deprecated.
# ### Entity Search
# #### getEntityByRecordIDV2
#
# Entity searching is a key component for interactive use of Entity Resolution
# intelligence.
# The core Senzing engine provides real-time search capabilities that are
# easily accessed via the Senzing API.
# Senzing offers methods for entity searching, all of which can be called
# as many times as desired and from multiple threads at the same time
# (and all of which return "0" upon success).
#
# Use `getEntityByRecordIDV2()` to retrieve entity data based on the ID of a
# resolved identity.
# This function accepts the following parameters as input:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The numeric ID of a resolved entity
# - **g2_engine_flags:** Control flags for specifying what data about the
# entity to retrieve.
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# +
response_bytearray = bytearray()
return_code = g2_engine.getEntityByRecordIDV2(
    datasource_code_1,
    record_id_1,
    g2_engine_flags,
    response_bytearray)
# Keep the resolved entity id for the entity-id based lookups below.
response_dictionary = json.loads(response_bytearray)
entity_id_1 = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### getEntityByEntityIDV2
#
# Entity searching is a key component for interactive use of Entity Resolution
# intelligence.
# The core Senzing engine provides real-time search capabilities that are
# easily accessed via the Senzing API.
# Senzing offers methods for entity searching, all of which can be called as
# many times
# as desired and from multiple threads at the same time (and all of which
# return "0" upon success).
#
# Use `getEntityByEntityIDV2()` to retrieve entity data based on the ID of a
# resolved identity.
# This function accepts the following parameters as input:
#
# - **entity_id:** The numeric ID of a resolved entity
# - **g2_engine_flags:** Control flags for specifying what data about the
# entity to retrieve.
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# +
# Look the same entity up again, this time by its numeric entity ID.
response_bytearray = bytearray()
return_code = g2_engine.getEntityByEntityIDV2(
    entity_id_1,
    g2_engine_flags,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### Search By Attributes
# #### searchByAttributes
#
# Entity searching is a key component for interactive use of Entity Resolution
# intelligence.
# The core Senzing engine provides real-time search capabilities that are
# easily accessed via the Senzing API.
# Senzing offers a method for entity searching by attributes,
# which can be called as many times as desired and from multiple threads at the
# same time
# (and all of which return "0" upon success).
#
# Use `searchByAttributes()` to retrieve entity data based on a user-specified
# set of entity attributes.
# This function accepts the following parameters as input:
#
# - **data_as_json:** A JSON document with the attribute data to search for.
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# +
# Search using the same attribute JSON that was loaded as record "1".
response_bytearray = bytearray()
return_code = g2_engine.searchByAttributes(data_as_json, response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### searchByAttributesV2
#
# This function is similar but preferable to the searchByAttributes() function.
# This function has improved functionality and a better standardized output
# structure.
#
# Use `searchByAttributesV2()` to retrieve entity data based on
# a user-specified set of entity attributes.
# This function accepts the following parameters as input:
#
# - **data_as_json:** A JSON document with the attribute data to search for.
# - **g2_engine_flags:** Operational flags
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# +
# V2 variant: identical search, but accepts control flags.
response_bytearray = bytearray()
return_code = g2_engine.searchByAttributesV2(
    data_as_json,
    g2_engine_flags,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### Finding Paths
#
# The `findPathByEntityID()` and `findPathByRecordID()` functions
# can be used to find single relationship paths between two entities.
# Paths are found using known relationships with other entities.
#
# Entities can be searched for by either Entity ID or by Record ID,
# depending on which function is chosen.
#
# These functions have the following parameters:
#
# - **entity_id_2:** The entity ID for the starting entity of the search path
# - **entity_id_3:** The entity ID for the ending entity of the search path
# - **datasource_code_2:** The data source for the starting entity of the
# search path
# - **datasource_code_3:** The data source for the ending entity of the search
# path
# - **record_id_2:** The record ID for the starting entity of the search path
# - **record_id_3:** The record ID for the ending entity of the search path
# - **max_degree:** The number of relationship degrees to search
#
# The functions return a JSON document that identifies the path between the
# entities,
# and the information on the entities in question.
# The document contains a section called "ENTITY_PATHS" which gives
# the path from one entity to the other.
# Example:
#
# ```JSON
# {
# "START_ENTITY_ID": 10,
# "END_ENTITY_ID": 13,
# "ENTITIES": [10, 11, 12, 13]
# }
# ```
#
# If no path was found, then the value of ENTITIES will be an empty list.
#
# The response document also contains a separate ENTITIES section,
# with the full information about the resolved entities along that path.
# First you will need to create some records so that you have some that you can
# compare.
# Can you see what is the same between this record and the previous one?
# +
# Record "2": shares the SSN with record "1" so the records relate.
data = {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Miller",
        "NAME_FIRST": "Max",
        "NAME_MIDDLE": "W"
    }],
    "SSN_NUMBER": "111-11-1111"
}
data_as_json = json.dumps(data)
return_code = g2_engine.replaceRecord(
    datasource_code_2,
    record_id_2,
    data_as_json,
    None)
print("Return Code: {0}".format(return_code))
# -
# Replace values for Record #3
# +
# Record "3": same surname and SSN again, linking it into the same group.
data = {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Miller",
        "NAME_FIRST": "Mildred"
    }],
    "SSN_NUMBER": "111-11-1111"
}
data_as_json = json.dumps(data)
return_code = g2_engine.replaceRecord(
    datasource_code_3,
    record_id_3,
    data_as_json,
    None)
print("Return Code: {0}".format(return_code))
# -
# Locate "entity identifier" for Record #1
# +
# NOTE(review): unlike getRecordV2 above, the non-V2 getEntityByRecordID
# takes no flags argument here.
response_bytearray = bytearray()
return_code = g2_engine.getEntityByRecordID(
    datasource_code_1,
    record_id_1,
    response_bytearray)
response_dictionary = json.loads(response_bytearray)
entity_id_1 = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
print("Return Code: {0}\nEntity ID: {1}".format(return_code, entity_id_1))
# -
# Locate "entity identifier" for Record #2
# +
response_bytearray = bytearray()
return_code = g2_engine.getEntityByRecordID(
    datasource_code_2,
    record_id_2,
    response_bytearray)
response_dictionary = json.loads(response_bytearray)
entity_id_2 = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
print("Return Code: {0}\nEntity ID: {1}".format(return_code, entity_id_2))
RenderJSON(response_bytearray)
# -
# Locate "entity identifier" for Record #3
# +
response_bytearray = bytearray()
return_code = g2_engine.getEntityByRecordID(
    datasource_code_3,
    record_id_3,
    response_bytearray)
response_dictionary = json.loads(response_bytearray)
entity_id_3 = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
print("Return Code: {0}\nEntity ID: {1}".format(return_code, entity_id_3))
RenderJSON(response_bytearray)
# -
# #### findPathByEntityID
# +
# Define search variables.
max_degree = 3
# Find the path by entity ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathByEntityID(
    entity_id_2,
    entity_id_3,
    max_degree,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findPathByEntityIDV2
# The function `findPathByEntityIDV2()` is an improved version of
# `findPathByEntityID()` that also allows you to use control flags.
# +
# Define search variables.
max_degree = 3
# Find the path by entity ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathByEntityIDV2(
    entity_id_2,
    entity_id_3,
    max_degree,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findPathByRecordID
# +
# Define search variables.
max_degree = 3
# Find the path by record ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathByRecordID(
    datasource_code_2, record_id_2,
    datasource_code_3, record_id_3,
    max_degree,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findPathByRecordIDV2
#
# The function `findPathByRecordIDV2()` is an improved version of
# `findPathByRecordID()` that also allows you to use control flags.
# +
# Define search variables.
max_degree = 3
# Find the path by record ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathByRecordIDV2(
    datasource_code_2, record_id_2,
    datasource_code_3, record_id_3,
    max_degree,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### Finding Paths with Exclusions
#
# The `findPathExcludingByEntityID()` and `findPathExcludingByRecordID()`
# functions can be used to find single relationship paths between two
# entities.
# Paths are found using known relationships with other entities.
# In addition, it will find paths that exclude certain entities from being on
# the path.
#
# Entities can be searched for by either Entity ID or by Record ID,
# depending on which function is chosen.
# Additionally, entities to be excluded can also be specified by either Entity
# ID or by Record ID.
#
# When excluding entities, the user may choose to either (a) strictly exclude
# the entities,
# or (b) prefer to exclude the entities, but still include them if no other
# path is found.
# By default, entities will be strictly excluded.
# A "preferred exclude" may be done by specifying the
# `G2_FIND_PATH_PREFER_EXCLUDE` control flag.
#
# These functions have the following parameters:
#
# - **entity_id_2:** The entity ID for the starting entity of the search path
# - **entity_id_3:** The entity ID for the ending entity of the search path
# - **datasource_code_2:** The data source for the starting entity of the
# search path
# - **datasource_code_3:** The data source for the ending entity of the search
# path
# - **record_id_2:** The record ID for the starting entity of the search path
# - **record_id_3:** The record ID for the ending entity of the search path
# - **max_degree:** The number of relationship degrees to search
# - **excluded_entities_as_json:** Entities that should be avoided on the path
# (JSON document)
# - **g2_engine_flags:** Operational flags
# #### findPathExcludingByEntityID
# +
# Define search variables.
max_degree = 4
# Exclude entity 1 from appearing on the discovered path.
excluded_entities = {
    "ENTITIES": [{
        "ENTITY_ID": entity_id_1
    }]}
excluded_entities_as_json = json.dumps(excluded_entities)
# Find the path by entity ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathExcludingByEntityID(
    entity_id_2,
    entity_id_3,
    max_degree,
    excluded_entities_as_json,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findPathExcludingByRecordID
# +
# Define search variables.
# NOTE(review): `max_degree` (4) is reused from the previous cell.
excluded_records = {
    "RECORDS": [{
        "RECORD_ID": record_id_1,
        "DATA_SOURCE": datasource_code_1
    }]}
excluded_records_as_json = json.dumps(excluded_records)
# Find the path by record ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathExcludingByRecordID(
    datasource_code_2, record_id_2,
    datasource_code_3, record_id_3,
    max_degree,
    excluded_records_as_json,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### Finding Paths with Required Sources
#
# The `findPathIncludingSourceByEntityID()` and
# `findPathIncludingSourceByRecordID()` functions
# can be used to find single relationship paths between two entities.
# In addition, one of the entities along the path must include a specified data
# source.
#
# Entities can be searched for by either Entity ID or by Record ID,
# depending on which function is chosen.
# The required data source or sources are specified by a json document list.
#
# Specific entities may also be excluded, using the same methodology as the
# `findPathExcludingByEntityID()` and `findPathExcludingByRecordID()`
# functions use.
#
# These functions have the following parameters:
#
# - **entity_id_2:** The entity ID for the starting entity of the search path
# - **entity_id_3:** The entity ID for the ending entity of the search path
# - **datasource_code_2:** The data source for the starting entity of the
# search path
# - **datasource_code_3:** The data source for the ending entity of the search
# path
# - **record_id_2:** The record ID for the starting entity of the search
# path
# - **record_id_3:** The record ID for the ending entity of the search path
# - **max_degree:** The number of relationship degrees to search
# - **excluded_entities_as_json:** Entities that should be avoided on the path
# (JSON document)
# - **required_dsrcs_as_json:** Data sources, at least one of which must be
#   included on the path (JSON document)
# - **g2_engine_flags:** Operational flags
# #### findPathIncludingSourceByEntityID
# +
# Define search variables.
max_degree = 4  # maximum number of relationship degrees to search
# Entities to avoid on the path (JSON document keyed by "ENTITIES").
excluded_entities = {
    "ENTITIES": [{
        "ENTITY_ID": entity_id_1
    }]}
excluded_entities_as_json = json.dumps(excluded_entities)
# Data sources, one of which must appear on the path (keyed by "DATA_SOURCES").
required_dsrcs = {
    "DATA_SOURCES": [
        datasource_code_1
    ]}
# BUG FIX: this previously serialized `excluded_entities` again, so the
# required data-source list was never actually sent to the engine.
required_dsrcs_as_json = json.dumps(required_dsrcs)
# Find the path by entity ID.
response_bytearray = bytearray([])
return_code = g2_engine.findPathIncludingSourceByEntityID(
    entity_id_2,
    entity_id_3,
    max_degree,
    excluded_entities_as_json,
    required_dsrcs_as_json,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findPathIncludingSourceByRecordID
# +
# Define search variables.
# Records to avoid on the path (JSON document keyed by "RECORDS").
excluded_records = {
    "RECORDS": [{
        "RECORD_ID": record_id_1,
        "DATA_SOURCE": datasource_code_1
    }]}
excluded_records_as_json = json.dumps(excluded_records)
# Find the path by record ID; `max_degree` and `required_dsrcs_as_json`
# are reused from the preceding findPathIncludingSourceByEntityID cell.
response_bytearray = bytearray([])
return_code = g2_engine.findPathIncludingSourceByRecordID(
    datasource_code_2, record_id_2,
    datasource_code_3, record_id_3,
    max_degree,
    excluded_records_as_json,
    required_dsrcs_as_json,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### Finding Networks
#
# The `findNetworkByEntityID()` and `findNetworkByRecordID()` functions
# can be used to find all entities surrounding a requested set of entities.
# This includes the requested entities, paths between them, and relations to
# other nearby entities.
#
# Entities can be searched for by either Entity ID or by Record ID,
# depending on which function is chosen.
#
# These functions have the following parameters:
#
# - **entity_list_as_json:** A list of entities, specified by Entity ID
# (JSON document)
# - **record_list_as_json:** A list of entities, specified by Record ID
# (JSON document)
# - **max_degree:** The maximum number of degrees in paths between search
# entities
# - **buildout_degree:** The number of degrees of relationships to show around
# each search entity
# - **max_entities:** The maximum number of entities to return in the
# discovered network
#
# They also have various arguments used to return response documents
#
# The functions return a JSON document that identifies the path between
# each set of search entities (if the path exists), and the information on the
# entities in question (search entities, path entities, and build-out entities).
# #### findNetworkByEntityID
# +
# Define search variables.
# The set of entities to build the network around (keyed by "ENTITIES").
entity_list = {
    "ENTITIES": [{
        "ENTITY_ID": entity_id_1
    }, {
        "ENTITY_ID": entity_id_2
    }, {
        "ENTITY_ID": entity_id_3
    }]}
entity_list_as_json = json.dumps(entity_list)
max_degree = 2       # max path degrees between the search entities
buildout_degree = 1  # relationship degrees to show around each entity
max_entities = 12    # cap on entities returned in the network
# Find the network by entity ID.
response_bytearray = bytearray()
return_code = g2_engine.findNetworkByEntityID(
    entity_list_as_json,
    max_degree,
    buildout_degree,
    max_entities,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findNetworkByEntityIDV2
#
# The function `findNetworkByEntityIDV2()` is an improved version of
# `findNetworkByEntityID()` that also allows you to use control flags.
# +
# Define search variables.
entity_list = {
    "ENTITIES": [{
        "ENTITY_ID": entity_id_1
    }, {
        "ENTITY_ID": entity_id_2
    }, {
        "ENTITY_ID": entity_id_3
    }]}
entity_list_as_json = json.dumps(entity_list)
max_degree = 2
buildout_degree = 1
max_entities = 12
# Find the network by entity ID (V2 adds the control-flags argument).
response_bytearray = bytearray()
return_code = g2_engine.findNetworkByEntityIDV2(
    entity_list_as_json,
    max_degree,
    buildout_degree,
    max_entities,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findNetworkByRecordID
# +
# Define search variables.
# Same three entities as above, identified by (record ID, data source).
record_list = {
    "RECORDS": [{
        "RECORD_ID": record_id_1,
        "DATA_SOURCE": datasource_code_1
    }, {
        "RECORD_ID": record_id_2,
        "DATA_SOURCE": datasource_code_2
    }, {
        "RECORD_ID": record_id_3,
        "DATA_SOURCE": datasource_code_3
    }]}
record_list_as_json = json.dumps(record_list)
# Find the network by record ID; reuses max_degree / buildout_degree /
# max_entities from the preceding cell.
response_bytearray = bytearray()
return_code = g2_engine.findNetworkByRecordID(
    record_list_as_json,
    max_degree,
    buildout_degree,
    max_entities,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# #### findNetworkByRecordIDV2
#
# The function `findNetworkByRecordIDV2()` is an improved version of
# `findNetworkByRecordID()` that also allows you to use control flags.
# +
# Define search variables.
record_list = {
    "RECORDS": [{
        "RECORD_ID": record_id_1,
        "DATA_SOURCE": datasource_code_1
    }, {
        "RECORD_ID": record_id_2,
        "DATA_SOURCE": datasource_code_2
    }, {
        "RECORD_ID": record_id_3,
        "DATA_SOURCE": datasource_code_3
    }]}
record_list_as_json = json.dumps(record_list)
# Find the network by record ID (V2 adds the control-flags argument).
response_bytearray = bytearray()
return_code = g2_engine.findNetworkByRecordIDV2(
    record_list_as_json,
    max_degree,
    buildout_degree,
    max_entities,
    g2_engine_flags,
    response_bytearray)
# Print the results.
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ## Connection Details
# The `whyEntityByEntityID()` and `whyEntityByRecordID()` functions can be used
# to determine why records belong to their resolved entities.
# These functions will compare the record data within an entity against the
# rest of the entity data, and show why they are connected.
# This is calculated based on the features that record data represents.
#
# Records can be chosen by either Record ID or by Entity ID,
# depending on which function is chosen.
# If a single record ID is used,
# then comparison results for that single record will be generated, as part of
# its entity.
# If an Entity ID is used,
# then comparison results will be generated for every record within that
# entity.
#
# These functions have the following parameters:
#
# - **entity_id:** The entity ID for the entity to be analyzed
# - **datasource_code:** The data source for the record to be analyzed
# - **record_id:** The record ID for the record to be analyzed
# - **g2_engine_flags:** Control flags for outputting entities
#
# They also have various arguments used to return response documents.
#
# The functions return a JSON document that gives the results of the record
# analysis.
# The document contains a section called "WHY_RESULTS",
# which shows how specific records relate to the rest of the entity.
# It has a "WHY_KEY", which is similar to a match key, in defining the relevant
# connected data.
# It shows candidate keys for features that initially cause the records
# to be analyzed for a relationship,
# plus a series of feature scores that show how similar the feature data was.
#
# The response document also contains a separate ENTITIES section,
# with the full information about the resolved entity.
# (Note: When working with this entity data,
# Senzing recommends using the flags `G2_ENTITY_SHOW_FEATURES_EXPRESSED`
# and `G2_ENTITY_SHOW_FEATURES_STATS`.
# This will provide detailed feature data that is not included by default,
# but is useful for understanding the WHY_RESULTS data.)
#
# The functions `whyEntityByEntityIDV2()` and `whyEntityByRecordIDV2()` are
# enhanced versions of `whyEntityByEntityID()` and `whyEntityByRecordID()`
# that also allow you to use control flags.
# The `whyEntityByEntityID()` and `whyEntityByRecordID()` functions work in the
# same way, but use the default flag value `G2_WHY_ENTITY_DEFAULT_FLAGS`.
#
# For more information, see
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#connection-details)
# ### whyEntityByRecordID
# +
# Analyze why a record belongs to its entity; the non-V2 form uses the
# default flags (G2_WHY_ENTITY_DEFAULT_FLAGS).
response_bytearray = bytearray()
return_code = g2_engine.whyEntityByRecordID(
    datasource_code_1,
    record_id_1,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### whyEntityByRecordIDV2
# +
# V2 variant: same analysis, but accepts explicit control flags.
response_bytearray = bytearray()
return_code = g2_engine.whyEntityByRecordIDV2(
    datasource_code_1,
    record_id_1,
    g2_engine_flags,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### whyEntityByEntityID
# +
# Same analysis, selecting the entity by entity ID — results are produced
# for every record in the entity.
response_bytearray = bytearray()
return_code = g2_engine.whyEntityByEntityID(
    entity_id_1,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### whyEntityByEntityIDV2
# +
response_bytearray = bytearray()
return_code = g2_engine.whyEntityByEntityIDV2(
    entity_id_1,
    g2_engine_flags,
    response_bytearray)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ## Replace
# ### replaceRecord
#
# Use the `replaceRecord()` function to update or replace a record in the data
# repository.
# If the record doesn't exist, a new record is added to the data repository.
# Like the above functions, `replaceRecord()` returns "0" upon success,
# and it can be called as many times as desired and from multiple threads at
# the same time.
# The `replaceRecord()` function accepts four parameters as input:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system
# - **record_id:** The record ID, used to identify distinct records
# - **data_as_json:** A JSON document with the attribute data for the record
# - **load_id:** The observation load ID for the record;
# value can be null and will default to datasource_code
# +
# Attribute data for the record to upsert.
data = {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Miller",
        "NAME_FIRST": "John",
        "NAME_MIDDLE": "M"
    }],
    "PASSPORT_NUMBER": "PP11111",
    "PASSPORT_COUNTRY": "US",
    "DRIVERS_LICENSE_NUMBER": "DL11111",
    "SSN_NUMBER": "111-11-1111"
}
data_as_json = json.dumps(data)
# Upsert: replaces the record, or adds it if it does not yet exist.
return_code = g2_engine.replaceRecord(
    datasource_code_1,
    record_id_1,
    data_as_json,
    load_id)
print("Return Code: {0}".format(return_code))
# -
# ### replaceRecordWithInfo
#
# `replaceRecordWithInfo()` is available if you would like to know what
# resolved entities were modified when replacing a record.
# It behaves identically to `replaceRecord()`,
# but also returns a json document containing the IDs of the affected entities.
# It accepts the following parameters:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The record ID, used to identify distinct records
# - **data_as_json:** A JSON document with the attribute data for the record
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
# - **load_id:** The observation load ID for the record;
# value can be null and will default to datasource_code
# +
# Attribute data for the record to upsert.
data = {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Jones",
        "NAME_FIRST": "John",
        "NAME_MIDDLE": "M"
    }],
    "PASSPORT_NUMBER": "PP11111",
    "PASSPORT_COUNTRY": "US",
    "DRIVERS_LICENSE_NUMBER": "DL11111",
    "SSN_NUMBER": "111-11-1111"
}
data_as_json = json.dumps(data)
# Same as replaceRecord(), but also fills `response_bytearray` with a JSON
# document listing the IDs of the entities affected by the change.
response_bytearray = bytearray()
return_code = g2_engine.replaceRecordWithInfo(
    datasource_code_1,
    record_id_1,
    data_as_json,
    response_bytearray,
    load_id)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ## Re-evaluate
# ### reevaluateRecord
# +
# Ask the engine to re-run entity resolution for one record.
return_code = g2_engine.reevaluateRecord(
    datasource_code_1,
    record_id_1,
    g2_engine_flags)
print("Return Code: {0}".format(return_code))
# -
# ### reevaluateRecordWithInfo
# +
# Variant that also returns a JSON document describing affected entities.
response_bytearray = bytearray()
return_code = g2_engine.reevaluateRecordWithInfo(
    datasource_code_1,
    record_id_1,
    response_bytearray,
    g2_engine_flags)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ### reevaluateEntity
# Find an entity.
# +
# Look up the record's resolved entity to obtain its entity ID.
response_bytearray = bytearray()
return_code = g2_engine.getEntityByRecordIDV2(
    datasource_code_1,
    record_id_1,
    g2_engine_flags,
    response_bytearray)
response_dictionary = json.loads(response_bytearray)
entity_id_1 = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# Re-evaluate the entity.
# +
return_code = g2_engine.reevaluateEntity(entity_id_1, g2_engine_flags)
print("Return Code: {0}".format(return_code))
# -
# ### reevaluateEntityWithInfo
# +
response_bytearray = bytearray()
return_code = g2_engine.reevaluateEntityWithInfo(
    entity_id_1,
    response_bytearray,
    g2_engine_flags)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# ## Reporting
#
# Exporting entity data from resolved entities is one of the core purposes of
# Senzing software.
# In just a few short steps, the Senzing engine allows users to export entity
# data in either JSON or CSV format.
# ### exportJSONEntityReport
#
# There are three steps to exporting resolved entity data from the G2Engine
# object in JSON format.
# First, use the `exportJSONEntityReport()` method to generate a long integer,
# referred to here as an `export_handle`.
# The `exportJSONEntityReport()` method accepts one parameter as input:
#
# - **g2_engine_flags**: An integer specifying which entity details should be
# included in the export. See the "Entity Export Flags" section for further
# details.
export_handle = g2_engine.exportJSONEntityReport(g2_engine_flags)
# ### fetchNext
#
# Second, use the `fetchNext()` method to read the exportHandle and export a
# row of JSON output containing the entity data for a single entity.
# Note that successive calls of `fetchNext()` will export successive rows of
# entity data.
# The `fetchNext()` method accepts the following parameters as input:
#
# - **export_handle:** A long integer from which resolved entity data may be
# read and exported.
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here.
#
# For more information, see
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#reporting).
# Fetch one entity row per call until the engine returns an empty buffer.
while True:
    response_bytearray = bytearray()
    g2_engine.fetchNext(export_handle, response_bytearray)
    if not response_bytearray:
        break
    # Round-trip through json to pretty-print each entity document.
    response_dictionary = json.loads(response_bytearray)
    response = json.dumps(response_dictionary, sort_keys=True, indent=4)
    print(response)
# ### closeExport
g2_engine.closeExport(export_handle)
# ### exportCSVEntityReport
#
# There are three steps to exporting resolved entity data from the G2Engine
# object in CSV format.
# First, use the `exportCSVEntityReportV2()` method to generate a long integer,
# referred to here as an 'export_handle'.
#
# The `exportCSVEntityReportV2()` method accepts these parameter as input:
#
# - **csv_column_list:** A comma-separated list of column names for the CSV
# export. (These are listed a little further down.)
# - **g2_engine_flags:** An integer specifying which entity details should be
# included in the export.
# See the "Entity Export Flags" section for further details.
#
# Second, use the `fetchNext()` method to read the exportHandle and export a
# row of CSV output containing the entity data for a single entity.
# Note that the first call of `fetchNext()` will yield a header row,
# and that successive calls of `fetchNext()` will export successive rows of
# entity data.
# The `fetchNext()` method accepts the following parameters as input:
#
# - **export_handle:** A long integer from which resolved entity data may be
# read and exported
# - **response_bytearray:** A memory buffer for returning the response
# document; if an error occurred, an error response is stored here
#
# For more information, see
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#reporting).
# +
# NOTE(review): the markdown above describes exportCSVEntityReportV2() with a
# csv_column_list parameter, but this code calls the V1 exportCSVEntityReport()
# without it — confirm which API version is intended.
export_handle = g2_engine.exportCSVEntityReport(g2_engine_flags)
# First fetch yields the CSV header row; loop ends on an empty buffer.
while True:
    response_bytearray = bytearray()
    g2_engine.fetchNext(export_handle, response_bytearray)
    if not response_bytearray:
        break
    print(response_bytearray.decode())
g2_engine.closeExport(export_handle)
# -
# ## Redo Processing
#
# Redo records are automatically created by Senzing when certain conditions
# occur where it believes more processing may be needed.
# Some examples:
#
# - A value becomes generic and previous decisions may need to be revisited
# - Clean up after some record deletes
# - Detected related entities were being changed at the same time
# - A table inconsistency exists, potentially after a non-graceful shutdown
#
# First we will need to have a total of 6 data sources so let's add 4 more.
# Create Record and Entity #6
# +
def _upsert_and_resolve(datasource_code, record_id, data):
    """Upsert one record, then return the entity ID it resolved to.

    Factors out the replaceRecord()/getEntityByRecordID() sequence that was
    previously copy-pasted four times below. Prints each call's return code
    and renders the resolved-entity JSON, exactly as the inline cells did.
    """
    data_as_json = json.dumps(data)
    return_code = g2_engine.replaceRecord(
        datasource_code,
        record_id,
        data_as_json,
        None)
    print("Return Code: {0}".format(return_code))
    response_bytearray = bytearray()
    return_code = g2_engine.getEntityByRecordID(
        datasource_code,
        record_id,
        response_bytearray)
    response_dictionary = json.loads(response_bytearray)
    entity_id = response_dictionary["RESOLVED_ENTITY"]["ENTITY_ID"]
    print("Return Code: {0}\nEntity ID: {1}".format(return_code, entity_id))
    RenderJSON(response_bytearray)
    return entity_id


# Create Record and Entity #6
entity_id_6 = _upsert_and_resolve(datasource_code_4, record_id_4, {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Owens",
        "NAME_FIRST": "Lily"
    }],
    "SSN_NUMBER": "111-11-1111"
})
# Create Record and Entity #7
entity_id_7 = _upsert_and_resolve(datasource_code_5, record_id_5, {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Bauler",
        "NAME_FIRST": "August",
        "NAME_MIDDLE": "E"
    }],
    "SSN_NUMBER": "111-11-1111"
})
# Create Record and Entity #8
entity_id_8 = _upsert_and_resolve(datasource_code_6, record_id_6, {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Barcy",
        "NAME_FIRST": "Brian",
        "NAME_MIDDLE": "H"
    }],
    "SSN_NUMBER": "111-11-1111"
})
# Create Record and Entity #9
entity_id_9 = _upsert_and_resolve(datasource_code_7, record_id_7, {
    "NAMES": [{
        "NAME_TYPE": "PRIMARY",
        "NAME_LAST": "Miller",
        "NAME_FIRST": "Jack",
        "NAME_MIDDLE": "H"
    }],
    "SSN_NUMBER": "111-11-1111"
})
# -
# ### countRedoRecords
#
# Once the Senzing engine is initialized, use `countRedoRecords()`
# to return the remaining internally queued maintenance records in the
# Senzing repository.
# `countRedoRecords()` takes no arguments and returns <0 for errors.
# +
# Count the internally queued maintenance (redo) records; <0 means error.
return_code = g2_engine.countRedoRecords()
print("Return Code: {0}".format(return_code))
# -
# ### getRedoRecord
#
# Once the Senzing engine is initialized,
# use `getRedoRecord()` to retrieve the next internally queued maintenance
# record into the Senzing repository
# -- `getRedoRecord()` can be called as many times as desired and from multiple
# threads at the same time but all threads are required to be in the same
# process.
# `getRedoRecord()` should not be called from multiple processes.
# Unlike `processRedoRecord()`, `getRedoRecord()` does not actually process the
# record.
# To process the record, you would use the G2Engine `process()` function.
# The `getRedoRecord()` function returns "0" upon success and an empty response
# if there is nothing to do.
#
# - **response_bytearray:** A memory buffer for returning the maintenance
# document (may be XML or JSON).
# The format is internal to Senzing.
# If empty it means there are no maintenance records to return.
# +
# Fetch (without processing) the next redo record; an empty buffer means
# there is nothing queued.
response_bytearray = bytearray()
return_code = g2_engine.getRedoRecord(response_bytearray)
print("Return Code: {0}".format(return_code))
# -
# ### processWithInfo
# `return_code` / `response_bytearray` come from the preceding
# getRedoRecord() call; only process if a redo record was fetched.
if (return_code == 0 and response_bytearray):
    process_response_bytearray = bytearray()
    process_return_code = g2_engine.processWithInfo(
        response_bytearray.decode(),
        process_response_bytearray)
    print("Return Code: {0}".format(process_return_code))
    RenderJSON(process_response_bytearray)
# ### process
# Same record processed via the plain process() call (no info document).
if (return_code == 0 and response_bytearray):
    g2_engine.process(response_bytearray.decode())
# ### processRedoRecord
#
# This processes the next redo record and returns it
# (If `processRedoRecord()` returns 0 and "response_bytearray" is blank,
# then there are no more redo records to process, and calling
# `countRedoRecords()` again will return 0.)
# It has the potential to create more redo records in certain situations.
#
# - **response_bytearray:** A buffer that returns a JSON object that summarizes
# the changes caused by processing the record.
# Also contains the recordID.
# +
# Process the next queued redo record; the buffer receives the record that
# was processed (empty buffer means the queue is drained).
response_bytearray = bytearray()
return_code = g2_engine.processRedoRecord(response_bytearray)
print("Return Code: {0}".format(return_code))
# Pretty-print XML.
xml_string = response_bytearray.decode()
if len(xml_string) > 0:
    import xml.dom.minidom
    # NOTE: `xml` rebinds the module name to the parsed document here;
    # harmless in this cell since the module is not used again afterward.
    xml = xml.dom.minidom.parseString(xml_string)
    xml_pretty_string = xml.toprettyxml()
    print(xml_pretty_string)
# -
# ### processRedoRecordWithInfo
#
# `processRedoRecordWithInfo()` is available if you would like to know what
# resolved entities were modified when processing a redo record.
# It behaves identically to `processRedoRecord()`,
# but also returns a json document containing the IDs of the affected entities.
# It accepts the following parameters:
#
# - **response_bytearray:** A buffer that returns the redo record that was
# processed, as with `processRedoRecord()`.
# - **info_bytearray:** A buffer that returns a JSON document containing the
# IDs of the entities affected by processing the record.
# +
# Process the next redo record; `info_bytearray` additionally receives a
# JSON document listing the entities affected by the processing.
response_bytearray = bytearray()
info_bytearray = bytearray()
return_code = g2_engine.processRedoRecordWithInfo(
    response_bytearray,
    info_bytearray)
print("Return Code: {0}".format(return_code))
# Pretty-print XML.
xml_string = response_bytearray.decode()
if len(xml_string) > 0:
    import xml.dom.minidom
    xml = xml.dom.minidom.parseString(xml_string)
    xml_pretty_string = xml.toprettyxml()
    print(xml_pretty_string)
# Pretty-print JSON
RenderJSON(info_bytearray)
# -
# ## Delete
# ### deleteRecord
#
# Use `deleteRecord()` to remove a record from the data repository
# (returns "0" upon success);
# `deleteRecord()` can be called as many times as desired and from multiple
# threads at the same time.
# The `deleteRecord()` function accepts three parameters as input:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The record ID, used to identify distinct records
# - **load_id:** The observation load ID for the record;
# value can be null and will default to dataSourceCode
# +
# Remove the record from the repository; returns 0 on success.
return_code = g2_engine.deleteRecord(datasource_code_1, record_id_1, load_id)
print("Return Code: {0}".format(return_code))
# -
# ### deleteRecordWithInfo
#
# `deleteRecordWithInfo()` behaves the same as `deleteRecord()`
# but also returns a json document containing the IDs of the affected entities.
# It accepts the following parameters:
#
# - **datasource_code:** The name of the data source the record is associated
# with. This value is configurable to the system.
# - **record_id:** The record ID, used to identify distinct records.
# - **response_bytearray:** A buffer that returns a JSON object that summarizes
# the changes caused by deleting the record.
# - **load_id:** The observation load ID for the record;
# value can be null and will default to dataSourceCode
# +
# Delete a record and receive a JSON document describing affected entities.
# NOTE(review): five arguments are passed here (including g2_engine_flags)
# while the parameter list documented above names only four — confirm
# against the installed SDK signature.
response_bytearray = bytearray()
return_code = g2_engine.deleteRecordWithInfo(
    datasource_code_2,
    record_id_2,
    response_bytearray,
    load_id,
    g2_engine_flags)
print("Return Code: {0}".format(return_code))
RenderJSON(response_bytearray)
# -
# Attempt to get the record again.
# It should error and give an output similar to "Unknown record".
# The record was deleted above, so this lookup is expected to raise an
# "Unknown record" style exception rather than succeed.
try:
    response_bytearray = bytearray()
    return_code = g2_engine.getRecord(
        datasource_code_1,
        record_id_1,
        response_bytearray)
    response_dictionary = json.loads(response_bytearray)
    response = json.dumps(response_dictionary, sort_keys=True, indent=4)
    print("Return Code: {0}\n{1}".format(return_code, response))
except G2Exception.G2ModuleGenericException as err:
    print("Exception: {0}".format(err))
# ## Cleanup
#
# To purge the G2 repository, use the aptly named `purgeRepository()` method.
# This will remove every record in your current repository.
# ### purgeRepository
# Remove every record from the current repository (irreversible).
g2_engine.purgeRepository()
# ### destroy
#
# Once all searching is done in a given process,
# call `destroy()` to uninitialize Senzing and clean up resources.
# You should always do this once at the end of each process. See
# [http://docs.senzing.com/?python](http://docs.senzing.com/?python#engine).
# +
return_code = g2_engine.destroy()
print("Return Code: {0}".format(return_code))
| notebooks/senzing-examples/python/senzing-G2Engine-reference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from pdpbox.pdp import pdp_isolate, pdp_plot, pdp_interact, pdp_interact_plot
from shap import TreeExplainer, initjs, force_plot
from sklearn.linear_model import LinearRegression, Ridge
from numpy.random import permutation
from sklearn.inspection import permutation_importance
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
import plotly.graph_objects as go
import chart_studio
DATA_PATH = './data/'
# +
###DATA WRANGLING/EDA
def wrangle(data):
    """Load and clean an Airbnb listings CSV for price modeling.

    Reads the CSV at *data* (indexed by the ``last_scraped`` date column),
    drops redundant / review / low-importance columns, converts the price
    string to a float, removes extreme price outliers, recodes 't'/'f'
    flags as 1/0, and fills or drops remaining missing values.

    Returns the cleaned pandas DataFrame.
    """
    df = pd.read_csv(data,
                     parse_dates=['last_scraped','host_since']
                     ).set_index('last_scraped')
    #Dropping columns with high similarity columns
    df.drop(columns=['id','host_id','host_url','host_location',
                     'host_about','host_response_rate','host_listings_count',
                     'host_neighbourhood','host_total_listings_count',
                     'calendar_last_scraped','calculated_host_listings_count',
                     'calculated_host_listings_count_entire_homes',
                     'calculated_host_listings_count_private_rooms',
                     'calculated_host_listings_count_shared_rooms',
                     'host_has_profile_pic','bathrooms','has_availability'], inplace=True)
    #Dropping columns about reviews and columns with low gini importance
    df.drop(columns=['review_scores_accuracy','review_scores_cleanliness',
                     'review_scores_checkin', 'review_scores_communication','review_scores_location',
                     'review_scores_value','reviews_per_month','minimum_nights','maximum_nights',
                     'availability_30','availability_60','availability_90',
                     'availability_365','number_of_reviews_ltm','number_of_reviews_l30d','minimum_minimum_nights', 'maximum_minimum_nights',
                     'minimum_maximum_nights', 'maximum_maximum_nights',
                     'minimum_nights_avg_ntm', 'maximum_nights_avg_ntm', 'longitude', 'latitude','neighbourhood'], inplace=True)
    #Converted target as object into float (e.g. "$1,234.00" -> 1234.0)
    df['price'] = df['price'].str.strip('$')
    df['price'] = df['price'].str.replace(',','').astype(float)
    #Dropped outliers: keep listings priced under 50,000
    df = df.loc[df['price'] < 50000]
    # Drop non-numeric columns with more than `numcard` distinct values
    # (high cardinality) and any column with <= 1 distinct value (constant).
    numcard = 50
    highcard = [col for col in df.select_dtypes(exclude='number')
                if df[col].nunique() > numcard]
    lowcard = [col for col in df
               if df[col].nunique() <= 1]
    df.drop(columns=highcard, inplace=True)
    df.drop(columns=lowcard, inplace=True)
    #Converted t and f object type into 1 and 0
    # NOTE(review): this replaces the values 't'/'f' anywhere in the frame,
    # not only in boolean columns — confirm no remaining text column can
    # hold a bare 't' or 'f' value.
    df.replace('t',1, inplace=True)
    df.replace('f',0, inplace=True)
    #Preprocessed NaN values (most-common response time; assume 1 bedroom)
    df['host_response_time'].fillna('within an hour',inplace=True)
    df['bedrooms'].fillna(1,inplace=True)
    #Dropping rows with NA values
    df = df.dropna(axis=0, subset=['bedrooms', 'beds'])
    return df
# +
# Load and clean the primary listings dataset.
df = wrangle(DATA_PATH+'listings.csv')
# -
# Load the Chicago dataset twice: cleaned (dfc) and raw (dfc_) for comparison.
dfc = wrangle(DATA_PATH+'chicago.csv')
dfc_ = pd.read_csv(DATA_PATH+'chicago.csv')
dfc_.info()
# NOTE(review): the result of this drop is discarded (no assignment and no
# inplace=True) — presumably it exists only for notebook display; confirm
# dfc is not expected to lose these columns downstream.
dfc.drop(columns=['bathrooms_text','number_of_reviews','review_scores_rating','instant_bookable'])
len(dfc['host_response_time'].value_counts())
df.info()
###SPLITTING DATA
target = 'price'
y = df[target]
X = df.drop(columns=target)
# Time-based split on the last_scraped index: rows before the cutoff date
# form the training set, the rest the test set.
cutoff = '2021-01-04'
mask = X.index < cutoff
X_train, y_train = X.loc[mask], y.loc[mask]
X_test, y_test = X.loc[~mask], y.loc[~mask]
###ESTABLISHING BASELINE
# Baseline model: predict the mean training price for every listing.
print('Mean AirBnB Price:', y_train.mean())
y_pred = [y_train.mean()] * len(y_train)
print('Baseline MAE:', mean_absolute_error(y_train, y_pred))
print('Baseline RMSE:', mean_squared_error(y_train, y_pred, squared=False))
# +
###BUILDING MODELS
# Each model is a pipeline: one-hot encode categoricals, impute missing
# values, then fit the regressor. Trailing semicolons suppress notebook output.
#Ridge Regressor
model_rr = make_pipeline(
    OneHotEncoder(),
    SimpleImputer(),
    Ridge(alpha=10)
)
model_rr.fit(X_train, y_train);
# +
#RandomForestRegressor model
model_rf = make_pipeline(
    OneHotEncoder(),
    SimpleImputer(),
    RandomForestRegressor(random_state=42)
)
model_rf.fit(X_train, y_train);
# +
#XGboostRegressor model
model_xgb = make_pipeline(
    OneHotEncoder(),
    SimpleImputer(),
    XGBRegressor(random_state=42,
                 n_jobs=-1,
                 verbose=True)
);
model_xgb.fit(X_train, y_train);
# +
###CHECKING METRICS
# For each model: MAE and RMSE on train/test, plus R^2.
# NOTE(review): Pipeline.score() for a regressor is R^2, so the
# "Accuracy" lines below repeat the R^2 values under a misleading label.
#Ridge model metrics
y_pred_train_rr = model_rr.predict(X_train)
y_pred_test_rr = model_rr.predict(X_test)
train_MAE_rr = mean_absolute_error(y_train, y_pred_train_rr)
test_MAE_rr = mean_absolute_error(y_test, y_pred_test_rr)
train_RMSE_rr = mean_squared_error(y_train, y_pred_train_rr, squared=False)
test_RMSE_rr = mean_squared_error(y_test, y_pred_test_rr, squared=False)
print('RIDGE REGRESSOR METRICS:')
print('Train MAE:',train_MAE_rr)
print('Test MAE:', test_MAE_rr)
print('Train RMSE:', train_RMSE_rr)
print('Test RMSE:', test_RMSE_rr)
print('Training R^2:', r2_score(y_train, model_rr.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_rr.predict(X_test)))
print('Training Accuracy:', model_rr.score(X_train, y_train))
print('Test Accuracy:', model_rr.score(X_test, y_test))
#RF Regressor metrics
y_pred_train_rf = model_rf.predict(X_train)
y_pred_test_rf = model_rf.predict(X_test)
train_MAE_rf = mean_absolute_error(y_train, y_pred_train_rf)
test_MAE_rf = mean_absolute_error(y_test, y_pred_test_rf)
train_RMSE_rf = mean_squared_error(y_train, y_pred_train_rf, squared=False)
test_RMSE_rf = mean_squared_error(y_test, y_pred_test_rf, squared=False)
print('')
print('RF REGRESSOR METRICS:')
print('Train MAE:',train_MAE_rf)
print('Test MAE:', test_MAE_rf)
print('Train RMSE:', train_RMSE_rf)
print('Test RMSE:', test_RMSE_rf)
print('Training R^2:', r2_score(y_train, model_rf.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_rf.predict(X_test)))
print('Training Accuracy:', model_rf.score(X_train, y_train))
print('Test Accuracy:', model_rf.score(X_test, y_test))
#XGB model metrics
y_pred_train_xgb = model_xgb.predict(X_train)
y_pred_test_xgb = model_xgb.predict(X_test)
train_MAE_xgb = mean_absolute_error(y_train, y_pred_train_xgb)
test_MAE_xgb = mean_absolute_error(y_test, y_pred_test_xgb)
train_RMSE_xgb = mean_squared_error(y_train, y_pred_train_xgb, squared=False)
test_RMSE_xgb = mean_squared_error(y_test, y_pred_test_xgb, squared=False)
print('')
print('XGBOOST METRICS:')
print('Train MAE:',train_MAE_xgb)
print('Test MAE:', test_MAE_xgb)
print('Train RMSE:', train_RMSE_xgb)
print('Test RMSE:', test_RMSE_xgb)
print('Training R^2:', r2_score(y_train, model_xgb.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_xgb.predict(X_test)))
print('Training Accuracy:', model_xgb.score(X_train, y_train))
print('Test Accuracy:', model_xgb.score(X_test, y_test))
# +
###TUNING (HYPERPARAM TUNING AND COMPARING GINI IMPORTANCE AND PERMUTATION IMPORTANCE)
# 5-fold grid search over imputation strategy plus each estimator's key knobs.
# Param names are prefixed with the pipeline step name (make_pipeline convention).
#Hyperparam tuning for ridge model
rr_params = {
    'simpleimputer__strategy': ['mean', 'median'],
    'ridge__alpha': [1,5,10,15,20,25],
}
model_rr_gs = GridSearchCV(
    model_rr,
    param_grid=rr_params,
    cv=5,
    n_jobs=-1,
    verbose=1
)
model_rr_gs.fit(X_train, y_train);
# +
rf_params = {
    'simpleimputer__strategy': ['mean', 'median'],
    'randomforestregressor__n_estimators': [100,150,200,250,300],
    'randomforestregressor__max_depth': range(5, 36, 3)
}
model_rf_gs = GridSearchCV(
    model_rf,
    param_grid=rf_params,
    cv=5,
    n_jobs=-1,
    verbose=1
)
model_rf_gs.fit(X_train, y_train);
# +
xgb_params = {
    'simpleimputer__strategy': ['mean', 'median'],
    'xgbregressor__max_depth': range(3,15,3),
    'xgbregressor__gamma': range(1,5,1),
    'xgbregressor__n_estimators': range(5,30,5)
}
model_xgb_gs = GridSearchCV(
    model_xgb,
    param_grid=xgb_params,
    cv=5,
    n_jobs=-1,
    verbose=1
)
model_xgb_gs.fit(X_train, y_train);
# +
# Same metric report as above, but for the grid-searched (v2) models.
#Ridge modelv2 metrics
y_pred_train_rr_gs = model_rr_gs.predict(X_train)
y_pred_test_rr_gs = model_rr_gs.predict(X_test)
train_MAE_rr_gs = mean_absolute_error(y_train, y_pred_train_rr_gs)
test_MAE_rr_gs = mean_absolute_error(y_test, y_pred_test_rr_gs)
train_RMSE_rr_gs = mean_squared_error(y_train, y_pred_train_rr_gs, squared=False)
test_RMSE_rr_gs = mean_squared_error(y_test, y_pred_test_rr_gs, squared=False)
print('RIDGE REGRESSORv2 METRICS:')
print('Train MAE:',train_MAE_rr_gs)
print('Test MAE:', test_MAE_rr_gs)
print('Train RMSE:', train_RMSE_rr_gs)
print('Test RMSE:', test_RMSE_rr_gs)
print('Training R^2:', r2_score(y_train, model_rr_gs.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_rr_gs.predict(X_test)))
#RF modelv2 metrics
y_pred_train_rf_gs = model_rf_gs.predict(X_train)
y_pred_test_rf_gs = model_rf_gs.predict(X_test)
train_MAE_rf_gs = mean_absolute_error(y_train, y_pred_train_rf_gs)
test_MAE_rf_gs = mean_absolute_error(y_test, y_pred_test_rf_gs)
train_RMSE_rf_gs = mean_squared_error(y_train, y_pred_train_rf_gs, squared=False)
test_RMSE_rf_gs = mean_squared_error(y_test, y_pred_test_rf_gs, squared=False)
print('')
print('RF REGRESSORv2 METRICS:')
print('Train MAE:',train_MAE_rf_gs)
print('Test MAE:', test_MAE_rf_gs)
print('Train RMSE:', train_RMSE_rf_gs)
print('Test RMSE:', test_RMSE_rf_gs)
print('Training R^2:', r2_score(y_train, model_rf_gs.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_rf_gs.predict(X_test)))
#XGB modelv2 metrics
y_pred_train_xgb_gs = model_xgb_gs.predict(X_train)
y_pred_test_xgb_gs = model_xgb_gs.predict(X_test)
train_MAE_xgb_gs = mean_absolute_error(y_train, y_pred_train_xgb_gs)
test_MAE_xgb_gs = mean_absolute_error(y_test, y_pred_test_xgb_gs)
train_RMSE_xgb_gs = mean_squared_error(y_train, y_pred_train_xgb_gs, squared=False)
test_RMSE_xgb_gs = mean_squared_error(y_test, y_pred_test_xgb_gs, squared=False)
print('')
print('XGBoost Regressorv2 METRICS:')
print('Train MAE:',train_MAE_xgb_gs)
print('Test MAE:', test_MAE_xgb_gs)
print('Train RMSE:', train_RMSE_xgb_gs)
print('Test RMSE:', test_RMSE_xgb_gs)
print('Training R^2:', r2_score(y_train, model_xgb_gs.predict(X_train)))
print('Test R^2:', r2_score(y_test, model_xgb_gs.predict(X_test)))
# Best mean cross-validated score found during the XGB grid search.
print('Model best score:', model_xgb_gs.best_score_)
# +
#Gini importance
# NOTE(review): these come from the UNTUNED model_xgb (permutation importance
# below uses model_xgb_gs), and XGBoost's feature_importances_ are gain/weight
# based rather than strictly Gini -- confirm the label is intended.
importances = model_xgb.named_steps['xgbregressor'].feature_importances_
feat_imp = pd.Series(importances, index=X.columns).sort_values()
feat_imp.tail(10).plot(kind='barh')
plt.xlabel('Gini importance')
plt.ylabel('Feature')
plt.title('Feature importance for model_xgb');
# +
#Permutation importance
# Drop in test-set score when each feature is shuffled (model-agnostic).
perm_imp = permutation_importance(model_xgb_gs,
                                  X_test,
                                  y_test,
                                  n_jobs=-1,
                                  random_state=42)
data_ = {'importances_mean' : perm_imp['importances_mean'],
         'importances_std' : perm_imp['importances_std']}
permdf = pd.DataFrame(data_, index=X_test.columns)
permdf.sort_values(by='importances_mean', inplace=True)
permdf['importances_mean'].tail(10).plot(kind='barh')
plt.xlabel('Importance (drop in accuracy)')
plt.ylabel('Feature')
plt.title('Permutation importance for model_xgb');
# +
###COMMUNICATING RESULTS
feature = 'accommodates'
#Plotting partial dependency for 'accommodates' feature
isolate = pdp_isolate(
    model=model_xgb,
    dataset=X_test,
    model_features=X_test.columns,
    feature=feature
)
pdp_plot(isolate, feature_name=feature);
# +
#Plotting partial dependency for the first top 2 features:
#('bedrooms' and 'accommodates')
top2feat = ['accommodates', 'bedrooms']
interact = pdp_interact(
    model=model_xgb,
    dataset=X_test,
    model_features=X_test.columns,
    features=top2feat
)
pdp_interact_plot(interact, plot_type='grid',feature_names=top2feat);
# +
# 2-D partial dependence over location (longitude x latitude).
location = ['longitude', 'latitude']
interact = pdp_interact(
    model=model_xgb,
    dataset=X_test,
    model_features=X_test.columns,
    features = location
)
pdp_interact_plot(interact, plot_type='grid',feature_names=location);
# -
df.corr()
# !pip install joblib
# +
# Persist the tuned random-forest pipeline for later serving.
from joblib import dump
dump(model_rf_gs, "pipeline2.joblib", compress=True)
# -
| Notebook/Tokyo-AirBnB-Dataset-U2Project2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import mlxtend as mle
import pickle
import numpy as np
from lightgbm import LGBMRegressor
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Load the precomputed train/holdout split for the chosen target from a pickle.
split = 1
tgt = 'Water_norm'
with open(f'/home/anton/Repos/untapped-nrj/data/interim/{tgt}_for_sfe.pck','rb') as f:
    dataset = pickle.load(f)
X_train = dataset[f'X_{split}']
y_train = dataset[f'y_{split}']
X_holdout = dataset[f'X_holdout_{split}']
y_holdout = dataset[f'y_holdout_{split}']
class LogLGBM(LGBMRegressor):
    """LGBMRegressor trained on log1p(target); predictions are mapped back with expm1.

    Useful for heavily right-skewed targets: the booster fits the log-space
    values, while callers keep working in the original units.
    """

    def fit(self, X, Y, **kwargs):
        # Fit the underlying booster against the log-transformed target.
        super(LogLGBM, self).fit(X, np.log1p(Y), **kwargs)
        return self

    def predict(self, X):
        # Invert the log1p transform applied during fit.
        return np.expm1(super(LogLGBM, self).predict(X))
# Base estimator for feature selection: log-target LightGBM.
model = LogLGBM(
    learning_rate=0.08,
    n_estimators=500,
    objective="mse",
    num_leaves=32,
    random_state=123
)
# -
# Floating sequential feature selection (SFFS), keeping 8-40 features,
# scored by 5-fold CV negative MAE.
sfs =SFS(estimator=model,k_features = (8,40),
         floating=True,
         scoring='neg_mean_absolute_error',cv=5,n_jobs=-1,verbose= 1)
sfs = sfs.fit(X_train, y_train)
fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev',figsize = (14,7))
plt.ylim([-1.8,-1.3])
# Feature subset selected at the 27-feature step; show which columns were dropped.
feats = sfs.get_metric_dict()[27]['feature_names']
set(X_train.columns) -set(feats)
from itertools import product
import pandas as pd
# Quick scratch check of pandas vectorised digit counting.
b = pd.Series(['aasd123','b123','c1','dasd22'])
b.str.count("[0-9]")
| notebooks/SFE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="TrVxV1ee9muO" colab_type="code" outputId="ad2ed9d5-a21c-4181-f290-8cb273e09897" executionInfo={"status": "ok", "timestamp": 1553169712336, "user_tz": -180, "elapsed": 1249, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
data_dir = './data'
# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
#data_dir = '/input'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)
# + id="VMekMqJR94GF" colab_type="code" colab={}
show_n_images=25
import os
from glob import glob
import matplotlib.pyplot as plt
# + id="Vjt2TIiR-rF6" colab_type="code" outputId="428991b3-08ee-4c43-86c2-2c5061cb978f" executionInfo={"status": "ok", "timestamp": 1553169714249, "user_tz": -180, "elapsed": 2115, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 367}
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
plt.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
# + id="q0sub6OD-0lm" colab_type="code" outputId="71795f9c-3acf-4c1d-98c8-d778f3ca76f6" executionInfo={"status": "ok", "timestamp": 1553169715201, "user_tz": -180, "elapsed": 2443, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 367}
show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
face_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
plt.imshow(helper.images_square_grid(face_images, 'RGB'))
# + id="09yztrGz-8sb" colab_type="code" outputId="ed3c0a4b-d574-4aa4-8d25-69bcc19c9a52" executionInfo={"status": "ok", "timestamp": 1553169715682, "user_tz": -180, "elapsed": 2309, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# + id="dy5qDLwtEEa3" colab_type="code" outputId="07bc0738-4f04-43af-f8dd-e2b55ec1b252" executionInfo={"status": "ok", "timestamp": 1553169715685, "user_tz": -180, "elapsed": 1680, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.<KEY>tUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
import problem_unittests as tests
def model_inputs(image_width, image_height, image_channels, z_dim):
    """
    Create the model inputs
    :param image_width: The input image width
    :param image_height: The input image height
    :param image_channels: The number of image channels
    :param z_dim: The dimension of Z
    :return: Tuple of (tensor of real input images, tensor of z data, learning rate)
    """
    # Placeholders fed at session run time; batch dimension left open (None).
    image_shape = (None, image_width, image_height, image_channels)
    inputs_real = tf.placeholder(tf.float32, image_shape, name='real_input')
    inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='z_input')
    lr = tf.placeholder(tf.float32)
    return inputs_real, inputs_z, lr


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
# + id="NDynoETOFIG3" colab_type="code" outputId="5ac30b08-ca2d-4224-a442-4b43f081b35c" executionInfo={"status": "ok", "timestamp": 1553185048450, "user_tz": -180, "elapsed": 2790, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
def discriminator(images, reuse=False):
    """
    Create the discriminator network
    :param images: Tensor of input image(s)
    :param reuse: Boolean if the weights should be reused
    :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Conv stack: 64 (stride 2) -> 128 (stride 2, BN) -> 256 (stride 1, BN),
        # each followed by leaky ReLU.  Biases are disabled throughout.
        h = tf.layers.conv2d(images, 64, 3, strides=2, padding='same', use_bias=False)
        h = tf.nn.leaky_relu(h)
        h = tf.layers.conv2d(h, 128, 3, strides=2, padding='same', use_bias=False)
        h = tf.layers.batch_normalization(h, training=True)
        h = tf.nn.leaky_relu(h)
        h = tf.layers.conv2d(h, 256, 3, strides=1, padding='same', use_bias=False)
        h = tf.layers.batch_normalization(h, training=True)
        h = tf.nn.leaky_relu(h)
        # Collapse the feature map to one real/fake logit per image.
        logits = tf.layers.dense(tf.layers.flatten(h), 1)
        return tf.sigmoid(logits), logits


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(discriminator, tf)
# + id="mw5aH-8QPAGx" colab_type="code" outputId="68715d2f-469a-42d0-d7a0-3d12406315f9" executionInfo={"status": "ok", "timestamp": 1553185232680, "user_tz": -180, "elapsed": 5189, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
def generator(z, out_channel_dim, is_train=True):
    """
    Create the generator network
    :param z: Input z
    :param out_channel_dim: The number of channels in the output image
    :param is_train: Boolean if generator is being used for training
    :return: The tensor output of the generator
    """
    # Reuse variables when building the inference graph (is_train=False).
    with tf.variable_scope('generator', reuse=not is_train):
        # FIX: removed the unused `weights = tf.contrib.layers.xavier_initializer_conv2d()`
        # local and a malformed `""""..."""` string literal that was dead code
        # (a conv1 block the author meant to comment out).
        # Project z to a 2x2x512 feature map.
        x = tf.layers.dense(z, 2 * 2 * 512)
        x = tf.reshape(x, (-1, 2, 2, 512))
        x = tf.layers.batch_normalization(x, training=is_train)
        x = tf.nn.leaky_relu(x)
        # Transposed convolutions progressively upsample the feature map.
        conv2_1 = tf.layers.conv2d_transpose(x, 256, 5, strides=2, padding='valid', use_bias=False)
        conv2_1 = tf.layers.batch_normalization(conv2_1, training=is_train)
        conv2_1 = tf.nn.leaky_relu(conv2_1)
        conv3_1 = tf.layers.conv2d_transpose(conv2_1, 128, 3, strides=2, padding='same', use_bias=False)
        conv3_1 = tf.layers.batch_normalization(conv3_1, training=is_train)
        conv3_1 = tf.nn.leaky_relu(conv3_1)
        conv4_1 = tf.layers.conv2d_transpose(conv3_1, 64, 3, strides=2, padding='same', use_bias=False)
        conv4_1 = tf.layers.batch_normalization(conv4_1, training=is_train)
        conv4_1 = tf.nn.leaky_relu(conv4_1)
        # Final layer maps to the requested channel count; tanh bounds output to [-1, 1].
        logits = tf.layers.conv2d_transpose(conv4_1, out_channel_dim, 5, strides=1, padding='same')
        out = tf.tanh(logits)
    return out


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_generator(generator, tf)
# + id="4cpF9b7tZdoy" colab_type="code" outputId="6e448f97-2b1a-40d7-cf09-f877b79e342e" executionInfo={"status": "ok", "timestamp": 1553185240384, "user_tz": -180, "elapsed": 6167, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param out_channel_dim: The number of channels in the output image
    :return: A tuple of (discriminator loss, generator loss)
    """
    fake_images = generator(input_z, out_channel_dim)
    real_out, real_logits = discriminator(input_real)
    fake_out, fake_logits = discriminator(fake_images, reuse=True)
    # One-sided label smoothing: "real" labels are 0.9 instead of 1.0.
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=real_logits, labels=tf.ones_like(real_out) * 0.9))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.zeros_like(fake_out)))
    # The generator is rewarded when fakes are classified as (smoothed) real.
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.ones_like(fake_out) * 0.9))
    return d_loss_real + d_loss_fake, g_loss


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_loss(model_loss)
# + id="n3_NVe4YIlxG" colab_type="code" outputId="2d4e233c-5eaa-4de3-964e-1b60703fe47d" executionInfo={"status": "ok", "timestamp": 1553185240386, "user_tz": -180, "elapsed": 5702, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
    # Split trainable variables by the scope they were created in.
    trainables = tf.trainable_variables()
    d_vars = [v for v in trainables if v.name.startswith('discriminator')]
    g_vars = [v for v in trainables if v.name.startswith("generator")]
    # Ensure batch-norm moving-average updates run before each optimizer step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_op = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_op = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
    return d_train_op, g_train_op


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_opt(model_opt, tf)
# + id="kGOjMh97L3Bu" colab_type="code" colab={}
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
"""
Show example output for the generator
:param sess: TensorFlow session
:param n_images: Number of Images to display
:param input_z: Input Z Tensor
:param out_channel_dim: The number of channels in the output image
:param image_mode: The mode to use for images ("RGB" or "L")
"""
cmap = None if image_mode == 'RGB' else 'gray'
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(
generator(input_z, out_channel_dim, False),
feed_dict={input_z: example_z})
images_grid = helper.images_square_grid(samples, image_mode)
plt.imshow(images_grid, cmap=cmap)
plt.show()
# + id="AwHobBwpM3FR" colab_type="code" colab={}
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):
    """
    Train the GAN
    :param epoch_count: Number of epochs to train for
    :param batch_size: Mini-batch size
    :param z_dim: Dimension of the generator's noise input
    :param learning_rate: Learning rate fed to the optimizers
    :param beta1: Adam beta1 decay rate
    :param get_batches: Callable yielding batches of images
    :param data_shape: Shape of the dataset (n, width, height, channels)
    :param data_image_mode: Image mode ("RGB" or "L") for sample display
    """
    _ , img_w, img_h, img_d = data_shape
    input_real, input_z, lr = model_inputs(img_w, img_h, img_d, z_dim)
    d_loss, g_loss = model_loss(input_real, input_z, img_d)
    d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
    steps = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epoch_count):
            for batch_images in get_batches(batch_size):
                # values range from -0.5 to 0.5, therefore scale to range -1, 1
                batch_images = batch_images * 2
                steps += 1
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
                _ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z, lr: learning_rate})
                _ = sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z, lr: learning_rate})
                if steps % 400 == 0:
                    # Every 400 steps: report losses and show sample generator output.
                    train_loss_d = d_loss.eval({input_z: batch_z, input_real: batch_images})
                    train_loss_g = g_loss.eval({input_z: batch_z})
                    # BUG FIX: report progress from the epoch_count parameter instead
                    # of the global `epochs` (NameError if the global is undefined,
                    # wrong total if it differs from the argument).
                    print("Epoch {}/{}...".format(epoch_i+1, epoch_count),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    _ = show_generator_output(sess, 4, input_z, data_shape[3], data_image_mode)
# + id="SNZEa4WuISfN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="8586eebd-e614-4e6f-9f75-883352367ac4" executionInfo={"status": "error", "timestamp": 1553183144108, "user_tz": -180, "elapsed": 1502, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-9dxO4m5l-Kg/AAAAAAAAAAI/AAAAAAAASKQ/EUtUPyNpv-Q/s64/photo.jpg", "userId": "00221067971932357642"}}
# Hyperparameters for the MNIST run.
batch_size = 64
z_dim = 100
learning_rate = 0.0002
beta1 = 0.5
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs = 2
mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
with tf.Graph().as_default():
    train(epochs, batch_size, z_dim, learning_rate, beta1, mnist_dataset.get_batches,
          mnist_dataset.shape, mnist_dataset.image_mode)
# +
# Hyperparameters for the CelebA run (larger z).
batch_size = 64
z_dim = 150
learning_rate =0.0002
beta1 = 0.5
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs =2
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))
with tf.Graph().as_default():
    train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches,
          celeba_dataset.shape, celeba_dataset.image_mode)
# + id="b-wSLTJ3VaSI" colab_type="code" colab={}
| face_generation/Untitled0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:hodemulator]
# language: python
# name: conda-env-hodemulator-py
# ---
# I've implemented the integral of wt in pearce. This notebook verifies it works as I believe it should.
from pearce.mocks import cat_dict
import numpy as np
from os import path
from astropy.io import fits
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
# Load up the tptY3 buzzard mocks.
# NOTE(review): this notebook is Python 2 (print statements, xrange below).
fname = '/u/ki/jderose/public_html/bcc/measurement/y3/3x2pt/buzzard/flock/buzzard-2/tpt_Y3_v0.fits'
hdulist = fits.open(fname)
hdulist.info()
hdulist[0].header
# Tomographic redshift bin edges; pick bin 1 and a snapshot scale factor near its center.
z_bins = np.array([0.15, 0.3, 0.45, 0.6, 0.75, 0.9])
zbin=1
a = 0.81120
z = 1.0/a - 1.0
# Load up a snapshot at a redshift near the center of this bin.
print z
# This code load a particular snapshot and and a particular HOD model. In this case, 'redMagic' is the Zheng07 HOD with the f_c variable added in.
# +
cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[a]}
cat = cat_dict[cosmo_params['simname']](**cosmo_params)#construct the specified catalog!
cat.load_catalog(a, particles = True)
# -
cat.load_model(a, 'redMagic')
# +
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0 = 100, Om0 = 0.3, Tcmb0=2.725)
# +
#cat.cosmology = cosmo # set to the "standard" one
#cat.h = cat.cosmology.h
# -
# Take the zspec in our selected zbin to calculate the dN/dz distribution. The below cell calculate the redshift distribution prefactor
#
# $$ W = \frac{2}{c}\int_0^{\infty} dz H(z) \left(\frac{dN}{dz} \right)^2 $$
hdulist[8].columns
# +
# Build the normalized dN/dz for the chosen tomographic bin from HDU 8.
nz_zspec = hdulist[8]
zbin_edges = [row[0] for row in nz_zspec.data]
zbin_edges.append(nz_zspec.data[-1][2]) # add the last bin edge
zbin_edges = np.array(zbin_edges)
Nz = np.array([row[2+zbin] for row in nz_zspec.data])
N_total = np.sum(Nz)
dNdz = Nz/N_total
# -
# Redshift-distribution prefactor W for w(theta); see the integral in the markdown above.
W = cat.compute_wt_prefactor(zbin_edges, dNdz)
print W
# If we happened to choose a model with assembly bias, set it to 0. Leave all parameters as their defaults, for now.
# +
params = cat.model.param_dict.copy()
params['mean_occupation_centrals_assembias_param1'] = 0
params['mean_occupation_satellites_assembias_param1'] = 0
params['logMmin'] = 12.0
params['sigma_logM'] = 0.2
params['f_c'] = 0.19
params['alpha'] = 1.21
params['logM1'] = 13.71
params['logM0'] = 11.39
print params
# -
cat.populate(params)
# Analytic galaxy number density of the populated HOD catalog.
nd_cat = cat.calc_analytic_nd()
print nd_cat
cat.cosmology
# +
# Observed number density: counts in the bin divided by the survey comoving volume
# (full-sky shell volume scaled by the survey area fraction).
area = 4635.4 #sq degrees
full_sky = 41253 #sq degrees
volIn, volOut = cat.cosmology.comoving_volume(z_bins[zbin-1]), cat.cosmology.comoving_volume(z_bins[zbin])
fullsky_volume = volOut-volIn
survey_volume = fullsky_volume*area/full_sky
nd_mock = N_total/survey_volume
print nd_mock
# -
volIn.value, volOut
correct_nds = np.array([1e-3, 1e-3, 1e-3, 4e-4, 1e-4])
# + active=""
# for zbin in xrange(1,6):
# nz_table = hdulist[8]
# Nz = np.array([row[2+zbin] for row in nz_table.data])# if z_bins[zbin-1] < row[1] < z_bins[zbin] ])
# N_total = np.sum(Nz)
# #volIn, volOut = cosmo.comoving_volume(z_bins[zbin-1]), cosmo.comoving_volume(z_bins[zbin])
# volIn, volOut = cat.cosmology.comoving_volume(z_bins[zbin-1]), cat.cosmology.comoving_volume(z_bins[zbin])
# #volIn, volOut = cat.cosmology.comoving_volume(z_bins[0]), cat.cosmology.comoving_volume(z_bins[-1])
#
# fullsky_volume = volOut-volIn
# survey_volume = fullsky_volume*area/full_sky
# nd_mock = N_total/survey_volume
# print nd_mock.value#, correct_nds[zbin-1]#/nd_mock.value, nd_mock.value**2
#
# + language="bash"
# ls ~jderose/public_html/bcc/catalog/redmagic/y3/buzzard/flock/buzzard-0/a/buzzard-0_1.6_y3_run_redmapper_v6.4.20_redmagic_*vlim_area.fit
# -
vol_fname = '/u/ki/jderose/public_html/bcc/catalog/redmagic/y3/buzzard/flock/buzzard-0/a/buzzard-0_1.6_y3_run_redmapper_v6.4.20_redmagic_highlum_1.0_vlim_area.fit'
vol_hdulist = fits.open(vol_fname)
# + active=""
# print vol_hdulist[0]
# vol_hdulist[1].data
# + active=""
# for zbin in xrange(1,6):
# plt.plot(nz_table.data['Z_MID'], nz_table.data['BIN%d'%zbin], label = str(zbin))
#
# plt.vlines(z_bins, 0, 5e6)
# plt.legend(loc='best')
# #plt.xscale('log')
# plt.xlim([0, 1.0])
# plt.show()
# -
# Ratio of mock to analytic number density.
nd_mock.value/nd_cat
# +
#compute the mean mass
# HOD-weighted mean host halo mass: integrate mass * mass function * <N|M>
# over log-mass bins and normalize by the weighted number.
mf = cat.calc_mf()
HOD = cat.calc_hod()
mass_bin_range = (9,16)
mass_bin_size = 0.01
mass_bins = np.logspace(mass_bin_range[0], mass_bin_range[1], int( (mass_bin_range[1]-mass_bin_range[0])/mass_bin_size )+1 )
mean_host_mass = np.sum([mass_bin_size*mf[i]*HOD[i]*(mass_bins[i]+mass_bins[i+1])/2 for i in xrange(len(mass_bins)-1)])/\
        np.sum([mass_bin_size*mf[i]*HOD[i] for i in xrange(len(mass_bins)-1)])
print mean_host_mass
# -
# Angular bins in degrees (converted from arcmin) matching the buzzard measurement.
theta_bins = np.logspace(np.log10(2.5), np.log10(2000), 25)/60 #binning used in buzzard mocks
tpoints = (theta_bins[1:]+theta_bins[:-1])/2
# + active=""
# ?? cat.calc_wt
# -
# 3-D separation bins in Mpc (divide out h) and the resulting w(theta).
r_bins = np.logspace(-0.5, 1.7, 16)/cat.h
rpoints = (r_bins[1:]+r_bins[:-1])/2
r_bins
wt = cat.calc_wt(theta_bins, r_bins, W)
wt
r_bins
r_bins
# Use my code's wrapper for halotools' xi calculator. Full source code can be found [here](https://github.com/mclaughlin6464/pearce/blob/31e2f6a3598217dcf1be3d2606b69c4e529fc791/pearce/mocks/cat.py#L688).
# Real-space galaxy correlation function from the populated catalog.
xi = cat.calc_xi(r_bins, do_jackknife=False)
# Interpolate with a Gaussian process. May want to do something else "at scale", but this is quick for now.
import george
from george.kernels import ExpSquaredKernel
kernel = ExpSquaredKernel(0.05)
gp = george.GP(kernel)
gp.compute(np.log10(rpoints))
print xi
# Clamp non-positive xi values so the log-log fit below is defined.
xi[xi<=0] = 1e-2 #ack
# Power-law fit log10(xi) = m*log10(r) + b for extrapolation.
from scipy.stats import linregress
m,b,_,_,_ = linregress(np.log10(rpoints), np.log10(xi))
# +
plt.plot(rpoints, (2.22353827e+03)*(rpoints**(-1.88359)))
#plt.plot(rpoints, b2*(rpoints**m2))
plt.scatter(rpoints, xi)
plt.loglog();
# +
plt.plot(np.log10(rpoints), b+(np.log10(rpoints)*m))
#plt.plot(np.log10(rpoints), b2+(np.log10(rpoints)*m2))
#plt.plot(np.log10(rpoints), 90+(np.log10(rpoints)*(-2)))
plt.scatter(np.log10(rpoints), np.log10(xi) )
#plt.loglog();
# -
print m,b
# +
# Compare the GP interpolation of xi against the measured points.
rpoints_dense = np.logspace(-0.5, 2, 500)
plt.scatter(rpoints, xi)
plt.plot(rpoints_dense, np.power(10, gp.predict(np.log10(xi), np.log10(rpoints_dense))[0]))
plt.loglog();
# + active=""
# bias = cat.calc_bias(r_bins)
# print np.sqrt(bias)
# + active=""
# # plt.plot(rpoints, bias)
# plt.xscale('log')
# plt.ylabel(r'$b^2$')
# plt.xlabel('r [Mpc]')
# plt.title('Bias, "Updated" Values')
# plt.ylim([0,8]);
# -
# This plot looks bad on large scales. I will need to implement a linear bias model for larger scales; however I believe this is not the cause of this issue. The overly large correlation function at large scales if anything should increase w(theta).
#
# This plot shows the regimes of concern. The black lines show the value of r for u=0 in the below integral for each theta bin. The red lines show the maximum value of r for the integral I'm performing.
# Perform the below integral in each theta bin:
#
# $$ w(\theta) = W \int_0^\infty du \xi \left(r = \sqrt{u^2 + \bar{x}^2(z)\theta^2} \right) $$
#
# Where $\bar{x}$ is the median comoving distance to z.
print zbin
#a subset of the data from above. I've verified it's correct, but we can look again.
zbin = 1
wt_redmagic = np.loadtxt('/u/ki/swmclau2/Git/pearce/bin/mcmc/buzzard2_wt_%d%d.npy'%(zbin,zbin))
# The below plot shows the problem. There appears to be a constant multiplicative offset between the redmagic calculation and the one we just performed. The plot below it shows their ratio. It is near-constant, but there is some small radial trend. Whether or not it is significant is tough to say.
from scipy.special import gamma
def wt_analytic(m,b,t,x):
    # Closed-form w(theta) for a pure power law xi(r) = b * r^m,
    # using the Limber-style gamma-function result; W, gamma are globals.
    return W*b*np.sqrt(np.pi)*(t*x)**(1 + m)*(gamma(-(1./2) - m/2.)/(2*gamma(-(m/2.))) )
# Redmagic angular binning (degrees) and bin centers for the comparison plot.
theta_bins_rm = np.logspace(np.log10(2.5), np.log10(250), 21)/60 #binning used in buzzard mocks
tpoints_rm = (theta_bins_rm[1:]+theta_bins_rm[:-1])/2
# +
plt.plot(tpoints, wt, label = 'My Calculation')
plt.plot(tpoints_rm, wt_redmagic, label = 'Buzzard Mock')
#plt.plot(tpoints_rm, W.to("1/Mpc").value*mathematica_calc,  label = 'Mathematica Calc')
#plt.plot(tpoints, wt_analytic(m,10**b, np.radians(tpoints), x),label = 'Mathematica Calc' )
plt.ylabel(r'$w(\theta)$')
plt.xlabel(r'$\theta \mathrm{[degrees]}$')
plt.loglog();
plt.legend(loc='best')
# + active=""
# xi = cat.calc_xi(r_bins, do_jackknife=False)
# xi_mm = cat._xi_mm
# bias2 = np.mean(xi[-3:]/xi_mm[-3:]) #estimate the large scale bias from the box
# -
print bias2
plt.plot(rpoints, xi/xi_mm)
plt.plot(rpoints, cat.calc_bias(r_bins))
plt.plot(rpoints, bias2*np.ones_like(rpoints))
plt.xscale('log')
plt.plot(rpoints, xi, label = 'Galaxy')
plt.plot(rpoints, xi_mm, label = 'Matter')
plt.loglog()
plt.legend(loc ='best')
from astropy import units
from scipy.interpolate import interp1d
cat.cosmology
import pyccl as ccl
ob = 0.047
om = cat.cosmology.Om0
oc = om - ob
sigma_8 = 0.82
h = cat.h
ns = 0.96
cosmo = ccl.Cosmology(Omega_c =oc, Omega_b=ob, h=h, n_s=ns, sigma8=sigma_8 )
big_rbins = np.logspace(1, 2.1, 21)
big_rbc = (big_rbins[1:] + big_rbins[:-1])/2.0
xi_mm2 = ccl.correlation_3d(cosmo, cat.a, big_rbc)
plt.plot(rpoints, xi)
plt.plot(big_rbc, xi_mm2)
plt.vlines(30, 1e-3, 1e2)
plt.loglog()
# Plot the two log10(xi) interpolators over their overlap region.
# NOTE(review): xi_interp / xi_mm_interp are defined in a *later* cell --
# this notebook was evidently executed out of order; confirm before rerunning
# top-to-bottom.
plt.plot(np.logspace(0,1.5, 20), xi_interp(np.log10(np.logspace(0,1.5,20))))
plt.plot(np.logspace(1.2,2.0, 20), xi_mm_interp(np.log10(np.logspace(1.2,2.0,20))))
plt.vlines(30, -3, 2)
#plt.loglog()
plt.xscale('log')
# +
# Log-log interpolators for the measured galaxy xi and the CCL matter xi.
xi_interp = interp1d(np.log10(rpoints), np.log10(xi))
xi_mm_interp = interp1d(np.log10(big_rbc), np.log10(xi_mm2))
# NOTE(review): this is the ratio of the *log10* values at r = 30, not the
# ratio of the xi's themselves -- confirm that is what was intended.
print(xi_interp(np.log10(30))/xi_mm_interp(np.log10(30)))  # fixed Python-2 print
# +
#xi = cat.calc_xi(r_bins)
# Interpolators in log10-log10 space; evaluating outside their r-range raises
# ValueError, which the integrators below use as an implicit bounds check.
xi_interp = interp1d(np.log10(rpoints), np.log10(xi))
xi_mm_interp = interp1d(np.log10(big_rbc), np.log10(xi_mm2))
#xi_mm = cat._xi_mm#self.calc_xi_mm(r_bins,n_cores='all')
#if precomputed, will just load the cache
bias2 = np.mean(xi[-3:]/xi_mm[-3:]) #estimate the large scale bias from the box
#print bias2
#note i don't use the bias builtin cuz i've already computed xi_gg.
#Assume xi_mm doesn't go below 0; will fail catastrophically if it does. but if it does we can't hack around it.
#idx = -3
#m,b,_,_,_ =linregress(np.log10(rpoints), np.log10(xi))
# NOTE(review): `m` and `b` here come from a linregress that is commented out
# above -- this lambda only works if they are still bound from an earlier run.
large_scale_model = lambda r: (10**b)*(r**m) #should i use np.power?
tpoints = (theta_bins[1:] + theta_bins[:-1])/2.0
# Output accumulators filled in-place by integrate_xi / integrate_xi_small.
wt_large = np.zeros_like(tpoints)
wt_small = np.zeros_like(tpoints)
# Comoving distance to the sample (physical units via a/h scaling).
x = cat.cosmology.comoving_distance(cat.z)*cat.a/cat.h
# Smallest angle must map to a physical scale inside the measured xi range.
assert tpoints[0]*x.to("Mpc").value/cat.h >= r_bins[0]
#ubins = np.linspace(10**-6, 10**4.0, 1001)
# Line-of-sight integration grid (log-spaced) and its bin centers.
ubins = np.logspace(-6, 3.0, 1001)
ubc = (ubins[1:]+ubins[:-1])/2.0
def integrate_xi(bin_no):#, w_theta, bin_no, ubc, ubins)
    """Line-of-sight integral of the biased matter correlation function.

    Integrates bias2 * xi_mm(r) over the comoving coordinate u at fixed angle
    tpoints[bin_no] and writes the result into the module-level array
    ``wt_large`` (no return value).  Relies on notebook globals:
    tpoints, ubc, ubins, cat, x, bias2, xi_mm_interp, wt_large.
    """
    int_xi = 0
    t_med = np.radians(tpoints[bin_no])
    for ubin_no, _u in enumerate(ubc):
        _du = ubins[ubin_no+1]-ubins[ubin_no]
        u = _u*units.Mpc*cat.a/cat.h
        du = _du*units.Mpc*cat.a/cat.h
        # 3D separation at line-of-sight offset u and transverse scale x*theta.
        r = np.sqrt((u**2+(x*t_med)**2))#*cat.h#not sure about the h
        #if r > (units.Mpc)*cat.Lbox/10:
        try:
            # xi_mm_interp interpolates log10(xi_mm) vs log10(r); re-exponentiate.
            int_xi+=du*bias2*(np.power(10, \
                xi_mm_interp(np.log10(r.value))))
        except ValueError:
            # r is outside the interpolator's range -> treat xi as zero there.
            int_xi+=du*0
        #else:
            #int_xi+=du*0#(np.power(10, \
            #xi_interp(np.log10(r.value))))
    # Strip astropy units and apply the little-h convention used elsewhere.
    wt_large[bin_no] = int_xi.to("Mpc").value/cat.h
def integrate_xi_small(bin_no):#, w_theta, bin_no, ubc, ubins)
    """Line-of-sight integral preferring the measured galaxy xi.

    Same integral as integrate_xi, but uses the measured galaxy correlation
    (xi_interp) where it is defined, falls back to the biased matter
    correlation (bias2 * xi_mm) at scales outside the measurement, and
    finally to zero outside both interpolators' ranges.  Writes the result
    into the module-level array ``wt_small`` (no return value).
    """
    int_xi = 0
    t_med = np.radians(tpoints[bin_no])
    for ubin_no, _u in enumerate(ubc):
        _du = ubins[ubin_no+1]-ubins[ubin_no]
        u = _u*units.Mpc*cat.a/cat.h
        du = _du*units.Mpc*cat.a/cat.h
        r = np.sqrt((u**2+(x*t_med)**2))#*cat.h#not sure about the h
        #if r > (units.Mpc)*cat.Lbox/10:
        #int_xi+=du*large_scale_model(r.value)
        #else:
        try:
            # Preferred: measured galaxy correlation (log10-interpolated).
            int_xi+=du*(np.power(10, \
                xi_interp(np.log10(r.value))))
        except ValueError:
            try:
                # Fallback: biased large-scale matter correlation.
                int_xi+=du*bias2*(np.power(10, \
                    xi_mm_interp(np.log10(r.value))))
            except ValueError:
                # Outside both ranges: contribute nothing.
                int_xi+=0*du
    wt_small[bin_no] = int_xi.to("Mpc").value/cat.h
#Currently this doesn't work cuz you can't pickle the integrate_xi function.
#I'll just ignore for now. This is why i'm making an emulator anyway
#p = Pool(n_cores)
# BUG FIX: on Python 3 `map` is lazy, so the original
# `map(integrate_xi, range(...))` never actually ran the side-effecting
# integrators and wt_large/wt_small stayed all-zero.  Iterate explicitly.
for _bin_no in range(tpoints.shape[0]):
    integrate_xi(_bin_no)
    integrate_xi_small(_bin_no)
# -
#wt_large[wt_large<1e-10] = 0
# Zero out numerical noise in the small-scale integral.
wt_small[wt_small<1e-10] = 0
wt_large
# +
# Compare the pair-count w(theta), the mock measurement, and the integral of
# the measured xi (wt_small, scaled by the kernel W).
plt.plot(tpoints, wt, label = 'My Calculation')
plt.plot(tpoints_rm, wt_redmagic, label = 'Buzzard Mock')
#plt.plot(tpoints, W*wt_large, label = 'LS')
plt.plot(tpoints, W*wt_small, label = "My Calculation")
#plt.plot(tpoints, wt+W*wt_large, label = "both")
#plt.plot(tpoints_rm, W.to("1/Mpc").value*mathematica_calc, label = 'Mathematica Calc')
#plt.plot(tpoints, wt_analytic(m,10**b, np.radians(tpoints), x),label = 'Mathematica Calc' )
plt.ylabel(r'$w(\theta)$')
plt.xlabel(r'$\theta \mathrm{[degrees]}$')
plt.loglog();
plt.legend(loc='best')
# -
# Ratio diagnostics against the mock and the analytic calculation.
wt/wt_redmagic
wt_redmagic/(W.to("1/Mpc").value*mathematica_calc)
# cPickle only exists on Python 2; fall back to the Python-3 pickle module.
try:
    import cPickle as pickle
except ImportError:
    import pickle
# Pickle files must be opened in binary mode ('rb'); the bare open() here
# only worked because of Python 2's text/binary file equivalence.
with open('/u/ki/jderose/ki23/bigbrother-addgals/bbout/buzzard-flock/buzzard-0/buzzard0_lb1050_xigg_ministry.pkl', 'rb') as f:
    xi_rm = pickle.load(f)
# Inspect the shape and the mass/color binning of the loaded xi measurement.
xi_rm.metrics[0].xi.shape
xi_rm.metrics[0].mbins
xi_rm.metrics[0].cbins
# +
#plt.plot(np.log10(rpoints), b2+(np.log10(rpoints)*m2))
#plt.plot(np.log10(rpoints), 90+(np.log10(rpoints)*(-2)))
# Overlay the measured xi with the first 3x3 (mass, color) bins of the
# ministry measurement.  `xrange` is Python-2 only; use `range` on Python 3.
plt.scatter(rpoints, xi)
for i in range(3):
    for j in range(3):
        plt.plot(xi_rm.metrics[0].rbins[:-1], xi_rm.metrics[0].xi[:,i,j,0])
plt.loglog();
# -
# Two views of the mock/model ratio: full range (top) and zoomed (bottom).
plt.subplot(211)
plt.plot(tpoints_rm, wt_redmagic/wt)
plt.xscale('log')
#plt.ylim([0,10])
plt.subplot(212)
plt.plot(tpoints_rm, wt_redmagic/wt)
plt.xscale('log')
plt.ylim([2.0,4])
xi_rm.metrics[0].xi.shape
xi_rm.metrics[0].rbins #Mpc/h
# The below cell calculates the integrals jointly instead of separately. It doesn't change the results significantly, but is quite slow. I've disabled it for that reason.
# +
# NOTE(review): bare `z` and `a` here (earlier cells used cat.z / cat.a) --
# these must already be bound in the notebook namespace; confirm before rerun.
x = cat.cosmology.comoving_distance(z)*a
#ubins = np.linspace(10**-6, 10**2.0, 1001)
# Coarser line-of-sight grid for the (slow) joint-integral experiment below.
ubins = np.logspace(-6, 2.0, 51)
ubc = (ubins[1:]+ubins[:-1])/2.0
#NLL
def liklihood(params, wt_redmagic,x, tpoints):
    """Chi-square objective (negative log-likelihood up to a constant) for
    fitting the analytic power-law w(theta) model to the mock measurement.

    Note: the name keeps the original (misspelled) spelling because later
    cells call it by this name.

    Parameters
    ----------
    params : sequence of two floats
        (m, b): slope and amplitude of the power-law xi(r) = b * r**m.
    wt_redmagic : array
        Measured w(theta); the last 5 bins are excluded from the fit, so
        `tpoints` must already be sliced to match (callers pass
        tpoints_rm[:-5]).
    x : astropy Quantity
        Comoving distance to the sample.
    tpoints : array
        Angular bin centers (degrees) matching wt_redmagic[:-5].

    Returns
    -------
    float
        The chi-square value (also printed for monitoring).
    """
    # Evaluate the closed-form model defined earlier in the notebook.
    wt = wt_analytic(params[0],params[1], tpoints, x.to("Mpc").value)
    # Simple chi^2 with an assumed 0.1% fractional variance on the measurement.
    chi2 = np.sum(((wt - wt_redmagic[:-5])**2)/(1e-3*wt_redmagic[:-5]) )
    print(chi2)  # fixed: was a Python-2 print statement
    return chi2 #nll
# -
# Inspect the mock number density and the w(theta) bins used in the fit.
print(nd_mock)          # fixed: Python-2 print statements
print(wt_redmagic[:-5])
import scipy.optimize as op
# + active=""
# args = ([p for p in params],wt_redmagic, nd_mock)
# PRIORS = {'f_c': (0, 0.45),
# 'alpha': (0.6, 1.4),
# 'logMmin':(10.9,13.6),
# 'logM1': (13., 14.1),
# 'logM0': (9,16),
# 'sigma_logM': (0.01, 0.5)}
# + active=""
# results = op.minimize(liklihood, np.array([v for v in params.values()]) ,args,method = 'L-BFGS-B', bounds = [PRIORS[p] for p in params])
# -
# Minimize the chi^2 over (m, b) starting from a rough by-eye guess.
results = op.minimize(liklihood, np.array([-2.2, 10**1.7]),(wt_redmagic,x, tpoints_rm[:-5]))
results
# +
#plt.plot(tpoints_rm, wt, label = 'My Calculation')
# Overlay the mock with the analytic model at hand-tuned best-fit parameters.
plt.plot(tpoints_rm, wt_redmagic, label = 'Buzzard Mock')
plt.plot(tpoints_rm, wt_analytic(-1.88359, 2.22353827e+03,tpoints_rm, x.to("Mpc").value), label = 'Mathematica Calc')
plt.ylabel(r'$w(\theta)$')
plt.xlabel(r'$\theta \mathrm{[degrees]}$')
plt.loglog();
plt.legend(loc='best')
# -
# Check the fitted power law against the measured xi in log space.
plt.plot(np.log10(rpoints), np.log10(2.22353827e+03)+(np.log10(rpoints)*(-1.88)))
plt.scatter(np.log10(rpoints), np.log10(xi) )
# NOTE(review): `params` (a dict) is not defined in any visible cell -- it
# must come from earlier notebook state; confirm before rerunning.
np.array([v for v in params.values()])
# + active=""
# #Try integrating over z and u jointly, explicitly
# nz_zspec = hdulist[8]
# #N = 0#np.zeros((5,))
# N_total = np.sum([row[2+zbin] for row in nz_zspec.data])
# dNdzs = []
# zs = []
# W = 0
# wt2 = np.zeros_like(tpoints_rm)
# ubins = np.linspace(10**-6, 10**2.0, 1001)
# for bin_no, t_med in enumerate(np.radians(tpoints_rm)):
# print bin_no
# int_xi = 0
# for row in nz_zspec.data:
#
# N = row[2+zbin]
#
# dN = N*1.0/N_total
#
# dz = row[2] - row[0]
# dNdz = dN/dz
#
# H = cat.cosmology.H(row[1])
#
# x = cat.cosmology.comoving_distance(row[1])
# for ubin_no, _u in enumerate(ubins[:-1]):
# _du = ubins[ubin_no+1]-ubins[ubin_no]
# u = _u*unit.Mpc
# du = _du*unit.Mpc
# r = a*np.sqrt((u**2+(x*t_med)**2).value)#*cat.h#not sure about the h
# #print r
# if r <= 10**1.7:
# int_xi+=du*(np.power(10, \
# gp.predict(np.log10(xi), np.log10(r), mean_only=True)[0]))*dNdz*dN*H*2.0/const.c
#
# wt2[bin_no] = int_xi
#
# + active=""
# plt.plot(tpoints_rm, wt2, label = 'My Calculation')
# plt.plot(tpoints_rm, wt_redmagic, label = 'Buzzard Mock')
# plt.ylabel(r'$w(\theta)$')
# plt.xlabel(r'$\theta \mathrm{[degrees]}$')
# plt.loglog();
# plt.legend(loc='best')
# + active=""
# wt_redmagic/wt2
# -
| notebooks/Pearce wt integral tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Write your FIRST NAME in the quotation marks below in ALL CAPITALS
student_name="CANERK" #example: student_name = "JOHN"
#Now, click on the "run" button.
#DO NOT ALTER THE CODE BELOW.
import urllib.request, urllib.parse, urllib.error
import random
import numpy as np
import networkx as nx
# Seeding with the student's name makes every student's "random" graph
# deterministic and unique to them.
random.seed(student_name)
name_url = 'https://raw.githubusercontent.com/arineng/arincli/master/lib/male-first-names.txt'
binary_file = urllib.request.urlopen(name_url).read()
# fhand = open('first_names.txt','wb+') #open in wb+ (writing+reading) binary mode
# fhand.write(binary_file)
# NOTE: `file` shadows the (removed-in-Py3) builtin name; harmless here.
file = binary_file.decode() #type string
name_list = file.split()
NODE_NUMBER = 10
EDGE_NUMBER = 20
# Nine random names plus the student's own name form the node set.
node_list = random.sample(name_list,NODE_NUMBER-1)
node_list.append(student_name)
edge_list = []
# Each edge picks two *distinct* node indices (sample without replacement),
# though duplicate edges between the same pair are possible; nx.Graph dedupes.
for i in range(EDGE_NUMBER):
    node_choice=random.sample(range(0,NODE_NUMBER),2)
    edge_list.append((node_list[node_choice[0]],node_list[node_choice[1]]))
G=nx.Graph()
G.add_nodes_from(node_list)
G.add_edges_from(edge_list)
#nx.draw(G, with_labels=True, node_size=1000, alpha=1, edge_color='r', node_color='w', font_size=11, font_color='k', node_shape='s')
# Generator of "node neighbor1 neighbor2 ..." adjacency lines.
adj_list = nx.generate_adjlist(G)
# Render each adjacency line ("node n1 n2 ...") as an English sentence.
# Nodes with no listed neighbors on their line are skipped silently.
for line in adj_list:
    tokens = line.split()
    if len(tokens) == 2:
        # Single friend: two-argument print (note the resulting double space,
        # kept for output compatibility with the original).
        print(tokens[0] + ' is friends with ', tokens[1] + ".")
    elif len(tokens) >= 3:
        # Several friends: "A, B, and C." with an Oxford comma.
        leading = ", ".join(tokens[1:-1])
        print(tokens[0] + ' is friends with ' + leading + ", and " + tokens[-1] + ".")
| random_social_network_creator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="rgKwnBYn_fk0"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from matplotlib import style
style.use("ggplot")
# + id="Md1AbPFLAC35"
df = pd.read_csv("TSLA.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="5rp5KKjYALsy" outputId="b430a3dc-75a8-42ea-a877-6f8cae3ab097"
df.head()
# + id="7vEa7Bi0ANJ6"
X = df[["High","Open","Low","Volume"]]
Y = df["Close"]
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="3i_rW83eAXy9" outputId="35e82c79-3d88-49d0-87a9-d308b26c1c76"
X.head()
# + id="N9fSR75EAY3K"
X_train , X_test , Y_train , Y_test = train_test_split(X,Y,test_size=0.2,random_state=2014)
# + colab={"base_uri": "https://localhost:8080/"} id="vXNojb5EAklR" outputId="73c558aa-6150-4338-f3b7-6b67974f59df"
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
# + id="jJURKFC9Asqt"
Y_pred = regressor.predict(X_test)
result = pd.DataFrame({"Actual":Y_test,"Predicted":Y_pred})
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="hchwnetpA8aV" outputId="6b1f0a45-2417-4a1c-8ea5-ef569a099db8"
result.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="3WfB3xlxBCmZ" outputId="b8c73c21-f7ce-41b3-adea-efbfc9b48d08"
import math
graph = result.head(30)
graph.plot(kind="bar")
# + colab={"base_uri": "https://localhost:8080/"} id="Bm81urfmBQO5" outputId="990d3cc0-0409-4375-b6c3-6ed87fe1af31"
#No of trading days
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="uWktaAMJBjLU" outputId="b36c39cb-4c79-492a-b852-be85fecb43b2"
#Visualize the Close Price
import matplotlib.pyplot as plt
plt.figure(figsize=(16,8))
plt.title("Tesla Stocks")
plt.xlabel("Days")
plt.ylabel("Close Price in $")
plt.plot(df['Close'])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="xOesZehgB68S" outputId="8c3c423c-c55b-4246-cf9d-60e0b5c732e8"
future_days = 25
df = df[["Close"]]
df["Prediction"] = df[["Close"]].shift(-future_days)
df.tail()
# + id="fRj70oDPCbZy"
X = np.array(df.drop(["Prediction"],1))[:-future_days]
# + colab={"base_uri": "https://localhost:8080/"} id="YA9YNnKwCyFX" outputId="ccd98a45-2580-401e-b218-91ae5c2d0596"
X
# + id="sBhT99tjCylA"
Y = np.array(df["Prediction"])[:-future_days]
# + colab={"base_uri": "https://localhost:8080/"} id="7px-53R8DPkQ" outputId="6e4484bb-ee43-4b1c-c0f7-a596327eb5f2"
Y
# + id="D5jVFYekDQIq"
X_train , X_test , Y_train , Y_test = train_test_split(X,Y,test_size=0.2)
# + id="hGz2TvgJDcL4"
X_future = df.drop(["Prediction"],1)[:-future_days]
X_future = X_future.tail(future_days)
X_future = np.array(X_future)
# + colab={"base_uri": "https://localhost:8080/"} id="U8U6zgxJDtgH" outputId="9dcb659d-7874-4e55-8370-1853cec9bb57"
X_future
# + id="H3o2GRqGDt_F"
lr = LinearRegression().fit(X_train,Y_train)
lr_pred = lr.predict(X_future)
# + colab={"base_uri": "https://localhost:8080/", "height": 618} id="jikKUvh_EEMC" outputId="1210ee06-7c91-436e-896c-7bebb44df697"
valid = df[X.shape[0]:]
valid["Prediction"] = lr_pred
plt.figure(figsize=(16,8))
plt.title("Tesla Stocks")
plt.xlabel("Days")
plt.ylabel("Close Price in $")
plt.plot(df['Close'])
plt.plot(valid[['Close','Prediction']])
plt.legend(["Orig","Val","Pred"])
plt.show()
# + id="k8hM2965ErNv"
| WeekLong/Financial Hack/TeslaStockAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NGL view - Vector representation
#
# In this example a vector representation is added to a NGLviewer object.
#
# Our example needs `Numpy`, `NGLview` and our own `ngl_vector_rep` packages.
# + code_folding=[0]
# Import packages
import numpy as np
import nglview as nv
from ngl_vector_rep import add_vector_rep
# -
# ### Demo tube
# Let's generate the positions and directions of transition dipole moments on a tube
# + code_folding=[0]
# Demo tube creation
def create_tube(N1, N2, R, h, gamma, alpha, beta, mu):
    """Positions and transition-dipole vectors of molecules on a helical tube.

    The tube has N1 rings of N2 sites each, at radius R, with ring spacing h
    along z and a twist of `gamma` radians per ring.  Each dipole of magnitude
    `mu` is tilted by polar angle `beta` from the tube axis and rotated by
    `alpha` in-plane relative to the local tangent.

    Returns
    -------
    (pos, dip) : tuple of ndarray, each of shape (N1*N2, 3)
        Site positions and dipole vectors, ordered ring-by-ring
        (index = n1 * N2 + n2), identically to the original loop version.
    """
    # Ring index n1 and within-ring index n2 for every site, in loop order.
    n1 = np.repeat(np.arange(N1), N2)
    n2 = np.tile(np.arange(N2), N1)
    # Azimuthal angle of each site: equal spacing within a ring plus the
    # per-ring twist gamma.
    phi = n2 * (2 * np.pi / N2) + n1 * gamma
    pos = np.column_stack([R * np.cos(phi), R * np.sin(phi), n1 * h])
    # In-plane dipole component is rotated by -alpha relative to phi; the
    # axial component mu*cos(beta) is the same for every site.
    sb = mu * np.sin(beta)
    dip = np.column_stack([
        -sb * np.sin(phi - alpha),
        sb * np.cos(phi - alpha),
        np.full(N1 * N2, mu * np.cos(beta)),
    ])
    return pos, dip
# -
# ### Let's generate our tube
# We create a demo array of vectors. Then we setup an empty `NGLview` instance using the `NGLWidget()` function. You could also add a vector rep to, for example, an `nv.show_mdanalysis()` view. At last we add our vector representation.
# + code_folding=[0]
# Generate tube and add vector representation in NGLview
r, e = create_tube(100, 10, 100, 10, 0.25, np.pi / 4, np.pi / 4, 10)
# Center the tube on the origin so it sits in the middle of the viewport.
r = r - np.mean(r, axis=0)
view = nv.NGLWidget()
add_vector_rep(view, r, e)
view
| Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=1
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.tensorflow.data import _parse_function
# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
# %matplotlib inline
from IPython import display
import pandas as pd
# the nightly build of tensorflow_probability is required as of the time of writing this
import tensorflow_probability as tfp
ds = tfp.distributions
# -
print(tf.__version__, tfp.__version__)
TRAIN_SIZE=101726
BATCH_SIZE=64
TEST_SIZE=10000
DIMS = (128, 128, 1)
N_TRAIN_BATCHES =int((TRAIN_SIZE-TEST_SIZE)/BATCH_SIZE)
N_TEST_BATCHES = int(TEST_SIZE/BATCH_SIZE)
TRAIN_BUF = 1000
TEST_BUF = 1000
network_type = 'AE'
DATASET_ID = 'european_starling_gentner_segmented'
record_loc = DATA_DIR / 'tfrecords' / "starling.tfrecords"
# read the dataset
raw_dataset = tf.data.TFRecordDataset([record_loc.as_posix()])
data_types = {
"spectrogram": tf.uint8,
"index": tf.int64,
"indv": tf.string,
}
# parse each data type to the raw dataset
dataset = raw_dataset.map(lambda x: _parse_function(x, data_types=data_types))
spec, index, indv = next(iter(dataset))
plt.matshow(spec.numpy().reshape(128,128))
test_dataset = dataset.take(TEST_SIZE).shuffle(TRAIN_BUF).batch(BATCH_SIZE)
train_dataset = dataset.skip(TEST_SIZE).take(TRAIN_SIZE-TEST_SIZE).shuffle(TEST_BUF).batch(BATCH_SIZE)
# +
N_Z = 128
encoder = [
tf.keras.layers.InputLayer(input_shape=DIMS),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Conv2D(
filters=128, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Conv2D(
filters=256, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Conv2D(
filters=256, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=N_Z),
]
decoder = [
tf.keras.layers.Dense(units=4 * 4 * 256, activation="relu"),
tf.keras.layers.Reshape(target_shape=(4, 4, 256)),
tf.keras.layers.Conv2DTranspose(
filters=256, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=256, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=128, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME", activation="tanh"
),
]
# -
from avgn.tensorflow.AE import AE, plot_reconstruction
# the optimizer for the model
optimizer = tf.keras.optimizers.Adam(1e-3)
# train the model
model = AE(
enc = encoder,
dec = decoder,
optimizer = optimizer,
)
# exampled data for plotting results
example_data = next(iter(test_dataset))
example_data = (
tf.cast(tf.reshape(example_data[0], [BATCH_SIZE] + list(DIMS)), tf.float32)
/ 255
)
plot_reconstruction(model, example_data, N_Z)
# a pandas dataframe to save the loss information to
losses = pd.DataFrame(columns = ['recon_loss'])
# +
#N_TRAIN_BATCHES = 50
#N_TEST_BATCHES = 50
# -
n_epochs = 500
for epoch in range(n_epochs):
    # train
    for batch, train_x in tqdm(
        zip(range(N_TRAIN_BATCHES), train_dataset), total=N_TRAIN_BATCHES
    ):
        # Reshape the flat uint8 spectrograms to (B, 128, 128, 1) in [0, 1].
        x = tf.cast(tf.reshape(train_x[0], [BATCH_SIZE] + list(DIMS)), tf.float32) / 255
        model.train_net(x)
    # test on holdout
    loss = []
    for batch, test_x in tqdm(
        zip(range(N_TEST_BATCHES), test_dataset), total=N_TEST_BATCHES
    ):
        x = tf.cast(tf.reshape(test_x[0], [BATCH_SIZE] + list(DIMS)), tf.float32) / 255
        loss.append(model.compute_loss(x))
    losses.loc[len(losses)] = np.mean(loss, axis=0)
    # plot results
    display.clear_output()
    # BUG FIX: `losses` is created above with only a 'recon_loss' column (this
    # is a plain AE, not a VAE), so the original format string's
    # losses.latent_loss lookup raised AttributeError on the first epoch.
    print(
        "Epoch: {} | recon_loss: {}".format(
            epoch, losses.recon_loss.values[-1]
        )
    )
    # NOTE(review): other calls pass N_Z as a third argument to
    # plot_reconstruction -- confirm whether it is optional here.
    plot_reconstruction(model, example_data)
    plt.plot(losses.recon_loss.values)
    plt.show()
save_loc = DATA_DIR / 'models' / network_type / DATASET_ID
ensure_dir(save_loc)
# +
# Save the entire model to a HDF5 file.
# The '.h5' extension indicates that the model should be saved to HDF5.
#model.save_weights((save_loc / (str(epoch).zfill(4))).as_posix())
# Recreate the exact same model, including its weights and the optimizer
#new_model = tf.keras.models.load_model('my_model.h5')
# -
model.load_weights('/local/home/tsainbur/github_repos/avgn_paper/data/models/AE/european_starling_gentner_segmented/0347')
z = model.encode(example_data).numpy()
# NOTE(review): this 2-value unpack only works for a 2-D latent space, but
# N_Z is 128 above -- np.max(z, axis=0) yields 128 values, so these lines will
# raise ValueError unless the loaded checkpoint used a 2-D latent.  Confirm.
xmax, ymax = np.max(z, axis=0)
xmin, ymin = np.min(z, axis=0)
# sample from grid
nx = ny =10
# Build a 10x10 grid of latent points spanning the encoded data's bounding box.
meshgrid = np.meshgrid(np.linspace(xmin, xmax, nx), np.linspace(ymin, ymax, ny))
meshgrid = np.array(meshgrid).reshape(2, nx*ny).T
x_grid = model.decode(meshgrid)
x_grid = x_grid.numpy().reshape(nx, ny, DIMS[0], DIMS[1], DIMS[2])
# fill canvas
# Tile the decoded spectrograms into one big image for display.
canvas = np.zeros((nx*DIMS[0], ny*DIMS[1]))
for xi in range(nx):
    for yi in range(ny):
        canvas[xi*DIMS[0]:xi*DIMS[0]+DIMS[0], yi*DIMS[1]:yi*DIMS[1]+DIMS[1]] = x_grid[xi, yi,:,:,:].squeeze()
fig, ax = plt.subplots(figsize=(10,10))
ax.matshow(canvas, cmap=plt.cm.Greys, origin = 'lower')
ax.axis('off')
# Round-trip a batch through the AE and eyeball value ranges/reconstructions.
recon = model.decode(model.encode(example_data))
np.max(np.squeeze(example_data.numpy()[0]))
np.max(np.squeeze(recon.numpy()[0]))
plt.matshow(np.squeeze(recon.numpy()[0]))
plot_reconstruction(model, example_data)
np.max(recon.numpy().flatten())
import tensorflow_probability as tfp
x = tf.random.normal(shape=(100, 2, 3))
y = tf.random.normal(shape=(100, 2, 3))
plot_reconstruction(model, example_data, N_Z)
# NOTE(review): the two lines below reference TF1-era APIs (tf.contrib was
# removed in TF 2.x) and undefined names y_pred / y_true -- they will raise
# at runtime and appear to be leftover scratch code.
tf.metrics.streaming_pearson_correlation
tf.contrib.metrics.streaming_pearson_correlation(y_pred, y_true)
| notebooks/06.0-neural-networks/starling_figs/.ipynb_checkpoints/Starling-AE-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sentiment Analysis Assessment - Solution
#
# ## Task #1: Perform vector arithmetic on your own words
# Write code that evaluates vector arithmetic on your own set of related words. The goal is to come as close to an expected word as possible. Please feel free to share success stories in the Q&A Forum for this section!
# Import spaCy and load the language library. Remember to use a larger model!
import spacy
nlp = spacy.load('en_core_web_md')
# Choose the words you wish to compare, and obtain their vectors
word1 = nlp.vocab['wolf'].vector
word2 = nlp.vocab['dog'].vector
word3 = nlp.vocab['cat'].vector
# +
# Import spatial and define a cosine_similarity function
from scipy import spatial
cosine_similarity = lambda x, y: 1 - spatial.distance.cosine(x, y)
# -
# Write an expression for vector arithmetic
# For example: new_vector = word1 - word2 + word3
new_vector = word1 - word2 + word3
new_vector.shape
# +
# List the top ten closest vectors in the vocabulary to the result of the expression above
computed_similarities = []
for word in nlp.vocab:
if word.has_vector:
if word.is_lower:
if word.is_alpha:
similarity = cosine_similarity(new_vector, word.vector)
computed_similarities.append((word, similarity))
computed_similarities = sorted(computed_similarities, key=lambda item: -item[1])
print([w[0].text for w in computed_similarities[:10]])
# -
# #### CHALLENGE: Write a function that takes in 3 strings, performs a-b+c arithmetic, and returns a top-ten result
def vector_math(a,b,c):
    """Return the ten vocabulary words whose vectors are closest (by cosine
    similarity) to vec(a) - vec(b) + vec(c).

    Only lowercase, alphabetic lexemes that actually carry a vector are
    considered.  Relies on the notebook-level `nlp` model and
    `cosine_similarity` helper.
    """
    target = nlp.vocab[a].vector - nlp.vocab[b].vector + nlp.vocab[c].vector
    scored = [
        (lex, cosine_similarity(target, lex.vector))
        for lex in nlp.vocab
        if lex.has_vector and lex.is_lower and lex.is_alpha
    ]
    # Highest similarity first; Python's sort is stable, so ties keep
    # vocabulary order exactly as before.
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [lex.text for lex, _ in scored[:10]]
# Test the function on known words:
vector_math('king','man','woman')
# ## Task #2: Perform VADER Sentiment Analysis on your own review
# Write code that returns a set of SentimentIntensityAnalyzer polarity scores based on your own written review.
# +
# Import SentimentIntensityAnalyzer and create an sid object
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# -
# Write a review as one continuous string (multiple sentences are ok)
review_neutral = 'This movie portrayed real people, and was based on actual events.'
review_negative = 'This movie was awful, the worst movie ever done !'
# Obtain the sid scores for your review
sid.polarity_scores(review_neutral)
# Obtain the sid scores for your review
sid.polarity_scores(review_negative)
# ### CHALLENGE: Write a function that takes in a review and returns a score of "Positive", "Negative" or "Neutral"
def review_rating(string):
    """Classify *string* as Positive / Negative / Neutral from its VADER
    compound score, returning e.g. 'Positive_0.6249'.

    Uses the notebook-level SentimentIntensityAnalyzer instance `sid`.
    """
    compound = sid.polarity_scores(string)['compound']
    if compound > 0:
        label = 'Positive'
    elif compound < 0:
        label = 'Negative'
    else:
        label = 'Neutral'
    # Same '{:2.4}' float formatting as the original output strings.
    return '{}_{:2.4}'.format(label, compound)
# Test the function on your review above:
review_rating(review_neutral)
review_rating(review_negative)
my_text = 'we love you'
review_rating(my_text)
# ### LEt's compare this with [transformers](https://huggingface.co/transformers/task_summary.html#sequence-classification) classification !
# +
from transformers import pipeline
nlp = pipeline("sentiment-analysis")
def print_transformer_sentiment_scores(nlp_pipe, phrase):
    """Print *phrase* (left-justified in a 20-char field) followed by the
    pipeline's predicted label and score rounded to 4 decimal places."""
    result = nlp_pipe(phrase)[0]
    print(f"{phrase:<{20}}\nlabel: {result['label']}, with score: {round(result['score'], 4)}")
# -
print_transformer_sentiment_scores(nlp, my_text)
print_transformer_sentiment_scores(nlp, review_neutral)
print_transformer_sentiment_scores(nlp, review_negative)
print_transformer_sentiment_scores(nlp, 'I hate you')
print_transformer_sentiment_scores(nlp, 'I love you')
# ## LEt's use the Transformers to check the movie dataset !
# +
import numpy as np
import pandas as pd
CONST_DATA_FILE = 'data/moviereviews.zip'
df = pd.read_csv(CONST_DATA_FILE, sep='\t', compression='zip', )
df.head()
# -
# REMOVE NaN VALUES AND EMPTY STRINGS:
df.dropna(inplace=True)
df.describe()
print(f"Number of empty reviews : {len(df[df['review'].str.strip()==u''])}")
df.drop(df[df['review'].str.strip()==u''].index, inplace=True)
df.describe()
# +
from transformers import pipeline
nlp = pipeline("sentiment-analysis")
def print_transformer_sentiment_scores(nlp_pipe, phrase):
result = nlp_pipe(phrase)[0]
print(f"{phrase:<{20}}\nlabel: {result['label']}, with score: {round(result['score'], 4)}")
# -
def get_transformer_sentiment_scores(nlp_pipe, phrase):
    """Run *nlp_pipe* on *phrase* and return the first result dict
    (keys 'label' and 'score')."""
    results = nlp_pipe(phrase)
    return results[0]
#df.iloc[0]['review']
df.iloc[0]
get_transformer_sentiment_scores(nlp, df.iloc[0]['review'])
df.head()
df['scores'] = None
df['sentiment'] =None
df.at[0,'scores'] = 0
df.head()
s='123456'
s[:3]
# +
# Score every review with the transformer pipeline and store the results.
# NOTE(review): the 5-name itertuples unpack depends on the DataFrame having
# exactly the columns (label, review, scores, sentiment) in this order --
# fragile if columns change.  Reviews are truncated to 500 characters,
# presumably to stay under the model's maximum sequence length; confirm.
for i,lb,review,score,sentiment in df.itertuples(): # iterate over the DataFrame
    result = get_transformer_sentiment_scores(nlp,review[:500]) # truncate review to first 500
    df.at[i,'scores'] = round(result['score'], 4)
    df.at[i,'sentiment'] = result['label']
df.head()
# -
# Map the transformer's POSITIVE/NEGATIVE labels to this dataset's pos/neg.
df['comp_score'] = df['sentiment'].apply(lambda c: 'pos' if c =='POSITIVE' else 'neg')
df.head()
# ### Perform a comparison analysis between the original label and comp_score¶
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
accuracy_score(df['label'],df['comp_score'])
# ##### we got an accuracy_score of 0.6357 with nltk VADER so the huggingface transformers is better here
print(classification_report(df['label'],df['comp_score']))
print(confusion_matrix(df['label'],df['comp_score']))
| 008-Sentiment-Analysis-nltk-VADER_vs_transformers-CGIL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
# !{sys.executable} -m pip install mrestimator
import numpy as np
import matplotlib.pyplot as plt
import mrestimator as mre
import sys
import os
sys.path.append('/Users/harangju/Developer/WiltingPriesemann2018/')
os.chdir('/Users/harangju/Downloads/data/')
os.makedirs('./output', exist_ok=True)
auto = mre.full_analysis(
data='/Users/harangju/Downloads/data/full.tsv',
targetdir='/Users/harangju/Downloads/data/output',
title='Full Analysis',
dt=4, dtunit='ms',
tmin=0, tmax=8000,
fitfuncs=['exp', 'exp_offs', 'complex'],
)
plt.show()
auto = mre.full_analysis(
data='/Users/harangju/Developer/wilting/beggs*.tsv',
targetdir='/Users/harangju/Developer/wilting/output',
title='Full Analysis',
dt=4, dtunit='ms',
tmin=0, tmax=8000,
fitfuncs=['exp', 'exp_offs', 'complex'],
)
plt.show()
auto = mre.full_analysis(
data='/Users/harangju/Developer/wilting/beggs4.tsv',
targetdir='/Users/harangju/Developer/wilting/output',
title='Full Analysis',
dt=1, dtunit='ms',
tmin=0, tmax=8000,
fitfuncs=['exp', 'exp_offs', 'complex'],
)
plt.show()
auto = mre.full_analysis(
data='/Users/harangju/Developer/avalanche paper data/mr estimation/activity/activity_i1_j1.txt',
targetdir='/Users/harangju/Developer/avalanche paper data/mr estimation/output',
title='Full Analysis',
dt=1, dtunit='ms',
tmin=0, tmax=100000,
fitfuncs=['exp', 'exp_offs', 'complex'],
)
plt.show()
# ## Example manual analysis
srcsub = mre.input_handler('/Users/harangju/Developer/activity_i5_j2.txt')
print('imported trials from wildcard: ', srcsub.shape[0])
oful = mre.OutputHandler()
oful.add_ts(srcsub)
rk = mre.coefficients(srcsub)
print(rk.coefficients)
print('this guy has the following attributes: ', rk._fields)
m = mre.fit(rk)
m.mre
# ## Full analysis
# m = np.zeros((16,12))
# Maximum activity per (i, j) recording; the per-cell MR-estimation fit that
# would fill `m` is commented out below.
mact = np.zeros((16,12))
for i in range(0,16):
    for j in range(0,12):
        fname = '/Users/harangju/Developer/avalanche paper data/mr estimation/activity/activity_i' +\
            str(i+1) + '_j' + str(j+1) + '.txt'
        act = np.loadtxt(fname)
        mact[i,j] = max(act)
# srcsub = mre.input_handler(fname)
# rk = mre.coefficients(srcsub, steps=(1,10000))
# me = mre.fit(rk)
# m[i,j] = me.mre
import scipy.io as sio
# NOTE(review): `m` is saved here, but its (16,12) initialization and the
# per-cell fits above are commented out -- as written this saves whatever `m`
# was bound to by the earlier single-recording fit cell.  Confirm intent.
sio.savemat('/Users/harangju/Developer/mre.mat',{'mre':m,'mact':mact})
| figures/supplement/sfig6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sparkify Project Workspace
# This workspace contains a tiny subset (128MB) of the full dataset available (12GB). Feel free to use this workspace to build your project, or to explore a smaller subset with Spark before deploying your cluster on the cloud. Instructions for setting up your Spark cluster are included in the last lesson of the Extracurricular Spark Course content.
#
# You can follow the steps below to guide your data analysis and model building portion of this project.
# +
# import libraries
import datetime
import seaborn as sns
import time
import matplotlib.pyplot as plt
import pandas as pd
import pyspark.sql.functions as F
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages "com.microsoft.ml.spark:mmlspark_2.11:1.0.0-rc1" --repositories "https://mmlspark.azureedge.net/maven" pyspark-shell'
from lightgbm import LGBMClassifier
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier, LogisticRegression, GBTClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import StandardScaler, OneHotEncoderEstimator, StringIndexer, VectorAssembler # PCA, IDF,
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.sql.functions import col, udf
from pyspark.sql import SparkSession, Window
from pyspark.sql.types import IntegerType, StringType, TimestampType
# -
# create a Spark session
# (the mmlspark package/repository configs mirror PYSPARK_SUBMIT_ARGS
# set in the import cell above)
spark = SparkSession.builder \
    .master("local") \
    .appName("Sparkify") \
    .config("spark.jars.packages", "com.microsoft.ml.spark:mmlspark_2.11:1.0.0-rc1") \
    .config("spark.jars.repositories", "https://mmlspark.azureedge.net/maven") \
    .getOrCreate()
# # Load and Clean Dataset
# In this workspace, the mini-dataset file is `mini_sparkify_event_data.json`. Load and clean the dataset, checking for invalid or missing data - for example, records without userids or sessionids.
events = spark.read.json("mini_sparkify_event_data.json")
# The userId contains values with an empty string. These entries need to be removed.
# events where a userId is an empty string are not valid, remove these
valid_events = events.where(col("userId") != "")
print("Number of total events: {}; number of valid events {}".format(events.count(), valid_events.count()))
print("Number of users: {}".format(valid_events.select("userId").distinct().count()))
# registration and ts can both be converted to timestamps
# (ts is in milliseconds since the epoch, hence the division by 1000)
get_date = udf(lambda x: datetime.datetime.fromtimestamp(x / 1000.0), TimestampType()) # udf to convert to timestamp/date
valid_events = valid_events.withColumn("log_date", get_date(col("ts"))) # date when the log entry was done
# add a column "churn" to the dataframe indicating that a cancellation was confirmed
find_churn = udf(lambda x: 1 if x == "Cancellation Confirmation" else 0, IntegerType())
valid_events = valid_events.withColumn("churn", find_churn(col("page")))
# average played-song length per user
user_length = valid_events.groupBy("userId").agg({"length": "mean"}) \
    .withColumnRenamed("avg(length)", "avg_length")
# some values can be set into relation to a certain time period, e.g. the duration the user is active
# hence we need to create a df with all users and their active time period
# first find the first and last log entry for each user and how much log entries exist per user (all actions)
time_df = valid_events.groupBy(["userId"]) \
    .agg(F.sum("churn").alias("churned"), F.min("log_date").alias("first_log"),
         F.max("log_date").alias("last_log"), F.count("page").alias("log_counts"), F.max("ts").alias("last_ts"))
# +
def get_time_difference(date_1, date_2):
    """Return the number of whole days between two datetimes.

    A span shorter than one full day counts as 1, so later per-day
    divisions never divide by zero.  Callers pass date_1 <= date_2.
    """
    day_span = (date_2 - date_1).days
    # clamp a same-day span (0 whole days) up to 1
    return day_span if day_span != 0 else 1
# create a udf for this function
get_time_difference_udf = udf(get_time_difference, IntegerType())
# active duration (days, at least 1) per user; rename the churn flag to "label"
time_df = time_df.withColumn("duration", get_time_difference_udf(col("first_log"), col("last_log"))) \
    .drop("first_log", "last_log").withColumnRenamed("churned", "label")
# -
# create a dummy dataframe where each action (About, Thumbs Up, ...) from page is a new column with the number
# how often this action appeared in the data for each user
dummy_df = valid_events.select("userId", "page").groupBy("userId").pivot("page") \
    .count().drop("Cancel", "Cancellation Confirmation")
# fill null values
dummy_df = dummy_df.na.fill(0)
# most recent subscription level per user
# NOTE(review): F.first after orderBy is not guaranteed deterministic in
# Spark aggregations -- verify this reliably picks the latest level.
user_level = valid_events.orderBy("log_date", ascending=False).groupBy("userId").agg(F.first("level").alias('valid_level'))
user_gender = valid_events.select(["userId", "gender"]).distinct()
# calculate the total amount of days the user listened to music
songs_per_date = valid_events.withColumn("date", F.to_date(col("log_date"))).where(col("page") == "NextSong") \
    .groupBy(["userId", "date"]).agg(F.lit(1).alias("played_music"))
songs_per_day = songs_per_date.groupBy("userId").agg(F.sum("played_music").alias("music_days"))
# join user_df (time_df, dummy_df) with user_level and gender_level and user_length
df = time_df.join(dummy_df, on="userId").join(user_level, on="userId") \
    .join(user_gender, on="userId").join(user_length, on="userId").join(songs_per_day, on="userId")
def divide_columns_by(df, columns, value, appendix):
    """Add, for every column in *columns*, a derived column holding its
    ratio to the *value* column.

    Each new column is named "<original>_<appendix>"; the source columns
    are kept and the augmented DataFrame is returned.
    """
    for source_name in columns:
        derived_name = f"{source_name}_{appendix}"
        df = df.withColumn(derived_name, col(source_name) / col(value))
    return df
# page actions (plus music_days) to be normalized per day / per log entry
cols_to_divide = ['music_days', 'About', 'Add Friend', 'Add to Playlist', 'Downgrade', 'Error', 'Help', 'Home',
                  'Logout', 'NextSong', 'Roll Advert', 'Save Settings', 'Settings', 'Submit Downgrade',
                  'Submit Upgrade', 'Thumbs Down', 'Thumbs Up', 'Upgrade']
df_per = divide_columns_by(df, cols_to_divide, "duration", "per_day")
df_model = divide_columns_by(df_per, cols_to_divide, "log_counts", "per_log")
df_model.printSchema()
# ## Selecting Numerical Features
# load feature importance from csv
# NOTE(review): feature_selection_df.csv is produced outside this
# notebook -- confirm it matches the feature set built above.
num_feature_df = pd.read_csv("feature_selection_df.csv").drop("Unnamed: 0", axis=1)
num_feature_df.head()
# +
# creating a list of features which do not contain any duplicates e.g. not both Thumbs Down_per_log and Thumbs Down
important_num_features = list(num_feature_df[num_feature_df["Total"] >= 2]["Feature"])
unique_num_features = []
final_num_features = []
for idx, f in enumerate(important_num_features):
    # strip the normalization suffix to detect duplicated base features
    if f.find("_per_day") > -1:
        f_stripped = f.replace("_per_day", "")
    elif f.find("_per_log") > -1:
        f_stripped = f.replace("_per_log", "")
    else:
        f_stripped = f
    # check if feature is already in or not
    if f_stripped not in unique_num_features:
        unique_num_features.append(f_stripped)
        final_num_features.append(f)
print("Final {} numerical features selected by feature importance: \n{}".format(len(final_num_features), final_num_features))
# -
categorical_columns = ["gender", "valid_level"]
columns_for_modeling = ["label"] + categorical_columns + final_num_features
df_model = df_model.select(*columns_for_modeling)
df_model.printSchema()
# # Modeling
# Split the full dataset into train, test, and validation sets. Test out several of the machine learning methods you learned. Evaluate the accuracy of the various models, tuning parameters as necessary. Determine your winning model based on test accuracy and report results on the validation set. Since the churned users are a fairly small subset, I suggest using F1 score as the metric to optimize.
# no userId should be in here!
# stratified sampling: keep 80% of non-churned (label 0) and 50% of
# churned (label 1) rows in the training set
train = df_model.sampleBy("label", fractions={0: 0.8, 1: 0.5}, seed=42) # randomSplit([0.8, 0.2], seed=42)
test = df_model.subtract(train) # for balancing
# +
# handle categorical columns in pipeline
indexers = []
for cat in categorical_columns:
    indexers.append(StringIndexer(inputCol = cat, outputCol = "{}_indexed".format(cat)))
encoder = OneHotEncoderEstimator(inputCols=[indexer.getOutputCol() for indexer in indexers],
                                 outputCols=["{}_encoded".format(indexer.getOutputCol()) for indexer in indexers])
# +
# numerical_cols = [col.name for col in df.schema.fields if col.dataType != StringType()]
# numerical_cols.remove("label")
# numerical_cols
# +
numeric_assembler = VectorAssembler(inputCols=final_num_features, outputCol="numeric_vectorized")
#Lets scale the data
scaler = StandardScaler(inputCol = "numeric_vectorized", outputCol = "numeric_scaled", withStd = True, withMean = True)
#create final VectorAssembler to push data to ML models
assembler = VectorAssembler(inputCols=["numeric_scaled"] + encoder.getOutputCols(), outputCol="features")
# -
# ## Random Forest Model
model_rf = RandomForestClassifier(featuresCol="features", labelCol="label")
# full pipeline: index -> one-hot encode -> assemble -> scale -> model
pipeline_rf = Pipeline(stages= indexers + [encoder, numeric_assembler, scaler, assembler, model_rf])
# +
paramGrid_rf = ParamGridBuilder() \
    .addGrid(model_rf.numTrees, [20,75]) \
    .addGrid(model_rf.maxDepth, [10, 20]) \
    .build()
crossval_rf = CrossValidator(estimator=pipeline_rf,
                             estimatorParamMaps=paramGrid_rf,
                             evaluator=MulticlassClassificationEvaluator(metricName="f1"),
                             numFolds=3) # here you can set parallelism parameter
# +
#start_time = time.time()
#cvModel_rf = crossval_rf.fit(train)
#end_time = time.time()
#print("Fitting the model took {} s.".format(round(end_time - start_time,2)))
# -
# ## Logistic Regression
# +
# Initiate log regression model
model_lr = LogisticRegression()
# Make pipeline for lr
pipeline_lr = Pipeline(stages= indexers + [encoder, numeric_assembler, scaler, assembler, model_lr])
# Grid Search Params
paramGrid_lr = ParamGridBuilder() \
    .addGrid(model_lr.maxIter, [10, 20]) \
    .addGrid(model_lr.elasticNetParam, [0, 0.5]) \
    .addGrid(model_lr.regParam,[0.1, 1]) \
    .build()
crossval_lr = CrossValidator(estimator=pipeline_lr,
                             estimatorParamMaps=paramGrid_lr,
                             evaluator=MulticlassClassificationEvaluator(metricName="f1"),
                             numFolds=3) # here you can set parallelism parameter
# -
# ## GBTC Classifier
# +
# Initiate gradient-boosted trees model
model_gbtc = GBTClassifier()
# Make pipeline for gbtc
pipeline_gbtc = Pipeline(stages= indexers + [encoder, numeric_assembler, scaler, assembler, model_gbtc])
# Grid Search Params
paramGrid_gbtc = ParamGridBuilder() \
    .addGrid(model_gbtc.maxIter, [10, 12]) \
    .build()
crossval_gbtc = CrossValidator(estimator=pipeline_gbtc,
                               estimatorParamMaps=paramGrid_gbtc,
                               evaluator=MulticlassClassificationEvaluator(metricName="f1"),
                               numFolds=3) # here you can set parallelism parameter
# -
# ## LightGBM: either use the one from mmlspark or LGBMClassifier
#
# +
# Initiate
# from mmlspark.lightgbm import LightGBMClassifier
#model_lgbm = LGBMClassifier()
# Make pipeline for lr
#pipeline_lgbm = Pipeline(stages= indexers + [encoder, numeric_assembler, scaler, assembler, model_lgbm])
# Grid Search Params
#paramGrid_lgbm = ParamGridBuilder() \
# .addGrid(model_lgbm.learningRate, [0.1, 0.5]) \
# .addGrid(model_lgbm.numLeaves, [31, 76]) \
# .addGrid(model_lgbm.numIterations, [100]) \
# .addGrid(model_lgbm.objective, ["binary"]) \
# .build()
#crossval = CrossValidator(estimator=pipeline_lgbm,
# estimatorParamMaps=paramGrid_lgbm,
# evaluator=MulticlassClassificationEvaluator(metricName="f1"),
# numFolds=3)
# -
def fit_crossval(crossval, train):
    """Fit *crossval* on *train*, print the wall-clock fitting time,
    and return the fitted cross-validation model."""
    started_at = time.time()
    fitted_model = crossval.fit(train)
    elapsed = round(time.time() - started_at, 2)
    print("Fitting the model took {} s.".format(elapsed))
    return fitted_model
def evaluate_model(model, train, test, metric = 'f1'):
    """Score a fitted pipeline model on the train and test splits.

    Parameters
    ----------
    model : fitted PipelineModel used to transform/predict.
    train, test : Spark DataFrames with a "label" column.
    metric : evaluator metric name (default "f1").

    Returns
    -------
    tuple(float, float)
        (score_train, score_test) as computed by
        MulticlassClassificationEvaluator.
    """
    # init evaluator
    evaluator = MulticlassClassificationEvaluator(metricName = metric)
    # make predictions
    prediction_result_train = model.transform(train)
    prediction_result_test = model.transform(test)
    # calculate the scores
    score_train = evaluator.evaluate(prediction_result_train)
    score_test = evaluator.evaluate(prediction_result_test)
    print("{} score on training data is {}".format(metric, score_train))
    print("{} score on test data is {}".format(metric, score_test))
    # Tree-based estimators expose featureImportances; others (e.g.
    # LogisticRegression) raise AttributeError instead.
    try:
        print(model.stages[-1].featureImportances)
        # parameters of the model
        print(model.extractParamMap())
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; catch Exception so only real errors are reported.
    except Exception:
        print("Could not derive feature importance from model.")
    return score_train, score_test
# # Fit Models and Evaluate
# +
# todo: cache() something here?
crossval_dict = {}
# NOTE(review): "Random-Forst" is a typo, but it is a runtime dict key
# that also appears in the printed summary, so it is left unchanged here.
crossval_dict["Random-Forst"] = crossval_rf
crossval_dict["Logistic Regression"] = crossval_lr
crossval_dict["GBTC"] = crossval_gbtc
result_dict = {}
if len(crossval_dict) > 1:
    # worth caching: each cross-validation re-reads train/test many times
    train.cache()
    test.cache()
for cv_key in crossval_dict:
    # fit the model
    print("Fitting {} model ...".format(cv_key))
    cv_model = fit_crossval(crossval_dict[cv_key], train)
    # evaluate it
    score_train, score_test = evaluate_model(cv_model.bestModel, train, test)
    print(score_train, score_test)
    result_dict[cv_key] = [score_train, score_test]
print("-------------Result Summary--------------")
print(result_dict)
# -
| .ipynb_checkpoints/Sparkify_model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Classification using `sklearn.svm`
# +
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib notebook
from sklearn import svm, metrics, datasets
from sklearn.utils import Bunch
from sklearn.model_selection import GridSearchCV, train_test_split
from skimage.io import imread
from skimage.transform import resize
# -
# ### Load images in structured directory like it's sklearn sample dataset
def load_image_files(container_path, dimension=(400, 300)):
    """Load every image under *container_path*, one subdirectory per class.

    Each image is resized to *dimension*, flattened to 1-D, and labelled
    with the index of its subdirectory.  Returns ``[features, labels]``
    as two numpy arrays.
    """
    samples = []
    class_labels = []
    class_dirs = [entry for entry in Path(container_path).iterdir() if entry.is_dir()]
    for class_idx, class_dir in enumerate(class_dirs):
        for image_path in class_dir.iterdir():
            raw = plt.imread(image_path)
            scaled = resize(raw, dimension, anti_aliasing=True, mode='reflect')
            # progress/debug output, one line per image
            print(class_idx, class_dir, raw.shape, scaled.shape)
            samples.append(scaled.flatten())
            class_labels.append(class_idx)
    return [np.array(samples), np.array(class_labels)]
# (a sklearn-style Bunch(data=..., target=...) could be returned instead,
# but callers below index the list by position)
image_dataset = load_image_files("images/")
# ### Split data
# X_train, X_test, y_train, y_test = train_test_split(
#     image_dataset[0], image_dataset[1], test_size=0.3,random_state=109)
# NOTE(review): the hold-out split above is commented out, so the grid
# search fits on ALL images and the report below is on training data.
X_train, y_train = image_dataset[0], image_dataset[1]
# ### Train data with parameter optimization
# try a linear and an RBF kernel over a small grid of C / gamma values
param_grid = [
    {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
    {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
print("Training svm")
svc = svm.SVC()
print("Getting optimal param")
#clf = svm.SVC(gamma='auto')
clf = GridSearchCV(svc, param_grid,cv=3)
print("fitting model")
clf.fit(X_train, y_train)
# ### Predict
# +
import skimage
# Prepare two held-out samples the same way the training images were
# prepared: take the first frame, resize to 400x300, flatten.
pred_0 = resize(np.load("squirtle_hold_short.npy")[0,:,:,:], (400,300), anti_aliasing=True, mode='reflect').flatten()
pred_2 = resize(np.load("bulbasaur_hold_short1.npy")[0,:,:,:], (400,300), anti_aliasing=True, mode='reflect').flatten()
# -
# BUG FIX: the original referenced undefined `pred_1` (NameError); the
# two samples prepared above are pred_0 and pred_2.
X_test = np.array([pred_0, pred_2])
# NOTE(review): predictions are made on the training set (the report
# below compares against y_train); X_test is otherwise unused here.
y_pred = clf.predict(X_train)
# y_pred_2 = clf.predict(pred_2)
print(y_pred)
# 1 - charmander
# 0 - squirtle
# 2 - bulbasaur
# ### Report
# +
# Training-set report: y_pred was computed on X_train above.
print("Classification report for - \n{}:\n{}\n".format(
    clf, metrics.classification_report(y_train, y_pred)))
print("Confusion matrix")
conf = metrics.confusion_matrix(y_train, y_pred)
# accuracy = trace / total of the confusion matrix
acc = np.diag(conf).sum()/conf.sum()
conf, acc
# -
test_dataset = load_image_files("test/")
X_rot_test, y_rot_test = test_dataset[0], test_dataset[1]
print("loaded")
y_rot_pred = clf.predict(X_rot_test)
print(y_rot_pred)
print("Confusion matrix")
conf_rot = metrics.confusion_matrix(y_rot_test, y_rot_pred)
acc_rot = np.diag(conf_rot).sum()/conf_rot.sum()
conf_rot, acc_rot
# NOTE(review): the lines below duplicate the evaluation just done
# (same inputs, same outputs) and could be removed.
X_rot_test, y_rot_test = test_dataset[0], test_dataset[1]
print("loaded")
y_rot_pred = clf.predict(X_rot_test)
print("Confusion matrix")
conf_rot = metrics.confusion_matrix(y_rot_test, y_rot_pred)
acc_rot = np.diag(conf_rot).sum()/conf_rot.sum()
conf_rot, acc_rot
| Image Classification using scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this script the data is smoothed after the PCs are constructed
# importing tools
import s3fs
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import math
#for removing warnings
import warnings
def open_s3fs(path):
    """Open all files under s3://data/<path>/ from the project bucket as
    one combined xarray Dataset, renaming the unnamed data variable to
    'vel'."""
    bucket = s3fs.S3FileSystem(
        anon=False,
        client_kwargs={
            'endpoint_url': 'https://karen.uiogeo-apps.sigma2.no'
        })
    remote_paths = bucket.glob(f's3://data/{path}/*')
    handles = [bucket.open(remote) for remote in remote_paths]
    # combine the per-file datasets along their shared coordinates
    combined = xr.open_mfdataset(handles, combine='by_coords',compat='override')
    # the data variable comes in without a name; give it a meaningful one
    return combined.rename({'__xarray_dataarray_variable__' : 'vel'})
dset = open_s3fs('PC-data')
dset
# Smoothen data, over every tenth point in both x and y direction, for all the data
# sub-region of the model grid used throughout this notebook
vvar = dset.vomecrty.isel(y=slice(900,1800), x=slice(200,1400))
uvar = dset.vozocrtx.isel(y=slice(900,1800), x=slice(200,1400))
uv = dset.vel.isel(y=slice(900,1800), x=slice(200,1400))
# +
# Getting the bottom velocity
# assumes mbathy counts wet levels, so mbathy-1 indexes the deepest
# wet level -- TODO confirm against the model grid documentation
d = dset.mbathy.isel(y=slice(900,1800), x=slice(200,1400)).values-1
d[d==-1] = 0  # land points fall back to the surface level
d = xr.DataArray(d, dims=['y','x'])
uvar_b = uvar.isel(depth=d)
vvar_b = vvar.isel(depth=d)
uv_b = uv.isel(depth=d)
# +
# List of x,y elements
dx = 20
dy = 20
scale = 2e3  # visual scaling of the ellipse axes for plotting
jmax, imax = uv_b.shape
i_list = np.arange(0,imax,dx)
j_list = np.arange(0,jmax,dy)
# Making the covmatrix and finding the eigenvalues and eigenvectors
# ----------------------------------------------------------------------
y = []
x = []
#ellipser:
theta_l = []
minor_l = []
major_l = []
indx = 0
# to get the loop to run faster
uvy = np.array(uv_b.y.values)
uvx = np.array(uv_b.x.values)
uvarb = np.array(uvar_b.values)
vvarb = np.array(vvar_b.values)
uvb = np.array(uv_b.values)
for i in i_list:
    for j in j_list:
        # 2x2 covariance matrix of (u, v) at this grid point
        covm = np.array([[uvarb[j,i],uvb[j,i]],
                         [uvb[j,i],vvarb[j,i]]],dtype=float)
        if np.isnan(covm).any():
            continue
        else:
            # Find and sort eigenvalues and eigenvectors into descending order
            values, vectors = np.linalg.eig(covm)
            order = values.argsort()[::-1]
            values, vectors = values[order], vectors[:, order]
            # center of ellipse
            y.append(uvy[j])
            x.append(uvx[i])
            # ellipser :
            # axis lengths (sqrt of variance along each principal axis),
            # scaled for plotting
            major = np.sqrt(values[0])*scale
            minor = np.sqrt(values[1])*scale
            major_l.append(major)
            minor_l.append(minor)
            #major_info.append(values[0])
            # The anti-clockwise angle to rotate our ellipse by
            # (stored in DEGREES -- note the 180/pi conversion below)
            vx, vy = vectors[:,0][0], vectors[:,0][1]
            theta = np.arctan2(vy, vx)*(180/math.pi)
            theta_l.append(theta)
            indx = indx + 1
# converting to np.arrays
y = np.array(y,dtype=float)
x = np.array(x,dtype=float)
# +
from matplotlib.patches import Ellipse
fig, ax = plt.subplots(1,1,figsize=(15,15))
n = 2
plt.title(f'PC of the current at bottom')
# bathymetry contours as spatial reference behind the ellipses
ax.contour(dset.mbathy[900:1800,200:1400], colors='grey',levels=8, linewidths=0.4)
# draw every second ellipse to keep the figure readable
for i in range(int(len(x)/2)):
    ellipse = Ellipse((x[i*2],y[i*2]),
                      width = major_l[i*2],
                      height = minor_l[i*2],
                      # BUG FIX: theta_l was already converted to degrees
                      # (via 180/pi) when it was filled, and Ellipse's
                      # `angle` expects degrees -- the original multiplied
                      # by 180/pi a second time, over-rotating every ellipse.
                      angle = theta_l[i*2],
                      facecolor = 'none',
                      edgecolor='k')
    ax.add_patch(ellipse)
#ax.plot((expr[::n]+xe[::n]),(eypr[::n]+ye[::n]),'r',(exmr[::2]+xe[::2]),(eymr[::n]+ye[::n]),'r')
ax.set_aspect('equal')
plt.show()
# -
# ### Figure of the bottom currents in the Arctic ocean.
#
# +
# open the data with s3fs from the databucket
fsg = s3fs.S3FileSystem(anon=False,
                        client_kwargs={
                            'endpoint_url': 'https://karen.uiogeo-apps.sigma2.no'
                        })
data_path1 = 's3://velocity-u.zarr/'
data_path2 = 's3://velocity-v.zarr/'
# NOTE(review): remote_files1/2 are computed but never used below
remote_files1 = fsg.glob(data_path1)
remote_files2 = fsg.glob(data_path2)
store1 = s3fs.S3Map(root=data_path1, s3=fsg, check=False)
store2 = s3fs.S3Map(root=data_path2, s3=fsg, check=False)
du = xr.open_zarr(store=store1, consolidated=True)
du = du.rename({'depthu' : 'depth'})  # unify depth dimension names across u/v
dv = xr.open_zarr(store=store2, consolidated=True)
dv = dv.rename({'depthv' : 'depth'})
# -
u = du.vozocrtx.isel(y=slice(900,1800), x=slice(200,1400))
v = dv.vomecrty.isel(y=slice(900,1800), x=slice(200,1400))
# speed from the two velocity components
vel = np.sqrt(u**2 + v**2)
vel.shape
# +
# List of x,y elements
dx = 20
dy = 20
scale = 2e3
jmax, imax = uv_b.shape
i_list = np.arange(0,imax,dx)
j_list = np.arange(0,jmax,dy)
# Making the covmatrix and finding the eigenvalues and eigenvectors
# ----------------------------------------------------------------------
#ellipser:
theta_l = np.zeros((900,1200,73))
minor_l = np.zeros((900,1200,73,2))
major_l = np.zeros((900,1200,73,2))
indx = 0
# to get the loop to run faster
uvy = np.array(uv_b.y.values)
uvx = np.array(uv_b.x.values)
uvarb = np.array(uvar_b.values)
vvarb = np.array(vvar_b.values)
uvb = np.array(uv_b.values)
for i in i_list:
    for j in j_list:
        # FIXME(review): work in progress -- this line is a syntax error
        # (`dyp` is undefined and the parentheses are unbalanced); the
        # cell cannot run until the intended np.cov call is written.
        covm = np.cov(dyp.),bias=True)
        if np.isnan(covm).any():
            continue
        else:
            # Find and sort eigenvalues and eigenvectors into descending order
            values, vectors = np.linalg.eig(covm)
            order = values.argsort()[::-1]
            values, vectors = values[order], vectors[:, order]
            # center of ellipse
            # FIXME(review): uvy/uvx hold coordinate VALUES (floats);
            # using them directly as array indices below will fail --
            # presumably j/i themselves were intended.
            y = uvy[j]
            x = uvx[i]
            major_l[y,x,:] = vectors[:,0]
            minor_l[y,x,:] = vectors[:,1]
            # The anti-clockwise angle to rotate our ellipse by
            vx, vy = vectors[:,0][0], vectors[:,0][1]
            theta = np.arctan2(vy, vx)*(180/math.pi)
            theta_l[y,x] = theta
            indx = indx + 1
# converting to np.arrays
y = np.array(y,dtype=float)
x = np.array(x,dtype=float)
# -
# NOTE(review): major_l/minor_l were allocated 4-D above
# ((900,1200,73,2)) but only three dims are named here -- confirm the
# intended shapes before merging.
vec_data = xr.merge([xr.DataArray(major_l, name='major_axis', dims = ['y', 'x', 'eig']), xr.DataArray(minor_l, name='minor_axis', dims = ['y', 'x', 'eig'])])
major_l.shape
# # Attempt
# +
x=820
y=200
major_l[y,x]
minor_l[y,x]
# FIXME(review): unfinished cell -- `vel[]` is a syntax error and the
# for-loop has no body; this "Attempt" section cannot run as written.
for i in vel[]:
# -
| .ipynb_checkpoints/PCz_coord_bunnellipse-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# # German Sign Classification
# The aim of the project is to classify various road signs into correct categories.
# ### 1. Importing Libraries and Loading Data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import pathlib
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score
from matplotlib import pyplot
from tensorflow.keras.preprocessing import image
from tensorflow import keras
from tensorflow.keras import models
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
# +
train_path = '../Dataset/Train'
data_dir = '../Dataset'
test_path = '../Dataset/Test'
# Number of Classes (one subfolder per traffic-sign class)
NUM_CATEGORIES = len(os.listdir(train_path))
NUM_CATEGORIES
# -
# Showing the first image of each category
img_dir = pathlib.Path(train_path)
plt.figure(figsize=(14,14))
index = 0  # NOTE(review): unused
for i in range(NUM_CATEGORIES):
    plt.subplot(7, 7, i+1)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # first image found in the folder for class i
    sign = list(img_dir.glob(f'{i}/*'))[0]
    img = load_img(sign)
    plt.imshow(img)
plt.show()
# +
# The number of images in each category
datadic = {}
for folder in os.listdir(train_path):
    datadic[folder] = len(os.listdir(train_path + '/' + folder))
data_df = pd.Series(datadic)
plt.figure(figsize=(14,7))
data_df.plot(kind = 'bar')
plt.xlabel('Classes')
plt.ylabel('No. of images')
# -
# ### 2. Preparing the data for the model
# Function to load the images and labels into two lists
def load_data(data_dir):
    """Load every training image as a 30x30 array with its class label.

    *data_dir* must contain one subfolder per class, named "0" through
    str(NUM_CATEGORIES - 1).  Returns (images, labels) as parallel
    Python lists.
    """
    pixel_arrays = []
    class_labels = []
    for class_id in range(NUM_CATEGORIES):
        class_folder = os.path.join(data_dir, str(class_id))
        for file_name in os.listdir(class_folder):
            loaded = load_img(os.path.join(class_folder, file_name), target_size=(30, 30))
            pixel_arrays.append(img_to_array(loaded))
            class_labels.append(class_id)
    return pixel_arrays, class_labels
# Build the full training arrays and one-hot encode the labels
images, labels = load_data(train_path)
labels = to_categorical(labels)
# Split into training and validation data
# NOTE(review): no random_state is passed, so the split (and the
# reported scores) is not reproducible between runs -- confirm intent.
x_train, x_val, y_train, y_val = train_test_split(np.array(images), labels, test_size=0.2)
# scale pixel values from [0, 255] to [0, 1]
x_train/=255
x_val/=255
x_train
print('x_train shape:',x_train.shape)
print('Number of images in x_train: ',x_train.shape[0])
print('x_val shape:', x_val.shape)
print('Number of images in x_val: ',x_val.shape[0])
# ### 3. Building the model
# +
model = models.Sequential()
# Input layer
model.add(Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=(30,30,3)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
# 1st hidden layer
model.add(Conv2D(filters=128, kernel_size=3, activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
# 2nd hidden layer
model.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
# Flattening
model.add(Flatten())
model.add(Dense(units=64, activation='relu'))
#Softmax layer for output
model.add(Dense(NUM_CATEGORIES,activation='softmax'))
# NOTE(review): learn_rate and epochs are defined but never used --
# compile() uses default Adam settings and fit() below runs 20 epochs.
learn_rate = 0.001
epochs = 30
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# -
# Fit the model on the training and validation data
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=20)
# accuracy curves for the training and validation splits
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# ### Observations:
# - A Dropout rate of 0.25 in each layer of the model gave a higher accuracy than at 0.3.
# - The val_accuracy peaks in the 19th epoch.
# ## Running the model on Test image
# ### Loading the test image
test_img = load_img(os.path.join(test_path,'00000.png'), target_size=(30, 30))
plt.imshow(test_img)
n = np.array(test_img)
# BUG FIX: the training/validation inputs were scaled by 1/255 above,
# so the test image must be scaled identically before prediction --
# feeding raw 0-255 pixel values gives out-of-distribution inputs.
img_arr = n.reshape(1, 30, 30, 3) / 255
y_pred = model.predict(img_arr).argmax()
print("The predicted category of the test image is:", y_pred)
# Checking the predicted category in the Training set
# +
category_path = os.path.join(train_path, str(y_pred))
# Display the first 5 images in the category decided by the model in the training set to verify
for i, file in enumerate(os.listdir(category_path)[0:5]):
    fullpath = category_path + "/" + file
    img=load_img(fullpath)
    # subplot indices are 1-based
    plt.subplot(2, 3, i+1)
    plt.imshow(img)
# -
# ### Conclusions:
# - The model accurately classifies the test image in the correct category as seen above.
# - The model gave an accuracy of 99.16% after 20 epochs.
| GTSRB (German traffic sign recognition benchmark) Dataset/Model/German_traffic_sign_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyHC Newsletter: July 2019
#
# ## Introduction
# On the July 12th PyHC telecon, <NAME> suggested we initiate a newsletter for PyHC, that could be distributed or linked via community mailing lists including CEDAR and AGU-SPA.
# We thought we'd start with a Jupyter Notebook, as a common feature of the PyHC Newsletter could be plots and examples from PyHC packages.
# This will help introduce a wide audience to PyHC packages, as well as motivate the PyHC authors to create and document interesting plots.
#
# ## Package spotlight
# This month's spotlight includes a geomagnetic indices retriever and plotter, that covers a time span from 1930 to 2035 and computes smoothed indices for Ap, Kp and f10.7.
# These indices are distributed via various resources, and this package downloads, caches and presents the indices as
# [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html#dataframe).
#
# The
# [Geomag-Indices package](https://github.com/space-physics/geomagindices) may be installed by:
# ```sh
# pip install geomagindices
# ```
# and used as follows [](https://mybinder.org/v2/gh/space-physics/newsletter/master?filepath=https%3A%2F%2Fgithub.com%2Fspace-physics%2Fnewsletter%2Fblob%2Fmaster%2F2019-07.ipynb)
# +
# Fetch geomagnetic indices for a single timestamp
import geomagindices as gi
date = '2015-01-28 12:30'
inds = gi.get_indices(date)
# -
print(inds)
# [Ranges of dates](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases) may be specified
import pandas
# hourly timestamps over roughly five weeks
dates = pandas.date_range('2015-01-25 12:30', '2015-03-02', freq='H')
inds = gi.get_indices(dates)
# %matplotlib inline
inds.plot()
# ### Package ecosystem
# GeomagIndices uses the `[options.data_files]` section of
# [setup.cfg](https://github.com/space-physics/geomagindices/blob/master/setup.cfg) that requires
# [setuptools >= 40.5](https://setuptools.readthedocs.io/en/latest/history.html#v40-5-0).
# We use pyproject.toml to specify this requirement.
# This enacts a temporary install of the requested prerequisites, ensuring that a sufficiently new version of setuptools is installed before attempting the final install of geomagindices.
# The setup_requires field of setup.cfg or setup.py is not recommended, including because an internet connection would be required with setup_requires.
#
# In general, it is better from the security, stability and long-term reproducibility aspects to specify package metadata as declarative (pyproject.toml, setup.cfg) whenever possible instead of imperative (setup.py).
# In our experience, most packages including in PyHC could switch at least some of their metadata to setup.cfg and pyproject.toml to help meet these goals.
# Alternative non-setuptools build backends such as
# [Poetry](https://poetry.eustace.io/)
# and
# [Flit](https://flit.readthedocs.io)
# discard much of the distutils/setuptools cruft left over from the early era of scientific Python and help avoid the arbitrary install code in setup.py.
# In our experience, setup.py is a frequent source of problems in PyHC packages, especially for cross-platform / OS-agnostic or non-CPython interpreters.
# ## Workshop / Conference updates
# The CEDAR 2019 workshop in Santa Fe, New Mexico was held in June. Over 100 scientists of all career stages including students attended the four software-oriented sessions:
#
# * [Heliophysics Hackathon](https://cedarweb.vsp.ucar.edu/wiki/index.php/2019_Workshop:Hackathon) (4+ hours)
# * [Software Engineering for Heliophysics](https://cedarweb.vsp.ucar.edu/wiki/index.php/2019_Workshop:Software_Engineering_for_Heliophysics)
# * [Python for Space Science](https://cedarweb.vsp.ucar.edu/wiki/index.php/2019_Workshop:Python_for_Space_Science)
# * [InGeo Python-Docker Resen](https://cedarweb.vsp.ucar.edu/wiki/index.php/InGeOWorkshop_2019)
#
# Each of these sessions had 30+ people in attendance and included at least some hands-on information that was usable the same day, if not in the session.
# A key question was how to make the person-to-person networking more sustainable between workshops, perhaps the perennial question of any conference.
#
# The GEM workshop ran the week after CEDAR in Santa Fe and at least one session there was Python-oriented.
#
# August 2019 brings the
# [SHINE conference](https://shinecon.org/CurrentMeeting.php)
# and we look forward to hearing the results of <NAME>'s PyHC session.
#
| 2019-07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import json
import glob
import numpy as np
import matplotlib.pyplot as plt
# +
import matplotlib
# NOTE(review): 'normal' is not a valid font *family* name; matplotlib
# falls back with findfont warnings -- 'sans-serif' is likely intended.
font = {'family' : 'normal',
        'size'   : 22}
matplotlib.rc('font', **font)
# shared RGBA colors and marker size for the plots below
gray = (167/255, 168/255, 170/255, 1)
red = (179/255, 27/255, 27/255, 1)
blue = (0,47/255, 108/255,1)
markersize=20
# -
# # Mean Plots
# ls ../../../jlonevae/trainedModels/*/*/metrics
# combinedOutputs is the path from the root directory to the JL1-VAE models.
# If unzipping pre-trained models you may need to change this path.
# This path works for models trained and evaluated with
# ./experimentScripts/train_jlonevae/train_threeDots.bash
# ./experimentScripts/evaluate_jlonevae/evaluate_threeDots.bash
# which stores models and evaluations in directories like:
# ./trainedModels/defaultConv_lone_beta4_0000_ica0_1000_lat10_batch64_lr0_0001_anneal100000/20210604-014949/representation
combinedOutputs = "trainedModels"
# If unzipping pre-trained models you may need to change this path.
# For example, if you download "trainedThreeDotJL1Models.zip" and "trainedThreeDotJL2Models.zip"
# and unzip them into the directory "trainedThreeDotModelsBatches",
# then you should use filepath:
# NOTE(review): this assignment overrides the "trainedModels" default above.
combinedOutputs = "trainedThreeDotModelsBatches"
# Gather evaluation results
# NOTE(review): evaluation_result_template is defined but the glob below
# builds its own pattern inline.
evaluation_result_template = "{}/metrics/{}/results/aggregate/evaluation.json"
experiment_output_path = f"../../{combinedOutputs}"
# One figure per metric; each curve is one model configuration averaged
# over training runs, with a +/- 2 standard-deviation band.
for metric, metvalname in [("local_mig","evaluation_results.local_discrete_migs_samples"),
                           ("local_modularity","evaluation_results.local_modularity_scores_samples")]:
    for latdim in ["10"]:
        f = plt.figure(figsize=(10,8))
        bind = 1
        beta = "4"
        # (experiment directory, line color, legend label, linestyle)
        experiment_names = [
            (f"defaultConv_lone_beta4_0000_ica0_0000_lat10_batch64_lr0_0001_anneal100000", "black", "beta-VAE","-"),
            (f"defaultConv_lone_beta4_0000_ica0_0500_lat10_batch64_lr0_0001_anneal100000", red, "JL1-VAE(0.05)","--"),
            (f"defaultConv_lone_beta4_0000_ica0_1000_lat10_batch64_lr0_0001_anneal100000", red, "JL1-VAE(0.1)","-."),
            (f"defaultConv_lone_beta4_0000_ica0_2000_lat10_batch64_lr0_0001_anneal100000", red, "JL1-VAE(0.2)",(0, (3, 5, 1, 5, 1, 5))),
            (f"defaultConv_lone_beta4_0000_ica0_4000_lat10_batch64_lr0_0001_anneal100000", red, "JL1-VAE(0.4)",(0, (3, 5, 1, 5, 1, 5, 1, 5))),
            (f"defaultConv_jltwo_beta4_0000_ica0_0500_lat10_batch64_lr0_0001_anneal100000", blue, "JL2-VAE(0.05)",":"),
            (f"defaultConv_jltwo_beta4_0000_ica0_1000_lat10_batch64_lr0_0001_anneal100000", blue, "JL2-VAE(0.1)",(0, (1,5))),
            (f"defaultConv_jltwo_beta4_0000_ica0_2000_lat10_batch64_lr0_0001_anneal100000", blue, "JL2-VAE(0.2)",(0, (1,1, 1, 5))),
            (f"defaultConv_jltwo_beta4_0000_ica0_4000_lat10_batch64_lr0_0001_anneal100000", blue, "JL2-VAE(0.4)",(0, (1,1,1,1,1,5)))
        ]
        data = []
        for experiment_name, color, metlabel, linestyle in experiment_names:
            evaluation_filepaths = glob.glob(f"{experiment_output_path}/{experiment_name}/*/metrics/{metric}*/results/aggregate/evaluation.json")
            for ind, filename in enumerate(evaluation_filepaths):
                # NOTE(review): this if/else is effectively a filter --
                # `continue` skips runs whose path contains "2021060";
                # the else branch is a no-op.
                if "2021060" in filename:
                    continue
                else:
                    pass#print(filename)
                #print(filename,label)
                evaluation_results = json.loads(
                    open(filename, "r").read())  # NOTE(review): file handle is never closed
                locality = float(evaluation_results["evaluation_config.local_sample_factors.locality_proportion"])
                met_samps = evaluation_results[metvalname]
                data.append((metlabel, locality, np.mean(met_samps)))
                #if bind == 0 and ind == 0 and enind < 2:
                #    plt.plot(locality, np.mean(met_samps), markerfacecolor=(1, 1, 1, 0), markeredgecolor=color, markeredgewidth=2, marker=marker,label=label, markersize=markersize,linestyle="None" )
                #else:
                #    plt.plot(locality, np.mean(met_samps), markerfacecolor=(1, 1, 1, 0), markeredgecolor=color, markeredgewidth=2, marker=marker, markersize=markersize,linestyle="None" )
            import scipy.stats
            allLocalities = [0.1, 0.2, 0.3, 0.6, 1.0]
            avgMetVals = []
            lowMetVals = []
            highMetVals = []
            for desiredLocality in allLocalities:
                # all runs of this configuration at this locality radius
                metvals = [metval for (label, locality, metval) in data if label == metlabel and locality == desiredLocality]
                print(len(metvals), metlabel)
                avgMetVals.append(np.mean(metvals))
                # band is mean +/- 2 standard deviations across runs
                highMetVals.append(np.mean(metvals) + 2 * np.std(metvals))
                lowMetVals.append(np.mean(metvals) - 2 * np.std(metvals))
                #print(scipy.stats.ttest_ind(betametvals, lirjmetvals))
            plt.plot(allLocalities, avgMetVals, color=color,label=metlabel, linestyle=linestyle)
            plt.fill_between(allLocalities, lowMetVals, highMetVals, color=color, alpha=.1)
        plt.ylabel(metric.replace("_"," ").replace("mig","MIG"))
        plt.xlabel("Locality radius (fraction of factor range)");
        plt.xlim(-0.05,1.05)
        plt.legend()
        plt.tight_layout()
        f.savefig(f"L1L2_threeDots_{metric}_varyingRho.png")
        plt.show();plt.close();
| experimentScripts/visualizations/AppendixLinePlotMIGandModularityL1VsL2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Final Report
# ## Introduction
# We were tasked with finding the "best" neighborhood in Pittsburgh using data from the WPRDC. Frankly, we browsed the website individually for data sets. After we each found one, they just happened to come together perfectly. We came across many different data sets - from air quality to green areas, but we ultimately decided against those data sets.
# ## The Metric
# We used two data sets: 'public safety' and 'fire incidents'. We combined these into the general metric of "safety". In general, the data sets described the amount and type of crime and fire-related incidents in each neighborhood, respectively.
# ## The Best Neighborhood
# First, we copy and compress all the code from each individual notebook and show the individual graphs for each metric.
# +
# Fire-incident frequency per neighborhood, plotted as a percentage of all
# incidents city-wide.
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
incidents = pd.read_csv("fireIncidents.csv")
# Drop incidents that are not associated with a neighborhood.
# Fix: reset_index(drop='true') passed the *string* 'true' (truthy, so it
# happened to work); the parameter is a real boolean.
incidents=incidents.dropna(subset=['neighborhood']).reset_index(drop=True)
rel_incidents=incidents[['type_description','neighborhood']]
count_incidents=rel_incidents['neighborhood'].value_counts()
plt.figure(figsize=(20,10)) # extra room for all the neighborhoods
#count_incidents.plot(kind='bar')
#graph with data as percentages
test=pd.DataFrame(count_incidents)
test=100*test/test.sum()
test.plot(kind='bar',figsize=(20,10),title="Frequency of Fire-related Incidents in Pittsburgh by Neighborhood")
# +
# Weighted crime score per neighborhood: each report category is scaled by a
# severity weight, summed per row, and expressed as a percentage of the total.
crimes = pd.read_csv("public-safety.csv")
del crimes["Sector #"]
del crimes["Population (2010)"]
del crimes["Part 1 Crime per 100 Persons (2010)"]
del crimes["Part 2 Crime per 100 Persons (2010)"]
#major crimes are weighted twice as heavily
crimes["#Part 1 (Major Crime) Reports (2010)"]=crimes["#Part 1 (Major Crime) Reports (2010)"]*2
#Non major crime reports stay as is (redundant to write here)
crimes["#Part 2 Reports (2010)"]=crimes["#Part 2 Reports (2010)"]*1
#other police reports are only weighted 0.6 as much
crimes["#Other Police Reports (2010)"]=crimes["#Other Police Reports (2010)"]*0.6
#murder reports are weighted as 3 times as important
crimes["#Murder (2010)"]=crimes["#Murder (2010)"]*3
#rape is similarly weighted 3 times as important
crimes["#Rape (2010)"]=crimes["#Rape (2010)"]*3
#robbery is moved down to 0.6 times as important
crimes["#Robbery (2010)"]=crimes["#Robbery (2010)"]*0.6
#Agr. Assult will be weighted by 1.5
crimes["#Agr. Assault (2010)"]=crimes["#Agr. Assault (2010)"]*1.5
#Burglary will remain the same at 1
crimes["#Burglary (2010)"]=crimes["#Burglary (2010)"]*1
#Auto theft will be slightly higher than burglary at 1.5
crimes["#Auto Theft (2010)"]=crimes["#Auto Theft (2010)"]*1.5
#Drug violations will be weighted at only 0.5
crimes["#Drug Violations (2010)"]=crimes["#Drug Violations (2010)"]*0.5
crime_totals = pd.DataFrame([],
                            index=[crimes['Neighborhood']],
                            columns=['Total Crime'])
# Row-wise sum of the weighted counts.
total=crimes.sum(axis=1)
# Fix: replaced the manual while-loop of chained `.iloc` assignments (slow,
# and a chained-assignment pattern newer pandas warns about / ignores) with a
# single positional column assignment -- `total` and `crime_totals` share row
# order by construction.
crime_totals['Total Crime'] = total.values
crime_totals=crime_totals.sort_values(by=['Total Crime'], ascending=False)
#crime_totals.plot.bar(figsize=(20,10),stacked=True)
#graph of percentages
crime_totals=100*crime_totals/crime_totals.sum()
crime_totals.plot(kind='bar',figsize=(20,10),title="Crime Frequency in Pittsburgh by Neighborhood")
# -
# Align the crime totals with the fire-incident table: both frames are sorted
# by neighborhood name so the columns can be combined positionally below.
crime_totals=crime_totals.sort_values('Neighborhood')
crime_totals
test=test.rename(columns={"neighborhood":"Total Incidents"}).rename_axis("Neighborhood").sort_values('Neighborhood')
test
test=test.reset_index()
# NOTE(review): the crime percentages are attached purely by position after
# both frames are sorted -- this silently misaligns if the two data sets do
# not cover exactly the same neighborhoods; a merge on the name would be safer.
num=pd.Series(crime_totals['Total Crime'].tolist())
test['Total Crime']=num
test['Avg']=''
test=test.set_index('Neighborhood')
test.head()
# +
# Simple unweighted mean of the two percentage metrics per neighborhood.
for i in test.index:
    test.loc[i,'Avg']=(test.loc[i,'Total Incidents']+test.loc[i,"Total Crime"])/2
test.head()
# -
avg=test[["Avg"]]
# Drops the final row -- presumably an unmatched/NaN entry; confirm.
avg=avg.drop(avg.tail(1).index)
avg=avg.sort_values(["Avg"])
avg.plot(kind="bar",figsize=(20,10))
# So, by combining our data sets into a single metric, we see that Central Business District ranks the lowest among the neighborhoods in Pittsburgh when it comes to safety. The best neighborhoods are Mt. Oliver, East Carnegie, and Regent Square since they are all tied for the lowest proportion of incidents.
# ## Conclusion
#
# Luis: I would be inclined to say that South Oakland is the best neighborhood in Pittsburgh because that is that one that I know the best. I have never heard of several of these neighborhoods, so it would be impossible to compare their data-driven rank to any that I would assign to them. However, seeing that South Oakland is among the top 50% of the neighborhoods does somewhat agree with my personal view of it.
| .ipynb_checkpoints/Final_report-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# ## Demonstration of parallel MCMC
#
# This notebook shows how to run distributed/parallel MCMC parameter inference. We use the same dataset and recover similar results to [the main demo notebook](/songbird.ipynb).
#
# **Important Note:** We set `config[:num_threads] = 2` on this dataset for demonstration. For a dataset this small, adding additional threads is risky — as we describe in our paper, the parallel MCMC routine we use introduces boundary effects, because there is no data sharing across threads. Parallel inference is intended for very large datasets (e.g. containing hours of data). In this regime, the boundary effects are extremely minor. However, on this small demo dataset they may not be.
# +
# Import PPSeq
import PPSeq
const seq = PPSeq
# Other Imports
import PyPlot: plt
import DelimitedFiles: readdlm
import Random
import StatsBase: quantile
# Songbird metadata
num_neurons = 75
max_time = 22.2
# Randomly permute neuron labels.
# (This hides the sequences, to make things interesting.)
_p = Random.randperm(num_neurons)
# Load spikes.
spikes = seq.Spike[]
for (n, t) in eachrow(readdlm("data/songbird_spikes.txt", '\t', Float64, '\n'))
push!(spikes, seq.Spike(_p[Int(n)], t))
end
# -
# ## Plot a spike raster
fig = seq.plot_raster(spikes; color="k") # returns matplotlib Figure
fig.set_size_inches([7, 3]);
# ## Specify PP-Seq model
# +
# PP-Seq model hyperparameters and MCMC settings (passed to construct_model /
# easy_sample! below).
config = Dict(
    # Model hyperparameters
    :num_sequence_types => 2,
    :seq_type_conc_param => 1.0,
    :seq_event_rate => 1.0,
    :mean_event_amplitude => 100.0,
    :var_event_amplitude => 1000.0,
    :neuron_response_conc_param => 0.1,
    :neuron_offset_pseudo_obs => 1.0,
    :neuron_width_pseudo_obs => 1.0,
    :neuron_width_prior => 0.5,
    :num_warp_values => 1,
    :max_warp => 1.0,
    :warp_variance => 1.0,
    :mean_bkgd_spike_rate => 30.0,
    :var_bkgd_spike_rate => 30.0,
    :bkgd_spikes_conc_param => 0.3,
    :max_sequence_length => Inf,
    # MCMC Sampling parameters.
    # NOTE(review): the markdown intro says num_threads is set to 2 for this
    # small demo, but the value here is 6 -- confirm which is intended.
    :num_threads => 6, # <--- This is the key parameter to include if you want to run parallel MCMC
    :num_anneals => 10,
    :samples_per_anneal => 100,
    :max_temperature => 40.0,
    :save_every_during_anneal => 10,
    :samples_after_anneal => 2000,
    :save_every_after_anneal => 10,
    :split_merge_moves_during_anneal => 0, # SPLIT / MERGE not implemented for distributed MCMC
    :split_merge_moves_after_anneal => 0, # SPLIT / MERGE not implemented for distributed MCMC
    :split_merge_window => 1.0,
);
# -
# ## Train PPSeq model
# +
# Initialize all spikes to background process.
init_assignments = fill(-1, length(spikes))
# Construct model struct (PPSeq instance).
model = seq.construct_model(config, max_time, num_neurons)
# Run Gibbs sampling with an initial annealing period.
results = seq.easy_sample!(model, spikes, init_assignments, config);
# -
# ## Plot results
# +
# Grab the final MCMC sample
final_globals = results[:globals_hist][end]
final_events = results[:latent_event_hist][end]
final_assignments = results[:assignment_hist][:, end]
# Helpful utility function that sorts the neurons to reveal sequences.
neuron_ordering = seq.sortperm_neurons(final_globals)
# Plot model-annotated raster.
fig = seq.plot_raster(
spikes,
final_events,
final_assignments,
neuron_ordering;
color_cycle=["red", "blue"] # colors for each sequence type can be modified.
)
fig.set_size_inches([7, 3]);
# -
| demo/songbird-distributed-mcmc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Note: I love this video! It is a great intro/review for beginner Python.
#
# Python By Immersion : https://www.youtube.com/watch?v=9k5687mmnoc
# +
#using list comprehension,
#generate a list of multiples of 4 that are greater than 0
#and less than 420
# Multiples of 4 in (0, 420) are exactly range(4, 420, 4).
my_list = list(range(4, 420, 4))
print(my_list)
# +
#using list comprehension,
#generate a list of multiples of 5
#that are greater than 0 and less than 310
#that are *not* multiples of 10
# Fix: the original filtered only on x % 10 != 0, so it returned every
# non-multiple-of-10 integer instead of just the multiples of 5.
my_list = [x for x in range(1, 310) if x % 5 == 0 and x % 10 != 0]
print(my_list)
# +
#Build a list called odd_list of the first 1000 positive odd numbers
#print the last 10 elements of this list
# The first 1000 positive odd numbers are 1, 3, ..., 1999.
odd_list = list(range(1, 2000, 2))
print(len(odd_list))
print(odd_list[-10:])
# -
#Build a list called even_list of the first 1000 positive even numbers
#print the first 10 elements of this list
# Fix: the original reused the odd-number filter (x % 2 != 0), producing odd
# numbers; the first 1000 positive even numbers are 2, 4, ..., 2000.
even_list = list(range(2, 2001, 2))
print(len(even_list))
print(even_list[:10])
# +
#Build a list of the first 200 elements of fizz buzz, which I'm calling a fizz buzz list
def fizzbuzz(n):
    """Return 'fizzbuzz'/'fizz'/'buzz' for multiples of 15/3/5, else n itself."""
    if n % 3 == 0 and n % 5 == 0:
        return 'fizzbuzz'
    elif n % 3 == 0:
        return 'fizz'
    elif n % 5 == 0:
        return 'buzz'
    else:
        return n

# Fix: the result was stored in a variable named `list`, which shadowed the
# builtin `list` type for the rest of the notebook.
fizzbuzz_list = [fizzbuzz(n) for n in range(0, 200)]
print(fizzbuzz_list)
# +
# make a list of the first 100 prime numbers greater than zero
# +
# what does prime mean? Divisible by 1 and itself.
#1 is defined as not prime. 2 is defined as prime.
# write it as a function!!
def is_prime(number):
    """Return True iff `number` is prime.

    Fix: the original returned True for every integer < 1 (e.g. is_prime(-4)
    and is_prime(0)) because range(2, number) is empty for small inputs.
    Also only trial-divides up to sqrt(number) instead of number - 1.
    """
    if number < 2:
        return False
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
# -
is_prime(-4)
# +
# make a list of the first 100 prime numbers greater than zero
#version 1
#Once you know where to stop, now can do a list comprehension using your function is_prime()
# The 100th prime is 541, so scanning up to 544 yields exactly 100 primes.
primes = list(filter(is_prime, range(1, 545)))
print(primes)
# +
# make a list of the first 100 prime numbers greater than zero
#version 2
#this looks like a function, but it is actually a generator!!
def number_generator():
    """Yield the positive integers 1, 2, 3, ... indefinitely."""
    value = 1
    while True:
        yield value
        value += 1

num_gen = number_generator()
primes = []
# Pull candidates from the generator until 100 primes have been collected.
for candidate in num_gen:
    if is_prime(candidate):
        primes.append(candidate)
        if len(primes) == 100:
            break
print(primes)
# -
| Day2/list comprehensions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# # Extract car2db vehicles
# +
# Pull every trim's specifications out of the car2db MySQL schema, pivot to
# one row per trim with one column per specification, parse the raw string
# values into numerics, and dump the result to trucks_db.xlsx.
import mysql.connector
import matplotlib.pyplot as plt
import pandas as pd
cnx = mysql.connector.connect(user='romain', password='<PASSWORD>',
                              host='localhost',
                              database='truck')
cursor = cnx.cursor()
year = 2015  # NOTE(review): unused below -- confirm whether a year filter was intended
# IDs of all trims that have at least one specification value.
query_ID = ("SELECT DISTINCT ct.id_car_trim "
            "FROM car_make c "
            "INNER JOIN car_model cm ON cm.id_car_make = c.id_car_make "
            "INNER JOIN car_trim ct ON ct.id_car_model = cm.id_car_model "
            "INNER JOIN car_specification_value csv ON csv.id_car_trim = ct.id_car_trim "
            "INNER JOIN car_specification cs ON cs.id_car_specification = csv.id_car_specification "
            )
cursor.execute(query_ID)
l_ID = [l[0] for l in cursor.fetchall()]
cnx.close()
cnx = mysql.connector.connect(user='romain', password='<PASSWORD>',
                              host='localhost',
                              database='truck')
cursor = cnx.cursor()
# One row per (trim, specification) pair for the trims found above.
query = ("SELECT ct.id_car_trim, ctype.name, c.name, cm.name, ct.name, cs.name, csv.value "
         "FROM car_make c "
         "INNER JOIN car_type ctype ON ctype.id_car_type = c.id_car_type "
         "INNER JOIN car_model cm ON cm.id_car_make = c.id_car_make "
         "INNER JOIN car_trim ct ON ct.id_car_model = cm.id_car_model "
         "INNER JOIN car_specification_value csv ON csv.id_car_trim = ct.id_car_trim "
         "INNER JOIN car_specification cs ON cs.id_car_specification = csv.id_car_specification "
         "WHERE ct.id_car_trim IN (" + ','.join(map(str, l_ID)) + ")"
         )
cursor.execute(query)
df = pd.DataFrame( [[ij for ij in i] for i in cursor.fetchall()])
df.rename(columns={0:'ID', 1:'Type', 2: 'Make name', 3: 'Model name', 4:'Trim', 5:'Spec', 6:'Spec value'}, inplace=True)
cnx.close()
# Long -> wide: one column per specification name.
df = df.pivot_table(index=['ID', 'Type', 'Make name', 'Model name', 'Trim'], columns='Spec', values='Spec value', aggfunc='first')
# Parse the raw spec strings: keep the first value of ranges ("a-b", "a/b"),
# strip parenthesised remarks, then cast to float.
df['Curb weight'] = df['Curb weight'].astype('float64')
df['Curb vehicle weight'] = df['Curb weight'].astype('float64')
df['Engine power'] = df['Engine power'].astype('float64')
df['Useful volume'] = df['Useful volume'].str.extract('(\d+)').astype('float64')
df['Length'] = df['Length'].str.split(pat="-").str[0]
df['Length'] = df['Length'].astype('float64')
df["The length of the car"] = df["The length of the car"].str.extract('(\d+)').astype('float64')
df['Tractor width'] = df['Tractor width'].astype('float64')
df['Height'] = df['Height'].str.split(pat="-").str[0]
df["The height of the car"] = df["The height of the car"].str.extract('(\d+)').astype('float64')
#df['Wheelbase'] = df['Wheelbase'].str.extract('(\d+)').astype('float64')
df['Front track'] = df['Front track'].str.split(pat="/").str[0]
df['Front track'] = df['Front track'].astype('float64')
df['Rear track'] = df['Rear track'].str.split(pat="/").str[0]
df['Rear track'] = df['Rear track'].str.replace(" ","")
df['Rear track'] = df['Rear track'].astype('float64')
df['Payload'] = df['Payload'].str.split(pat="-").str[0]
df['Payload'] = df['Payload'].str.split(pat="(").str[0]
df['Payload'] = df['Payload'].str.split(pat="/").str[0]
df['Payload'] = df['Payload'].astype('float64')
df["The width of the car"] = df["The width of the car"].str.extract('(\d+)').astype('float64')
df["Curb weight of the trailer"] = df["Curb weight of the trailer"].astype('float64')
df["Length of the trailer"] = df["Length of the trailer"].str.split(pat="-").str[0]
df["Length of the trailer"] = df["Length of the trailer"].str.split(pat="+").str[0]
df["Length of the trailer"] = df["Length of the trailer"].str.replace(",",".")
df["Length of the trailer"] = df["Length of the trailer"].astype('float64')
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat="x").str[0]
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat="+").str[0]
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat=",").str[0]
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat="(").str[0]
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat="or").str[0]
# NOTE(review): .str[1] yields NaN for every value that contains no "*" --
# this wipes out most fuel-tank data; confirm whether .str[0] was intended.
df["Fuel tank capacity"] = df["Fuel tank capacity"].str.split(pat="*").str[1]
df["Fuel tank capacity"] = df["Fuel tank capacity"].astype('float64')
df["The full weight of the bus"] = df["The full weight of the bus"].astype('float64')
df["Gross vehicle weight"] = df["Gross vehicle weight"].str.extract('(\d+)').astype('float64')
df["Gross vehicle weight"] = df["Gross vehicle weight"].astype("float64")
df["Payload"] = df["Payload"].astype("float64")
# Bounding-box volume in m^3 (dimensions are stored in mm).
# Fix: the original multiplied the width twice instead of width * height * length.
df['Volume'] = (df['The width of the car'] * df['The height of the car'] * df['The length of the car']) / 1e9
df = df.reset_index()
df.to_excel("trucks_db.xlsx")
# -
# # Or load the data from an Excel file, if the SQL db is not accessible
# Load the previously exported spec table and split it by vehicle type.
df = pd.read_excel("trucks_db.xlsx")
df_bus = df.loc[df["Type"]=="Buses"]
df_medium = df.loc[df["Type"]=="Medium Trucks"]
df_heavy = df.loc[df["Type"]=="Heavy-Duty Tractors"]
df_trailer = df.loc[df["Type"]=="Trailers"]
# +
import numpy as np
df_bus.plot("Engine power", "Curb weight", kind="scatter")
ind = df_bus.loc[(~df_bus["Engine power"].isnull())&(~df_bus["Curb weight"].isnull()),"Engine power"]
vals = df_bus.loc[(~df_bus["Engine power"].isnull())&(~df_bus["Curb weight"].isnull()),"Curb weight"]
z = np.polyfit(ind, vals, 2)
f = np.poly1d(z)
# calculate new x's and y's
x_new = np.linspace(100, 450, 50)
y_new = f(x_new)
plt.plot(x_new, y_new, linestyle="-", color="orange")
plt.title("Buses")
# +
df_bus.plot("Curb weight", "The full weight of the bus", kind="scatter")
ind = df_bus.loc[(~df_bus["The full weight of the bus"].isnull())&(~df_bus["Curb weight"].isnull()),"Curb weight"]
vals = df_bus.loc[(~df_bus["The full weight of the bus"].isnull())&(~df_bus["Curb weight"].isnull()),"The full weight of the bus"]
z = np.polyfit(ind, vals, 2)
f = np.poly1d(z)
# calculate new x's and y's
x_new = np.linspace(2000, 17500, 50)
y_new = f(x_new)
plt.plot(x_new, y_new, linestyle="-", color="orange")
plt.title("Buses: curb weight vs. gross weight")
# -
for i,p in zip(df_heavy.count().index,
df_heavy.count()):
if p!= 0: print(i,p)
df_heavy["Environmental standard"].hist()
df_medium.plot("Gross vehicle weight", "Payload", kind="scatter")
plt.title("Trucks (incl.trailer): Driving mass vs. payload")
# # Trucks
for i,p in zip(df_medium.count().index,
df_medium.count()):
if p!= 0: print(i,p)
# Curb weight
df_medium = df_medium.sort_values('Gross vehicle weight')
bins = [3000, 4000, 6000, 8000, 15000, 20000, 24000, 28000, 38000, 42000, 55000, 65000]
ind = np.digitize(df_medium['Gross vehicle weight'],bins)
# Curb weight = Gross weight - payload
df_medium["Curb weight"] = df_medium["Gross vehicle weight"] - df_medium["Payload"]
# Median and quantiles of **curb weight** by gross weight interval (3.5, 7.5, 18, 26, 40 and 60t)
# +
ax=(df_medium.groupby(ind)[["Curb weight"]].describe().iloc[[1,3, 5, 7, 9, 11]]["Curb weight"].T\
.iloc[-5:].T/1000).plot(linewidth=0, marker="_", markersize=20)
ax.set_ylim(0,)
ax.set_xlim(-.5,11.5)
ax.grid()
ax.set_ylabel("Curb mass [ton]")
ax.set_xticks([1,3, 5, 7, 9, 11])
ax.set_xticklabels(["3.5t", "7.5t", "18t", "26t", "40t", "60t"])
#
ax1 = ax.twinx()
data = df_medium.groupby(ind)[["Curb weight"]].count().iloc[[1,3, 5, 7, 9, 11]]
ax1.bar([1,3, 5, 7, 9, 11], data["Curb weight"].values, label="count", width=.2, alpha=.5)
ax1.set_ylabel("Count")
ax1.legend(loc="center left")
# -
df_medium.groupby(ind)["Curb weight"].describe().iloc[[1,3, 5, 7, 9, 11]]
# Power-to-mass ratio
df_medium["power-to-mass ratio"] = ((df_medium["Engine power"]/df_medium["Curb weight"])*1000)
df_medium.groupby(ind)["power-to-mass ratio"].describe().iloc[[1,3, 5, 7, 9, 11]]
# +
ax=df_medium.groupby(ind)[["power-to-mass ratio"]].describe().iloc[[1,3, 5, 7, 9, 11]]["power-to-mass ratio"].T\
.iloc[-5:].T.plot(linewidth=0, marker="_", markersize=20)
ax.set_ylim(0,)
ax.set_xlim(-.5,11.5)
ax.grid()
ax.set_ylabel("Power-to-mass ratio [W/kg]")
ax.set_xticks([1,3, 5, 7, 9, 11])
ax.set_xticklabels(["3.5t", "7.5t", "18t", "26t", "40t", "60t"])
#
ax1 = ax.twinx()
data = df_medium.groupby(ind)[["power-to-mass ratio"]].count().iloc[[1,3, 5, 7, 9, 11]]
ax1.bar([1,3, 5, 7, 9, 11], data["power-to-mass ratio"].values, label="count", width=.2, alpha=.5)
ax1.set_ylabel("Count")
# -
# Frontal area = width * height (we assume a flat front!)
df_medium["frontal area"] = (df_medium["The width of the car"]*df_medium["The height of the car"]) / 1e6
df_medium.groupby(ind)["frontal area"].describe().iloc[[1,3, 5, 7, 9, 11]]
# +
ax=df_medium.groupby(ind)[["frontal area"]].describe().iloc[[1,3, 5, 7, 9, 11]]["frontal area"].T\
.iloc[-5:].T.plot(linewidth=0, marker="_", markersize=20)
ax.set_ylim(0,)
ax.set_xlim(-.5,11.5)
ax.grid()
ax.set_ylabel("Frontal area [m$_2$]")
ax.set_xticks([1,3, 5, 7, 9, 11])
ax.set_xticklabels(["3.5t", "7.5t", "18t", "26t", "40t", "60t"])
#
ax1 = ax.twinx()
data = df_medium.groupby(ind)[["frontal area"]].count().iloc[[1,3, 5, 7, 9, 11]]
ax1.bar([1,3, 5, 7, 9, 11], data["frontal area"].values, label="count", width=.2, alpha=.5)
ax1.set_ylabel("Count")
# -
# Payload
df_medium.groupby(ind)["Payload"].describe().iloc[[1,3, 5, 7, 9, 11]]
df_medium[["Curb weight of the trailer", "Curb weight", "Curb vehicle weight", ]].sum()
# Fuel tank capacity
# NOTE(review): `df_trucks` is not defined anywhere in this notebook's visible
# cells (only df_bus / df_medium / df_heavy / df_trailer exist) -- the next
# two statements raise NameError as written; `df_medium` was probably intended.
df_trucks[["Curb weight of the trailer", "Curb weight", "Curb vehicle weight", ]].sum()
df_trucks.plot("Gross vehicle weight", "Payload", kind="scatter")
# +
ax=(df_medium.groupby(ind)[["Payload"]].describe().iloc[[1,3, 5, 7, 9, 11]]["Payload"].T\
.iloc[-5:].T/1000).plot(linewidth=0, marker="_", markersize=20)
ax.set_ylim(0,)
ax.set_xlim(-.5,11.5)
ax.grid()
ax.set_ylabel("Payload [ton]")
ax.set_xticks([1,3, 5, 7, 9, 11])
ax.set_xticklabels(["3.5t", "7.5t", "18t", "26t", "40t", "60t"])
#
ax1 = ax.twinx()
data = df_medium.groupby(ind)[["Payload"]].count().iloc[[1,3, 5, 7, 9, 11]]
ax1.bar([1,3, 5, 7, 9, 11], data["Payload"].values, label="count", width=.2, alpha=.5)
ax1.set_ylabel("Count")
# -
# Non-null fuel-tank values (most are NaN after the parsing above).
df.loc[~df["Fuel tank capacity"].isnull(), "Fuel tank capacity"]
# NOTE(review): `df_trucks` and `df_2` are not defined in this notebook's
# visible cells -- the statements below raise NameError; confirm which frames
# (df_medium? df?) were intended.
df_trucks.plot("Gross vehicle weight", "Engine power", kind="scatter")
plt.title("Trucks (incl.trailer): Driving mass vs. engine power")
df_2.loc[(df_2["Type"] == "Buses")&(~df_2["Length"].isnull()),'Length'].hist()
# ### Trailers
df_trailer.loc[~df_trailer["Payload"].isnull()].to_excel("trailers.xlsx")
df_trailer.loc[
((df_trailer["Type of trailer"]=="Semitrailer curtain")
|(df_trailer["Type of trailer"]=="Trailer side"))
&(~df_trailer["Payload"].isnull())
]
df_trailer["Type of trailer"].value_counts()
| dev/car2db - truck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Unidad 2 - Lección 1: *['L', 'I', 'S', 'T', 'A', 'S']*
#
# Conociendo las Listas: Operaciones sobre listas
# +
# create a list of numbers named list_numbers & print the data type
# -
# Referencia a la misma lista
# +
# to use the = operator over list_numbers into list_copy & change the first position
# +
# print values of both lists
# +
# print id of both lists
# -
# Copiar una lista (método #1)
# +
# use copy function & change the first position
# +
# print values of both lists
# +
# print id of both lists
# -
# Copiar una lista (método #2)
# +
# use [:] & change the first position
# +
# print values of both lists
# +
# print id of both lists
# -
# Insertar un nuevo elemento
# +
# use the insert function for a new element & print it out
# +
# use the remove function in the same list & print it out
# -
# Buscar un elemento en la lista
# +
# create a new list of names & print it out
# +
# use the function index for an existent element
# +
# use the function index for non-existent element
# -
# Convertir un string a una lista
# +
# create a word & print its type
# +
# convert string to list
| U2_L1_SC5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from sqlalchemy import create_engine
# +
# db_host = '172.16.17.32'
# username = 'dm_team'
# user_pass = '<PASSWORD>#'
# db_name = 'ITSM'
# conn = create_engine('mysql+pymysql://'+username+':'+user_pass+'@'+db_host)
# stmt="show databases" #"select * from information_schema.tables"
# df0=pd.read_sql(stmt,conn)
# df0
# +
# db_host = '172.16.17.32'
# username = 'dm_team'
# user_pass = '<PASSWORD>#'
# db_name = 'project_itsm'
# conn = create_engine('mysql+pymysql://'+username+':'+user_pass+'@'+db_host+'/'+db_name)
# stmt="select * from information_schema.tables"
# df0=pd.read_sql(stmt,conn)
# df0.tail()
# +
# stmt="show tables"
# df1=pd.read_sql(stmt,conn)
# df1.tail()
# +
# stmt="select * from dataset_list"
# df1=pd.read_sql(stmt,conn)
# df1.tail()
# -
# NOTE(review): `df1` is produced only by the SQL cells above, which are all
# commented out -- as written this line raises NameError; re-enable the query
# cell (or skip straight to reading ABCdata.csv) before running.
df1.to_csv("ABCdata.csv")
data=pd.read_csv("ABCdata.csv")
data.head(10)
data.shape
data.info()
# +
# data.isnull().sum()
# -
# Replace the 'NS' sentinel in Impact with a numeric value of 4.
data['Impact'].replace(to_replace='NS', value=4, inplace=True)
# data['impact'].bfill
data.head()
# Mean-impute missing priorities.
data['Priority'].fillna(data['Priority'].mean(),inplace=True)
data.head()
# Fill missing reopen times from the next, then the previous, observation.
data['Reopen_Time'].fillna(method='bfill',inplace=True)
data['Reopen_Time'].fillna(method='ffill',inplace=True)
# NOTE(review): the sorted result is neither assigned nor inplace -- this line
# has no lasting effect; confirm whether data = data.sort_values(...) was meant.
data.sort_values(['Reopen_Time'], ascending=True)
data.info()
| ABCtech.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import requests
url = 'http://bad'
requests.head(url)
def web_response_ok(url):
    """Return True when a HEAD request to *url* answers with HTTP 200."""
    try:
        response = requests.head(url)
    except Exception:
        # Any request failure (DNS, refused connection, timeout) counts as not-OK.
        return False
    else:
        return response.status_code == 200
def test_web():
    """Smoke-test every deployed App Engine route; fails on any non-200 answer."""
    # add to this list for every route in webapp/main.py
    pages = ['','ChooseTeams','UpcomingGames']
    project = os.environ.get('GCP_PROJECT_ID')
    for page in pages:
        assert web_response_ok(f'https://{project}.us.r.appspot.com/{page}') == True
test_web()
test_pages = ['','ChooseTeams','UpcomingGames'] # add to this list for every route in webapp/main.py
project = os.environ.get('GCP_PROJECT_ID')
for page in test_pages:
result = web_response_ok(f'https://{project}.us.r.appspot.com/{page}')
print(f'https://{project}.us.r.appspot.com/{page}')
#assert result == True
# +
import git
repo = git.Repo('.', search_parent_directories=True)
repo.working_tree_dir
# -
import requests
url = 'http://notavalidwebsite.com/'
try:
r = requests.head(url)
except:
print('false')
r.status_code
def web_response_ok(url):
    """Return True when a HEAD request to *url* answers with HTTP 200.

    Returns False if the request raises (DNS failure, refused connection,
    timeout, ...).
    """
    try:
        r = requests.head(url)
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit; catch Exception like the sibling definition above.
    except Exception:
        return False
    return r.status_code == 200
url = 'https://nba-predictions-dev.uc.r.appspot.com/'
web_response_ok(url)
# +
import pandas as pd
from google.cloud import firestore
from google.cloud import bigquery
from datetime import datetime, timezone
teams = {'HomeTeam':'Boston Celtics', 'AwayTeam':'Detroit Pistons', 'Model':'automl_regression'}
df = pd.DataFrame(teams, index=[0])
model = df['Model'][0]
# +
client = bigquery.Client()
inputs = client.query('''
SELECT
input
FROM
ML.FEATURE_INFO(MODEL `nba.%s`)
''' % (model)).to_dataframe()
# -
inputs
# +
db = firestore.Client()
home_team_data = db.collection('team_model_data').document(df['HomeTeam'][0]).get().to_dict()
away_team_data = db.collection('team_model_data').document(df['AwayTeam'][0]).get().to_dict()
# -
home_team_data
away_team_data
for key in home_team_data.keys():
datetime.now(timezone.utc)
if 'rest_days_difference' in inputs.values:
home_rest = (datetime.now(timezone.utc) - home_team_data['game_date']).days
away_rest = (datetime.now(timezone.utc) - away_team_data['game_date']).days
rest_days_difference = home_rest - away_rest
rest_days_difference
for column in inputs.input:
print (column)
inputs[inputs['input'] != 'is_home_team']
# +
# Assemble a BigQuery ML.PREDICT statement by emitting one "<value> as <col>"
# clause per feature the chosen model expects.
query = f'SELECT predicted_spread FROM ML.PREDICT(MODEL `nba.{model}`, (SELECT '
for column in inputs.input:
    # Strip the leading 'incoming_' (9 chars) to get the firestore field name.
    key = column[9:]
    if column == 'is_home_team':
        query = query + '1 as is_home_team,'
    elif column == 'rest_days_difference':
        home_rest = (datetime.now(timezone.utc) - home_team_data['game_date']).days
        away_rest = (datetime.now(timezone.utc) - away_team_data['game_date']).days
        rest_days_difference = home_rest - away_rest
        query = query + f'{rest_days_difference} as {column},'
    elif column == 'incoming_is_win_streak':
        query = query + f'{home_team_data["streak_counter_is_win"]} as {column},'
    elif (column == 'opponent_incoming_is_win_streak') | (column == 'incoming_is_win_streak_opponent'):
        query = query + f'{away_team_data["streak_counter_is_win"]} as {column},'
    elif column[:12] == 'incoming_wma':
        # NOTE(review): both branches below read away_team_data -- the
        # non-opponent branch almost certainly should use home_team_data;
        # confirm against the firestore schema before changing.
        if column.split('_')[3] == 'opponent':
            query = query + f'{away_team_data[key]} as {column},'
        else:
            query = query + f'{away_team_data[key]} as {column},'
    else:
        print(f'Error: Model input column {column} not in team data in firestore or not in logic in App Engine. Please try a different model')
# +
bq_query = query[:-1] + '))'
game = client.query('''
%s
''' % (bq_query)).to_dataframe()
pointspread = round(game['predicted_spread'][0],1)
if pointspread > 0:
winner = df['HomeTeam'][0]
loser = df['AwayTeam'][0]
else:
winner = df['AwayTeam'][0]
loser = df['HomeTeam'][0]
# -
f'I predict the {winner} will beat the {loser} by {abs(pointspread)} points using the {model} model!'
os.environ.get('GCP_PROJECT_ID')
import os
env_var = os.environ
env_var
os.environ['GCP_PROJECT'] = 'nba-predictions-dev'
# +
from google.cloud import bigquery
client = bigquery.Client(project='nba-predictions-dev')
dataset_id = 'nba'
models = client.list_models(dataset_id)
model_names = [model.model_id for model in models]
# print("Models contained in '{}':".format(dataset_id))
# for model in models:
# full_model_id = "{}.{}.{}".format(
# model.project, model.dataset_id, model.model_id
# )
# friendly_name = model.friendly_name
# print("{}: friendly_name='{}'".format(full_model_id, friendly_name))
# -
model_names = [model.model_id for model in models]
model_names
model_names = []
for model in models:
model_names.append(model.model_id)
model_names
model_names = [f'{model.project}.{model.dataset_id}.{model.model_id}' for model in models]
# +
from google.cloud import bigquery
# Construct a BigQuery client object.
#client = bigquery.Client()
# TODO(developer): Set model_id to the ID of the model to fetch.
model_id = 'nba-predictions-dev.nba.BOOSTED_TREE_CLASSIFIER'
model = client.get_model(model_id) # Make an API request.
full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
friendly_name = model.friendly_name
print(
"Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
)
# -
model
# +
## Run this function locally after initial data load since it will take more memory than google cloud functions allows
import pandas as pd
import numpy as np
from google.cloud import bigquery
from google.cloud import firestore
import os
## TO DO: Replace your Project ID and change the table names if you chose a different dataset name than 'nba' ##
os.environ['GCP_PROJECT'] = 'nba-predictions-test'
my_project_id = os.environ.get('GCP_PROJECT')
client = bigquery.Client(project=my_project_id)
# Fully-qualified BigQuery table/view names used by the pipeline below.
raw_game_data_table = f'{my_project_id}.nba.raw_basketballreference_game'
raw_player_data_table = f'{my_project_id}.nba.raw_basketballreference_playerbox'
games_to_load_to_model_view = f'{my_project_id}.nba.games_to_load_to_model'
model_table_name = f'{my_project_id}.nba.model_game'
# Enter columns you want to generate linearly weighted moving average calculations for and number of periods to use
wma_columns = [
    'pace','efg_pct', 'tov_pct', 'ft_rate', 'off_rtg',
    'opponent_efg_pct', 'opponent_tov_pct', 'opponent_ft_rate',
    'opponent_off_rtg', 'starter_minutes_played_proportion',
    'bench_plus_minus', 'opponnent_starter_minutes_played_proportion',
    'opponent_bench_plus_minus']
W = 20  # window length (games) for the weighted moving averages
# +
def convert_to_seconds(x):
    """Convert an 'MM:SS' minutes-played string to total seconds."""
    parts = x.split(':')
    return int(parts[0]) * 60 + int(parts[1])
def switch_key(key):
    """Flip the trailing home/away marker of a game key.

    A trailing 'a' becomes 'h'; any other trailing character becomes 'a'.
    """
    if key[-1] == 'a':
        return key[:-1] + 'h'
    return key[:-1] + 'a'
def generate_streak_info(data, column):
    """
    Parameters
    ----------
    data:
        Dataframe with a specific column to generate streak data
    column:
        String with specific column name to generate streak info

    Returns
    -------
    data_with_streak_counter:
        A copy of the dataframe with a new column
        `streak_counter_[column]` containing integers with
        counts for each streak. The input dataframe is left
        unmodified (the previous version mutated the caller's
        frame with helper columns).
    """
    # Work on a copy so the caller's frame is not polluted with helper columns.
    data_with_streak_counter = data.copy()
    # A streak starts wherever the value differs from the previous row.
    data_with_streak_counter['start_of_streak'] = data_with_streak_counter[column].ne(data_with_streak_counter[column].shift())
    # Cumulative sum of starts gives every streak a unique id ...
    data_with_streak_counter['streak_id'] = data_with_streak_counter.start_of_streak.cumsum()
    # ... and the row position within each id group is the running streak length.
    data_with_streak_counter[f'streak_counter_{column}'] = data_with_streak_counter.groupby('streak_id').cumcount() + 1
    return data_with_streak_counter.drop(columns=['start_of_streak', 'streak_id'])
def create_linear_weighted_moving_average(data, column, weight):
    """Append a linearly weighted moving average column to a copy of `data`.

    Parameters
    ----------
    data:
        Dataframe with a specific column to generate the weighted moving average.
    column:
        String column name; values must be convertible to float.
    weight:
        Window length W; weights 1..W, so the newest observation counts most.

    Returns
    -------
    A copy of `data` where `column` is cast to float and a new
    `wma_[W]_[column]` float column holds the weighted moving average
    (NaN for the first W-1 rows).
    """
    result = data.copy()
    result[column] = result[column].astype(float)
    wts = np.arange(1, weight + 1)
    total = wts.sum()

    def _wma(window):
        # Linear weights: the most recent value in the window gets weight W.
        return np.dot(window, wts) / total

    result[f'wma_{weight}_{column}'] = result[column].rolling(weight).apply(_wma, raw=True)
    return result
## Load tables to dataframe
# Per-game four-factor stats scraped from basketball-reference.
game_bq = client.query('''
SELECT game_date, visitor_team_name, visitor_pts, home_team_name, home_pts, games.game_key,
a_ff_pace, a_ff_efg_pct, a_ff_tov_pct, a_ff_orb_pct, a_ff_ft_rate, a_ff_off_rtg,
h_ff_pace, h_ff_efg_pct, h_ff_tov_pct,h_ff_orb_pct, h_ff_ft_rate, h_ff_off_rtg
FROM `%s` as games
''' % (raw_game_data_table)).to_dataframe()
if game_bq.empty:
    print('Function ended early. No new data to load.')
# Per-player box-score rows; players with no minutes are excluded up front.
player_bq = client.query('''
SELECT players.game_key, game_date, h_or_a, mp, plus_minus, starter_flag
FROM `%s` as players
WHERE mp is not NULL
''' % (raw_player_data_table)).to_dataframe()
## Create copies to avoid calling bigquery multiple times when testing - comment out delete while testing
game = game_bq.copy()
player = player_bq.copy()
#del game_bq
#del player_bq
## Create game variables needed for model
game['home_spread'] = game['home_pts'].astype(int) - game['visitor_pts'].astype(int)
game['season'] = ''
# Label each game with its season's ending year. Seasons normally span
# July-June, but the 2019-20 season ran long (its cutoff here is November 2020).
for i in range(len(game)):
    if ((game['game_date'][i].year != 2020 and game['game_date'][i].month < 7) or (game['game_date'][i].year == 2020 and game['game_date'][i].month < 11)):
        game.loc[i,'season'] = game['game_date'][i].year
    else:
        game.loc[i,'season'] = game['game_date'][i].year + 1
## Create game by team variables - stack home and away to team vs. opponent
# Home perspective: 'team' columns come from h_ff_*, 'opponent' from a_ff_*.
games_by_team_home = pd.DataFrame()
games_by_team_home['season'] = game['season']
games_by_team_home['game_key'] = game['game_key']
games_by_team_home['game_key_team'] = game['game_key'] + 'h'
games_by_team_home['game_key_opponent'] = game['game_key'] + 'a'
games_by_team_home['game_date'] = pd.to_datetime(game['game_date'])
games_by_team_home['team'] = game['home_team_name']
games_by_team_home['opponent'] = game['visitor_team_name']
games_by_team_home['is_home_team'] = 1
games_by_team_home['spread'] = game['home_spread']
games_by_team_home['pace'] = game['h_ff_pace']
games_by_team_home['efg_pct'] = game['h_ff_efg_pct']
games_by_team_home['tov_pct'] = game['h_ff_tov_pct']
games_by_team_home['ft_rate'] = game['h_ff_ft_rate']
games_by_team_home['off_rtg'] = game['h_ff_off_rtg']
games_by_team_home['opponent_efg_pct'] = game['a_ff_efg_pct']
games_by_team_home['opponent_tov_pct'] = game['a_ff_tov_pct']
games_by_team_home['opponent_ft_rate'] = game['a_ff_ft_rate']
games_by_team_home['opponent_off_rtg'] = game['a_ff_off_rtg']
games_by_team_home['is_win'] = [1 if x > 0 else 0 for x in games_by_team_home['spread'].astype(int)]
# Visitor perspective: mirror of the above with the spread sign flipped.
games_by_team_visitor = pd.DataFrame()
games_by_team_visitor ['season'] = game['season']
games_by_team_visitor['game_key'] = game['game_key']
games_by_team_visitor ['game_key_team'] = game['game_key'] + 'a'
games_by_team_visitor ['game_key_opponent'] = game['game_key'] + 'h'
games_by_team_visitor ['game_date'] = pd.to_datetime(game['game_date'])
games_by_team_visitor ['team'] = game['visitor_team_name']
games_by_team_visitor ['opponent'] = game['home_team_name']
games_by_team_visitor ['is_home_team'] = 0
games_by_team_visitor ['spread'] = game['home_spread']*-1
games_by_team_visitor ['pace'] = game['a_ff_pace']
games_by_team_visitor ['efg_pct'] = game['a_ff_efg_pct']
games_by_team_visitor ['tov_pct'] = game['a_ff_tov_pct']
games_by_team_visitor ['ft_rate'] = game['a_ff_ft_rate']
games_by_team_visitor ['off_rtg'] = game['a_ff_off_rtg']
games_by_team_visitor['opponent_efg_pct'] = game['h_ff_efg_pct']
games_by_team_visitor['opponent_tov_pct'] = game['h_ff_tov_pct']
games_by_team_visitor['opponent_ft_rate'] = game['h_ff_ft_rate']
games_by_team_visitor['opponent_off_rtg'] = game['h_ff_off_rtg']
games_by_team_visitor['is_win'] = [1 if x > 0 else 0 for x in games_by_team_visitor['spread'].astype(int)]
games_by_team = pd.concat([games_by_team_home,games_by_team_visitor])
games_by_team.sort_values(by=['game_date'], ascending = True, inplace=True)
# Days of rest before each game = gap since the team's previous game.
games_by_team['previous_game_date'] = games_by_team.groupby(['team'])['game_date'].shift(1)
games_by_team['incoming_rest_days'] = [(d - p).days for d,p in zip(games_by_team['game_date'],games_by_team['previous_game_date'])]
# Replace NaN with -99 so can be converted to int. These rows will be dropped later.
games_by_team['incoming_rest_days'].fillna(-99, inplace=True)
games_by_team['incoming_rest_days'] = games_by_team['incoming_rest_days'].astype(int)
games_by_team.set_index('game_key_team', inplace=True)
# del games_by_team_visitor
# del games_by_team_home
## Create player variables needed for model
# Make game key unique per home/away team
player['game_key_team'] = player['game_key'] + player['h_or_a']
#Only include players that actually played
player = player.dropna(subset=['mp', 'plus_minus']).reset_index(drop=True)
player['plus_minus'] = player['plus_minus'].astype(int)
player['seconds_played'] = player['mp'].apply(convert_to_seconds)
## Create dataframe for aggregated player stats per game
game_player_stats = pd.DataFrame()
game_player_stats['game_key_team'] = player['game_key_team'].unique()
# Share of total minutes played by the starting five.
total_seconds = player.groupby(['game_key_team'])['seconds_played'].sum()
starter_seconds = player[player['starter_flag']==True].groupby(['game_key_team'])['seconds_played'].sum()
seconds = pd.merge(total_seconds, starter_seconds, left_index=True, right_index=True, how='inner')
# After the merge, _x is total seconds and _y is starter seconds.
seconds['starter_minutes_played_proportion'] = seconds['seconds_played_y']/seconds['seconds_played_x']
game_player_stats.set_index('game_key_team',inplace=True)
game_player_stats = pd.merge(game_player_stats,seconds['starter_minutes_played_proportion'],left_index=True,right_index=True,how='inner')
# Combined plus/minus of the bench unit.
bench_pl_min = player[player['starter_flag']==False].groupby(['game_key_team'])['plus_minus'].sum()
game_player_stats = pd.merge(game_player_stats,bench_pl_min, left_index=True, right_index=True, how='inner')
game_player_stats = game_player_stats.rename(columns={'plus_minus':'bench_plus_minus'})
## Merge aggregated stats in to games by team dataframe
games_by_team = pd.merge(games_by_team,game_player_stats, left_index=True, right_index=True,how='inner')
## Create dataframe to capture opponent aggregated stats
game_player_stats_opponent = game_player_stats.copy()
# del game_player_stats
# Reset index so it can be modified to temporarily switch 'h' with 'a'
game_player_stats_opponent.reset_index(drop=False, inplace=True)
game_player_stats_opponent['game_key_team'] = game_player_stats_opponent['game_key_team'].apply(switch_key)
#Rename columns to opponent columns
game_player_stats_opponent = game_player_stats_opponent.rename(columns={'starter_minutes_played_proportion':'opponnent_starter_minutes_played_proportion','bench_plus_minus':'opponent_bench_plus_minus'})
#Reset index and merge
game_player_stats_opponent.set_index('game_key_team', inplace=True)
games_by_team = pd.merge(games_by_team,game_player_stats_opponent,left_index=True,right_index=True,how='inner')
# del game_player_stats_opponent
games_by_team_with_wma = pd.DataFrame()
#Create data frame with stats needed for model
# Per team (in date order): build win/loss streak counters and lagged
# ("incoming") weighted moving averages so each row only contains information
# available before the game was played.
for team in games_by_team['team'].unique():
    team_games = games_by_team.loc[games_by_team['team']==team].sort_values(by='game_date')
    team_games = generate_streak_info(team_games,'is_win')
    # Losing streaks become negative counts.
    team_games['streak_counter_is_win'] = [x * -1 if y == 0 else x for x,y in zip(team_games['streak_counter_is_win'],team_games['is_win'])]
    team_games['incoming_is_win_streak'] = team_games['streak_counter_is_win'].shift(fill_value=0)
    for col in wma_columns:
        team_games = create_linear_weighted_moving_average(team_games,col,W)
        # shift() lags by one game: the WMA entering the game, not including it.
        team_games[f'incoming_wma_{W}_{col}'] = team_games[f'wma_{W}_{col}'].shift()
    games_by_team_with_wma = pd.concat([games_by_team_with_wma, team_games])
# Self-join to attach the opponent's rest days and streak info to each row.
games_by_team_with_wma = (games_by_team_with_wma.merge(games_by_team_with_wma.reset_index(drop=False)[[
    'game_key_team','incoming_rest_days','streak_counter_is_win','incoming_is_win_streak']],
    left_on='game_key_opponent', right_on='game_key_team',
    how='inner', suffixes=(None, '_opponent')))
#Drop first W rows for each team with no incoming weighted average
model_game_data = games_by_team_with_wma.dropna(subset=[f'incoming_wma_{W}_pace']).copy()
# del games_by_team_with_wma
# del games_by_team
#Convert data types to prepare for load to bigquery
model_game_data = model_game_data.astype({'season':int, 'is_win':int})
#Create data frame to create firestore collections with data to use in model call
# Keep only each team's latest game: its current WMAs/streak seed future predictions.
most_recent_game = model_game_data.sort_values('game_date').drop_duplicates(['team'],keep='last')
most_recent_game = most_recent_game[['season', 'game_date', 'team','streak_counter_is_win']
    + [f'wma_{W}_{x}' for x in wma_columns]]
most_recent_game.reset_index(drop=True, inplace=True)
most_recent_game.set_index('team', inplace=True)
docs = most_recent_game.to_dict(orient='index')
db = firestore.Client(project=my_project_id)
# One Firestore document per team under 'team_model_data'.
for team in most_recent_game.index.unique():
    doc_ref = db.collection('team_model_data').document(team.replace('/','\\')) #Teams that changed mid-season have a '/' which firestore interprets as new path
    doc_ref.set(docs[team])
# del most_recent_game
#Create new client and load table to Big Query
bqclient = bigquery.Client(project=my_project_id)
#Publish model data
job_config = bigquery.LoadJobConfig()
job_config.autodetect='True'
job_config.create_disposition = 'CREATE_IF_NEEDED'
job_config.write_disposition = 'WRITE_TRUNCATE'
# job_config.time_partitioning = bigquery.TimePartitioning(
#     type_=bigquery.TimePartitioningType.DAY,
#     field="game_date")
## Set schema for specific columns where more information is needed (e.g. not NULLABLE or specific date/time)
job_config.schema = [
    bigquery.SchemaField('game_key','STRING', 'REQUIRED'),
    bigquery.SchemaField('team','STRING', 'REQUIRED'),
    bigquery.SchemaField('opponent','STRING', 'REQUIRED'),
    bigquery.SchemaField('game_date','DATE'),
]
job_model = bqclient.load_table_from_dataframe(model_game_data, model_table_name, job_config=job_config)
model_result = job_model.result()  # block until the load job finishes
model_message = (
    f'Job ID: {model_result.job_id} '
    f'was started {model_result.started} '
    f'and ended {model_result.ended} '
    f'loading {model_result.output_rows} row(s) '
    f'to {model_result.destination}')
print(model_message)
# -
def remove_duplicates(table, distinct_column):
    """Deduplicate a BigQuery table in place, keeping the newest row per key.

    Parameters
    ----------
    table:
        Dataset-qualified table name, e.g. 'nba.raw_basketballreference_game'.
    distinct_column:
        Column that should uniquely identify a row.

    Returns
    -------
    str message stating whether duplicates were found and removed.
    """
    ## Remove Duplicates - Always run at the end if you have had issues while loading ##
    client = bigquery.Client(project=os.environ.get('GCP_PROJECT'))
    ## Get row count and distinct game count
    game_count = client.query('''
    select count(1) as row_count,
    count(distinct %s) as game_count
    from `%s.%s`
    ''' % (distinct_column,os.environ.get('GCP_PROJECT'),table)).to_dataframe()
    if game_count['row_count'][0] == game_count['game_count'][0]:
        return f'No duplicates in {table}!'
    else:
        # Rebuild the table keeping only the most recently loaded row per key.
        # .result() waits for the job so we don't report success prematurely
        # (the previous version returned without awaiting the query job).
        client.query('''
        CREATE OR REPLACE TABLE `%s.%s`
        AS
        SELECT * EXCEPT(row_num) FROM (
        SELECT
        *, ROW_NUMBER() OVER (PARTITION BY %s ORDER BY load_datetime desc) as row_num
        FROM `%s.%s`
        ) WHERE row_num = 1
        ''' % (os.environ.get('GCP_PROJECT'),table,distinct_column,os.environ.get('GCP_PROJECT'),table)).result()
        # Bug fix: this was a plain string, so {table} was never interpolated.
        return f'Duplicates removed from {table}'
# +
# Deduplicate both raw tables and report the outcome.
game_remove = remove_duplicates('nba.raw_basketballreference_game','game_key')
player_remove = remove_duplicates('nba.raw_basketballreference_playerbox', 'player_stat_key')
print(game_remove)
print(player_remove)
# -
os.environ['GCP_PROJECT'] = 'nba-predictions-dev'
# +
import os
from google.cloud import bigquery
## Setup
my_project_id = os.environ.get('GCP_PROJECT')
client = bigquery.Client(project=my_project_id)
raw_game_data_table = f'{my_project_id}.nba.raw_basketballreference_game'
raw_player_data_table = f'{my_project_id}.nba.raw_basketballreference_playerbox'
games_to_load_to_model_view = f'{my_project_id}.nba.games_to_load_to_model'
model_table_name = f'{my_project_id}.nba.model_game'
# Enter columns to create linearly weighted moving average calculations for and number of periods to use
wma_columns = ['pace',
    'efg_pct', 'tov_pct', 'ft_rate', 'off_rtg',
    'opponent_efg_pct', 'opponent_tov_pct', 'opponent_ft_rate',
    'opponent_off_rtg', 'starter_minutes_played_proportion',
    'bench_plus_minus', 'opponnent_starter_minutes_played_proportion',
    'opponent_bench_plus_minus']
W = 10  # window length (games) for the weighted moving averages
## Load tables to dataframe
# Only games flagged by the games_to_load_to_model view are pulled here.
game_bq = client.query('''
SELECT game_date, visitor_team_name, visitor_pts, home_team_name, home_pts, games.game_key,
a_ff_pace, a_ff_efg_pct, a_ff_tov_pct, a_ff_orb_pct, a_ff_ft_rate, a_ff_off_rtg,
h_ff_pace, h_ff_efg_pct, h_ff_tov_pct,h_ff_orb_pct, h_ff_ft_rate, h_ff_off_rtg
,NEEDS_TO_LOAD_TO_MODEL
FROM `%s` as games
INNER JOIN `%s` as load ON games.game_key = load.game_key
''' % (raw_game_data_table,games_to_load_to_model_view)).to_dataframe()
# -
if game_bq.empty:
    print('empty')
# +
import pandas as pd
import numpy as np
from google.cloud import firestore
from google.cloud import bigquery
def predicted_pointspread(form_dict):
    """Predict a point spread via a BigQuery ML model fed from Firestore stats.

    NOTE(review): `form_dict` is currently ignored — the matchup is hard-coded
    below (looks like notebook scratch for the App Engine handler); confirm
    before reuse.

    Raises
    ------
    ValueError: on any failure while fetching data or running the prediction.
    """
    try:
        teams = {'HomeTeam':'Atlanta Hawks', 'AwayTeam':'Milwaukee Bucks'}
        df = pd.DataFrame(teams, index=[0])
        db = firestore.Client()
        # Latest per-team rolling stats written by the model-data pipeline.
        home_team_data = db.collection('team_model_data').document(df['HomeTeam'][0]).get().to_dict()
        away_team_data = db.collection('team_model_data').document(df['AwayTeam'][0]).get().to_dict()
        # Build an ML.PREDICT statement feeding the stored stats as model inputs.
        query = 'SELECT predicted_spread FROM ML.PREDICT(MODEL `nba.automl_regression`, (SELECT 1 as is_home_team,'
        for key in home_team_data.keys():
            if key == 'streak_counter_is_win':
                query = query + f'{home_team_data[key]} as incoming_is_win_streak,'
            elif key not in ['season', 'game_date']:
                query = query + f'{home_team_data[key]} as incoming_{key},'
        for key in away_team_data.keys():
            if key not in ['season', 'game_date', 'streak_counter_is_win']:
                query = query + f'{away_team_data[key]} as incoming_opponent_{key},'
        bq_query = query[:-1] + '))'  # drop trailing comma, close SELECT and PREDICT
        client = bigquery.Client()
        game_bq = client.query('''
        %s
        ''' % (bq_query))
        game = game_bq.to_dataframe()
        pointspread = round(game['predicted_spread'][0],1)
        # Positive spread means the home team is favored.
        if pointspread > 0:
            winner = df['HomeTeam'][0]
            loser = df['AwayTeam'][0]
        else:
            winner = df['AwayTeam'][0]
            loser = df['HomeTeam'][0]
        return f'I predict the {winner} will beat the {loser} by {abs(pointspread)} points!'
    except Exception as e:
        raise ValueError('Sorry, there was a problem processing the data entered... Please try again with different teams') from e
# -
# NOTE(review): orphaned notebook fragment — the `return` and `except` below
# have no enclosing function/try, so this cell is not runnable as-is.
pointspread = np.random.randint(-1000,1000)
pointspread = 1 if pointspread == 0 else pointspread  # never predict a 0-point spread
if pointspread > 0:
    winner = df['HomeTeam'][0]
    loser = df['AwayTeam'][0]
else:
    winner = df['AwayTeam'][0]
    loser = df['HomeTeam'][0]
return f'I predict the {winner} will beat the {loser} by {abs(pointspread)} points!'
except Exception as e:
    raise ValueError('Sorry, there was a problem processing the data entered... Please go back and double check your entries, thanks!') from e
# +
from flask import Flask, render_template, request#, url_for, redirect
from google.cloud import storage
from google.cloud import firestore
import json
import os
# -
# %env CLOUD_STORAGE_BUCKET nba-predictions-dev.appspot.com
os.environ.get("CLOUD_STORAGE_BUCKET")
# Read the upcoming-games JSON blob from Cloud Storage.
client = storage.Client()
bucket_name = os.environ.get("CLOUD_STORAGE_BUCKET") #'nba-predictions-dev.appspot.com'
bucket = client.bucket(bucket_name)
blob = bucket.blob('static/monday.json').download_as_string()
data = json.loads(blob.decode("utf-8").replace("'",'"'))  # blob stored with single quotes; normalize to valid JSON
home_teams = list(data['home_team_name'].values())
away_teams = list(data['visitor_team_name'].values())
game_day = list(data['game_day'].values())
game_date = list(data['game_date'].values())
game_start_time = list(data['game_start_time'].values())
games = []
# NOTE(review): range(len(home_teams)-1) skips the final game — looks like an
# off-by-one; confirm against the rendered template.
for i in range(len(home_teams)-1):
    games.append(f'{away_teams[i]} vs. {home_teams[i]} at {game_start_time[i]} on {game_day[i]}, {game_date[i]}')
# NOTE(review): top-level `return` — fragment of a Flask view function.
return render_template('UpcomingGames.html', games=games, home_teams = home_teams, away_teams=away_teams, game_day=game_day, game_date = game_date, game_start_time = game_start_time)
import requests
# Bug fix: `datetime.strptime` / `datetime.now` are used below but only `date`
# and `timedelta` were imported, raising NameError at runtime.
from datetime import date, datetime, timedelta
# Backfill driver: step through month-sized windows from StartDate and POST
# each window to the scraper Cloud Function.
request = {"StartDate":"2015-02-01"}
startDate = datetime.strptime(request['StartDate'], '%Y-%m-%d').date()
endDate_max = (datetime.now() + timedelta(days=-1)).date()  # never scrape today
endDate = ''
url = 'https://us-central1-nba-predictions-dev.cloudfunctions.net/nba_basketball_reference_scraper'
#while endDate != endDate_max:
# Advance one calendar month, handling the December -> January rollover.
year, month= divmod(startDate.month+1, 12)
if month == 0:
    month = 12
    year = year -1
# First day of next month minus one day = last day of the start month.
endDate = date(startDate.year + year, month, 1) + timedelta(days=-1)
if endDate >= endDate_max:
    endDate = endDate_max
endDateformat = endDate.strftime('%Y-%m-%d')
startDateformat = startDate.strftime('%Y-%m-%d')
data = {"StartDate":startDateformat,"EndDate":endDateformat}
print(data)
# response = requests.post(url, data)
# print(response)
# Move startDate to the first day of the following month for the next window.
year, month= divmod(startDate.month+1, 12)
if month == 0:
    month = 12
    year = year -1
startDate = date(startDate.year + year, month, 1)
response = requests.post(url, data)
print(response)
startDate
# Sample form payload as produced by the web form.
form_dict = {'HomeTeam':'Atlanta Hawks', 'AwayTeam':'Boston Celtics'}
# +
import pandas as pd
import numpy as np
def predicted_pointspread(form_dict):
    """Return a playful random point-spread prediction for a form submission.

    Stub used while the real model endpoint is unavailable: the spread is a
    random integer in [-40, 40), never 0. Positive favors the home team.

    Parameters
    ----------
    form_dict:
        Mapping with 'HomeTeam' and 'AwayTeam' keys from the web form.

    Returns
    -------
    str prediction message, or an apology string when the input is malformed.
    """
    try:
        df = pd.DataFrame(form_dict, index=[0])
        pointspread = np.random.randint(-40,40)
        pointspread = 1 if pointspread == 0 else pointspread  # never predict a tie
        if pointspread > 0:
            winner = df['HomeTeam'][0]
            loser = df['AwayTeam'][0]
        else:
            winner = df['AwayTeam'][0]
            loser = df['HomeTeam'][0]
        return f'I predict the {winner} will beat the {loser} by {abs(pointspread)} points'
    # Bug fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # narrowed to Exception while keeping the friendly fallback message.
    except Exception:
        return 'Sorry, there was a problem processing the data entered... Please go back and double check your entries, thanks!'
# -
# Smoke-test the stub predictor and the form -> DataFrame conversion.
predicted_pointspread(form_dict)
df = pd.DataFrame(form_dict, index=[0])
df['HomeTeam'][0]
# +
import requests
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import pandas as pd
from google.cloud import bigquery
import pyarrow
# Simulated Cloud Function request payload for the scraper cells below.
request = {'StartDate': '2019-10-01'}
# +
def get_max_game_date():
    """Return the day after the latest game_date already loaded to BigQuery.

    Used as the default scrape start date when the request has no StartDate.
    Relies on the module-level `project` variable being set.
    """
    client = bigquery.Client(project=project)  # bug fix: was `projcet` (undefined name -> NameError)
    QUERY = (
        "SELECT date_add(max(game_date), INTERVAL 1 day) as max_game_date FROM `nba.raw_basketballreference_game`"
    )
    query_job = client.query(QUERY)  # API request
    rows = query_job.result()  # Waits for query to finish
    for result in rows:  # single-row result
        load_date = result.max_game_date
    return load_date
def get_game_players(soup, player_game_data, id_string, game_key, stat_type, h_or_a, team_abbrev, game_date):
    """Parse one team's box-score table and append a stats dict per player.

    Parameters
    ----------
    soup: parsed basketball-reference box-score page.
    player_game_data: running list the parsed rows are appended to (mutated and returned).
    id_string: HTML id of the box table, e.g. 'box-BOS-game-basic'.
    game_key / stat_type / h_or_a / team_abbrev / game_date:
        identifying fields copied onto every row.
    """
    rows = soup.find('table', id=id_string).find('tbody').find_all('tr')
    cnt = 1  # appended-row counter; the first five data rows are the starters
    #print(str(rows))
    for player in rows:
        game_players = {}
        game_players['game_key'] = game_key
        game_players['game_date'] = game_date
        game_players['h_or_a'] = h_or_a
        game_players['team_abbrev'] = team_abbrev
        game_players['stat_period'] = stat_type
        game_players['player'] = player.find('th',{"data-stat": "player"}).text
        #print(game_players['player'])
        player_node = player.find('th',{"data-stat": "player"})
        # Ignore Header Line: the 'Reserves' separator has no player key attribute.
        if game_players['player'] != 'Reserves' and player_node.has_attr('data-append-csv'):
            a = player.find('th',{"data-stat": "player"}).find('a',href=True)
            if a is not None:
                game_players['player_link'] = a['href']
            else:
                game_players['player_link'] = None
            game_players['player_key'] = player_node['data-append-csv']
            # 'reason' (e.g. 'Did Not Play') is present when stat cells are absent;
            # get_text returns None for missing/empty cells.
            game_players['reason'] = get_text(player.find('td',{"data-stat": "reason"}))
            game_players['mp'] = get_text(player.find('td',{"data-stat": "mp"}))
            game_players['fg'] = get_text(player.find('td',{"data-stat": "fg"}))
            game_players['fga'] = get_text(player.find('td',{"data-stat": "fga"}))
            game_players['fg_pct'] = get_text(player.find('td',{"data-stat": "fg_pct"}))
            game_players['fg3'] = get_text(player.find('td',{"data-stat": "fg3"}))
            game_players['fg3a'] = get_text(player.find('td',{"data-stat": "fg3a"}))
            game_players['fg3_pct'] = get_text(player.find('td',{"data-stat": "fg3_pct"}))
            game_players['ft'] = get_text(player.find('td',{"data-stat": "ft"}))
            game_players['fta'] = get_text(player.find('td',{"data-stat": "fta"}))
            game_players['ft_pct'] = get_text(player.find('td',{"data-stat": "ft_pct"}))
            game_players['orb'] = get_text(player.find('td',{"data-stat": "orb"}))
            game_players['drb'] = get_text(player.find('td',{"data-stat": "drb"}))
            game_players['trb'] = get_text(player.find('td',{"data-stat": "trb"}))
            game_players['ast'] = get_text(player.find('td',{"data-stat": "ast"}))
            game_players['stl'] = get_text(player.find('td',{"data-stat": "stl"}))
            game_players['blk'] = get_text(player.find('td',{"data-stat": "blk"}))
            game_players['tov'] = get_text(player.find('td',{"data-stat": "tov"}))
            game_players['pf'] = get_text(player.find('td',{"data-stat": "pf"}))
            game_players['pts'] = get_text(player.find('td',{"data-stat": "pts"}))
            game_players['plus_minus'] = get_text(player.find('td',{"data-stat": "plus_minus"}))
            # Unique key per player per game per stat period.
            game_players['player_stat_key'] = game_players['game_key'] + '|' + game_players['player_key'] + '|' + game_players['stat_period']
            if cnt <= 5:
                game_players['starter_flag'] = True
            else:
                game_players['starter_flag'] = False
            game_players['load_datetime'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            #print(game_players)
            player_game_data.append(game_players)
            cnt += 1
    return player_game_data
def get_text(stat):
    """Return a parsed cell's text, or None when the cell is missing or empty."""
    if stat is None:
        return None
    return stat.text or None
# -
##########################################################################
# Input Data Check
##########################################################################
# Resolve the scrape window: explicit dates from the request, else resume from
# the last loaded game through yesterday.
try:
    request_json = request
    if request_json and 'StartDate' in request_json:
        startDate = datetime.strptime(request_json['StartDate'], '%Y-%m-%d').date()
    else:
        startDate = get_max_game_date()
    if request_json and 'EndDate' in request_json:
        endDate = datetime.strptime(request_json['EndDate'], '%Y-%m-%d').date()
    else:
        endDate = (datetime.now() + timedelta(days=-1)).date()
except Exception as e:
    raise ValueError("Start & End dates must be in YYYY-MM-DD format") from e
# Distinct list of Months between start and end date
delta = endDate - startDate # as timedelta
if delta.days < 0:
    raise ValueError("Start Date can't be before End Date")
##########################################################################
# Get Distinct Months for schedule to scrape
##########################################################################
yearmonths = []
for i in range(delta.days + 1):
    r = {}
    day = startDate + timedelta(days=i)
    r['monthname'] = day.strftime('%B').lower()
    # basketball-reference files schedules under the season's ending year, so
    # months after September belong to the next calendar year's season page.
    if day.month > 9:
        r['year'] = day.year + 1
    else:
        r['year'] = day.year
    if r not in yearmonths:
        yearmonths.append(r)
#print(yearmonths)
yearmonths
# +
##########################################################################
# Scrape Schedule
##########################################################################
player_game_rows_loaded = 0
game_rows_loaded = 0
schedule = []
# Notebook scratch: pick a single month from the list instead of looping.
v = yearmonths[12]
year = str(v['year'])
month = v['monthname']
# COVID-era season pages embed the calendar year in the October schedule URL.
if month == 'october' and (year == '2020' or year == '2021'):
    url = f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}-{v["year"] - 1}.html'
else:
    url = f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}.html'
print(url)
# +
html = requests.get(url)
if html.ok:
    soup = BeautifulSoup(html.content, 'html.parser')
else:
    # NOTE(review): on failure `soup` keeps any previous value; the `continue`
    # is commented out because this cell sits outside its original month loop.
    print(f'No data for {month} {year} because enountered error code {html.status_code}')
    #continue
rows = soup.find('table', id="schedule").find('tbody').find_all('tr')
#print(rows)
# -
soup
# Parse each schedule row into a game dict, keeping only games in the window.
for row in rows:
    game_date_node = row.find('th',{"data-stat": "date_game"})
    if game_date_node is not None:  # skip repeated header rows
        game_date = datetime.strptime(game_date_node.text, '%a, %b %d, %Y').date()
        if game_date >= startDate and game_date <= endDate:
            #cells = row.find_all(['td', 'th'])
            r = {}
            #r.setdefault(game_start_time, []).append(value)
            v1 = row.find('th',{"data-stat": "date_game"})
            #r[k1] = v1.text
            r['game_date'] = datetime.strptime(v1.text, '%a, %b %d, %Y').strftime("%Y-%m-%d")
            v2 = row.find('td',{"data-stat": "game_start_time"})
            r['game_start_time'] = v2.text if v2 else None
            v3 = row.find('td',{"data-stat": "visitor_team_name"})
            r['visitor_team_name'] = v3.text
            r['away_abbr'] = v3['csk'].split('.')[0]  # the csk attribute carries the team abbreviation
            v4 = row.find('td',{"data-stat": "visitor_pts"})
            r['visitor_pts'] = v4.text if v4 else None
            v5 = row.find('td',{"data-stat": "home_team_name"})
            r['home_team_name'] = v5.text
            r['home_abbr'] = v5['csk'].split('.')[0]
            v6 = row.find('td',{"data-stat": "home_pts"})
            r['home_pts'] = v6.text if v6 else None
            # Box-score link exists only for completed games.
            v7 = row.find('td',{"data-stat": "box_score_text"}).find('a',href=True)
            r['box_score_url'] = v7['href'] if v7 else None
            v8 = row.find('td',{"data-stat": "attendance"})
            r['attendance'] = v8.text if v8 else None
            v9 = row.find('td',{"data-stat": "overtimes"})
            r['overtimes'] = v9.text if v9 else None
            # Game key: away abbr + yyyymmdd + home abbr (+ start time when known).
            if r['game_start_time']:
                v12 = r['away_abbr'] + r['game_date'].replace('-','') + r['home_abbr'] + r['game_start_time'].replace(':','')
            else:
                v12 = r['away_abbr'] + r['game_date'].replace('-','') + r['home_abbr']
            r['game_key'] = v12 if v12 else None
            schedule.append(r)
print(schedule)
# +
##########################################################################
# Scrape Games in Schedule
##########################################################################
games_data = []
player_game_data = []
for game in schedule:
    # Only completed games carry a box-score link; future games are skipped.
    if 'box_score_url' in game and game['box_score_url'] != "" and game['box_score_url'] is not None:
        url = "https://www.basketball-reference.com" + game['box_score_url']
        #print(url)
        r = requests.get(url)
        #print('here2')
        # basketball-reference hides some tables inside HTML comments; stripping
        # the comment markers makes them visible to the parser.
        soup = BeautifulSoup(str(r.content).replace("<!--","").replace('-->',''), 'html.parser')
        ##############################################
        # Line Score
        rows = soup.find('table', id="line_score").find_all('tr')
        # Away Line Score
        r_num = 1
        for away in rows[2].find_all('td'):
            test_strong = away.find('strong') # Strong represents the total score ... ignore
            if test_strong is None and r_num < 7:  # quarters 1-4 plus up to two OT periods
                k='a_g' + str(r_num) + '_score'
                game[k] = away.text if away.text != "" else None
                r_num+=1
        # Home Line Score
        r_num = 1
        for home in rows[3].find_all('td'):
            test_strong = home.find('strong') # Strong represents the total score ... ignore
            if test_strong is None and r_num < 7:
                k='h_g' + str(r_num) + '_score'
                game[k] = home.text if home.text != "" else None
                r_num+=1
        ##############################################
        # Four Facts
        rows = soup.find('table', id="four_factors").find_all('tr')
        # Away Four Factors
        game['a_ff_pace'] = rows[2].find('td',{"data-stat": "pace"}).text
        game['a_ff_efg_pct'] = rows[2].find('td',{"data-stat": "efg_pct"}).text
        game['a_ff_tov_pct'] = rows[2].find('td',{"data-stat": "tov_pct"}).text
        game['a_ff_orb_pct'] = rows[2].find('td',{"data-stat": "orb_pct"}).text
        game['a_ff_ft_rate'] = rows[2].find('td',{"data-stat": "ft_rate"}).text
        game['a_ff_off_rtg'] = rows[2].find('td',{"data-stat": "off_rtg"}).text
        # Home Four Factors
        game['h_ff_pace'] = rows[3].find('td',{"data-stat": "pace"}).text
        game['h_ff_efg_pct'] = rows[3].find('td',{"data-stat": "efg_pct"}).text
        game['h_ff_tov_pct'] = rows[3].find('td',{"data-stat": "tov_pct"}).text
        game['h_ff_orb_pct'] = rows[3].find('td',{"data-stat": "orb_pct"}).text
        game['h_ff_ft_rate'] = rows[3].find('td',{"data-stat": "ft_rate"}).text
        game['h_ff_off_rtg'] = rows[3].find('td',{"data-stat": "off_rtg"}).text
        game['load_datetime'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        #now = datetime.now() # current date and time
        #now.strftime("%m/%d/%Y, %H:%M:%S")
        #player_game_data = []
        game_date = game['game_date']
        ##############################################
        # Game Box - Home
        #box-WAS-q1-basic
        stat_type = "game"
        h_or_a = "h"
        team_abbrev = game['home_abbr']
        id_string = "box-" + game['home_abbr'] + "-" + stat_type + "-basic"
        player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        ##############################################
        # Game Box - Away
        #box-WAS-q1-basic
        stat_type = "game"
        h_or_a = "a"
        team_abbrev = game['away_abbr']
        id_string = "box-" + game['away_abbr'] + "-" + stat_type + "-basic"
        player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # Per-quarter boxes disabled; only full-game stats are loaded.
        # ##############################################
        # # Q1 Box - Home
        # stat_type = "q1"
        # h_or_a = "h"
        # team_abbrev = game['home_abbr']
        # id_string = "box-" + game['home_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q1 Box - Away
        # stat_type = "q1"
        # h_or_a = "a"
        # team_abbrev = game['away_abbr']
        # id_string = "box-" + game['away_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q2 Box - Home
        # stat_type = "q2"
        # h_or_a = "h"
        # team_abbrev = game['home_abbr']
        # id_string = "box-" + game['home_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q2 Box - Away
        # stat_type = "q2"
        # h_or_a = "a"
        # team_abbrev = game['away_abbr']
        # id_string = "box-" + game['away_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q3 Box - Home
        # stat_type = "q3"
        # h_or_a = "h"
        # team_abbrev = game['home_abbr']
        # id_string = "box-" + game['home_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q3 Box - Away
        # stat_type = "q3"
        # h_or_a = "a"
        # team_abbrev = game['away_abbr']
        # id_string = "box-" + game['away_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q4 Box - Home
        # stat_type = "q4"
        # h_or_a = "h"
        # team_abbrev = game['home_abbr']
        # id_string = "box-" + game['home_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # ##############################################
        # # Q4 Box - Away
        # stat_type = "q4"
        # h_or_a = "a"
        # team_abbrev = game['away_abbr']
        # id_string = "box-" + game['away_abbr'] + "-" + stat_type + "-basic"
        # player_game_data = get_game_players(soup, player_game_data, id_string, game['game_key'], stat_type, h_or_a, team_abbrev, game_date)
        # Only games that had a box score are kept for the load below.
        games_data.append(game)
# -
pandas_games_data = pd.DataFrame(games_data)
pandas_games_data['game_start_time']
##########################################################################
# Check for empty game data
##########################################################################
# Continue to next month if there were no games in the month starting at the start date
# NOTE(review): top-level `continue` — this cell was lifted out of the month
# loop and is not runnable as-is.
if not games_data:
    continue
project = 'nba-predictions-dev'
##########################################################################
# Save to BigQuery
##########################################################################
# print(player_game_data)
# print(games_data)
# Config
client = bigquery.Client(project=project)
print(f'Loading data for {month} {year}')
#player game data
pandas_player_game_data = pd.DataFrame(player_game_data)
pandas_player_game_data['game_date'] = pandas_player_game_data['game_date'].astype('datetime64[ns]')
pandas_player_game_data['load_datetime'] = pandas_player_game_data['load_datetime'].astype('datetime64[ns]')
job_config = bigquery.LoadJobConfig()
job_config.autodetect='True'
job_config.create_disposition = 'CREATE_IF_NEEDED'
job_config.write_disposition = 'WRITE_APPEND'
## Set schema for specific columns where more information is needed (e.g. not NULLABLE or specific date/time)
job_config.schema = [
    bigquery.SchemaField('player_stat_key','STRING', 'REQUIRED'),
    bigquery.SchemaField('game_date','DATE'),
    bigquery.SchemaField('load_datetime','TIMESTAMP'),
    bigquery.SchemaField('starter_flag','BOOL')
]
# Partition by game day so reloading a date window is cheap.
job_config.time_partitioning = bigquery.TimePartitioning(
    type_=bigquery.TimePartitioningType.DAY,
    field="game_date")
job_player = client.load_table_from_dataframe(pandas_player_game_data, 'nba.raw_basketballreference_playerbox' \
    , job_config=job_config, project=project)
player_result = job_player.result()  # wait for the load job to finish
player_message = (
    f'Job ID: {player_result.job_id} '
    f'was started {player_result.started} '
    f'and ended {player_result.ended} '
    f'loading {player_result.output_rows} row(s) '
    f'to {player_result.destination}')
print(player_message)
player_game_rows_loaded = player_game_rows_loaded + player_result.output_rows
#game data
pandas_games_data = pd.DataFrame(games_data)
pandas_games_data['game_date'] = pandas_games_data['game_date'].astype('datetime64[ns]')
pandas_games_data['load_datetime'] = pandas_games_data['load_datetime'].astype('datetime64[ns]')
job_config = bigquery.LoadJobConfig()
job_config.autodetect='True'
job_config.create_disposition = 'CREATE_IF_NEEDED'
job_config.write_disposition = 'WRITE_APPEND'
job_config.schema = [
bigquery.SchemaField('game_key','STRING', 'REQUIRED'),
bigquery.SchemaField('game_date','STRING', 'REQUIRED'),
bigquery.SchemaField('home_team_name','STRING', 'REQUIRED'),
bigquery.SchemaField('home_abbr','STRING', 'REQUIRED'),
bigquery.SchemaField('visitor_team_name','STRING', 'REQUIRED'),
bigquery.SchemaField('away_abbr','STRING', 'REQUIRED'),
bigquery.SchemaField('game_date','DATE'),
bigquery.SchemaField('load_datetime','TIMESTAMP'),
]
job_config.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field="game_date")
job_game = client.load_table_from_dataframe(pandas_games_data, 'nba.raw_basketballreference_game', \
job_config=job_config, project=project)
game_result = job_game.result()
game_message = (
f'Job ID: {game_result.job_id} '
f'was started {game_result.started} '
f'and ended {game_result.ended} '
f'loading {game_result.output_rows} row(s) '
f'to {game_result.destination}')
print(game_message)
game_rows_loaded = game_rows_loaded + game_result.output_rows
print(f'Successfully loaded {player_game_rows_loaded} row(s) to raw_basketballreference_playerbox and {game_rows_loaded} to raw_basketballreference_game')
# +
import requests
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import pandas as pd
from google.cloud import storage
def get_games(startDate, endDate):
    """Scrape basketball-reference.com schedule pages for games between two dates.

    Parameters
    ----------
    startDate, endDate : datetime.date
        Inclusive date range of games to return.

    Returns
    -------
    list of dict
        One dict per game with date, teams, abbreviations, start time and a
        synthetic unique ``game_key``.
    """
    ##########################################################################
    # Get distinct season months whose schedule pages must be scraped
    ##########################################################################
    delta = endDate - startDate
    yearmonths = []
    for i in range(delta.days + 1):
        day = startDate + timedelta(days=i)
        r = {'monthname': day.strftime('%B').lower()}
        # NBA schedule pages are keyed by the season-ending year, so games in
        # Oct-Dec belong to the page of the following calendar year.
        r['year'] = day.year + 1 if day.month > 9 else day.year
        if r not in yearmonths:
            yearmonths.append(r)
    schedule = []
    for v in yearmonths:
        year = str(v['year'])
        month = v['monthname']
        url = 'https://www.basketball-reference.com/leagues/NBA_' + year + '_games-' + month + '.html'
        html = requests.get(url)
        if html.ok:
            soup = BeautifulSoup(html.content, 'html.parser')
        else:
            # FIX: corrected "enountered" typo in the message.
            print(f'No data for {month} {year} because encountered error code {html.status_code}')
            continue
        table = soup.find('table', id="schedule")
        if table is None:
            # FIX: page exists but carries no schedule table (e.g. off-season
            # month); the original code raised AttributeError here.
            continue
        rows = table.find('tbody').find_all('tr')
        for row in rows:
            game_date_node = row.find('th', {"data-stat": "date_game"})
            if game_date_node is None:
                continue
            # FIX: parse the date once and reuse it (was parsed three times).
            parsed_date = datetime.strptime(game_date_node.text, '%a, %b %d, %Y')
            if startDate <= parsed_date.date() <= endDate:
                r = {}
                r['game_date'] = parsed_date.strftime("%Y-%m-%d")
                r['game_day'] = parsed_date.strftime("%A")
                v2 = row.find('td', {"data-stat": "game_start_time"})
                r['game_start_time'] = v2.text if v2 else None
                v3 = row.find('td', {"data-stat": "visitor_team_name"})
                r['visitor_team_name'] = v3.text
                r['away_abbr'] = v3['csk'].split('.')[0]
                v4 = row.find('td', {"data-stat": "home_team_name"})
                r['home_team_name'] = v4.text
                r['home_abbr'] = v4['csk'].split('.')[0]
                # Unique key: away + yyyymmdd + home (+ start time when known).
                game_key = r['away_abbr'] + r['game_date'].replace('-', '') + r['home_abbr']
                if r['game_start_time']:
                    game_key += r['game_start_time'].replace(':', '')
                r['game_key'] = game_key
                schedule.append(r)
    return schedule
def write_to_bucket(request):
    """Cloud-Function entry point: scrape the upcoming schedule and upload it as JSON.

    Parameters
    ----------
    request : dict or flask.Request
        Either a plain dict or an HTTP request whose JSON body may contain
        ``ScheduleDays`` (int): how many days of schedule to fetch (default 14).

    Raises
    ------
    ValueError
        If the input cannot be interpreted.
    """
    import os  # local import: `os` is not imported at the top of this cell

    try:
        # BUG FIX: the original `type(request) == 'dict'` compared a type
        # object to the *string* 'dict', which is always False, so plain-dict
        # inputs always fell through to request.get_json() and crashed.
        if isinstance(request, dict):
            request_json = request
        else:
            request_json = request.get_json()
        if request_json and 'ScheduleDays' in request_json:
            schedule_days = request_json['ScheduleDays']
        else:
            schedule_days = 14
    except Exception as e:
        raise ValueError("Invalid input. Please provide ScheduleDays as an integer") from e
    startDate = datetime.now().date()
    endDate = startDate + timedelta(days=schedule_days)
    schedule = get_games(startDate, endDate)
    game_date = pd.DataFrame(schedule)
    client = storage.Client()
    bucket_name = os.environ.get("CLOUD_STORAGE_BUCKET")
    bucket = client.bucket(bucket_name)
    bucket.blob('static/upcoming.json').upload_from_string(game_date.to_json(), 'text/json')
# -
schedule   # inspect the scraped schedule (list of dicts) from the cell above
game_date  # same schedule as a DataFrame
from google.cloud import storage
from io import StringIO # if going with no saving csv file
# +
from google.cloud import storage
# -
# Upload one CSV per weekday, filtered from the schedule DataFrame.
bucket.blob('static/monday.csv').upload_from_string(game_date[game_date['game_day']=='Monday'].to_csv(), 'text/csv')
bucket.blob('static/tuesday.csv').upload_from_string(game_date[game_date['game_day']=='Tuesday'].to_csv(), 'text/csv')
bucket.blob('static/wednesday.csv').upload_from_string(game_date[game_date['game_day']=='Wednesday'].to_csv(), 'text/csv')
bucket.blob('static/thursday.csv').upload_from_string(game_date[game_date['game_day']=='Thursday'].to_csv(), 'text/csv')
bucket.blob('static/friday.csv').upload_from_string(game_date[game_date['game_day']=='Friday'].to_csv(), 'text/csv')
bucket.blob('static/saturday.csv').upload_from_string(game_date[game_date['game_day']=='Saturday'].to_csv(), 'text/csv')
bucket.blob('static/sunday.csv').upload_from_string(game_date[game_date['game_day']=='Sunday'].to_csv(), 'text/csv')
game_date[game_date['game_day']=='Monday']
# Same per-weekday split, serialised as JSON instead of CSV.
bucket.blob('static/monday.json').upload_from_string(game_date[game_date['game_day']=='Monday'].to_json(), 'text/json')
bucket.blob('static/tuesday.json').upload_from_string(game_date[game_date['game_day']=='Tuesday'].to_json(), 'text/json')
bucket.blob('static/wednesday.json').upload_from_string(game_date[game_date['game_day']=='Wednesday'].to_json(), 'text/json')
bucket.blob('static/thursday.json').upload_from_string(game_date[game_date['game_day']=='Thursday'].to_json(), 'text/json')
bucket.blob('static/friday.json').upload_from_string(game_date[game_date['game_day']=='Friday'].to_json(), 'text/json')
bucket.blob('static/saturday.json').upload_from_string(game_date[game_date['game_day']=='Saturday'].to_json(), 'text/json')
bucket.blob('static/sunday.json').upload_from_string(game_date[game_date['game_day']=='Sunday'].to_json(), 'text/json')
bucket = client.bucket('nba-predictions-dev.appspot.com')
# NOTE(review): monday.json holds JSON but is parsed below with read_csv, and
# StringIO is handed the bytes from download_as_string -- verify intent.
d_cpt = StringIO(bucket.blob('static/monday.json').download_as_string())
df = pd.read_csv(d_cpt)
df
# NOTE(review): `blob` is only assigned further down -- these notebook cells
# were executed out of order.
data = json.loads(blob.decode("utf-8").replace("'",'"'))
home_teams = list(data['home_team_name'].values())
away_teams = list(data['visitor_team_name'].values())
game_day = list(data['game_day'].values())
game_date = list(data['game_date'].values())
game_start_time = list(data['game_start_time'].values())
games = []
# NOTE(review): range(len(home_teams)-1) skips the final game; use
# range(len(home_teams)) to include every row.
for i in range(len(home_teams)-1):
    games.append(f'{away_teams[i]} vs. {home_teams[i]} at {game_start_time[i]} on {game_day[i]}, {game_date[i]}')
games
home_teams[6]
range(len(home_teams)-1)
bucket
blob = bucket.blob('static/monday.json').download_as_string()
blob
import json
# NOTE(review): `dict` shadows the builtin type for the rest of the session.
dict = json.loads(blob.decode("utf-8").replace("'",'"'))
dict
list(dict['home_team_name'].values())
list(dict['visitor_team_name'].values())
blob.get_json()  # NOTE(review): `blob` is bytes here; bytes have no get_json()
blob.download_as_string()
blob2 = bucket.blob('monday.json')
blob2
W = 10
type(W)
| notebooks/Testing Sandbox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 含并行连结的网络(GoogLeNet)
# :label:`sec_googlenet`
#
# 在2014年的ImageNet图像识别挑战赛中,一个名叫*GoogLeNet* :cite:`Szegedy.Liu.Jia.ea.2015` 的网络结构大放异彩。
# GoogLeNet吸收了NiN中串联网络的思想,并在此基础上做了改进。
# 这篇论文的一个重点是解决了什么样大小的卷积核最合适的问题。
# 毕竟,以前流行的网络使用小到 $1 \times 1$ ,大到 $11 \times 11$ 的卷积核。
# 本文的一个观点是,有时使用不同大小的卷积核组合是有利的。
# 在本节中,我们将介绍一个稍微简化的GoogLeNet版本:我们省略了一些为稳定训练而添加的特殊特性,但是现在有了更好的训练算法,这些特性不是必要的。
#
#
# ## (**Inception块**)
#
# 在GoogLeNet中,基本的卷积块被称为*Inception块*(Inception block)。这很可能得名于电影《盗梦空间》(Inception),因为电影中的一句话“我们需要走得更深”(“We need to go deeper”)。
#
# 
# :label:`fig_inception`
#
# 如 :numref:`fig_inception` 所示,Inception块由四条并行路径组成。
# 前三条路径使用窗口大小为 $1\times 1$、$3\times 3$ 和 $5\times 5$ 的卷积层,从不同空间大小中提取信息。
# 中间的两条路径在输入上执行 $1\times 1$ 卷积,以减少通道数,从而降低模型的复杂性。
# 第四条路径使用 $3\times 3$ 最大汇聚层,然后使用 $1\times 1$ 卷积层来改变通道数。
# 这四条路径都使用合适的填充来使输入与输出的高和宽一致,最后我们将每条线路的输出在通道维度上连结,并构成Inception块的输出。在Inception块中,通常调整的超参数是每层输出通道的数量。
#
# + origin_pos=2 tab=["pytorch"]
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
class Inception(nn.Module):
    """GoogLeNet Inception block: four parallel branches concatenated on channels.

    ``c1``--``c4`` give the output channel counts of the four branches;
    ``c2`` and ``c3`` are pairs (1x1 reduction channels, main conv channels).
    """
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        # Branch 1: a single 1x1 convolution.
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Branch 2: 1x1 reduction followed by a 3x3 convolution.
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Branch 3: 1x1 reduction followed by a 5x5 convolution.
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Branch 4: 3x3 max-pooling followed by a 1x1 convolution.
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
    def forward(self, x):
        branch1 = F.relu(self.p1_1(x))
        branch2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        branch3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        branch4 = F.relu(self.p4_2(self.p4_1(x)))
        # All branches preserve height/width, so they can be concatenated
        # along the channel dimension.
        return torch.cat([branch1, branch2, branch3, branch4], dim=1)
# + [markdown] origin_pos=4
# 那么为什么GoogLeNet这个网络如此有效呢?
# 首先我们考虑一下滤波器(filter)的组合,它们可以用各种滤波器尺寸探索图像,这意味着不同大小的滤波器可以有效地识别不同范围的图像细节。
# 同时,我们可以为不同的滤波器分配不同数量的参数。
#
#
# ## [**GoogLeNet模型**]
#
# 如 :numref:`fig_inception_full` 所示,GoogLeNet 一共使用 9 个Inception块和全局平均汇聚层的堆叠来生成其估计值。Inception块之间的最大汇聚层可降低维度。
# 第一个模块类似于 AlexNet 和 LeNet,Inception块的栈从VGG继承,全局平均汇聚层避免了在最后使用全连接层。
#
# 
# :label:`fig_inception_full`
#
# 现在,我们逐一实现GoogLeNet的每个模块。第一个模块使用 64 个通道、 $7\times 7$ 卷积层。
#
# + origin_pos=6 tab=["pytorch"]
# Stage 1: 7x7/2 convolution to 64 channels, ReLU, then a 3x3/2 max-pool.
b1 = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
# + [markdown] origin_pos=8
# 第二个模块使用两个卷积层:第一个卷积层是 64个通道、 $1\times 1$ 卷积层;第二个卷积层使用将通道数量增加三倍的 $3\times 3$ 卷积层。
# 这对应于 Inception 块中的第二条路径。
#
# + origin_pos=10 tab=["pytorch"]
# Stage 2: 1x1 conv (64 ch), ReLU, 3x3 conv tripling channels to 192,
# then a 3x3/2 max-pool -- mirrors the second branch of an Inception block.
b2 = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=1),
    nn.ReLU(),
    nn.Conv2d(64, 192, kernel_size=3, padding=1),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
# + [markdown] origin_pos=12
# 第三个模块串联两个完整的Inception块。
# 第一个 Inception 块的输出通道数为 $64+128+32+32=256$,四个路径之间的输出通道数量比为 $64:128:32:32=2:4:1:1$。
# 第二个和第三个路径首先将输入通道的数量分别减少到 $96/192=1/2$ 和 $16/192=1/12$,然后连接第二个卷积层。第二个 Inception 块的输出通道数增加到 $128+192+96+64=480$,四个路径之间的输出通道数量比为 $128:192:96:64 = 4:6:3:2$。
# 第二条和第三条路径首先将输入通道的数量分别减少到 $128/256=1/2$ 和 $32/256=1/8$。
#
# + origin_pos=14 tab=["pytorch"]
# Stage 3: two Inception blocks (output channels 256, then 480) + 3x3/2 max-pool.
b3 = nn.Sequential(
    Inception(192, 64, (96, 128), (16, 32), 32),
    Inception(256, 128, (128, 192), (32, 96), 64),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
# + [markdown] origin_pos=16
# 第四模块更加复杂,
# 它串联了5个Inception块,其输出通道数分别是 $192+208+48+64=512$ 、 $160+224+64+64=512$ 、 $128+256+64+64=512$ 、 $112+288+64+64=528$ 和 $256+320+128+128=832$ 。
# 这些路径的通道数分配和第三模块中的类似,首先是含 $3×3$ 卷积层的第二条路径输出最多通道,其次是仅含 $1×1$ 卷积层的第一条路径,之后是含 $5×5$ 卷积层的第三条路径和含 $3×3$ 最大汇聚层的第四条路径。
# 其中第二、第三条路径都会先按比例减小通道数。
# 这些比例在各个 Inception 块中都略有不同。
#
# + origin_pos=18 tab=["pytorch"]
# Stage 4: five Inception blocks (output channels 512, 512, 512, 528, 832)
# followed by a 3x3/2 max-pool.
b4 = nn.Sequential(
    Inception(480, 192, (96, 208), (16, 48), 64),
    Inception(512, 160, (112, 224), (24, 64), 64),
    Inception(512, 128, (128, 256), (24, 64), 64),
    Inception(512, 112, (144, 288), (32, 64), 64),
    Inception(528, 256, (160, 320), (32, 128), 128),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
# + [markdown] origin_pos=20
# 第五模块包含输出通道数为 $256+320+128+128=832$ 和 $384+384+128+128=1024$ 的两个Inception块。
# 其中每条路径通道数的分配思路和第三、第四模块中的一致,只是在具体数值上有所不同。
# 需要注意的是,第五模块的后面紧跟输出层,该模块同 NiN 一样使用全局平均汇聚层,将每个通道的高和宽变成1。
# 最后我们将输出变成二维数组,再接上一个输出个数为标签类别数的全连接层。
#
# + origin_pos=22 tab=["pytorch"]
# Stage 5: two Inception blocks (832 -> 1024 channels), global average
# pooling down to 1x1, then flatten so a single linear layer can emit the
# 10 class scores.
b5 = nn.Sequential(
    Inception(832, 256, (160, 320), (32, 128), 128),
    Inception(832, 384, (192, 384), (48, 128), 128),
    nn.AdaptiveAvgPool2d((1, 1)),
    nn.Flatten(),
)
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
# + [markdown] origin_pos=24
# GoogLeNet 模型的计算复杂,而且不如 VGG 那样便于修改通道数。
# [**为了使Fashion-MNIST上的训练短小精悍,我们将输入的高和宽从224降到96**],这简化了计算。下面演示各个模块输出的形状变化。
#
# + origin_pos=26 tab=["pytorch"]
# Trace a dummy 1x1x96x96 input through each top-level stage to inspect the
# intermediate output shapes.
X = torch.rand(size=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__,'output shape:\t', X.shape)
# + [markdown] origin_pos=28
# ## [**训练模型**]
#
# 和以前一样,我们使用 Fashion-MNIST 数据集来训练我们的模型。在训练之前,我们将图片转换为 $96 \times 96$ 分辨率。
#
# + origin_pos=29 tab=["pytorch"]
# Train on Fashion-MNIST resized to 96x96; d2l.try_gpu() selects a GPU when one
# is available, otherwise the CPU.
lr, num_epochs, batch_size = 0.1, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
# + [markdown] origin_pos=30
# ## 小结
#
# * Inception 块相当于一个有4条路径的子网络。它通过不同窗口形状的卷积层和最大汇聚层来并行抽取信息,并使用 $1×1$ 卷积层减少每像素级别上的通道维数从而降低模型复杂度。
# * GoogLeNet将多个设计精细的Inception块与其他层(卷积层、全连接层)串联起来。其中Inception块的通道数分配之比是在 ImageNet 数据集上通过大量的实验得来的。
# * GoogLeNet 和它的后继者们一度是 ImageNet 上最有效的模型之一:它以较低的计算复杂度提供了类似的测试精度。
#
#
# ## 练习
#
# 1. GoogLeNet 有数个后续版本。尝试实现并运行它们,然后观察实验结果。这些后续版本包括:
# * 添加批量归一化层 :cite:`Ioffe.Szegedy.2015`(batch normalization),在 :numref:`sec_batch_norm`中将介绍)。
# * 对 Inception 模块进行调整。
# * 使用标签平滑(label smoothing)进行模型正则化 :cite:`Szegedy.Vanhoucke.Ioffe.ea.2016`。
# * 加入残差连接 :cite:`Szegedy.Ioffe.Vanhoucke.ea.2017` ,( :numref:`sec_resnet` 将介绍)。
# 1. 使用 GoogLeNet 的最小图像大小是多少?
# 1. 将 AlexNet、VGG 和 NiN 的模型参数大小与 GoogLeNet 进行比较。后两个网络结构是如何显著减少模型参数大小的?
#
# + [markdown] origin_pos=32 tab=["pytorch"]
# [Discussions](https://discuss.d2l.ai/t/1871)
#
| d2l/chapter_convolutional-modern/googlenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ETL_EDA
# This file records the process of acquiring raw data, transforming it, and loading it into a MongoDB.
import requests
import json
import numpy as np
import pandas as pd
import logging
import utils
import sched
import time
import pymongo
# ## 1. Raw Data from NASA
# The [dataset](https://api.nasa.gov/assets/insight/InSight%20Weather%20API%20Documentation.pdf) is a continuously updated json file. It contains data on Mars weather for the past seven days. It can be retrieved simply by making `requests` without parameters. Data is updated every Martian day (sol), which is about 40 minutes longer than an Earth day.
key = "<KEY>"  # NASA API key placeholder -- substitute a real key before running
# Fetch the InSight Mars-weather feed as raw JSON text (last seven sols).
requested = requests.get(f"https://api.nasa.gov/insight_weather/?api_key={key}&feedtype=json&ver=1.0").text # Download data
requested
# ## 2. Raw Data to Documents/Dicts
# Using json and pandas package, we can easily transform downloaded json files into workable dataframe. We take sol days as keys, extract min, max temperature, atmosphere pressure and wind directions as values for each day. But not all data is valid. To deal with this, we check the validity value of each subject for each day, then replace missing values with last day's corresponding value. Now the data can be easily converted to a list of dicts which is what we want for the MongoDB.
# +
requested = json.loads(requested) # Transform data into dictionary
main_keys = requested['sol_keys'] # Take sol days as unique keys
def valid_check(subject, index):
    """Return True when NASA's validity_checks marks `subject` on sol `index` as valid."""
    sol_checks = requested['validity_checks'][index]
    return sol_checks[subject]['valid']
# Create a dict to store all data; the -1 sentinel row exists only so the
# forward-fill below has a value on the first sol, and is dropped after the loop.
df_requested = {"sol_day": [-1], "date": [-1], "min_temp": [-1], "max_temp": [-1], "pressure": [-1], "wind": [-1]}
for i in main_keys:
    # Write data into dict
    df_requested["sol_day"].append(i)
    df_requested["date"].append(requested[i]['Last_UTC'])
    # If a reading failed its validity check, carry the previous sol's value forward.
    df_requested["min_temp"].append(requested[i]['AT']["mn"] if valid_check('AT', i) else df_requested["min_temp"][-1])
    df_requested["max_temp"].append(requested[i]['AT']["mx"] if valid_check('AT', i) else df_requested["max_temp"][-1])
    df_requested["pressure"].append(requested[i]['PRE']["av"] if valid_check('PRE', i) else df_requested["pressure"][-1])
    df_requested["wind"].append(requested[i]["WD"] if valid_check('WD', i) else df_requested["wind"][-1])
# Convert to a DataFrame and drop the sentinel row (index 0).
df_requested = pd.DataFrame(df_requested)
df_requested.drop(0, inplace=True)
data = df_requested.to_dict(orient='records')
# -
df_requested.head()
print(data[0])
# ## 3. Upsert MongoDB
# If we fetch the data frequently, there are lots of duplicate data entry between each run. The de-duplication happens at insertion. The MongoDB API to use is `collection.replace_one(filter=..., replacement=..., upsert=True)`. The statement matches a document in MongoDB with `filter`, replaces it with `replacement` if the document exists or inserts `replacement` into the database if `filter` matches nothing. Credit to [<NAME>](https://github.com/blownhither).
client = pymongo.MongoClient('localhost', 27017)  # local MongoDB instance
db = client.get_database("MarsWeather")
collection = db.get_collection("DailyWeather")
update_count = 0
for record in data:
    # Upsert keyed on sol_day: replace the existing document for that sol or
    # insert a new one -- this de-duplicates records across repeated runs.
    result = collection.replace_one(
        filter={'sol_day': record['sol_day']}, # locate the document if exists
        replacement=record, # latest document
        upsert=True) # update if exists, insert if not
    if result.matched_count > 0:
        update_count += 1
print(f"rows={df_requested.shape[0]}, update={update_count}, "
      f"insert={df_requested.shape[0]-update_count}")
| ETL_EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # AdaBoost Regression
# AdaBoost fits a sequence of weak learners on differently weighted versions of the training data. It starts by predicting
# on the original dataset, giving equal weight to each observation. If the first learner's prediction is incorrect for some
# observations, those observations receive higher weight in the next round.
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn import metrics
from sklearn.metrics import r2_score
# Import dataset (hard-coded local path -- adjust for your machine)
dataset = pd.read_csv('/home/webtunix/Desktop/Regression/random.csv')
print(len(dataset))
# Split data into features x (columns 1-3) and target y (column 4)
x = dataset.iloc[:,1:4].values
y = dataset.iloc[:,4].values
# Split training and testing sets (70/30; unseeded, so results vary per run)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.3)
# Fit an AdaBoostRegressor with default hyperparameters
clf = AdaBoostRegressor()
clf.fit(X_train,y_train)
pred = clf.predict(X_test)
print(pred)
# Goodness of fit: R^2 score on the held-out test set
print("Accuracy:",r2_score(y_test,pred))
# Plot the scatter graph of actual values and predicted values
# +
# NOTE(review): assumes the test split has exactly 72 rows -- verify against
# the dataset size, otherwise the scatter calls below raise a shape error.
colors = np.random.rand(72)
# plot target and predicted values
plt.scatter(colors,y_test, c='orange',label='target')
plt.scatter(colors,pred, c='green',label='predicted')
# plot x and y labels
plt.xlabel('x')
plt.ylabel('y')
# plot title
plt.title(' AdaBoostRegressor')
plt.legend()
plt.show()
# -
# # Research Infinite Solutions LLP
# by Research Infinite Solutions (https://www.ris-ai.com//)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
#
#
| Regression_models/adaboast_implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression: Inverse Matrix Method
#
# This script explores how to accomplish linear regression with TensorFlow using the matrix inverse.
#
# Given the system $ A \cdot x = y $, the matrix inverse way of linear regression (equations for overdetermined systems) is given by solving for x as follows.
#
# $$x = \left( A^{T} \cdot A \right)^{-1} \cdot A^{T} \cdot y$$
#
# As a reminder, here, $x$ is our parameter matrix (vector of length $F+1$, where $F$ is the number of features). Here, $A$, our design matrix takes the form
#
# $$
# A=
# \begin{bmatrix}
# 1 & x_{11} & x_{12} & \dots & x_{1F} \\
# 1 & x_{21} & x_{22} & \dots & x_{2F} \\
# \vdots & \vdots & \vdots & \ddots & \vdots \\
# 1 & x_{n1} & x_{n2} & \dots & x_{nF}
# \end{bmatrix}
# $$
#
# Where $F$ is the number of independent features, and $n$ is the number of points. For an overdetermined system, $n>F$. Remember that one observed point in our system will have length $F+1$ and the $i^{th}$ point will look like
#
# $$point_{i} = \left( y_{i}, x_{i1}, x_{i2}, \dots, x_{iF} \right)$$
#
# For this recipe, we will consider only a 2-dimensional system ($F=1$), so that we can plot the results at the end.
#
# We start by loading the necessary libraries.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Next we start a graph session.
sess = tf.Session()
# For illustration purposes, we randomly generate data to fit.
#
# The x-values will be a sequence of 100 evenly spaced values between 0 and 100.
#
# The y-values will fit to the line: $y=x$, but we will add normally distributed error according to $N(0,1)$.
# Create the data
x_vals = np.linspace(0, 10, 100)
y_vals = x_vals + np.random.normal(0, 1, 100)
# We create the design matrix, $A$, which will be a column of ones and the x-values.
# Create design matrix
x_vals_column = np.transpose(np.matrix(x_vals))
ones_column = np.transpose(np.matrix(np.repeat(1, 100)))
A = np.column_stack((x_vals_column, ones_column))
# We now create the y-values as a matrix with Numpy.
#
# After we have the y-values and the design matrix, we create tensors from them.
# +
# Format the y matrix
y = np.transpose(np.matrix(y_vals))
# Create tensors
A_tensor = tf.constant(A)
y_tensor = tf.constant(y)
print(A_tensor.shape)
# -
# Now we solve for the parameter matrix with TensorFlow operations.
# Matrix inverse solution: x = (A^T A)^-1 A^T y  (normal equations)
tA_A = tf.matmul(tf.transpose(A_tensor), A_tensor)        # A^T A
tA_A_inv = tf.matrix_inverse(tA_A)                        # (A^T A)^-1
product = tf.matmul(tA_A_inv, tf.transpose(A_tensor))     # (A^T A)^-1 A^T
solution = tf.matmul(product, y_tensor)                   # parameter vector x
# Run the solutions and extract the slope and intercept from the parameter matrix.
# +
solution_eval = sess.run(solution)
# Extract coefficients
slope = solution_eval[0][0]
y_intercept = solution_eval[1][0]
# -
# Now we print the solution we found and create a best fit line.
# +
print('slope: ' + str(slope))
print('y_intercept: ' + str(y_intercept))
# Get best fit line
best_fit = []
for i in x_vals:
best_fit.append(slope*i+y_intercept)
# -
# We use Matplotlib to plot the results.
# Plot the results
plt.plot(x_vals, y_vals, 'o', label='Data')
plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
plt.legend(loc='upper left')
plt.show()
| 03_Linear_Regression/01_Using_the_Matrix_Inverse_Method/01_lin_reg_inverse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # %matplotlib inline
# %matplotlib notebook
try:
from importlib import reload
except:
pass
from __future__ import print_function ## Force python3-like printing
import os
from matplotlib import pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import sfdmap
import numpy as np
from astropy.table import Table,Column
import pycoco as pcc
reload(pcc) ## FOR DEV
# reload(pcc.defaults)
# reload(pcc.functions)
# reload(pcc.classes)
# reload(pcc.utils)
import pyCoCo as pccsims
# -
pccsims.__file__
# +
def convert_column_string_encoding(column):
    """Return a copy of the column with every entry passed through pcc.utils.b.

    The column name is preserved; only the element encoding changes.
    """
    encoded_values = [pcc.utils.b(entry) for entry in column.data]
    return Column(encoded_values, name=column.name)
def get_mjdmax_BessellV(sn):
    """Locate the MJD of maximum light in Bessell V from the fitted spline.

    Evaluates the V-band light-curve spline on a dense (0.001-day) grid over
    the observed epoch range and returns the grid value(s) at the peak.
    """
    spline = sn.lcfit.spline["BessellV"]
    mjd_obs = sn.phot.data["BessellV"]["MJD"]
    grid = np.arange(np.nanmin(mjd_obs), np.nanmax(mjd_obs), 0.001)
    fitted = spline(grid)
    peak = np.where(fitted == np.nanmax(fitted))
    return grid[peak]
# +
filter_path = pcc.defaults._default_filter_dir_path   # packaged filter responses
coco_root_path = pcc.defaults._default_coco_dir_path  # CoCo data root directory
reload(pccsims)
# pcc.utils.b(...) presumably byte-encodes the paths for the compiled pyCoCo
# interface -- confirm against pycoco.utils.
coco = pccsims.pyCoCo(pcc.utils.b(filter_path), pcc.utils.b(coco_root_path))
# -
# Load in an templates object
# +
# snname = "SN2007uy"
snname = "SN2013ge"
sn = pcc.classes.SNClass(snname)
# Photometry, spectra list and fitted light-curve reconstruction all live
# under the CoCo root directory, keyed by the SN name.
phot_path = os.path.join(coco_root_path, "data/lc/", snname + ".dat")
speclist_path = os.path.join(str(coco_root_path),"lists/" + snname + ".list")
recon_filename = os.path.abspath(os.path.join(str(coco_root_path), "recon/", snname + ".dat"))
print(phot_path)
sn.load_phot(path = phot_path)
# sn.phot.plot()
sn.get_lcfit(recon_filename)
sn.load_list(path = speclist_path)
sn.load_spec()
# sn.load_mangledspec()
# sn.plot_spec()
# sn.plot_mangledspec()
# sn.plot_lc(multiplot = False, mark_spectra=True, savepng=True, outpath = "/Users/berto/projects/LSST/SN2007uy")
sn.plot_lc(multiplot = False, mark_spectra=True)
# +
# sn.plot_lc(multiplot = True, lock_axis=True)
# -
sn.load_mangledspec()
# sn.plot_spec()
# +
# sn.plot_mangledspec()
# +
# for i in zip(sn.spec, sn.mangledspec):
# print(i)
# pcc.functions.compare_spec(sn.spec[i[0]], sn.mangledspec[i[1]], normalise=True)
# +
# pcc.plot_mangle(sn.spec["2009jf_-7.64.txt"], sn.mangledspec["SN2009jf_55114.060000.spec"])
# +
from scipy.integrate import simps
def calc_spectrum_filter_flux(filter_name, SpecClass,
                              filter_dir="/Users/berto/Code/CoCo/data/filters/"):
    """Synthetic photometry: filter-area-normalised flux of a spectrum.

    Parameters
    ----------
    filter_name : str
        Name of the filter response file (without the ".dat" extension).
    SpecClass : pycoco spectrum object
        Object exposing `wavelength` and `flux` arrays.
    filter_dir : str, optional
        Directory holding the filter response files. GENERALIZATION: the path
        was previously hard-coded inside the function; the default preserves
        the original behaviour for existing callers.

    Returns
    -------
    float
        Integrated transmitted flux divided by the filter area.
    """
    filter_object = pcc.functions.load_filter(filter_dir + filter_name + ".dat")
    # Resample the filter response onto the spectrum's wavelength grid so the
    # two integrands share an abscissa.
    filter_object.resample_response(new_wavelength=SpecClass.wavelength)
    # Normalisation: total filter area via Simpson's rule.
    filter_area = simps(filter_object.throughput, filter_object.wavelength)
    transmitted_spec = filter_object.throughput * SpecClass.flux
    integrated_flux = simps(transmitted_spec, SpecClass.wavelength)
    return integrated_flux / filter_area
def calc_specphot(sn, filtername):
    """Compute synthetic photometry in `filtername` for every mangled spectrum of `sn`.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Observation epochs (MJD) and the corresponding synthetic fluxes, in
        the iteration order of ``sn.mangledspec``.
    """
    # PERF: accumulate in Python lists and convert once at the end -- the
    # original grew numpy arrays with np.append inside the loop (quadratic).
    epochs = []
    fluxes = []
    for spec in sn.mangledspec:
        fluxes.append(calc_spectrum_filter_flux(filtername, sn.mangledspec[spec]))
        epochs.append(sn.mangledspec[spec].mjd_obs)
    return np.asarray(epochs), np.asarray(fluxes)
def compare_phot_specphot(sn, filtername):
    """Overplot synthetic spectrophotometry against the observed photometry.

    Parameters
    ----------
    sn : SN object with mangled spectra and photometry loaded.
    filtername : str
        Filter to compare, e.g. "BessellV".
    """
    specepoch, specphot = calc_specphot(sn, filtername)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(specepoch, specphot, label = "specphot")
    ax.scatter(sn.phot.data[filtername]["MJD"], sn.phot.data[filtername]["flux"], label = filtername)
    # Headroom of 5% above the larger of the two datasets.
    ax.set_ylim(0, 1.05 * np.nanmax(np.append(sn.phot.data[filtername]["flux"], specphot)))
    ax.legend()
    # plt.show()
# -
# compare_phot_specphot(sn, "BessellB")
# compare_phot_specphot(sn, "BessellV")
# compare_phot_specphot(sn, "SDSS_r")
# compare_phot_specphot(sn, "SDSS_i")
# inputs:
#
# * **`snname`**
# * **`redshift`**
# * **`absmag offset`**
# * **`EBV MW`**
# * **`EBV Host`**
# * **`Rv`**
# * **`MJD at Peak`**
#
# * **`MJD to simulate`**
# * **`filters to simulate`**
sn.lcfit.get_fit_splines()
# Quick check that the fit spline fits the fit (in Bessell V).
# Note: spline sampled at MJDOBS so looks slightly linear.
# +
# plt.plot(sn.phot.data["BessellV"]["MJD"], sn.lcfit.spline["BessellV"](sn.phot.data["BessellV"]["MJD"]), label = r"$\textnormal{Spline}$")
# plt.scatter(sn.phot.data["BessellV"]["MJD"], sn.phot.data["BessellV"]["flux"], label = r"$\textnormal{Photometry}$")
# plt.plot(sn.lcfit.data["BessellV"]["MJD"], sn.lcfit.data["BessellV"]["flux"], label = r"$\textnormal{Fit}$")
# plt.legend()
# +
mjdmax = get_mjdmax_BessellV(sn)[0]
filters_to_sim = convert_column_string_encoding(sn.phot.phot["filter"]).data
mjd_to_sim = sn.phot.phot["MJD"].data
# -
verbose = False
# verbose = True
for i, f in enumerate(filters_to_sim):
filters_to_sim[i] = f.replace(b"SDSS", b"LSST").replace(b"BessellV", b"LSST_g")
# filters_to_sim[i] = pcc.utils.b(str(f).replace("BessellV", "LSST_g").replace("SDSS_r", "LSST_r"))
if verbose:
print(mjdmax)
print(mjd_to_sim)
print(filters_to_sim)
# +
# tablepath = "/Users/berto/Code/verbose-enigma/testdata/info/info.dat"
# info = Table.read(tablepath, format = "ascii.commented_header")
info = pcc.functions.load_info()
# +
z_obs = info.get_sn_info("SN2007uy")["z_obs"]
m = sfdmap.SFDMap()
print(z_obs)
# +
reload(pccsims)
coco = pccsims.pyCoCo(pcc.utils.b(filter_path), pcc.utils.b(coco_root_path))
# flux, flux_err = coco.simulate(b"SN2009jf",
# 0.008, 0.0, 0.0, 0.0, 3.1,
# mjdmax, mjd_to_sim,
# filters_to_sim)
flux, flux_err = coco.simulate(b"SN2007uy",
z_obs, 0.0, 0.0, 0.0, 3.1,
mjdmax, mjd_to_sim,
filters_to_sim)
# flux, flux_err = coco.simulate(b"SN2009jf",
# 0.008, 0.0, 0.1, 0.1, 3.1,
# mjdmax, mjd_to_sim,
# filters_to_sim)
# -
coco.get_fit_params()
# +
specphot = coco.spec_photometry(b"SN2007uy",
z_obs, b"LSST_g")
# plt.scatter(specphot[0], specphot[1])
# plt.ylim(0, 1.02 *np.nanmax(specphot[1]))
# +
p = pcc.classes.PhotometryClass()
p.load_table(pcc.utils.simulate_out_to_ap_table(mjd_to_sim, flux, flux_err, filters_to_sim))
# plt.scatter(p.data["BessellV"]["MJD"], p.data["BessellV"]["flux"], label = "Synthetic Bessell V")
plt.scatter(p.data["LSST_g"]["MJD"], p.data["LSST_g"]["flux"], label = "Synthetic LSST g")
plt.scatter(sn.phot.data["BessellV"]["MJD"], sn.phot.data["BessellV"]["flux"], label = "Real Bessell V")
plt.scatter(specphot[0] + mjdmax, specphot[1])
plt.ylim(0, 1.02 *np.nanmax(np.append(p.data["LSST_g"]["flux"], sn.phot.data["BessellB"]["flux"])))
plt.legend()
# +
# p.plot()
# +
# p.save(filename = "SN2007uy_sim_LSST.dat", path = "/Users/berto/projects/LSST/cadence/")
# +
sn_fake = pcc.classes.SNClass("SN2007uy_sim")
sn_fake.load_phot(path = "/Users/berto/projects/LSST/cadence/SN2007uy_sim_LSST.dat")
sn_fake.plot_lc(multiplot = False)
# +
from matplotlib.ticker import MultipleLocator
# filters = ["BessellV"]
filters = ["SDSS_r"]
alpha = 1.0
xminorticks = 10
pcc.utils.setup_plot_defaults()
fig = plt.figure(figsize=[8, 4])
fig.subplots_adjust(left = 0.1, bottom = 0.13, top = 0.93,
right = 0.91, hspace=0, wspace = 0)
## Label the axes
xaxis_label_string = r'$\textnormal{Time, MJD (days)}$'
yaxis_label_string = r'$\textnormal{Flux, erg s}^{-1}\textnormal{\AA}^{-1}\textnormal{cm}^{-2}$'
ax1 = fig.add_subplot(111)
axes_list = [ax1]
for filter_key in filters:
plot_label_string = r'$\rm{' + sn.phot.data_filters[filter_key].filter_name.replace('_', '\\_') + '}$'
plot_label_string_fake = r'$\rm{' + sn_fake.phot.data_filters[filter_key].filter_name.replace('_', '\\_') + ', simulated}$'
ax1.errorbar(sn.phot.data[filter_key]['MJD'], sn.phot.data[filter_key]['flux'],
yerr = sn.phot.data[filter_key]['flux_err'],
capsize = 0, fmt = 'x', color = sn.phot.data_filters[filter_key]._plot_colour,
label = plot_label_string, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha)
ax1.fill_between(sn.lcfit.data[filter_key]['MJD'], sn.lcfit.data[filter_key]['flux_upper'], sn.lcfit.data[filter_key]['flux_lower'],
color = pcc.hex["batman"],
alpha = 0.8, zorder = 0)
ax1.errorbar(sn_fake.phot.data[filter_key]['MJD'], sn_fake.phot.data[filter_key]['flux'],
yerr = sn_fake.phot.data[filter_key]['flux_err'],
# capsize = 0, fmt = 'o', color = sn_fake.phot.data_filters[filter_key]._plot_colour,
capsize = 0, fmt = 'o', color = pcc.hex['r'],
label = plot_label_string_fake, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha)
xminorLocator = MultipleLocator(xminorticks)
ax1.spines['top'].set_visible(True)
ax1.xaxis.set_minor_locator(xminorLocator)
plot_legend = ax1.legend(loc = 'upper right', scatterpoints = 1, markerfirst = False,
numpoints = 1, frameon = False, bbox_to_anchor=(1., 1.),
fontsize = 12.)
ax1.set_ylabel(yaxis_label_string)
ax1.set_xlabel(xaxis_label_string)
outpath = "/Users/berto/projects/LSST/cadence/SN2007uy_consistency_check_SDSS_r"
fig.savefig(outpath + ".png", format = 'png', dpi=500)
# +
from matplotlib.ticker import MultipleLocator
# filters = ["BessellV"]
filters = ["LSST_g"]
alpha = 1.0
xminorticks = 10
pcc.utils.setup_plot_defaults()
fig = plt.figure(figsize=[8, 4])
fig.subplots_adjust(left = 0.1, bottom = 0.13, top = 0.93,
right = 0.91, hspace=0, wspace = 0)
## Label the axes
xaxis_label_string = r'$\textnormal{Time, MJD (days)}$'
yaxis_label_string = r'$\textnormal{Flux, erg s}^{-1}\textnormal{\AA}^{-1}\textnormal{cm}^{-2}$'
ax1 = fig.add_subplot(111)
axes_list = [ax1]
for filter_key in filters:
plot_label_string = r'$\rm{' + sn.phot.data_filters["BessellV"].filter_name.replace('_', '\\_') + '}$'
plot_label_string_fake = r'$\rm{' + sn_fake.phot.data_filters[filter_key].filter_name.replace('_', '\\_') + ', simulated}$'
ax1.errorbar(sn.phot.data["BessellV"]['MJD'], sn.phot.data["BessellV"]['flux'],
yerr = sn.phot.data["BessellV"]['flux_err'],
capsize = 0, fmt = 'x', color = sn.phot.data_filters["BessellV"]._plot_colour,
label = plot_label_string, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha)
ax1.fill_between(sn.lcfit.data["BessellV"]['MJD'], sn.lcfit.data["BessellV"]['flux_upper'], sn.lcfit.data["BessellV"]['flux_lower'],
color = pcc.hex["batman"],
alpha = 0.8, zorder = 0)
ax1.errorbar(sn_fake.phot.data[filter_key]['MJD'], sn_fake.phot.data[filter_key]['flux'],
yerr = sn_fake.phot.data[filter_key]['flux_err'],
# capsize = 0, fmt = 'o', color = sn_fake.phot.data_filters[filter_key]._plot_colour,
capsize = 0, fmt = 'o', color = pcc.hex['LSST_g'],
label = plot_label_string_fake, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha)
xminorLocator = MultipleLocator(xminorticks)
ax1.spines['top'].set_visible(True)
ax1.xaxis.set_minor_locator(xminorLocator)
plot_legend = ax1.legend(loc = 'upper right', scatterpoints = 1, markerfirst = False,
numpoints = 1, frameon = False, bbox_to_anchor=(1., 1.),
fontsize = 12.)
ax1.set_ylabel(yaxis_label_string)
ax1.set_xlabel(xaxis_label_string)
print(ax1.get_xlim())
outpath = "/Users/berto/projects/LSST/cadence/SN2007uy_consistency_check_BessellV_LSSTg"
# fig.savefig(outpath + ".png", format = 'png', dpi=500)
# -
# +
cadencepath = "/Users/berto/projects/LSST/cadence/LSST_DDF_2786_cadence.dat"
data = Table.read(cadencepath, format = "ascii.commented_header")
w = np.logical_or(data["filter"] == "LSST_g", data["filter"] == "LSST_r")
mjd_to_sim = data[w]["MJD"].data
filters_to_sim = convert_column_string_encoding(data[w]["filter"]).data
# -
# mjd_to_sim
mjd_to_sim = mjd_to_sim - (mjd_to_sim[0] - 54450)
#
flux, flux_err = coco.simulate(b"SN2007uy",
z_obs, 0.0, 0.0, 0.0, 3.1,
mjdmax, mjd_to_sim,
filters_to_sim)
# +
p = pcc.classes.PhotometryClass()
p.load_table(pcc.utils.simulate_out_to_ap_table(mjd_to_sim, flux, flux_err, filters_to_sim))
# -
p.plot()
p.save(filename = "SN2007uy_sim_LSST_gr.dat", path = "/Users/berto/projects/LSST/cadence/")
# +
sn_fake = pcc.classes.SNClass("SN2007uy_sim")
sn_fake.load_phot(path = "/Users/berto/projects/LSST/cadence/SN2007uy_sim_LSST_gr.dat")
sn_fake.plot_lc(multiplot = False)
# +
from matplotlib.ticker import MultipleLocator
filters = ["BessellV", "SDSS_r"]
markers = ["x", "o"]
# filters = ["LSST_g"]
alpha = 1.0
xminorticks = 10
pcc.utils.setup_plot_defaults()
fig = plt.figure(figsize=[8, 4])
fig.subplots_adjust(left = 0.1, bottom = 0.13, top = 0.93,
right = 0.91, hspace=0, wspace = 0)
## Label the axes
xaxis_label_string = r'$\textnormal{Time, MJD (days)}$'
yaxis_label_string = r'$\textnormal{Flux, erg s}^{-1}\textnormal{\AA}^{-1}\textnormal{cm}^{-2}$'
ax1 = fig.add_subplot(111)
axes_list = [ax1]
for j, filter_key in enumerate(filters):
plot_label_string = r'$\rm{' + sn.phot.data_filters[filter_key].filter_name.replace('_', '\\_') + '}$'
ax1.errorbar(sn.phot.data[filter_key]['MJD'], sn.phot.data[filter_key]['flux'],
yerr = sn.phot.data[filter_key]['flux_err'],
capsize = 0, fmt = markers[j], color = "none",
label = plot_label_string, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha,)
ax1.fill_between(sn.lcfit.data[filter_key]['MJD'], sn.lcfit.data[filter_key]['flux_upper'], sn.lcfit.data[filter_key]['flux_lower'],
color = pcc.hex["batman"],
alpha = 0.8, zorder = 0)
fake_filters = ["LSST_g", "LSST_r"]
for j, filter_key in enumerate(fake_filters):
plot_label_string_fake = r'$\rm{' + sn_fake.phot.data_filters[filter_key].filter_name.replace('_', '\\_') + ', simulated}$'
ax1.errorbar(sn_fake.phot.data[filter_key]['MJD'], sn_fake.phot.data[filter_key]['flux'],
yerr = sn_fake.phot.data[filter_key]['flux_err'],
capsize = 0, fmt = 'o', color = sn_fake.phot.data_filters[filter_key]._plot_colour,
# capsize = 0, fmt = 'o', color = pcc.hex['LSST_g'],
label = plot_label_string_fake, ecolor = pcc.hex['batman'], mec = pcc.hex["batman"],
alpha = alpha)
xminorLocator = MultipleLocator(xminorticks)
ax1.spines['top'].set_visible(True)
ax1.xaxis.set_minor_locator(xminorLocator)
plot_legend = ax1.legend(loc = 'upper right', scatterpoints = 1, markerfirst = False,
numpoints = 1, frameon = False, bbox_to_anchor=(1., 1.),
fontsize = 12.)
ax1.set_ylabel(yaxis_label_string)
ax1.set_xlabel(xaxis_label_string)
ax1.set_xlim(ax1.get_xlim()[0], 54643.724999999999 )
outpath = "/Users/berto/projects/LSST/cadence/SN2007uy_cadence_check_LSSTr_LSSTg"
fig.savefig(outpath + ".png", format = 'png', dpi=500)
# +
# flux
# +
# pccsims.__file__
# -
# +
# p.plot(["Bessellv"], legend=True)
# -
sn.plot_lc(["BessellV"], multiplot = False)
plt.scatter(p.data["BessellV"]["MJD"], p.data["BessellV"]["flux"], label = "Synthetic Bessell V")
p.plot(["BessellB"])
sn.plot_lc(multiplot=False)
sn.load_mangledspec()
sn.plot_mangledspec()
sn.plot_spec()
# +
mjdmax = get_mjdmax_BessellV(sn)[0]
filters_to_sim = convert_column_string_encoding(sn.phot.data["BessellB"]["filter"]).data
mjd_to_sim = sn.phot.data["BessellB"]["MJD"].data
flux, flux_err = coco.simulate(b"SN2009jf",
z_obs, -0.0, 0.2, 0.3, 3.1,
mjdmax, mjd_to_sim,
filters_to_sim)
# +
plt.scatter(mjd_to_sim,sn.phot.data["BessellB"]["flux"])
plt.plot(sn.lcfit.data["BessellB"]["MJD"], sn.lcfit.data["BessellB"]["flux"])
plt.ylim(0, np.nanmax(sn.phot.data["BessellB"]["flux"])*1.1)
# -
# +
p = pcc.classes.PhotometryClass()
p.load_table(pcc.utils.simulate_out_to_ap_table(mjd_to_sim, flux, flux_err, filters_to_sim))
# -
p.plot()
# +
# s = pcc.SpectrumClass()
# s.load("SN2009jf_55106.120000.spec", directory="/Users/berto/Code/CoCo/spectra/")
# s.plot()
# +
# s = pcc.SpectrumClass()
# s.load("SN2009jf_55108.130000.spec", directory="/Users/berto/Code/CoCo/spectra/")
# s.plot()
# +
# s = pcc.SpectrumClass()
# s.load("SN2009jf_55114.060000.spec", directory="/Users/berto/Code/CoCo/spectra/")
# s.plot()
# -
# +
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
def load_coords(filename = "sncoordinates.list"):
    """Load the supernova coordinate table shipped alongside the ``pcc`` package.

    Parameters
    ----------
    filename : str, optional
        Name of the coordinate list file; resolved relative to the parent
        directory of the installed ``pcc`` package.

    Returns
    -------
    astropy.table.Table
        Table read with the ``ascii.commented_header`` format.  Expected to
        contain ``snname``, ``RA`` and ``Dec`` columns (those are the columns
        used when looking up a supernova's sky position).

    NOTE(review): relies on ``os`` already being imported earlier in the
    notebook — confirm before reusing this cell standalone.
    """
    path = os.path.abspath(os.path.join(pcc.__path__[0], os.path.pardir, filename))
    coordtable = Table.read(path, format = 'ascii.commented_header')
    return coordtable
# +
# # %timeit load_coords()
# -
cootable = load_coords()
# +
# %%timeit
snname = "SN2009jf"
w = np.where(cootable["snname"] == snname)
c = SkyCoord(cootable["RA"][w], cootable["Dec"][w], frame='icrs')
# -
c.ra.deg[0], c.dec.deg[0]
import sfdmap
m = sfdmap.SFDMap()
m.ebv(c.ra.deg[0], c.dec.deg[0], unit = 'degree')
| notebooks/CoCo_SN2007uy_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:adventofcode]
# language: python
# name: conda-env-adventofcode-py
# ---
# # Challenge 5
#
# ## Challenge 5.1
myinput = 'uqwqemis'
# +
import hashlib
def md5hash(myinput):
    """Return the hexadecimal MD5 digest of the string *myinput*."""
    digest = hashlib.md5(myinput.encode('utf-8'))
    return digest.hexdigest()
def is_valid(myhash):
    """Return True if *myhash* (a lowercase MD5 hex digest) is "interesting".

    A hash is interesting when its hex representation starts with five
    zeros.  This prefix test is equivalent to the original comparison of
    ``int(myhash, 16)`` against ``0x00000fff...f`` (five zeros followed by
    27 f's), but avoids rebuilding that constant and parsing the whole
    32-character digest on every call — this function is invoked millions
    of times during the brute-force search.
    """
    return myhash.startswith('00000')
def password(myinput):
    """Brute-force the door password (Advent of Code 2016, day 5, part 1).

    Hashes ``myinput + index`` for index = 0, 1, 2, ... and, for every
    "interesting" hash (five leading zeros), appends the sixth hex digit to
    the password.  Stops once the password is as long as *myinput* (eight
    characters for this puzzle's input).
    """
    pswd = ''
    index = 0
    while len(pswd) < len(myinput):
        candidate = md5hash(myinput + str(index))
        if is_valid(candidate):
            pswd += candidate[5]
        index += 1
    return pswd
# -
password(myinput)
# ## Challenge 5.2
def complex_password(myinput):
    """Brute-force the positional password (Advent of Code 2016, day 5, part 2).

    For every "interesting" hash of ``myinput + index``, the sixth hex digit
    names a position and the seventh is the character for that position.
    Only the first character found for each position counts; out-of-range
    positions are ignored.  Stops once all ``len(myinput)`` slots are filled.
    """
    pswd = [None] * len(myinput)
    filled = 0
    index = 0
    while filled < len(myinput):
        candidate = md5hash(myinput + str(index))
        index += 1
        if not is_valid(candidate):
            continue
        pos = int(candidate[5], 16)
        if pos < len(myinput) and pswd[pos] is None:
            pswd[pos] = candidate[6]
            filled += 1
    return ''.join(pswd)
complex_password(myinput)
| 2016/ferran/day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="x4HI2mpwlrcn"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="679Lmwt3l1Bk"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="DSPCom-KmApV"
# # 卷积神经网络(Convolutional Neural Network, CNN)
# + [markdown] id="klAltGp8ycek"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://tensorflow.google.cn/tutorials/images/cnn"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />在 tensorFlow.google.cn 上查看</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/images/cnn.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />在 Google Colab 中运行</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/images/cnn.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/images/cnn.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载 notebook</a>
# </td>
# </table>
# + [markdown] id="qLGkt5qiyz4E"
# Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的
# [官方英文文档](https://tensorflow.google.cn/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到
# [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n) GitHub 仓库。要志愿地撰写或者审核译文,请加入
# [<EMAIL> Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。
# + [markdown] id="m7KBpffWzlxH"
# ### 导入 TensorFlow
# + id="iAve6DCL4JH4"
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# + [markdown] id="jRFxccghyMVo"
# ### 下载并准备 CIFAR10 数据集
#
#
# CIFAR10 数据集包含 10 类,共 60000 张彩色图片,每类图片有 6000 张。此数据集中 50000 个样例被作为训练集,剩余 10000 个样例作为测试集。类之间相互独立,不存在重叠的部分。
# + id="JWoEqyMuXFF4"
# Download (on first run) and split CIFAR-10 into train/test arrays.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values from [0, 255] to the [0, 1] range.
train_images, test_images = train_images / 255.0, test_images / 255.0
# + [markdown] id="7wArwCTJJlUa"
# ### 验证数据
#
# 我们将测试集的前 25 张图片和类名打印出来,来确保数据集被正确加载。
#
# + id="K3PAELE2eSU9"
# Human-readable names for the 10 CIFAR-10 label indices (0..9).
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Show the first 25 training images in a 5x5 grid as a sanity check.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # CIFAR labels are arrays of shape (1,), so an extra index is
    # needed to get the scalar class id.
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
# + [markdown] id="Oewp-wYg31t9"
# ### 构造卷积神经网络模型
# + [markdown] id="3hQvqXpNyN3x"
# 下方展示的 6 行代码声明了了一个常见卷积神经网络,由几个 [Conv2D](https://tensorflow.google.cn/api_docs/python/tf/keras/layers/Conv2D) 和 [MaxPooling2D](https://tensorflow.google.cn/api_docs/python/tf/keras/layers/MaxPool2D) 层组成。
#
# CNN 的输入是张量 (Tensor) 形式的 (image_height, image_width, color_channels),包含了图像高度、宽度及颜色信息。不需要输入 batch size。如果您不熟悉图像处理,颜色信息建议您使用 RGB 色彩模式,此模式下,`color_channels` 为 `(R,G,B)` 分别对应 RGB 的三个颜色通道(color channel)。在此示例中,我们的 CNN 输入,CIFAR 数据集中的图片,形状是 `(32, 32, 3)`。您可以在声明第一层时将形状赋值给参数 `input_shape` 。
#
# + id="L9YmGQBQPrdn"
# Convolutional base: three Conv2D layers with 2x2 max-pooling between the
# first two pairs.  Input is a 32x32 RGB image; each pooling step halves the
# spatial dimensions while the channel count grows 32 -> 64 -> 64.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# + [markdown] id="lvDVFkg-2DPm"
# 我们声明的 CNN 结构是:
# + id="8-C4XBg4UTJy"
model.summary()
# + [markdown] id="_j-AXYeZ2GO5"
# 在上面的结构中,您可以看到每个 Conv2D 和 MaxPooling2D 层的输出都是一个三维的张量 (Tensor),其形状描述了 (height, width, channels)。越深的层中,宽度和高度都会收缩。每个 Conv2D 层输出的通道数量 (channels) 取决于声明层时的第一个参数(如:上面代码中的 32 或 64)。这样,由于宽度和高度的收缩,您便可以(从运算的角度)增加每个 Conv2D 层输出的通道数量 (channels)。
# + [markdown] id="_v8sVOtG37bT"
# ### 增加 Dense 层
# *Dense 层等同于全连接 (Full Connected) 层。*
# 在模型的最后,您将把卷积后的输出张量(本例中形状为 (4, 4, 64))传给一个或多个 Dense 层来完成分类。Dense 层的输入为向量(一维),但前面层的输出是3维的张量 (Tensor)。因此您需要将三维张量展开 (flatten) 到1维,之后再传入一个或多个 Dense 层。CIFAR 数据集有 10 个类,因此您最终的 Dense 层需要 10 个输出及一个 softmax 激活函数。
# + id="mRs95d6LUVEi"
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
# + [markdown] id="ipGiQMcR4Gtq"
# 查看完整的 CNN 结构:
# + id="8Yu_m-TZUWGX"
model.summary()
# + [markdown] id="xNKXi-Gy3RO-"
# 可以看出,在被传入两个 Dense 层之前,形状为 (4, 4, 64) 的输出被展平成了形状为 (1024) 的向量。
# + [markdown] id="P3odqfHP4M67"
# ### 编译并训练模型
# + id="MdDzI75PUXrG"
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
validation_data=(test_images, test_labels))
# + [markdown] id="jKgyC5K_4O0d"
# ### 评估模型
# + id="gtyDF0MKUcM7"
# Plot training vs. validation accuracy per epoch to inspect over/under-fitting.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()

# Final evaluation on the held-out test set.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
# + id="0LvwaKhtUdOo"
print(test_acc)
# + [markdown] id="8cfJ8AR03gT5"
# 我们搭建的简单的 CNN 模型在测试集上可以达到 70% 的准确率。对于只有几行的代码来说效果不错!对于另一种 CNN 结构可参考另一个使用的基于 Keras 子类 API 和 `tf.GradientTape` 的样例 [here](https://tensorflow.google.cn/tutorials/quickstart/advanced)。
| Part III CNN (original)/01.cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:image-crop-analysis]
# language: python
# name: conda-env-image-crop-analysis-py
# ---
# ```
# Copyright 2021 Twitter, Inc.
# SPDX-License-Identifier: Apache-2.0
# ```
#
# # Gender Gaze Analysis
#
# * This notebook prepares a dataset for gender gaze analysis.
# * It selects `MAX_FOUND` number of images
# * The selected images' saliency maps are stored in the folder `./gender_gaze/annotations/{GENDER}` with the same name as the image.
# * Each image's salienct segment regions are saved in a file with a suffix `_regions`
# * Once the images are generated you can look at the saliency map images and assess if the most salient point is on the face or not as well as if any non face area is getting detected as a salient region using the `_regions` file.
# +
import logging
import shlex
import subprocess
import sys
from collections import namedtuple
from pathlib import Path
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
logging.basicConfig(level=logging.ERROR)
# +
import platform
BIN_MAPS = {"Darwin": "mac", "Linux": "linux"}
HOME_DIR = Path("../").expanduser()
try:
import google.colab
# ! pip install pandas scikit-learn scikit-image statsmodels requests dash
! [[ -d image-crop-analysis ]] || git clone https://github.com/twitter-research/image-crop-analysis.git
HOME_DIR = Path("./image-crop-analysis").expanduser()
IN_COLAB = True
except:
IN_COLAB = False
sys.path.append(str(HOME_DIR / "src"))
bin_dir = HOME_DIR / Path("./bin")
bin_path = bin_dir / BIN_MAPS[platform.system()] / "candidate_crops"
model_path = bin_dir / "fastgaze.vxm"
data_dir = HOME_DIR / Path("./data/")
data_dir.exists()
# -
df = pd.read_csv(data_dir / Path("dataset.tsv"), sep="\t")
df.head()
from crop_api import parse_output, ImageSaliencyModel, is_symmetric, reservoir_sampling
from image_manipulation import get_image_saliency_map, process_image
model = ImageSaliencyModel(crop_binary_path=bin_path, crop_model_path=model_path)
# %%time
# For each gender group, select up to MAX_FOUND suitable JPEG images and save
# their saliency-map annotations under gender_gaze/annotations/{gender}.
MAX_FOUND = 100
for gender in df.sex_or_gender.unique():
    annotation_dir = data_dir / Path(f"./gender_gaze/annotations/{gender}")
    annotation_dir.mkdir(parents=True, exist_ok=True)
    found = 0
    # Shuffle deterministically (random_state=42) so reruns visit images in
    # the same order.
    for img_path in df[df.sex_or_gender == gender].sample(frac = 1, random_state=42).local_path:
        if not img_path.lower().endswith((".jpg", ".jpeg")): continue
        if found >= MAX_FOUND: break
        img_path = data_dir / Path(f"./images/{img_path}")
        # Annotation already produced by a previous run: count it and move on.
        if (annotation_dir / img_path.name).exists():
            found += 1
            continue
        try:
            img, image_label_overlay, regions, threshold = get_image_saliency_map(img_path, model)
        except TypeError as e:
            # Saliency extraction failed for this image; skip it.
            print(img_path, e)
            continue
        img_shape = img.shape
        n_regions = len([r for r in regions if r.area > 1000])
        print(img_path.name, img_shape[0] / img_shape[1], n_regions)
        if n_regions < 2 or (img_shape[0] / img_shape[1]) < 1.25:
            # Only select images with at least 2 big salient regions (area > 1000)
            # and a height/width ratio >= 1.25 (significantly tall).
            continue
        found += 1
        # Save the region overlay produced by process_image (suffix "_regions")
        # and the top-1 square crop plot under the image's own name.
        process_image(img_path, model)
        img_path_parts = img_path.name.rsplit(".", 1)
        plt.savefig(annotation_dir / f"{img_path_parts[0]}_regions.{img_path_parts[-1]}", bbox_inches="tight")
        plt.close("all")
        model.plot_img_crops(img_path, aspectRatios=[1], topK=1)
        plt.savefig(annotation_dir / img_path.name, bbox_inches="tight")
        plt.close("all")
| notebooks/Gender Gaze Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# %matplotlib inline
def sigmoid(x, x0, k):
    """Evaluate the logistic function at *x*.

    Parameters
    ----------
    x : array_like
        Input value(s).
    x0 : float
        Threshold (midpoint) of the curve.
    k : float
        Slope (steepness) at the midpoint.

    Returns
    -------
    array_like
        ``1 / (1 + exp(-k * (x - x0)))``, elementwise.
    """
    return 1 / (1 + np.exp(k * (x0 - x)))
# Toy tumor-classification data: lump size (x) vs. probability of malignancy (y).
xdata = np.array([0.0, 1.0, 3.0, 4.3, 7.0, 8.0, 8.5, 10.0, 12.0])
ydata = np.array([0.01, 0.02, 0.04, 0.11, 0.43, 0.7, 0.89, 0.95, 0.99])
xdata_ = xdata + 2
# Fit the logistic function; popt = fitted (x0, k), pcov = their covariance.
popt, pcov = curve_fit(sigmoid, xdata, ydata)
x = np.linspace(-1, 15, 50)
y = sigmoid(x, *popt)
x_ = x + 2  # same curve shifted right by 2 -> illustrates a larger threshold
fig = plt.figure(figsize = [11,5])
plt.subplot(121) #first plot: same slope, shifted threshold
plt.plot(x,y)
plt.plot(x_,y,'-r')
plt.legend(loc='best')
plt.xlabel('lump size', size=15)
plt.ylabel('P malignant tumor', size=15)
plt.title('Logistic function: varying threshold', size=15)
plt.ylim(0, 1.05)
plt.xlim(0, 15)
plt.subplot(122) # second plot: shallower slope
y_ = sigmoid(x_, *popt-.5) # *popt - .5 lowers BOTH fitted parameters (x0 and k)
                            # by 0.5; the smaller k gives the shallower slope
plt.plot(x,y)
plt.plot(x_,y_,'-g')
plt.legend(loc='best')
plt.xlabel('lump size', size=15)
plt.ylabel('P malignant tumor', size=15)
plt.title('Logistic function: varying slope', size=15)
plt.ylim(0, 1.05)
plt.xlim(0, 15)
plt.tight_layout()
plt.show()
| code/Logistic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8Xdmm79IChoP" colab_type="code" colab={}
import pandas as pd
import scipy.stats as stats
# + id="6o9H0nwZCkJk" colab_type="code" colab={}
df_collection = pd.read_csv('https://raw.githubusercontent.com/niravjdn/Software-Measurement-Project/master/data/jacoc-by-version/configuration/common-configuration-2.2.csv', error_bad_lines=False)
# + id="F4Zm04R7Dir8" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + id="U1nY7nhdFF4e" colab_type="code" colab={}
df_collection['Statement_Percentage'] = (df_collection['LINE_COVERED'] / (df_collection['LINE_COVERED'] + df_collection['LINE_MISSED'])) * 100
# + id="0sGHQ9qLFw1u" colab_type="code" colab={}
df_collection['Branch_Percentage'] = (df_collection['BRANCH_COVERED'] / (df_collection['BRANCH_COVERED'] + df_collection['BRANCH_MISSED'])) * 100
# + id="G1uiGZmiGBe8" colab_type="code" colab={}
df_collection['CC'] = df_collection['COMPLEXITY_COVERED'] + df_collection['COMPLEXITY_MISSED'];
# + id="cbxZwOnTDpfv" colab_type="code" outputId="970c3f60-7cbe-4d15-be5d-2047b54cc8f9" executionInfo={"status": "ok", "timestamp": 1554517197381, "user_tz": 240, "elapsed": 1139, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 400}
df_collection.head()
# + id="Ztm1rZvLEeyS" colab_type="code" outputId="5867bca9-fc98-45ba-c3ea-9ca07e84d8a2" executionInfo={"status": "ok", "timestamp": 1554517197761, "user_tz": 240, "elapsed": 1482, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 378}
df_collection.plot(x='CC', y='Statement_Percentage', style='o')
# + id="RyiTWuCqo9DT" colab_type="code" outputId="175a17a4-a22c-4a8b-a751-6779d53828de" executionInfo={"status": "ok", "timestamp": 1554517198015, "user_tz": 240, "elapsed": 1706, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 378}
df_collection.plot(x='CC', y='Branch_Percentage', style='o')
# + id="ufAGflaPGfZD" colab_type="code" outputId="25c9b0f2-f894-4294-c539-6a45f36eec78" executionInfo={"status": "ok", "timestamp": 1554517198477, "user_tz": 240, "elapsed": 2144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 347}
plt.scatter(df_collection['CC'], df_collection['Statement_Percentage'])
plt.show() # Depending on whether you use IPython or interactive mode, etc.
# + id="yXwX1zpHHhOU" colab_type="code" outputId="5da9f96c-43e6-4433-e4c6-d5aaf0aff664" executionInfo={"status": "ok", "timestamp": 1554517198482, "user_tz": 240, "elapsed": 2128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 477}
df_collection.corr(method ='spearman')
# + id="LlZdN8Q8Ig0l" colab_type="code" outputId="11dd7846-8313-41cd-9ce8-a9f53e1d270d" executionInfo={"status": "ok", "timestamp": 1554517198486, "user_tz": 240, "elapsed": 2110, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 112}
df_collection[['CC','Statement_Percentage']].corr(method ='spearman')
# + id="T86BizaEQ9Mb" colab_type="code" outputId="5eb05c44-726a-4641-e3f6-fa2f1d727cf8" executionInfo={"status": "ok", "timestamp": 1554517198488, "user_tz": 240, "elapsed": 2091, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
df_clean = df_collection.dropna()
stats.spearmanr(df_clean['Statement_Percentage'], df_clean['CC'])
# + id="1DAV9QEjIxpy" colab_type="code" outputId="eda6f1ad-f420-4721-8401-8995f3e78b7c" executionInfo={"status": "ok", "timestamp": 1554517198492, "user_tz": 240, "elapsed": 2074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 112}
df_collection[['CC','Branch_Percentage']].corr(method ='spearman')
# + id="LdbShMPAQ-R9" colab_type="code" outputId="60911446-5d7d-4e3d-f581-8aca74de46c2" executionInfo={"status": "ok", "timestamp": 1554517198495, "user_tz": 240, "elapsed": 2051, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
df_clean = df_collection.dropna()
stats.spearmanr(df_clean['Branch_Percentage'], df_clean['CC'])
# + id="3FBbpGRwlP5l" colab_type="code" outputId="e0ba11ba-cc9f-42b2-e9a6-265d0e04ce30" executionInfo={"status": "ok", "timestamp": 1554517198500, "user_tz": 240, "elapsed": 2036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
print('Total Statment Coverage '+str((df_collection.LINE_COVERED.sum()/(df_collection.LINE_MISSED.sum() + df_collection.LINE_COVERED.sum()))*100))
# + id="PWuIdk61pENk" colab_type="code" outputId="376a2939-c815-4f58-98cd-55f1a2840950" executionInfo={"status": "ok", "timestamp": 1554517198503, "user_tz": 240, "elapsed": 2017, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-MXavU57lU4k/AAAAAAAAAAI/AAAAAAAAMFw/tzJ-F4ETDCM/s64/photo.jpg", "userId": "16537180351831762327"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
print('Total Branch Coverage '+str((df_collection.BRANCH_COVERED.sum()/(df_collection.BRANCH_MISSED.sum() + df_collection.BRANCH_COVERED.sum()))*100))
# + id="MVF4iS4X6VoF" colab_type="code" colab={}
| Jupyter Notebook/Jupyter Notebok/Final Metrics Correlation/Metric 1,2 to 3,4/Jacoco/Configuration/Configuration 2.2.ipynb |
# ---
# title: "Countplot "
# author: "Aavinash"
# date: 2020-09-04
# description: "-"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kagglevil_
# language: python
# name: kagglevil_
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Load the exported WhatsApp chat table; first column is the index.
data = pd.read_csv('Whatsapp_chat.csv', index_col=0)
data
# Message counts per participant ("names" column).
data.groupby('names').count()
from matplotlib import ticker
# Force scientific notation (x10^3) on the y-axis so large counts stay readable.
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((3,3))
sns.countplot(x='names', data=data).yaxis.set_major_formatter(formatter)
| docs/python/seaborn/Countplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div dir='rtl'>
#
# # الوصف
#
# </div>
#
# <div dir='rtl'>
# يُستخدم هذا الدفتر لطلب حساب متوسط السلاسل الزمنية لطبقة بيانات WaPOR لمنطقة باستخدام WaPOR API. ستحتاج إلى WaPOR API Token لاستخدام هذا الكمبيوتر المحمول
# </div>
# <div dir='rtl'>
#
# # الخطوة 1: اقرأ APIToken
#
# </div>
#
# <div dir='rtl'>
# احصل على APItoken من https://wapor.apps.fao.org/profile. أدخل رمز API الخاص بك عند تشغيل الخلية أدناه
# </div>
# +
import requests
import pandas as pd
path_query=r'https://io.apps.fao.org/gismgr/api/v1/query/'
path_sign_in=r'https://io.apps.fao.org/gismgr/api/v1/iam/sign-in/'
APIToken=input('رمز API الخاص بك: أدخل رمز API الخاص بك ')
# -
# <div dir='rtl'>
#
# # الخطوة 2: الحصول على إذن AccessToken
#
# </div>
#
# <div dir='rtl'>
# باستخدام رمز إدخال API للحصول على AccessToken للحصول على إذن
# </div>
# Exchange the long-lived API token for a short-lived access token used to
# authorize the query request.
resp_signin=requests.post(path_sign_in,headers={'X-GISMGR-API-KEY':APIToken})
resp_signin = resp_signin.json()
AccessToken=resp_signin['response']['accessToken']
AccessToken
# <div dir='rtl'>
#
# # الخطوه 3: اكتب حمولة الاستعلام لمزيد من
#
# </div>
#
# <div dir='rtl'>
# الأمثلة على زيارة تحميل استعلام السلاسل الزمنية للمنطقة
# https://io.apps.fao.org/gismgr/api/v1/swagger-ui/examples/AreaStatsTimeSeries.txt
# </div>
# +
crs="EPSG:4326" # coordinate reference system of the query polygon
cube_code="L1_PCP_E"  # WaPOR data cube: Level-1 precipitation
workspace='WAPOR_2'   # use WAPOR for v1.0, WAPOR_2 for v2.1
start_date="2009-01-01"
end_date="2019-01-01"
# Get the measure code of the data cube (what quantity it stores).
cube_url=f'https://io.apps.fao.org/gismgr/api/v1/catalog/workspaces/{workspace}/cubes/{cube_code}/measures'
resp=requests.get(cube_url).json()
measure=resp['response']['items'][0]['code']
print('MEASURE: ',measure)
# Get the time dimension code of the data cube (DAY / DEKAD / MONTH / YEAR).
cube_url=f'https://io.apps.fao.org/gismgr/api/v1/catalog/workspaces/{workspace}/cubes/{cube_code}/dimensions'
resp=requests.get(cube_url).json()
items=pd.DataFrame.from_dict(resp['response']['items'])
dimension=items[items.type=='TIME']['code'].values[0]
print('DIMENSION: ',dimension)
# -
# <div dir='rtl'>
#
# ## تحديد المنطقة من خلال مدى التنسيق
#
# </div>
# +
bbox= [37.95883206252312, 7.89534, 43.32093, 12.3873979377346] #latlon
xmin,ymin,xmax,ymax=bbox[0],bbox[1],bbox[2],bbox[3]
Polygon=[
[xmin,ymin],
[xmin,ymax],
[xmax,ymax],
[xmax,ymin],
[xmin,ymin]
]
query_areatimeseries={
"type": "AreaStatsTimeSeries",
"params": {
"cube": {
"code": cube_code, #كود_المكعب
"workspaceCode": workspace, # رمز مساحة العمل: استخدم WAPOR للإصدار 1.0 و WAPOR_2 للإصدار 2.1
"language": "en"
},
"dimensions": [
{
"code": dimension, # استخدام DAY DEKAD MONTH أو YEAR
"range": f"[{start_date},{end_date})" # تاريخ البدء وتاريخ الانتهاء
}
],
"measures": [
measure
],
"shape": {
"type": "Polygon",
"properties": {
"name": crs # تنسيق النظام المرجعي
},
"coordinates": [
Polygon
]
}
}
}
query_areatimeseries
# -
# <div dir='rtl'>
#
# ## أو حدد المنطقة بقراءة GeoJSON
#
# </div>
# +
import ogr
# Read the area of interest from a shapefile instead of a hand-built bbox.
shp_fh=r".\data\Awash_shapefile.shp"
shpfile=ogr.Open(shp_fh)
layer=shpfile.GetLayer()
# EPSG code of the layer's spatial reference (e.g. '4326').
epsg_code=layer.GetSpatialRef().GetAuthorityCode(None)
shape=layer.GetFeature(0).ExportToJson(as_object=True)['geometry'] # geometry of the first shapefile feature as a JSON object
shape["properties"]={"name": "EPSG:{0}".format(epsg_code)} # attach the lat/lon projection expected by the API
# Same AreaStatsTimeSeries payload as before, but using the GeoJSON shape
# read from the shapefile instead of the bounding-box polygon.
query_areatimeseries = {
    "type": "AreaStatsTimeSeries",
    "params": {
        "cube": {
            "code": cube_code,
            "workspaceCode": workspace,
            "language": "en",
        },
        "dimensions": [
            {
                "code": dimension,
                "range": f"[{start_date},{end_date})",
            }
        ],
        "measures": [measure],
        "shape": shape,
    },
}
query_areatimeseries
# -
# <div dir='rtl'>
#
# # الخطوة 4: انشر حمولة الاستعلام مع رمز الوصول في رأس
#
# </div>
#
# <div dir='rtl'>
# الاستجابات ، احصل على عنوان url للاستعلام عن الوظيفة
# </div>
# +
# POST the query payload with the bearer token; the response links to the job.
auth_header = {'Authorization': 'Bearer {0}'.format(AccessToken)}
resp_query = requests.post(path_query, headers=auth_header,
                           json=query_areatimeseries).json()
job_url = resp_query['response']['links'][0]['href']
job_url
# -
# <div dir='rtl'>
#
# # الخطوة 5: احصل على نتائج الوظيفة.
#
# </div>
#
# <div dir='rtl'>
# سوف يستغرق الأمر بعض الوقت حتى تنتهي المهمة. عند انتهاء المهمة، ستتغير حالتها من "قيد التشغيل" إلى "مكتمل" أو "مكتمل بأخطاء". إذا كانت مكتملة، فيمكن تحقيق نتائج السلاسل الزمنية للمنطقة من "إخراج" الاستجابة.
# </div>
# Poll the job until it finishes. The original loop busy-waited (no delay
# between requests, hammering the API) and used an `i` sentinel; this version
# sleeps between polls and breaks out explicitly.
import time
print('RUNNING', end=" ")
while True:
    resp = requests.get(job_url).json()
    status = resp['response']['status']
    if status == 'RUNNING':
        print('.', end=" ")
        time.sleep(5)  # be polite to the API between polls
    elif status == 'COMPLETED':
        results = resp['response']['output']
        df = pd.DataFrame(results['items'], columns=results['header'])
        break
    elif status == 'COMPLETED WITH ERRORS':
        print(resp['response']['log'])
        df = None  # no output produced; downstream cells will fail loudly
        break
df
df.index = pd.to_datetime(df.day, format='%Y-%m-%d')
df.plot()
| notebooks_AR/Module1_unit5/4_AreaStatsTimeSeries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Prototype finished.
#
# # Topic Prototyping
#
# Prototype code for Topic Modeling of Posts. Objective is to assign topics of the most promising model to the Posts.
# +
import os, sys
sys.path.append("..")
from config import credentials
import dropbox
import numpy as np
import pandas as pd
from joblib import dump, load
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# -
random_state = 23
# ## Loading
data_path = "/Data/CSVData"
model_path = "../models"
# +
# Connect to the team Dropbox, scope paths to the team namespace and act as
# the configured team member.
team_dbx = dropbox.DropboxTeam(credentials.dropbox_team_access_token)
namespace = dropbox.common.PathRoot.namespace_id(credentials.dropbox_team_namespace_id)
team_root = team_dbx.with_path_root(namespace)
user_dbx = team_root.as_user(credentials.dropbox_team_member_id)
# Download the posts CSV and load it into a dataframe.
posts_fpath = os.path.join(data_path, "PolPosts.csv")
_, res = user_dbx.files_download(posts_fpath)
raw_posts = pd.read_csv(res.raw)
print("Posts", raw_posts.shape)
# -
# ## Preprocessing
# Dataset cleansing
# +
# Keep only the text and ID columns, renaming them uniformly.
posts_cols = [c for c in raw_posts.columns.to_list() if c not in ("text", "textID")]
raw_posts.drop(posts_cols, axis=1, inplace=True)
raw_posts.columns = ["text", "textID"]
corpus = raw_posts.copy()
# Drop empty and whitespace-only texts.
corpus.text.replace("", np.nan, inplace=True)
corpus.dropna(subset=["text"], inplace=True)
corpus = corpus[~corpus.text.str.isspace()]
# Keep only the first occurrence of each duplicated text.
corpus.drop_duplicates(subset=["text"], keep="first", inplace=True)
corpus.shape
# -
# ## Topic Assignment
#
# TFIDF + LDA
data = corpus.copy()
# ### Vectorizer
# Load the fitted TF-IDF vectorizer and the pre-vectorized corpus.
tfidf = load(os.path.join(model_path, "topic_vectorizer", "tfidf.joblib"))
tfidf_v = load(os.path.join(model_path, "topic_vectorizer", "tfidf_v.joblib"))
len(tfidf.vocabulary_)
# ### Model
# +
# Load the 40-topic LDA model and infer per-document topic distributions.
model = load(os.path.join(model_path, "topic_lda", "lda_40.joblib"))
doc_topic_distr = model.transform(tfidf_v)
# -
# ### Assign Topics
# Keep the full distribution and the dominant topic per post.
data["topic_distribution"] = doc_topic_distr.tolist()
data["topic"] = doc_topic_distr.argmax(axis=1)
# ### Persist Table
persist_fpath = os.path.join(data_path, "topics", "topics_n40.csv")
csv_bytes = bytes(data.to_csv(index=False), "utf-8")
user_dbx.files_upload(csv_bytes, persist_fpath,
                      mode=dropbox.files.WriteMode.overwrite)
# # Conclusion
#
# Topics of the trained model has been successfully assigned to Posts. Prototype can be scriptified.
| notebooks/1.2-tte-topic-prototyping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from selenium import webdriver
# !dir .\chromedriver\chromedriver.exe
# Open the Genie Top-200 chart page with a Chrome driver and grab its HTML.
browser = webdriver.Chrome('./chromedriver/chromedriver.exe')
browser.get('https://www.genie.co.kr/chart/top200')
html = browser.page_source
from bs4 import BeautifulSoup as bs
# Parse the page and collect the per-song info cells.
soup = bs(html, 'html.parser')
tags = soup.select('td.info')
len(tags)
# Song title lives in a.title, artist name in a.artist.
# BUG FIX: `tag` was used before being defined anywhere (NameError); the
# exploration below now selects from the first collected cell, tags[0].
sample = tags[0]
title = sample.select('a.title')
len(title), type(title)  # list of matching elements
title[0].text.strip()
title2 = sample.select('a.title')[0].text.strip()
title2
artist = sample.select('a.artist')
len(artist), type(artist)  # list of matching elements
artist2 = sample.select('a.artist')[0].text.strip()
artist2
# Collect (title, artist) pairs for every chart entry.
contents = list()
for tag in tags:
    title = tag.select('a.title')[0].text.strip()
    artist = tag.select('a.artist')[0].text.strip()
    print(f'곡명 : {title}')
    print(f'가수 : {artist}')
    print('----------------------------------------------')
    song = [title, artist]
    contents.append(song)
contents
import pandas as pd
df = pd.DataFrame(contents, columns=['Title', 'Artist'])
df
# NOTE(review): writing .xls requires the xlwt engine, removed from modern
# pandas — confirm the environment, or switch the path to .xlsx.
df.to_excel('./saves/genie_scraping.xls', index=False)
| genie_scarping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Research Topic : Predict Solar Power Generation
# # Author : <NAME>
# # Institute : Sylhet Engineering College
# ## Import Necessary Library
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os
import re
import pickle
# ## Import Dataset
# Load generation and weather-sensor data for both solar plants.
plant1_GenData = pd.read_csv('Plant_1_Generation_Data.csv')
plant2_GenData = pd.read_csv('Plant_2_Generation_Data.csv')
plant1_WeatherSensorData = pd.read_csv('Plant_1_Weather_Sensor_Data.csv')
plant2_WeatherSensorData = pd.read_csv('Plant_2_Weather_Sensor_Data.csv')
plant1_GenData.head()
plant1_WeatherSensorData.head()
# ## Explotrary Data Analysis - EDA
import matplotlib.pyplot as plt
plant1_GenData.groupby('DATE_TIME')['DATE_TIME'].count()
plant1_WeatherSensorData.groupby('DATE_TIME')['DATE_TIME'].count()
def _to_sensor_format(date):
    """Rewrite a generation timestamp 'DD-MM-YYYY HH:MM' as 'YYYY-MM-DD HH:MM:SS'.

    The year is hard-coded to 2020, exactly as the original slicing did
    (all records in this dataset are from 2020).
    """
    # date[3:6] == 'MM-', date[0:2] == 'DD', date[10:] == ' HH:MM'
    return "2020-" + date[3:6] + date[0:2] + date[10:] + ":00"
# Align plant 1's generation timestamps with the weather-sensor format.
dates = [_to_sensor_format(date) for date in plant1_GenData['DATE_TIME']]
plant1_GenData['DATE_TIME'] = dates
plant1_GenData.groupby('DATE_TIME')['DATE_TIME'].count()
# ## Finding out which data points are missing from plant 1's Generator Data
# Timestamps present in the weather data but missing from the generation data.
# (Set membership instead of the original O(n^2) list scans.)
gen_times = set(plant1_GenData['DATE_TIME'])
missing = [date for date in list(plant1_WeatherSensorData['DATE_TIME']) if date not in gen_times]
print(missing)
columns = ['DC_POWER', 'AC_POWER', 'DAILY_YIELD', 'TOTAL_YIELD']
means = [plant1_GenData[column].mean() for column in columns]
print(means)
# Impute the missing generation rows with column means.
# DataFrame.append was removed in pandas 2.0; build the rows and concat once.
new_rows = [{'DATE_TIME': date, 'PLANT_ID': '4135001', 'SOURCE_KEY': '<KEY>',
             'DC_POWER': means[0], 'AC_POWER': means[1],
             'DAILY_YIELD': means[2], 'TOTAL_YIELD': means[3]}
            for date in missing]
if new_rows:
    plant1_GenData = pd.concat([plant1_GenData, pd.DataFrame(new_rows)], ignore_index=True)
sensor_times = set(plant1_WeatherSensorData['DATE_TIME'])
print([date for date in list(plant1_GenData['DATE_TIME']) if date not in sensor_times])
columnsWSD = ['AMBIENT_TEMPERATURE', 'MODULE_TEMPERATURE', 'IRRADIATION']
meansWSD = [plant1_WeatherSensorData[column].mean() for column in columnsWSD]
print(meansWSD)
# One weather reading is missing; impute it with column means as well.
wsd_row = pd.DataFrame([{'DATE_TIME': '2020-06-03 14:00:00', 'PLANT_ID': '4135001',
                         'SOURCE_KEY': '<KEY>',
                         columnsWSD[0]: meansWSD[0], columnsWSD[1]: meansWSD[1],
                         columnsWSD[2]: meansWSD[2]}])
plant1_WeatherSensorData = pd.concat([plant1_WeatherSensorData, wsd_row], ignore_index=True)
# Average the per-inverter generation readings into one value per timestamp
# and attach them to the weather-sensor table. The original built four
# parallel total/count dicts by hand; groupby().mean() computes the same
# per-timestamp means directly.
plant1 = plant1_WeatherSensorData.copy()
gen_cols = ['DC_POWER', 'AC_POWER', 'DAILY_YIELD', 'TOTAL_YIELD']
per_time_means = plant1_GenData.groupby('DATE_TIME')[gen_cols].mean()
# Map each weather-row timestamp to the corresponding generation means.
combining = {key: plant1['DATE_TIME'].map(per_time_means[key]).tolist()
             for key in gen_cols}
for key in combining.keys():
    plant1[key] = combining[key]
plant1.head()
# ## Data Point Analysis Using Visualization
# Scatter-plot the relationships of interest for plant 1.
scatter_pairs = [
    ('AMBIENT_TEMPERATURE', 'MODULE_TEMPERATURE'),
    ('MODULE_TEMPERATURE', 'DC_POWER'),
    ('MODULE_TEMPERATURE', 'AC_POWER'),
    ('DC_POWER', 'IRRADIATION'),
    ('AC_POWER', 'IRRADIATION'),
]
for x_col, y_col in scatter_pairs:
    plant1.plot(kind='scatter', x=x_col, y=y_col, color='red')
# +
### Let us now process Plant 2's data in a similar fashion
# -
# Same aggregation as for plant 1: average per-inverter generation readings
# per timestamp and attach them to the weather-sensor table, replacing the
# hand-rolled total/count dicts with groupby().mean().
# NOTE(review): a weather timestamp absent from the generation data becomes
# NaN here, where the original raised KeyError — confirm none are missing.
plant2 = plant2_WeatherSensorData.copy()
gen_cols2 = ['DC_POWER', 'AC_POWER', 'DAILY_YIELD', 'TOTAL_YIELD']
per_time_means2 = plant2_GenData.groupby('DATE_TIME')[gen_cols2].mean()
combining2 = {key: plant2['DATE_TIME'].map(per_time_means2[key]).tolist()
              for key in gen_cols2}
for key in combining2.keys():
    plant2[key] = combining2[key]
plant2.head()
# Same scatter plots for plant 2, for comparison with plant 1.
for x_col, y_col in [
    ('AMBIENT_TEMPERATURE', 'MODULE_TEMPERATURE'),
    ('MODULE_TEMPERATURE', 'DC_POWER'),
    ('MODULE_TEMPERATURE', 'AC_POWER'),
    ('DC_POWER', 'IRRADIATION'),
    ('AC_POWER', 'IRRADIATION'),
]:
    plant2.plot(kind='scatter', x=x_col, y=y_col, color='red')
# +
### Plant 2 has a weaker correlation between ambient temperature and module temperature ; module temperature and dc power ;
# +
### module temperature and ac power ; dc power and irradiation ; ac power and irradiation.
# +
### The conclusions that we can derive from these plots are that the Ambient temperature raises the Module temperature
### which in turn affects the DC and AC power generated by the solar power generator, the increase in DC and AC power
### being generated means an increase in Irradiation
# +
### Now that we have formed our hypothesis, we can create a linear regression model and train it using our Plant2 Data
### in order to predict DC and AC power generation, and irradiation levels which identify the need for panel
### cleaning/maintenance. If the DC or AC power generation does not fit our trained model then we can identify
### faulty or suboptimally performing equipment.
# +
### First we will train a model on the ambient temperature in order to predict dc power.
### We will run two iterations: 1) We use plant 1 as training data and plant 2 as test 2) vice versa.
### My hypothesis is that we will obtain better results by using plant 2 as training data since there are
### more abnormalities in plant 2's data than plant 1 hence the model will have much less variance with
### the tradeoff of slight higher bias
# -
# Train on plant 1's ambient temperature to predict DC power.
X_train, y_train = plant1[['AMBIENT_TEMPERATURE']], plant1['DC_POWER']
# ## Let's start off with a basic linear regression model
from sklearn.linear_model import LinearRegression
linear_model = LinearRegression().fit(X_train, y_train)
# Evaluate on plant 2.
X_test, y_test = plant2[['AMBIENT_TEMPERATURE']], plant2['DC_POWER']
y_pred = linear_model.predict(X_test)
y_pred
# +
### Now that we have our predictions from our basic linear regression model let's use the Mean Absolute Error
# +
### to see how well our model did
# -
# ## Error Analysis
from sklearn.metrics import mean_absolute_error
# Mean absolute error of the linear model's predictions.
mean_absolute_error(y_test, y_pred)
# +
### Let's compare with a DecisionTreeRegressor and XGBoostRegressor
# -
# ## Let's Decision Tree Regressor
from sklearn.tree import DecisionTreeRegressor
decisiontree_model = DecisionTreeRegressor().fit(X_train, y_train)
y_pred = decisiontree_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
# ## Let's XGBoost Regressor
from xgboost import XGBRegressor
# Fit and score an XGBoost regressor on the same split.
xgboost_model = XGBRegressor().fit(X_train, y_train)
y_pred = xgboost_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
# +
### The DecisionTreeRegressor and XGBoostRegressor are both close. Now let us swap the training and test data
### and compare results.
# -
# Swap roles: train on plant 2's (more variable) data, evaluate on plant 1.
X_train, y_train = plant2[['AMBIENT_TEMPERATURE']], plant2['DC_POWER']
X_test, y_test = plant1[['AMBIENT_TEMPERATURE']], plant1['DC_POWER']
xgboost_model = XGBRegressor().fit(X_train, y_train)
y_pred = xgboost_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
decisiontree_model = DecisionTreeRegressor().fit(X_train, y_train)
# ## Predict Model
y_pred = decisiontree_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
# +
### Notice the drastic performance increase on both models when we opt to using the more variable data of plant 2 compared
### to that of plant 1.
# +
### So now we can successfully predict the dc power based on ambient temperature for plant 1 or 2. We will not predict
### the ac power for brevity, note that it is the exact same process but replacing 'DC_POWER' with 'AC_POWER'
# +
### Now we can predict irradiation levels which identifies the need for panel cleaning/maintenance.
# -
# Predict irradiation from ambient temperature (train: plant 2, test: plant 1).
X_train, y_train = plant2[['AMBIENT_TEMPERATURE']], plant2['IRRADIATION']
X_test, y_test = plant1[['AMBIENT_TEMPERATURE']], plant1['IRRADIATION']
decisiontree_model = DecisionTreeRegressor().fit(X_train, y_train)
y_pred = decisiontree_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
xgboost_model = XGBRegressor().fit(X_train, y_train)
# ## Predict Model
y_pred = xgboost_model.predict(X_test)
mean_absolute_error(y_test, y_pred)
# ## Generate Pickle For Deploy
import pickle
# Persist the irradiation model, then reload it to sanity-check the artifact.
# BUG FIX: the original loaded 'model_solar_radiation.pkl' although the model
# was dumped to 'model_solar_generation.pkl'; also use context managers so the
# file handles are always closed.
with open('model_solar_generation.pkl', 'wb') as fh:
    pickle.dump(decisiontree_model, fh)
with open('model_solar_generation.pkl', 'rb') as fh:
    model = pickle.load(fh)
# +
### In this case, the XGBoostRegressor performs better than the DecisionTreeRegressor. We are successfully able
### to identify the need for panel cleaning/maintenance by predicting the irradiation based on ambient temperature.
| Solar Power Generation Predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Identificación de créditos riesgosos usando SVM
# ===
# Las entidades financieras desean mejorar sus procedimientos de aprobación de créditos con el fin de disminuir los riesgos de no pago de la deuda, lo que acarrea pérdidas a la entidad. El problema real consiste en poder decidir si se aprueba o no un crédito particular con base en información que puede ser fácilmente recolectada por teléfono o en la web. Se tiene una muestra de 1000 observaciones. Cada registro contiene 20 atributos que recopilan información tanto sobre el crédito como sobre la salud financiera del solicitante. Construya un sistema de recomendación que use máquinas de vectores de soporte.
#
# El archivo de datos se encuentra disponible en el siguiente link:
#
# https://raw.githubusercontent.com/jdvelasq/datalabs/master/datasets/credit.csv
#
#
#
# Los atributos y sus valores son los siguientes:
#
# Attribute 1: (qualitative)
# Status of existing checking account
# A11 : ... < 0 DM
# A12 : 0 <= ... < 200 DM
# A13 : ... >= 200 DM /
# salary assignments for at least 1 year
# A14 : no checking account
#
# Attribute 2: (numerical)
# Duration in month
#
# Attribute 3: (qualitative)
# Credit history
# A30 : no credits taken/
# all credits paid back duly
# A31 : all credits at this bank paid back duly
# A32 : existing credits paid back duly till now
# A33 : delay in paying off in the past
# A34 : critical account/
# other credits existing (not at this bank)
#
# Attribute 4: (qualitative)
# Purpose
# A40 : car (new)
# A41 : car (used)
# A42 : furniture/equipment
# A43 : radio/television
# A44 : domestic appliances
# A45 : repairs
# A46 : education
# A47 : (vacation - does not exist?)
# A48 : retraining
# A49 : business
# A410 : others
#
# Attribute 5: (numerical)
# Credit amount
#
# Attribute 6: (qualitative)
# Savings account/bonds
# A61 : ... < 100 DM
# A62 : 100 <= ... < 500 DM
# A63 : 500 <= ... < 1000 DM
# A64 : .. >= 1000 DM
# A65 : unknown/ no savings account
#
# Attribute 7: (qualitative)
# Present employment since
# A71 : unemployed
# A72 : ... < 1 year
# A73 : 1 <= ... < 4 years
# A74 : 4 <= ... < 7 years
# A75 : .. >= 7 years
#
# Attribute 8: (numerical)
# Installment rate in percentage of disposable income
#
# Attribute 9: (qualitative)
# Personal status and sex
# A91 : male : divorced/separated
# A92 : female : divorced/separated/married
# A93 : male : single
# A94 : male : married/widowed
# A95 : female : single
#
# Attribute 10: (qualitative)
# Other debtors / guarantors
# A101 : none
# A102 : co-applicant
# A103 : guarantor
#
# Attribute 11: (numerical)
# Present residence since
#
# Attribute 12: (qualitative)
# Property
# A121 : real estate
# A122 : if not A121 : building society savings agreement/
# life insurance
# A123 : if not A121/A122 : car or other, not in attribute 6
# A124 : unknown / no property
#
# Attribute 13: (numerical)
# Age in years
#
# Attribute 14: (qualitative)
# Other installment plans
# A141 : bank
# A142 : stores
# A143 : none
#
# Attribute 15: (qualitative)
# Housing
# A151 : rent
# A152 : own
# A153 : for free
#
# Attribute 16: (numerical)
# Number of existing credits at this bank
#
# Attribute 17: (qualitative)
# Job
# A171 : unemployed/ unskilled - non-resident
# A172 : unskilled - resident
# A173 : skilled employee / official
# A174 : management/ self-employed/
# highly qualified employee/ officer
#
# Attribute 18: (numerical)
# Number of people being liable to provide maintenance for
#
# Attribute 19: (qualitative)
# Telephone
# A191 : none
# A192 : yes, registered under the customers name
#
# Attribute 20: (qualitative)
# foreign worker
# A201 : yes
# A202 : no
#
# +
#
# Use el transformador LabelEncoder para preprocesar
# las columnas alfanuméricas del dataframe.
#
# Use los primeros 900 datos para entrenamiento del
# modelo y los 100 restantes para validación.
#
# Construya el SVM usando los valores por defecto de
# los parámetros.
#
# Compute la matriz de confusión para la muestra de
# validación.
#
# Rta/
# True
# True
# True
# True
#
# >>> Inserte su codigo aquí >>>
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC
data = pd.read_csv("https://raw.githubusercontent.com/jdvelasq/datalabs/master/datasets/german.csv")
y = data['default']
x = data.drop('default', axis=1)
# Label-encode every alphanumeric (object-typed) column in place.
encoder = LabelEncoder()
alphanum = [col for col in x.columns if x[col].dtype == 'object']
for column in alphanum:
    x[column] = encoder.fit_transform(x[column])
# First 900 rows for training, the remaining 100 for validation.
yTrain, ytest = y[0:900], y[900:]
xTrain, xtest = x[0:900], x[900:]
# Fit an SVM with default parameters.
model = SVC()
model.fit(xTrain, yTrain)
y_pred = model.predict(xtest)
# Confusion matrix on the validation sample.
cm = confusion_matrix(ytest, y_pred)
cm
# ---->>> Evaluation ---->>>
# cm is the confusion matrix
print(cm[0][0] == 67)
print(cm[0][1] == 1)
print(cm[1][0] == 30)
print(cm[1][1] == 2)
# +
#
# Encuentre la mejor combinación de kernel y parámetros
# de regularización para los valores suministrados
# durante el entrenamiento y compute la matriz de
# confusión para la muestra de prueba.
#
# Rta/
# True
# True
# True
# True
#
kernels = ['rbf', 'linear', 'poly', 'sigmoid']
Cs = [1, 2, 3, 4, 5]
# Grid-search kernel and regularization strength, keeping the model with the
# best accuracy on the validation split, then report its confusion matrix.
# (The original cell only contained a "insert your code here" placeholder.)
best_score = -1.0
best_model = None
for kernel in kernels:
    for C in Cs:
        candidate = SVC(kernel=kernel, C=C)
        candidate.fit(xTrain, yTrain)
        score = candidate.score(xtest, ytest)
        if score > best_score:
            best_score = score
            best_model = candidate
y_pred = best_model.predict(xtest)
cm = confusion_matrix(ytest, y_pred)
# ---->>> Evaluation ---->>>
# cm is the confusion matrix
print(cm[0][0] == 68)
print(cm[0][1] == 0)
print(cm[1][0] == 30)
print(cm[1][1] == 2)
cm
| notebooks/analitica_predictiva/06-002_identificacion_de_creditos_riesgosos_usando_svm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # sample_project/src/run_model.py
# +
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from urllib.parse import urlparse
import mlflow
# +
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("deployment_exp")
with mlflow.start_run():
    # Train an SVC on the digits dataset and log it to the tracking server.
    # (Removed the unused DecisionTreeClassifier instance and the unused
    # `as run` binding from the original.)
    X, y = datasets.load_digits(return_X_y=True)
    model2 = SVC()
    model2.fit(X, y)
    score = model2.score(X, y)
    mlflow.log_metric("acc", score)
    # The model registry is unsupported with a plain file artifact store, so
    # only register the model when the store is not file-based.
    tracking_url_type_store = urlparse(
        mlflow.get_artifact_uri()
    ).scheme
    if tracking_url_type_store != "file":
        mlflow.sklearn.log_model(
            model2, "model",
            registered_model_name="DeploymentModel"
        )
    else:
        mlflow.sklearn.log_model(model2, "model")
# -
# # sample_project/tests/test_sample.py
def test_sample():
    """Placeholder test that always passes, keeping the CI pipeline green."""
    return True
| test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implémentation de la pile en Python
# On peut voir dans la documentation officielle de python qu'il est facile d'implémenter une structure de pile avec les listes :
#
# https://docs.python.org/fr/3/tutorial/datastructures.html#using-lists-as-stacks
# ## Implémentation directe
# Exécutez le code suivant et modifiez le pour bien comprendre les mécanismes de pile :
# A plain list behaves as a LIFO stack: append pushes, pop removes the top.
pile=[1,2,3]
pile.append(4)
print("après ajout de 4",pile)
# Two pops remove 4 then 3.
pile.pop()
pile.pop()
print("après suppression de deux éléments",pile)
# Two more pops empty the stack entirely.
pile.pop()
pile.pop()
print(pile)
# Montrer (sur votre copie ou ici) que cette suite peut être définie explicitement par:
# $u_n=36000 \times 0,95^n+4000$
#
# (on pourra montrer que $v_n=u_n-4000$ est une suite géométrique)
#
# Calculer à nouveau les 10 premiers termes de la suite avec le formule explicite et comparer
# Montrer que la suite $u_n$ est décroissante et admet pour limite 4000.
#
# En déduire un programme de seuil qui, pour une valeur de k donnée en entrée (k>4000), calcule à partir de quel rang n la suite est inférieure à k
| .ipynb_checkpoints/ArithmeticoGeom-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, forest
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
from IPython.display import display
import numpy as np
import scipy
import re
# +
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
# NOTE(review): the `dtypes` dict defined above is never passed to read_csv —
# likely intended `pd.read_csv('../kaggle_train.csv', dtype=dtypes)` to cut
# memory use; confirm the CSV's columns before enabling it.
# %time train = pd.read_csv('../kaggle_train.csv')
display(train.describe(include='all').T)
# -
# Split version-string columns (e.g. '1.273.1735.0-x') on '.' and '-' into up
# to six components; a missing component becomes -1. Components that parse as
# numbers are converted, the rest stay as strings.
col = ['EngineVersion', 'AppVersion', 'AvSigVersion', 'OsBuildLab', 'Census_OSVersion']
for c in col:
    for i in range(6):
        # raw string for the regex — the original '\.|-' relied on an invalid
        # (deprecated) escape sequence in a normal string literal
        train[c + str(i)] = train[c].map(
            lambda x: re.split(r'\.|-', str(x))[i] if len(re.split(r'\.|-', str(x))) > i else -1)
        try:
            train[c + str(i)] = pd.to_numeric(train[c + str(i)])
        except (ValueError, TypeError):
            # narrow except — the original bare `except` also swallowed
            # KeyboardInterrupt/SystemExit
            print(f'{c + str(i)} cannot be casted to number')
# Flag the 'ExistsNotSet' SmartScreen value, then ordinal-encode every string
# column and median-impute NaNs in numeric columns.
train['HasExistsNotSet'] = train['SmartScreen'] == 'ExistsNotSet'
for col, val in train.items():
    if pd.api.types.is_string_dtype(val):
        encoded = val.astype('category').cat.as_ordered()
        train[col] = encoded
        train[col] = train[col].cat.codes
    elif pd.api.types.is_numeric_dtype(val) and val.isnull().sum() > 0:
        train[col] = val.fillna(val.median())
train
X, Y = train.drop('HasDetections', axis=1), train['HasDetections']
from sklearn import preprocessing
import numpy as np
def oneHot(s):
    """Map a flag to 1/0.

    Returns 1 exactly when ``s == True`` holds (i.e. True or 1), else 0 —
    the same contract as the original if/else, written idiomatically.
    """
    return int(s == True)  # noqa: E712 — deliberate equality, not truthiness
# Convert boolean column 113 (the HasExistsNotSet flag) to 0/1 integers,
# reshape to a column vector, write it back, then export the full dataset.
X_slice = train.iloc[:, 113]
print(X_slice)
X_slice = np.array([oneHot(i) for i in X_slice])
print(X_slice)
X_slice = X_slice.reshape(-1, 1)
print(X_slice)
train.iloc[:, 113] = X_slice
train
train.to_csv('dataset84.csv')
| LGBM(84)/Preprocessed data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### To remove duplicate images using <i><b>fdupes for linux
# * <i><b>fdupes</b></i> is a Linux utility for identifying or deleting duplicate files by comparing md5sum then running a byte-to-byte comparaison
# ### Install fdupes for linux
# BUG FIX: these shell commands were written as bare Python statements, which
# is a SyntaxError in a code cell; jupytext/IPython shell escapes need the
# `!` prefix (written `# !` in the .py representation). Also added the missing
# sudo on the install command and -y for non-interactive installs.
# !sudo apt-get update && sudo apt-get install -y fdupes
# ### Search duplicate photos in a folder
# !fdupes Path_To_folder
# ### Number of duplicates in a folder
# !fdupes -m Path_To_folder
# ### Delete files and preserve the first one
# !fdupes -dN Path_To_folder
| Notebooks/Remove_Duplicates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from spell import correction
import pickle
correction('spellin')
# Load the pickled dataframe; a context manager guarantees the file handle is
# closed even if unpickling fails (the original used manual open/close).
with open('df_eli.pkl', 'rb') as f:  # 'rb' for reading a binary file
    df_eli = pickle.load(f)
print(df_eli.shape)
df_eli.head()
# !pip install pyspellchecker
# +
from spellchecker import SpellChecker
spell = SpellChecker()
# Words the checker does not recognise are candidates for correction.
misspelled = spell.unknown(['something', 'is', 'hapenning', 'here'])
for word in misspelled:
    # Get the one 'most likely' answer
    print(spell.correction(word))
    # Get a list of 'likely' options
    print(spell.candidates(word))
# -
spell.correction('google')
spell = SpellChecker() # loads default word frequency list
# Teach the checker 'google' so it is no longer "corrected" away.
spell.word_frequency.load_words(['google'])
spell.correction('google')
spell.known(['microsoft', 'goole'])
dir(spell)
help(spell.unknown)
spell.unknown(['applk'])
help(spell)
# Number of candidate strings one edit away from 'applk'.
len(spell.edit_distance_1('applk'))
| Trail_spell_checker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from scipy.special import expit
from rbm import RBM
from sampler import VanillaSampler, PartitionedSampler
from trainer import VanillaTrainier
from performance import Result
import numpy as np
import datasets, performance, plotter, mnist, pickle, rbm, os, logging
logger = logging.getLogger()
# Set the logging level to logging.DEBUG to
logger.setLevel(logging.INFO)
# %matplotlib inline
models_names = [
"one","two","three","four","five","six","seven", "eight", "nine", "bar","two_three"]
# RBM's keyed by a label of what they were trained on
models = datasets.load_models(models_names)
# +
data_set_size = 40
number_gibbs_alternations = 1000
# the model we will be `corrupting` the others with, in this case we are adding bars to the digit models
corruption_model_name = "bar"
def result_key(data_set_size, num_gibbs_alternations, model_name, corruption_name):
    """Build the dictionary key identifying one experiment configuration.

    Fix: the original formatted the notebook-global ``number_gibbs_alternations``
    instead of the ``num_gibbs_alternations`` parameter, so keys were wrong
    whenever a caller passed a different value (e.g. the 500-alternation run).
    """
    return '{}Size_{}nGibbs_{}Model_{}Corruption'.format(
        data_set_size, num_gibbs_alternations, model_name, corruption_name)
def results_for_models(models, corruption_model_name, data_set_size, num_gibbs_alternations):
    """Compute a Result for every model except the corrupting one.

    Parameters
    ----------
    models : dict mapping model name -> trained RBM
    corruption_model_name : name of the model used to corrupt the others
    data_set_size : number of visible examples taken from each model
    num_gibbs_alternations : gibbs alternations passed through to Result

    Fixes over the original:
    * compare names with ``!=`` instead of identity (``is not``), which is
      unreliable for strings;
    * use the ``num_gibbs_alternations`` parameter instead of the notebook-global
      ``number_gibbs_alternations`` when building the result key.
    """
    results = {}
    for model_name in models:
        if model_name != corruption_model_name:
            key = result_key(data_set_size, num_gibbs_alternations, model_name, corruption_model_name)
            logging.info("Getting result for {}".format(model_name))
            model_a = models[model_name]
            model_b = models[corruption_model_name]
            model_a_data = model_a.visible[:data_set_size]  # visibles that model_a was fit to
            model_b_data = model_b.visible[:data_set_size]  # visibles that model_b was fit to
            r = Result(data_set_size, num_gibbs_alternations, model_a, model_b, model_a_data, model_b_data)
            r.calculate_result()
            results[key] = r
    return results
results = results_for_models(models, corruption_model_name, data_set_size, number_gibbs_alternations)
# -
for key in models:
# plotter.plot(results[key].composite)
# plotter.plot(results[key].visibles_for_stored_hidden(9)[0])
# plotter.plot(results[key].vis_van_a)
plotter.plot(models[key].visible[:40])
# #In the cell below #
#
# I have calculated in the previous cell the loglikelyhood score of the partitioned sampling and vanilla sampling technique image-wise. So I have a score for each image. I have done this for all the MNIST digits that have been 'corrupted' by the bar images. That is RBM's trained models 1 - 9 and an RBM trained on 2's and 3's
#
# The `wins` for a given model are where the partitioned scored better than the vanilla sampling technique
#
# Conversly, `losses` are images where the vanilla score better.
#
# Intuitively, `ties` is where they scored the same, which could only really occur when the correction would be zero, or ultimately cancelled out.
for key in results:
logging.info("Plotting, win, lose and tie images for the {}".format(key))
results[key].plot_various_images()
# #Thoughts#
#
# So on a dataset of size 50, with 100 gibbs alterations we see in all cases that for the digit model, 1,2,3,..,9 that the partitioned sampling technique does either better or the same more often than the vanilla does. Let's try some different configurations.
results.update(results_for_models(models, corruption_model_name, 400, 500))
# results.update(results_for_models(models, corruption_model_name, 10, 1))
results
# with open('results_dict', 'wb') as f3le:
# pickle.dump(results,f3le, protocol = None)
with open('results_dict', 'rb') as f4le:
results = pickle.load(f4le)
# for key in results:
# if key.startswith('400'):
# logging.info("Results for hiddens")
# r = results[key].stored_hiddens
# for i in range(len(r)):
# print(results[key].imagewise_score())
| Max/MNIST-ORBM-Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming Exercise 8 - Anomaly Detection and Recommender Systems
# +
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 150)
pd.set_option('display.max_seq_items', None)
# config matplotlib inline
# %matplotlib inline
import seaborn as sns
sns.set_context('notebook')
sns.set_style('white')
# -
# ## Anomaly Detection
# Load the MATLAB data for the anomaly-detection exercise (server latency/throughput).
data1 = loadmat('data/ex8data1.mat')
data1.keys()
X1 = data1['X']
print('X1:', X1.shape)
# Scatter the raw observations before fitting the outlier detector.
plt.scatter(X1[:,0], X1[:,1], c='b', marker='x')
plt.title('Outlier detection')
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)')
# Fit a robust Gaussian (elliptic) envelope; its decision function is used
# below to rank points and threshold outliers.
clf = EllipticEnvelope()
clf.fit(X1)
# +
# Create the grid for plotting
xx, yy = np.meshgrid(np.linspace(0, 25, 200), np.linspace(0, 30, 200))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Calculate the decision function and use threshold to determine outliers
y_pred = clf.decision_function(X1).ravel()
percentile = 1.9
threshold = np.percentile(y_pred, percentile)
outliers = y_pred < threshold
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(14,5))
# Left plot
# Plot the decision function values
sns.distplot(y_pred, rug=True, ax=ax1)
# Plot the decision function values for the outliers in red
sns.distplot(y_pred[outliers], rug=True, hist=False, kde=False, norm_hist=True, color='r', ax=ax1)
ax1.vlines(threshold, 0, 0.9, colors='r', linestyles='dotted',
label='Threshold for {} percentile = {}'.format(percentile, np.round(threshold, 2)))
ax1.set_title('Distribution of Elliptic Envelope decision function values');
ax1.legend(loc='best')
# Right plot
# Plot the observations
ax2.scatter(X1[:,0], X1[:,1], c='b', marker='x')
# Plot outliers
ax2.scatter(X1[outliers][:,0], X1[outliers][:,1], c='r', marker='x', linewidths=2)
# Plot decision boundary based on threshold
ax2.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red', linestyles='dotted')
ax2.set_title("Outlier detection")
ax2.set_xlabel('Latency (ms)')
ax2.set_ylabel('Throughput (mb/s)');
# -
# ### Recommender Systems
data2 = loadmat('data/ex8_movies.mat')
data2.keys()
Y = data2['Y']
R = data2['R']
print('Y:', Y.shape)
print('R:', R.shape)
Y
R
sns.heatmap(Y, yticklabels=False, xticklabels=False)
| Notebooks/.ipynb_checkpoints/8. Anomaly Detection and Recommender Systems-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Import modules
"""
import pathlib
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from platipy.imaging import ImageVisualiser
from platipy.imaging.label.utils import get_com
from platipy.imaging.registration.linear import linear_registration
from platipy.imaging.registration.deformable import fast_symmetric_forces_demons_registration
from platipy.imaging.registration.utils import apply_transform
# %matplotlib notebook
# +
"""
Parameters
"""
breast_name_dict = {
"004":"CHESTWALL_LT",
"005":"WHOLE_BREAST",
"006":"CHEST_WALL",
"007":"WHOLE_BREAST",
"008":"CHESTWALL",
"009":"CHESTWALL",
"010":"WHOLE_BREAST",
"012":"WHOLE_BREAST",
"013":"WHOLE_BREAST",
"014":"WHOLE_BREAST",
"015":"WHOLE_BREAST",
"016":"WHOLE_BREAST",
"018":"WHOLE_BREAST",
"019":"WHOLE_BREAST",
"021":"CW_RIGHT",
"023":"WHOLE_BREAST"
}
# -
PET_timepoints={"04": ["4","5","6"],"05":["4","5","6"],"06":["4","5","6"],"07":["4","5","6"],"08":["4","5","6"],
"09":["6","7","8"],"10":["4","5","6"],"12":["4","5","6"],"13":["4","5","6"],"14":["4","5","6"],"15":["4","5","6"],"16":["3","4","5"],
"18":["4","5","6"],"19":["4","5"]}
# +
"""
Set up data
"""
data_dir = pathlib.Path(f"/home/alicja/PET_LAB_PROCESSED")
pt_id_list = sorted([i.name[4:] for i in data_dir.glob("WES*")])
timepoints=[1,2,3]
# -
pt_id ="004"
timepoint = 1
# +
"""
Define file names
"""
# Fix: a later cell reads `img_rtsim = sitk.ReadImage(filename_rtsim)`, so
# filename_rtsim must be defined (it was commented out in the original,
# producing a NameError).
filename_rtsim = str(data_dir / f"WES_{pt_id}" / "IMAGES" / f"WES_{pt_id}_CT_RTSIM.nii.gz")
breast_contour_fp="/home/alicja/PET-LAB Code/PET-LAB/PET segmentation/PET breast contours/"
# Fix: Python str has no .slice() method — use slicing to take the two-digit
# id (e.g. "004" -> "04"), matching the PET_timepoints keys.
# NOTE(review): PET_timepoints[...] is a *list* (e.g. ['4', '5', '6']); embedding
# it in the filename yields "...['4', '5', '6'].nii.gz" — confirm whether a
# single timepoint was intended here.
filename_breast_contour = breast_contour_fp + f"breast_contour_dilate_{pt_id[-2:]}_{PET_timepoints[pt_id[-2:]]}.nii.gz"
filename_breast = str(data_dir / f"WES_{pt_id}" / "LABELS" / f"WES_{pt_id}_RTSIM_LABEL_{breast_name_dict[pt_id]}_CTV.nii.gz")
filename_ct_ac = str(data_dir / f"WES_{pt_id}" / "IMAGES" / f"WES_{pt_id}_TIMEPOINT_{timepoint}_CT_AC.nii.gz")
filename_pt = str(data_dir / f"WES_{pt_id}" / "IMAGES" / f"WES_{pt_id}_TIMEPOINT_{timepoint}_PET.nii.gz")
# +
img_rtsim = sitk.ReadImage( filename_rtsim )
img_breast = sitk.ReadImage( filename_breast )
img_ct_ac = sitk.ReadImage( filename_ct_ac )
img_pt = sitk.ReadImage( filename_pt )
# -
"""
Resample PET
"""
img_pt_res = sitk.Resample(
img_pt,
img_ct_ac,
sitk.Transform(),
sitk.sitkNearestNeighbor
)
# +
"""
Input data plots
"""
vis = ImageVisualiser(img_rtsim, cut=get_com(img_breast), figure_size_in=5)
vis.add_contour(img_breast)
fig = vis.show()
fig.savefig(f"./PLOTS/WES_{pt_id}_PETSEG_0.jpeg",dpi=400)
vis = ImageVisualiser(img_ct_ac, figure_size_in=5)
vis.add_scalar_overlay(img_pt_res, colormap=plt.cm.magma, name="PET [SUV]")
fig = vis.show()
fig.savefig(f"./PLOTS/WES_{pt_id}_PETSEG_1.jpeg",dpi=400)
# +
img_rtsim_to_ac, tfm_rtsim_to_ac = linear_registration(
img_ct_ac,
img_rtsim,
shrink_factors= [10,5],
smooth_sigmas= [2,1],
sampling_rate= 1,
final_interp= 2,
metric= 'mean_squares',
optimiser= 'gradient_descent_line_search',
reg_method='rigid',
)
# -
vis = ImageVisualiser(img_ct_ac, figure_size_in=5)
vis.add_comparison_overlay(img_rtsim_to_ac)
fig = vis.show()
fig.savefig(f"./PLOTS/WES_{pt_id}_PETSEG_3.jpeg",dpi=400)
| JupyterNotebooks/.ipynb_checkpoints/loop_for_pet_tumour_segmentation-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cv2
import math
from PIL import Image, ImageStat
import sys
from os import listdir
true_po_list=[]
precision_list=[]
edge_list=[]
# K=4
def get_image(image_path):
    """Load an image and return ``(pixels, cropped_image, original_image)``.

    The top half of the image is cropped (the sky region), saved to
    ``<image_path>_cropped.png`` and converted to a ``(height, width,
    channels)`` numpy array so callers can index ``pixels[row, col]``.

    Fixes over the original:
    * the cropped file is named from ``image_path`` instead of the hidden
      global ``file`` variable;
    * plain RGB images (3 channels) are accepted in addition to RGBA and L.

    Returns None when the image mode is not recognised.
    """
    origin = Image.open(image_path, 'r')
    width, height = origin.size
    area = (0, 0, width, 0.5 * height)
    image = origin.crop(area)  # crop top half of the image
    image.save(image_path + "_cropped.png")
    width, height = image.size
    pixel_values = list(image.getdata())
    if image.mode == 'RGBA':
        channels = 4
    elif image.mode == 'RGB':
        channels = 3
    elif image.mode == 'L':
        channels = 1
    else:
        print("Unknown mode: %s" % image.mode)
        return None
    # getdata() is row-major, so the array shape is (height, width, channels).
    pixel_values = np.array(pixel_values).reshape((height, width, channels))
    return pixel_values, image, origin
def get_bri(pixels):
    """Return a per-pixel luminance matrix as a list of lists.

    Each entry is the Rec. 709 luma 0.2126*R + 0.7152*G + 0.0722*B of the
    corresponding RGBA pixel, indexable as brightness[row][col].
    """
    def _luma(px):
        r, g, b, _a = px
        return 0.2126 * r + 0.7152 * g + 0.0722 * b

    n_cols = len(pixels[0])
    return [[_luma(pixels[row, col]) for col in range(n_cols)]
            for row in range(len(pixels))]
def hsv(image):
    """Return the per-pixel HSV matrix of a PIL image via OpenCV.

    NOTE(review): the array coming from PIL is RGB-ordered while the flag used
    is COLOR_BGR2HSV — kept exactly as the original behaved; confirm whether
    COLOR_RGB2HSV was intended.
    """
    pixel_array = np.array(image)
    return cv2.cvtColor(pixel_array, cv2.COLOR_BGR2HSV)
# +
"""
Return HSL matrix
"""
def hls(image):
    """Return the per-pixel HLS matrix of a PIL image (cv2.COLOR_RGB2HLS)."""
    pixel_array = np.array(image)
    return cv2.cvtColor(pixel_array, cv2.COLOR_RGB2HLS)
# -
"""
K-means clustering to segment the image into 12 color class
clustered.png is used as output
"""
def K_means(file, K):
    """Segment the cropped image into K colour clusters with OpenCV k-means.

    Reads ``<file>_cropped.png``, writes the colour-quantised image to
    ``<file>_clustered.png`` and returns a 2-D array of cluster labels with
    the same height/width as the image (label2[row][col] -> cluster id).
    """
    base_name = file
    img = cv2.imread(base_name + "_cropped.png")
    # One row per pixel, three colour channels, as float32 for cv2.kmeans.
    samples = np.float32(img.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(samples, K, None, criteria, 50, cv2.KMEANS_PP_CENTERS)
    # Replace every pixel with its cluster centre colour and save the result.
    center = np.uint8(center)
    quantized = center[label.flatten()].reshape(img.shape)
    Image.fromarray(quantized.astype(np.uint8)).save(base_name + "_clustered.png")
    return label.reshape((len(img), len(img[0])))
# +
"""
mark out sky
"""
def mark_sky(pixels,label2,image,file):
    """Mark detected sky pixels blue in *image*, save ``<file>_sky_mark.png``,
    and append recall/precision (measured against a hand-marked
    ``<name> copy.png`` ground truth) to the globals ``true_po_list`` and
    ``precision_list``.

    Relies on the notebook globals ``HLS`` (per-pixel HLS matrix of the same
    crop), ``true_po_list`` and ``precision_list``.
    NOTE(review): indentation below was reconstructed from a whitespace-mangled
    source — confirm the nesting against the original notebook.
    """
    #get Bmax in image — the maximum blue-channel value, used to normalise tests below
    Bmax=0
    for i in pixels:
        # print (i)
        for j in i:
            if j[2]>Bmax:
                Bmax=j[2]
    # print (Bmax)
    counterclass_skypre={}   # per-cluster count of pixels that look like sky
    counterclass_all={}      # per-cluster total pixel count
    Skypre=[[0 for x in range(len(pixels[0]))] for y in range(len(pixels))]
    for i in range(len(label2)):
        for j in range(len(label2[0])):
            if label2[i][j] not in counterclass_all:
                counterclass_all[label2[i][j]]=1
            else:
                counterclass_all[label2[i][j]]+=1
            # if label2[i][j]==0:
            #add condition like white cloud as sky
            if 0.75*180>=HLS[i][j][0]>=0.35*180 or HLS[i][j][1]>=0.95*255 \
            or (HLS[i][j][1]>0.7*255 and HLS[i][j][2]<=0.2*255)\
            or (HLS[i][j][2]<=0.005*255 and pixels[i][j][2]>=0.9*Bmax) \
            or (HLS[i][j][2]>=0.03*255 and pixels[i][j][2]>=0.9*Bmax) or \
            ((pixels[i][j][2]>=0.95*Bmax) and ((abs(pixels[i][j][2]-pixels[i][j][0])<0.02*255))):
                Skypre[i][j]=1
                if label2[i][j] not in counterclass_skypre:
                    counterclass_skypre[label2[i][j]]=1
                else:
                    counterclass_skypre[label2[i][j]]+=1
    # A k-means cluster counts as sky when >70% of its pixels passed the tests above.
    skylabel_list=[]
    for i in counterclass_skypre:
        if counterclass_skypre[i]/counterclass_all[i]>0.7:
            skylabel_list.append(i)
    # print (skylabel_list)
    RED = 0
    GREEN = 1
    BLUE = 2
    data = np.asarray(image,dtype="int32")
    # print (np.shape(data))
    # Paint every pixel belonging to a sky cluster pure blue.
    for i in range(len(pixels)):
        for j in range(len(pixels[0])):
            if label2[i][j] in skylabel_list:
                data[i, j, GREEN] = 0
                data[i, j, RED] = 0
                data[i, j, BLUE] = 255
            else:
                continue
    result = Image.fromarray(data.astype(np.uint8))
    result.save(file+"_sky_mark.png")
    """
    calculate true positive rate
    """
    true_sky_counter=0
    true_positive_counter=0
    false_positive_counter=0
    tmpfile=file
    # Ground truth is a manually marked copy named "<name> copy.png"; true sky
    # pixels are the ones that read as (255, 0, 0) via cv2.imread below.
    file = file.replace('.png',' copy.png')
    marked_image=Image.open(file, 'r')
    # marked_pixel_values = list(marked_image.getdata())
    # width, height = marked_image.size
    # print (data[0][0])
    # if image.mode == 'RGBA':
    #     channels = 4
    # elif image.mode == 'L':
    #     channels = 1
    # else:
    #     print("Unknown mode: %s" % image.mode)
    #     return None
    marked_pixel_values = cv2.imread(file)
    # print (np.shape(marked_pixel_values))
    # print (np.shape(data))
    # print (np.shape(pixels))
    # marked_pixel_values = np.array(marked_pixel_values).reshape((height, width))
    # Compare predictions with the ground truth over the top half of the image.
    for i in range(int(len(marked_pixel_values)/2+1)):
        for j in range(len(marked_pixel_values[0])):
            if marked_pixel_values[i][j][0] == 255 and marked_pixel_values[i][j][1] == 0 and marked_pixel_values[i][j][2] == 0 :
                true_sky_counter+=1
                if data[i][j][0] == 0 and data[i][j][1] == 0 and data[i][j][2] == 255:
                    true_positive_counter+=1
            else:
                if data[i][j][0] == 0 and data[i][j][1] == 0 and data[i][j][2] == 255:
                    false_positive_counter+=1
            # continue
    # print ("true_positive_rate/recall for file ",tmpfile," is: ",true_positive_counter/true_sky_counter)
    true_po_list.append(true_positive_counter/true_sky_counter)
    # print ("precision for file ",tmpfile," is: ",true_positive_counter/(true_positive_counter+false_positive_counter))
    precision_list.append(true_positive_counter/(true_positive_counter+false_positive_counter))
    # print ("true sky count:", true_sky_counter)
    # print ("true positive count:",true_positive_counter)
    # print ("false positive count:",false_positive_counter)
# -
def edge_cal(file):
    """Append the proportion of Canny-edge pixels in ``<file>_cropped.png``
    to the global ``edge_list``.

    Also writes the edge map to ``<file>_cropped.pngedged.png`` (filename kept
    identical to the original behaviour).

    Fixes over the original:
    * removed the unused function-local ``matplotlib`` import;
    * replaced the Python double loop (and the shadowing of the builtin
      ``sum``) with a vectorized ``np.count_nonzero``.
    """
    cropped = file + "_cropped.png"
    img = cv2.imread(cropped, 0)  # load as grayscale for Canny
    edges = cv2.Canny(img, 150, 500)
    result = Image.fromarray(edges.astype(np.uint8))
    result.save(cropped + "edged.png")
    # Proportion of edge pixels; Canny marks edges with the value 255.
    # Threshold ~0.1 is intended for scenes with many trees (per original note).
    edge_pixels = int(np.count_nonzero(edges == 255))
    total_pixels = len(edges) * len(edges[0])
    edge_list.append(edge_pixels / total_pixels)
# +
"""
Load and feature extraction and calculation
"""
path1 = "/Users/caijieyang/Desktop/experiment"
files= listdir(path1)
# Sweep the number of k-means clusters and report average recall/precision per K.
# NOTE(review): indentation reconstructed from a whitespace-mangled source —
# confirm nesting (the summary prints appear to belong to the K loop).
for K in range(4, 16):
    # Reset the global accumulators that mark_sky() / edge_cal() append to.
    true_po_list=[]
    precision_list=[]
    edge_list=[]
    # print (files)
    for file in files:
        # Only process original panorama images, skipping generated artefacts.
        if "panorama" in file and "cropped" not in file and "clustered" not in file and "mark" not in file and "copy" not in file:
            # file=path1+file
            pixels,image,origin=get_image(file)
            # print (np.shape(pixels))
            # print (pixel1)
            brightness=get_bri(pixels)
            HSV=hsv(image)
            HLS=hls(image)
            label2=K_means(file,K)
            # edge_cal(file)
            brightness=np.asarray(brightness)
            mark_sky(pixels,label2,origin,file)
    print ("K: ",K)
    print ("average true positive rate: ",np.mean(true_po_list))
    print ("average precision rate: ",np.mean(precision_list))
    # print ("average edge rate: ",np.mean(edge_list))
    print ("####################################################")
# print (np.shape(HSV))
# print (np.shape(HLS))
# -
print (true_po_list)
# +
# print (pixels[100,200])
# print (HLS[0,:])
# +
# """
# convert brightness into array
# """
# print (np.shape(pixels))
# print (np.shape(image))
# brightness=brightness.transpose()
# print (np.shape(brightness))
# print ("pixel RGBA: ", pixels[0,0])
# print ("cloud brightness:", brightness[0][0])
# print ("cloud RGBA:", pixels[0,0])
# print ("sky brightness:", brightness[94][462])
# print ("sky RGBA:", pixels[94,462])
# print ("tree brightness:", brightness[140][127])
# print ("tree RGBA:", pixels[140,127])
# -
# +
# """
# convert img into grayscale
# """
# # grayimg = np.array(img)
# grayimg=cv2.imread(file)
# graypix = cv2.cvtColor(grayimg, cv2.COLOR_RGB2GRAY)
# # graypix=cvtColor(img,COLOR_RGB2GRAY)
# cv2.imwrite('gray_image.png',graypix)
# +
# print (np.shape(res2))
# print (np.shape(label2))
# print (label2[:,0])
# -
# +
# print (HLS[115][461])
# print (pixels[36][176])
# +
#HLS [0]:H [1]:L [2]:S
# print (np.shape(Skypre))
# print (counter)
# print (counterclass_skypre)
# -
# +
# """
# mark based soly on blue
# """
# RED = 0
# GREEN = 1
# BLUE = 2
# data = np.asarray(image,dtype="int32")
# print (np.shape(data))
# for i in range(len(brightness)):
# for j in range(len(brightness[0])):
# if 100<pixels[i][j][1]<255 and pixels[i][j][0]>100 and pixels[i][j][2]>100:
# data[i, j, GREEN] = 0
# data[i, j, RED] = 0
# data[i, j, BLUE] = 255
# else:
# continue
# result = Image.fromarray(data.astype(np.uint8))
# result.save("sky_mark1.png")
# +
# """
# transfer image into grayscale
# """
# import numpy as np
# import matplotlib.pyplot as plt
# from skimage import measure
# # Construct some test data
# # x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j]
# # r = np.sin(np.exp((np.sin(x)**3 + np.cos(y)**2)))
# # Find contours at a constant value of 0.8
# contours = measure.find_contours(img, 0.8)
# # Display the image and plot all contours found
# fig, ax = plt.subplots()
# ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
# for n, contour in enumerate(contours):
# ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
# ax.axis('image')
# ax.set_xticks([])
# ax.set_yticks([])
# plt.show()
# -
| experiment/firsttemp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from moredata.datasets import get_path
from moredata import Converter
import pandas as pd
df = pd.read_csv(get_path("airbnb-berlin-main"))
df = df.loc[(~df['latitude'].isna()) & (~df['longitude'].isna())]
df.to_json('./data/airbnb-berlin.json', orient='records')
# + pycharm={"name": "#%%\n"}
import moredata
data = moredata.models.JsonData(data_file='./data/airbnb-berlin.json', parser=moredata.parser.parse_document)
osm_enricher = moredata.enricher.osm.OSMPlacesConnector(place_name="Berlin, DE", files=['./data/tourism.csv.gz'], radius=50, geometry_intersected=True)
data_enriched = osm_enricher.enrich(data)
moredata.utils.write_json_generator_to_json("./data/airbnb-berlin-enriched", data_enriched, 100000)
# + pycharm={"name": "#%%\n"}
import pandas as pd
df_enriched = pd.read_json('./data/airbnb-berlin-enriched-0.json', orient='records')
# -
df_enriched.loc[(~df_enriched['local'].isna()) & (~df_enriched['geometry_intersected'].isna()), 'near_tourism_places'] = 'Yes'
df_enriched['near_tourism_places'] = df_enriched['near_tourism_places'].fillna('No')
df_enriched['price'] = df_enriched['price'].apply(lambda x: float(x.replace('$', '').replace(',', '')))
df_enriched['price'].describe()
import seaborn as sns
import matplotlib.pyplot as plt
sns.barplot(y="price", x="near_tourism_places", data=df_enriched)
plt.show()
# +
import folium
from moredata.utils import geodesic_point_buffer
from shapely.geometry.polygon import Polygon
from shapely import wkt
import geopandas as gpd
from util import add_categorical_legend
poi = df_enriched.loc[(~df_enriched['geometry_intersected'].isna())][['geometry_intersected', 'latitude', 'longitude']] \
.reset_index().iloc[[5]]
polygon = wkt.loads(poi['geometry_intersected'][5][0])
latitude = poi['latitude'].squeeze()
longitude = poi['longitude'].squeeze()
shp = Polygon(geodesic_point_buffer(latitude, longitude, 50))
m = folium.Map(location=[latitude, longitude], zoom_start=17, tiles='CartoDB positron')
folium.Marker(
[latitude, longitude], icon=folium.Icon(color="blue"), popup='Point of Interest'
).add_to(m)
sim_geo = gpd.GeoSeries.from_wkt([shp.wkt])
geo_j = sim_geo.to_json()
geo_j = folium.GeoJson(data=geo_j,
style_function=lambda x: {'fillColor': 'grey'})
geo_j.add_to(m)
sim_geo = gpd.GeoSeries(polygon)
geo_j = sim_geo.to_json()
geo_j = folium.GeoJson(data=geo_j,
style_function=lambda x: {'fillColor': 'black', 'color': 'black'})
geo_j.add_to(m)
m = add_categorical_legend(m, 'Polygons',
colors = ['blue','black'],
labels = ['Point of Interest', 'Tourism Place'])
m
# -
| examples/osm/osm-places.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gradient-based solver for ridge regression
# In this notebook, you will create a **gradient descent** solver for **ridge regression** and then compare it to the built-in solver in `sklearn.linear_model`.
# ## 1. Set up notebook and create data set
# After loading in some standard packages, we create a synthetic data set consisting of data points `(x,y)`:
# * `x`: 100-dimensional vector whose coordinates are independent draws from a standard normal (Gaussian) distribution
# * `y`: response value given by `y = wx + e` where `w` is a target regression function and `e` is Gaussian noise
#
# We will fix `w` to be the 100-dimensional vector whose first ten coordinates are exactly 1.0, and whose remaining coordinates are zero. Thus only the first ten coordinates of `x` are relevant to the regression task.
# %matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
# The following procedure, **generate_data**, creates a data set of a specified number of points. It is invoked as follows:
# * `trainx, trainy = generate_data(n)`
#
# Here:
# * `n` is the target number of points
# * `trainx`: `nx100` array of data points
# * `trainy`: array of `n` response values
def generate_data(n):
    """Create a synthetic regression data set of n points.

    Each point has 100 standard-normal features; the response is the sum of
    the first ten features plus unit Gaussian noise (i.e. target weight
    vector = ten ones followed by ninety zeros).

    Returns (trainx, trainy): an (n, 100) array and a length-n array.
    """
    d = 100
    target_w = np.zeros(d)
    target_w[:10] = 1.0
    features = np.random.normal(size=(n, d))
    noise = np.random.normal(size=(n))
    responses = np.dot(features, target_w) + noise
    return features, responses
# ## 2. Gradient descent solver for ridge regression
# <font color="magenta">**For you to do:**</font> Define a procedure, **ridge_regression_GD**, that uses gradient descent to solve the ridge regression problem. It is invoked as follows:
#
# * `w,b,losses = ridge_regression_GD(x,y,C)`
#
# Here, the input consists of:
# * training data `x,y`, where `x` and `y` are numpy arrays of dimension `n`-by-`d` and `n`, respectively (if there are `n` training points)
# * regularization constant `C`
#
# The function should find the `d`-dimensional vector `w` and offset `b` that minimize the ridge regression loss function (with regularization constant `C`), and return:
# * `w` and `b`
# * `losses`, an array containing the ridge regression loss at each iteration
#
# <font color="magenta">Advice:</font> First figure out the derivative, which has a relatively simple form. Next, when implementing gradient descent, think carefully about two issues.
#
# 1. What is the step size?
# 2. When has the procedure converged?
#
# Take the time to experiment with different ways of handling these.
def ridge_regression_GD(x,y,C):
    """Gradient-descent solver for ridge regression with the bias folded into w.

    x : (n, d) data matrix;  y : length-n responses;  C : regularization constant.
    Returns (w, b, losses): w is the (d, 1) weight vector, b the offset, and
    losses the recorded squared-error loss at each iteration.
    NOTE(review): `losses` stores only the squared error, not the +C*||w||^2
    penalty term (see the commented-out line below); the step size decays as
    alpha/i after the first iteration.
    """
    ### Put your code here
    m,n = x.shape
    w = np.zeros((n+1,1))  # w[0] is the bias term
    x_bias = np.c_[np.ones((m,1)),x]  # prepend a column of ones for the bias
    max_iter = 10000
    alpha = 1e-2
    losses = np.zeros(max_iter)
    for i in range(max_iter):
        h = np.dot(x_bias, w)
        delta_y = y.reshape(-1,1)-h.reshape(-1,1)
        loss = np.dot(delta_y.T, delta_y)
        losses[i] = loss
        if i!=0:
            # Decaying step alpha/i; the bias row of the penalty gradient is zero
            # so the bias is not regularized.
            w -= alpha/(i)*(-2*np.dot(x_bias.T, delta_y) + np.r_[np.zeros((1,1)), 2*C*w[1:]])
            # w -= alpha/(i)*(-2*np.dot(x.T, delta_y))
        else:
            # First iteration: full step alpha (avoids division by zero).
            w -= alpha*(-2*np.dot(x_bias.T, delta_y) + np.r_[np.zeros((1,1)), 2*C*w[1:]])
    # b -= -2*np.sum(delta_y, axis=0)
    # loss = np.dot(delta_y.T, delta_y) + C*np.dot(w.T, w)
    return w[1:],w[0],losses
def ridge_regression_GD2(x,y,C):
    """Gradient-descent ridge regression with separate weight vector w and bias b.

    x : (n, d) data matrix;  y : length-n responses;  C : regularization constant.
    Returns (w, b, losses): w is (d, 1), b a scalar offset, losses the
    squared-error loss per iteration (the regularizer is not included in
    `losses`). Step size decays as alpha/i after the first iteration.
    """
    ### Put your code here
    m,n = x.shape
    w = np.zeros((n,1))
    b = 0
    max_iter = 1000
    alpha = 1e-3
    losses = np.zeros(max_iter)
    for i in range(max_iter):
        h = np.dot(x, w) + b
        delta_y = y.reshape(-1,1)-h.reshape(-1,1)
        loss = np.dot(delta_y.T, delta_y)
        losses[i] = loss
        if i!=0:
            # w and b are updated separately; both updates must be scaled by the
            # learning rate alpha (comment translated from the original Chinese).
            w -= alpha/i*(-2*np.dot(x.T, delta_y) + 2*C*w)
            b -= alpha/i*(-2*np.sum(delta_y))
        else:
            # First iteration: full step alpha (avoids division by zero).
            w -= alpha*(-2*np.dot(x.T, delta_y) + 2*C*w)
            b -= alpha*(-2*np.sum(delta_y))
    return w,b,losses
# NOTE(review): scratch expressions — `x`, `w`, `b` are only defined in later
# cells, so running this cell first raises NameError; also the residual is
# computed as y - x.w + b, which likely meant y - (x.w + b) (operator
# precedence). Left untouched as exploratory code.
error = y.reshape(-1,1) - np.dot(x, w) + b
np.sum(np.r_[np.zeros((1,1)), np.ones((2,1))])
# Let's try it out and print a graph of the loss values during the optimization process.
# Generate 200 data points
n = 200
x,y = generate_data(n)
# Set regularization constant
C = 1.0
x.shape, y.shape
x[:20]
# Run gradient descent solver
w, b, losses = ridge_regression_GD2(x,y,C)
w
losses
b
# +
# Plot the losses
plt.plot(losses,'r')
plt.xlabel('Iterations', fontsize=14)
plt.ylabel('Loss', fontsize=14)
plt.show()
# -
# <font color="magenta">**Something to think about**</font>
#
# 1. In setting the step size, does it work to use a fixed schedule 1/t? Why or why not?
#
# 2. Can you set up the gradient descent procedure in such a way that on each iteration, the loss monotonically decreases?
#
# ## 3. Evaluate the gradient descent solver
# Now let's compare the regressor found by your gradient descent procedure to that returned by the built-in ridge regression solver in `sklearn`. We will compare them in two ways:
# * Their MSE values
# * The distance between the corresponding `w`-vectors
#
# The latter should be smaller than 10^{-4}.
#
y.reshape(-1,1).shape
np.shape(y.reshape(-1,1) -np.dot(x, w))
def compute_mse(w,b,x,y):
    """Mean squared error of the predictions x.w + b against the targets y.

    w : (d, 1) weight vector;  b : scalar offset;  x : (m, d) data;  y : length-m targets.
    Returns the MSE as a 1x1 array (matching the original's np.dot result).

    Fix: divide by the number of samples in *this* data set (len(y)) rather
    than the notebook-global `n`, which silently gave wrong values for inputs
    of any other size.
    """
    residuals = y.reshape(-1,1) - (np.dot(x, w) + b)
    return np.dot(residuals.T, residuals)/len(y)
# Generate 200 data points
n = 200
x,y = generate_data(n)
# Set regularization constant
C = 10.0
# Run gradient descent solver and compute its MSE
w, b, losses = ridge_regression_GD2(x,y,C)
# Use built-in routine for ridge regression and compute MSE
regr = linear_model.Ridge(alpha=C)
regr.fit(x, y)
# Print MSE values and L2 distance between the regression functions
print("MSE of gradient descent solver: ", compute_mse(w,b,x,y))
print("MSE of built-in solver: ", mean_squared_error(regr.predict(x), y))
print("Distance between w-coefficients: ", np.linalg.norm(w-regr.coef_))
# <font color="magenta">**Something to think about**</font>
#
# The data was originally generated using a linear function in which only ten of the 100 features (the first ten) were relevant. Does the vector `w` returned by ridge regression correctly identify the relevant features?
w
regr.coef_
| week5 - Optimization and Geometry/DSE220x_PA5/DSE220x_PA5/ridge-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Auto-tuning a Convolutional Network for NVIDIA GPU
# ==================================================
# **Author**: `<NAME> <https://github.com/merrymercy>`_, `<NAME> <https://github.com/eqy/>`_
#
# Auto-tuning for specific devices and workloads is critical for getting the
# best performance. This is a tutorial on how to tune a whole convolutional
# network for NVIDIA GPU.
#
# The operator implementation for NVIDIA GPU in TVM is written in template form.
# The template has many tunable knobs (tile factor, unrolling, etc).
# We will tune all convolution and depthwise convolution operators
# in the neural network. After tuning, we produce a log file which stores
# the best knob values for all required operators. When the TVM compiler compiles
# these operators, it will query this log file to get the best knob values.
#
# We also released pre-tuned parameters for some NVIDIA GPUs. You can go to
# `NVIDIA GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#nvidia-gpu>`_
# to see the results.
#
# Note that this tutorial will not run on Windows or recent versions of macOS. To
# get it to run, you will need to wrap the body of this tutorial in a :code:`if
# __name__ == "__main__":` block.
#
#
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute:
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
#
#
# +
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_executor as runtime
# -
# Define Network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
#
#
def get_network(name, batch_size):
    """Get the symbol definition and random weight of a network.

    Returns (mod, params, input_shape, output_shape) for the requested model.
    NOTE(review): `dtype` is read from the notebook-global scope (set in the
    tuning-options cell), not passed as a parameter.
    """
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if "resnet" in name:
        # e.g. "resnet-18" -> 18 layers
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.vgg.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "squeezenet_v1.1":
        mod, params = relay.testing.squeezenet.get_workload(
            batch_size=batch_size, version="1.1", dtype=dtype
        )
    elif name == "inception_v3":
        # Inception v3 expects a larger 299x299 input.
        input_shape = (batch_size, 3, 299, 299)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = mod["main"]
        # Append a softmax so the imported network outputs probabilities.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Unsupported network: " + name)
    return mod, params, input_shape, output_shape
# Set Tuning Options
# ------------------
# Before tuning, we apply some configurations.
#
#
# +
#### DEVICE CONFIG ####
target = tvm.target.cuda()  # tune for the local CUDA GPU
#### TUNING OPTION ####
network = "resnet-18"
log_file = "%s.log" % network  # best tuning records are collected here
dtype = "float32"
tuning_option = {
    "log_filename": log_file,
    "tuner": "xgb",          # search strategy: XGBoost cost-model tuner
    "n_trial": 2000,         # max measurements per task
    "early_stopping": 600,   # stop a task after this many trials w/o improvement
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(timeout=10),
        runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),
    ),
}
# -
# <div class="alert alert-info"><h4>Note</h4><p>How to set tuning options
#
# In general, the default value provided here works well.
#
# If you have large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning runs longer.
#
# If you have multiple devices, you can use all of them for measurement to
# accelerate the tuning process. (see the 'Scale up measurement` section below).</p></div>
#
#
#
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
#
#
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of autotvm tasks sequentially.

    Intermediate records accumulate in ``log_filename + ".tmp"``; after all
    tasks are tuned, only the best record per workload is written to
    ``log_filename`` and the temporary file is removed.
    """
    # Start from a clean temporary log.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    n_tasks = len(tasks)
    for idx, task in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (idx + 1, n_tasks)

        # Instantiate the requested search strategy.
        if tuner in ("xgb", "xgb-rank"):
            tuner_obj = XGBTuner(task, loss_type="rank")
        elif tuner == "ga":
            tuner_obj = GATuner(task, pop_size=100)
        elif tuner == "random":
            tuner_obj = RandomTuner(task)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(task)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # Warm-start the tuner with records from previously tuned tasks.
        if use_transfer_learning and os.path.isfile(tmp_log_file):
            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # Never request more trials than the config space can offer.
        budget = min(n_trial, len(task.config_space))
        tuner_obj.tune(
            n_trial=budget,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(budget, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # Keep only the best record per workload, then drop the temporary log.
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
#
#
# +
def tune_and_evaluate(tuning_opt):
    """Extract conv2d tasks from the network, tune them, then benchmark.

    NOTE(review): relies on the module-level globals ``network``, ``target``,
    ``log_file`` and ``dtype`` defined earlier in this script.
    """
    # extract workloads from relay program
    print("Extract tasks...")
    mod, params, input_shape, out_shape = get_network(network, batch_size=1)
    # Only nn.conv2d workloads are tuned; other ops use default schedules.
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )
    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)
    # compile kernels with history best records
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
        # load parameters
        dev = tvm.device(str(target), 0)
        module = runtime.GraphModule(lib["default"](dev))
        # Random input is fine here: we measure latency, not accuracy.
        data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
        module.set_input("data", data_tvm)
        # evaluate
        print("Evaluate inference time cost...")
        print(module.benchmark(dev, number=1, repeat=600))
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
# -
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended. One sample output is listed below.
# It takes about 4 hours to get the following output on a 32T AMD Ryzen Threadripper.
# The tuning target is NVIDIA 1080 Ti.
# (You can see some errors during compilation. If the tuning is not stuck, it is okay.)
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/12] Current/Best: 541.83/3570.66 GFLOPS | Progress: (960/2000) | 1001.31 s Done.
# [Task 2/12] Current/Best: 0.56/ 803.33 GFLOPS | Progress: (704/2000) | 608.08 s Done.
# [Task 3/12] Current/Best: 103.69/1141.25 GFLOPS | Progress: (768/2000) | 702.13 s Done.
# [Task 4/12] Current/Best: 2905.03/3925.15 GFLOPS | Progress: (864/2000) | 745.94 sterminate called without an active exception
# [Task 4/12] Current/Best: 2789.36/3925.15 GFLOPS | Progress: (1056/2000) | 929.40 s Done.
# [Task 5/12] Current/Best: 89.06/1076.24 GFLOPS | Progress: (704/2000) | 601.73 s Done.
# [Task 6/12] Current/Best: 40.39/2129.02 GFLOPS | Progress: (1088/2000) | 1125.76 s Done.
# [Task 7/12] Current/Best: 4090.53/5007.02 GFLOPS | Progress: (800/2000) | 903.90 s Done.
# [Task 8/12] Current/Best: 4.78/1272.28 GFLOPS | Progress: (768/2000) | 749.14 s Done.
# [Task 9/12] Current/Best: 1391.45/2325.08 GFLOPS | Progress: (992/2000) | 1084.87 s Done.
# [Task 10/12] Current/Best: 1995.44/2383.59 GFLOPS | Progress: (864/2000) | 862.60 s Done.
# [Task 11/12] Current/Best: 4093.94/4899.80 GFLOPS | Progress: (224/2000) | 240.92 sterminate called without an active exception
# [Task 11/12] Current/Best: 3487.98/4909.91 GFLOPS | Progress: (480/2000) | 534.96 sterminate called without an active exception
# [Task 11/12] Current/Best: 4636.84/4912.17 GFLOPS | Progress: (1184/2000) | 1381.16 sterminate called without an active exception
# [Task 11/12] Current/Best: 50.12/4912.17 GFLOPS | Progress: (1344/2000) | 1602.81 s Done.
# [Task 12/12] Current/Best: 3581.31/4286.30 GFLOPS | Progress: (736/2000) | 943.52 s Done.
# Compile...
# Evaluate inference time cost...
# Mean inference time (std dev): 1.07 ms (0.05 ms)
#
# As a reference baseline, the time cost of MXNet + TensorRT on resnet-18 is 1.30ms. So we are a little faster.
#
#
# <div class="alert alert-info"><h4>Note</h4><p>**Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org</p></div>
#
#
#
#
# Scale up measurement by using multiple devices
# ----------------------------------------------
# If you have multiple devices, you can use all of them for measurement.
# TVM uses the RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 GPU cards, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#
# Then open another new terminal for the RPC server. We need to start one dedicated server
# for each device. We use a string key to distinguish the types of devices.
# You can pick a name you like.
# (Note: For rocm backend, there are some internal errors with the compiler,
# we need to add `--no-fork` to the argument list.)
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=127.0.0.1:9190 --key=1080ti
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=127.0.0.1 --port=9190
#
# For example, if we have four 1080ti, two titanx and one gfx900, the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# 1080ti 4 4 0
# titanx 2 2 0
# gfx900 1 1 0
# ----------------------------------
#
# Finally, we need to change the tuning option to use RPCRunner. Use the code below
# to replace the corresponding part above.
#
#
# Same options as above, but measurements are dispatched through the RPC
# tracker so multiple registered devices run trials in parallel.
tuning_option = {
    "log_filename": log_file,
    "tuner": "xgb",
    "n_trial": 2000,
    "early_stopping": 600,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(timeout=10),
        runner=autotvm.RPCRunner(
            "1080ti",  # change the device key to your key
            "127.0.0.1",  # tracker host
            9190,  # tracker port
            number=20,
            repeat=3,
            timeout=4,
            min_repeat_ms=150,
        ),
    ),
}
| _downloads/d1434e80dd27eef6b1c9cbaa13f1197b/tune_relay_cuda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(raster)
library(caret)
library(randomForest)
# +
set.seed(1000)

# Load reference data (vector features with a 'Class' attribute)
RD.files <- './Data/Reference_Data.shp'
RD <- shapefile(RD.files)

# Load predictors (mean seasonal spectral variations represented by FPCA scores rasters)
data.folder <- './Data/Predictors'
#file.names <- list.files(data.folder, pattern='*.tif$') #uncomment to take all the predictors
file.names <- list.files(data.folder, pattern='f_') #this takes the NDVI...change the pattern to load the proper predictor
file.paths <- paste(data.folder, file.names, sep="/")

# RasterStack containing all FPCA scores pixels (all seasonal spectral variations)
predictors <- stack(file.paths)
crs(predictors) <- crs(RD)
# -
# Extract predictor values at the reference-data locations
pred.val <- extract(predictors, RD, method = 'bilinear')

# Build the training data frame
dataset <- as.data.frame(pred.val)
dataset$CLASS <- RD$Class

# Down-sampling: draw the same number of samples from every class
nc <- length(unique(dataset$CLASS)) # number of target classes
# NOTE(review): 'sample' shadows base::sample; name kept because train()
# below passes it as sampsize.
sample <- rep(ceiling(0.75 * min(table(dataset$CLASS))), nc)
# +
# Random Forest Recursive Feature Elimination
subsets <- seq(2, ncol(dataset) - 1, 3)
set.seed(1000)
ctrl <- rfeControl(functions = rfFuncs,
                   method = "repeatedcv",
                   number = 10,
                   repeats = 5,
                   verbose = F)
# Fix: the original index c(1:ncol(dataset)-1) parses as (1:ncol(dataset)) - 1,
# i.e. 0..n-1; it only worked because R silently drops index 0. The form below
# states the intent: all predictor columns, excluding CLASS (the last column).
prProfile <- rfe(x = dataset[, 1:(ncol(dataset) - 1)], y = as.factor(dataset$CLASS),
                 sizes = subsets,
                 rfeControl = ctrl)
pr <- predictors(prProfile)[1:prProfile$bestSubset]
# +
# Train the final model on the selected predictors only
set.seed(1000)
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
dataset <- dataset[, c(pr, 'CLASS')]
mtry <- 1:sqrt(ncol(dataset))
tunegrid <- expand.grid(.mtry = mtry)
rfDownsampled <- train(as.factor(CLASS) ~ ., data = dataset,
                       method = "rf",
                       ntree = 1500,
                       tuneGrid = tunegrid,
                       metric = "Accuracy",
                       strata = as.factor(dataset$CLASS),
                       trControl = ctrl,
                       sampsize = sample)
# -
# Resample predictors to 10 m and predict the vegetation map
map <- predict(disaggregate(predictors, c(2, 2), method = 'bilinear'), rfDownsampled)

# Plot map
plot(map)

# Export raster. Fix: the original wrote the file twice; the first call lacked
# overwrite=TRUE and errored whenever the output already existed.
writeRaster(map, './Data/Results/VegetationMap.tif', datatype = 'INT2S', overwrite = TRUE)

# Export model results
write.table(rfDownsampled$results, dec = ',', './Data/Results/TOPORes.txt')
write.table(rfDownsampled$finalModel$confusion, dec = ',', './Data/Results/TOPOFinMod.txt')
| RF_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow2_p36)
# language: python
# name: conda_tensorflow2_p36
# ---
import pandas as pd
import numpy as np
np.set_printoptions(precision=6, suppress=True)
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn import linear_model
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import (LinearLocator, MultipleLocator, FormatStrFormatter)
from matplotlib.dates import MONDAY
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
from matplotlib import gridspec
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# %matplotlib inline
# +
plt.rcParams['figure.figsize'] = ((8/2.54), (6/2.54))  # cm -> inches (8 x 6 cm)
plt.rcParams["font.family"] = "Arial"
plt.rcParams["mathtext.default"] = "rm"
plt.rcParams.update({'font.size': 11})
MARKER_SIZE = 15
# Pastel (cmap_m) and saturated (cmap) color palettes used by the plots below.
cmap_m = ["#f4a6ad", "#f6957e", "#fccfa2", "#8de7be", "#86d6f2", "#24a9e4", "#b586e0", "#d7f293"]
cmap = ["#e94d5b", "#ef4d28", "#f9a54f", "#25b575", "#1bb1e7", "#1477a2", "#a662e5", "#c2f442"]
plt.rcParams['axes.spines.top'] = False
# plt.rcParams['axes.edgecolor'] =
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
# -
# # Data loading
# Load the 2020_S dataset (presumably summer season — verify naming convention).
l = np.load('./results/2020_S/fw_ct_dataset.npz', allow_pickle=True)
data_indices = l['data_indices']
input_data = l['input_data']
output_label = l['output_label']
# Normalisation bounds saved with the dataset; used later to de-normalise
# predictions back to physical units.
INPUT_MAXS = l['INPUT_MAXS']
INPUT_MINS = l['INPUT_MINS']
OUTPUT_MAX = l['OUTPUT_MAX']
OUTPUT_MIN = l['OUTPUT_MIN']
input_data = input_data.astype('float32')
output_label = output_label.astype('float32')
input_data = input_data.reshape(input_data.shape[0], -1)  # flatten features per sample
# Shuffle all three arrays consistently, then 70/30 train/validation split.
data_indices, input_data, output_label = shuffle(data_indices, input_data, output_label, random_state=3101)
N_TRAIN = int(input_data.shape[0]*.7)
train_input = input_data[:N_TRAIN, ...]
train_label = output_label[:N_TRAIN, ...]
train_indices = data_indices[:N_TRAIN]
val_input = input_data[N_TRAIN:, ...]
val_label = output_label[N_TRAIN:, ...]
val_indices = data_indices[N_TRAIN:]
# # Validation data
# Load the 2020_W dataset (presumably winter season — verify naming convention).
# NOTE(review): this overwrites INPUT_MAXS/OUTPUT_MAX etc. from the 2020_S
# load above; the de-normalisation below therefore uses the 2020_W bounds.
l = np.load('./results/2020_W/fw_ct_dataset.npz', allow_pickle=True)
data_indices = l['data_indices']
input_data = l['input_data']
output_label = l['output_label']
INPUT_MAXS = l['INPUT_MAXS']
INPUT_MINS = l['INPUT_MINS']
OUTPUT_MAX = l['OUTPUT_MAX']
OUTPUT_MIN = l['OUTPUT_MIN']
input_data = input_data.astype('float32')
output_label = output_label.astype('float32')
input_data = input_data.reshape(input_data.shape[0], -1)
# + tags=[]
# Take the first 9% of each of the three equal thirds of the file as
# "seen" indices; every remaining index becomes the test set.
N_TRAIN = int(input_data.shape[0]*.09)
N_DEV = int(input_data.shape[0]/3)
TRAIN_INDEX = (
    list(range(N_TRAIN))
    + list(range(N_DEV, N_DEV + N_TRAIN))
    + list(range(N_DEV * 2, N_DEV * 2 + N_TRAIN))
)
# Membership test against a set is O(1) per index; the original tested
# "i not in TRAIN_INDEX" against the list, which was O(n) per index (O(n^2)
# overall) for the same result.
_train_index_set = set(TRAIN_INDEX)
TEST_INDEX = [i for i in range(input_data.shape[0]) if i not in _train_index_set]
test_input = input_data[TEST_INDEX, ...]
test_label = output_label[TEST_INDEX, ...]
test_indices = data_indices[TEST_INDEX]
# -
print(f'number of data set: {input_data.shape[0]}')
print(f'number of training set: {train_input.shape[0]}')
print(f'number of validation set: {val_input.shape[0]}')
print(f'number of test set: {test_input.shape[0]}')
# # Model construction
# Fit an ordinary least-squares baseline on the training split.
reg = linear_model.LinearRegression()
model = reg.fit(train_input, train_label)
pred_output = model.predict(test_input)
# De-normalise predictions and labels back to physical units.
pred_output = pred_output*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN
test_label = test_label*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN
# +
# Scatter plot: observed (x) vs. predicted (y).
fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))
ax0 = plt.subplot()
ax0.spines['right'].set_visible(False)
ax0.spines['left'].set_position(('outward', 5))
ax0.spines['bottom'].set_position(('outward', 5))
ax0.plot(test_label, pred_output, 'o', ms=5, mec='k', c=cmap[0])
fig.tight_layout()
# -
# Time-indexed frame of labels and predictions (index = timestamp column of
# test_indices — presumably; confirm against the dataset builder).
pred_df = pd.DataFrame(test_label, index=test_indices[:, 0], columns=['label'])
pred_df['pred'] = pred_output
pred_df.index = pd.DatetimeIndex(pred_df.index)
# +
# Time series: observed (line+markers) vs. predicted (markers only).
fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))
ax0 = plt.subplot()
ax0.spines['right'].set_visible(False)
ax0.spines['left'].set_position(('outward', 5))
ax0.spines['bottom'].set_position(('outward', 5))
ax0.plot(pred_df.index, pred_df['label'], '-o', ms=5, mec='k', c=cmap[4])
ax0.plot(pred_df.index, pred_df['pred'], 'o', ms=5, mec='k', c=cmap[0])
ax0.set_ybound(0, 2)
fig.tight_layout()
# -
# Persist predictions for later comparison with other models.
pred_df.to_csv('./results/model_output/linreg_pre.csv')
| 4-3_LinearReg_pre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing Necessary Libraries
import numpy as np # linear algebra
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from PIL import Image
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from keras.utils import np_utils
import cv2
# +
# Directory listings (file names only) of the two image classes.
oilspill = os.listdir(r"C:\Users\kenny\Desktop\DataScience Related Affair\Oil spill-data\Spill_Data\OilSpill")
print(oilspill[:10]) #the output we get are the .jpg files
nospill = os.listdir(r"C:\Users\kenny\Desktop\DataScience Related Affair\Oil spill-data\Spill_Data\NoSpill")
print('\n')
print(nospill[:10])
# -
# ## Data Preprocessing
# +
data = []
labels = []

# Oil-spill images -> label 1. Each image is resized to 50x50 and scaled
# to [0, 1].
for img in oilspill:
    try:
        img_read = plt.imread(r"C:\Users\kenny\Desktop\DataScience Related Affair\Oil spill-data\Spill_Data\OilSpill" + "/" + img)
        img_resize = cv2.resize(img_read, (50, 50))
        img_array = img_to_array(img_resize)
        # Fix: the original assigned the scaled result to a typo'd name
        # ("img_aray"), so oil-spill images were appended UN-normalised while
        # no-spill images were scaled to [0, 1] — inconsistent model inputs.
        img_array = img_array / 255
        data.append(img_array)
        labels.append(1)
    except Exception:
        # Best-effort: skip unreadable/non-image files (narrowed from the
        # original bare except).
        pass

# No-spill images -> label 0.
for img in nospill:
    try:
        img_read = plt.imread(r"C:\Users\kenny\Desktop\DataScience Related Affair\Oil spill-data\Spill_Data\NoSpill" + "/" + img)
        img_resize = cv2.resize(img_read, (50, 50))
        img_array = img_to_array(img_resize)
        img_array = img_array / 255
        data.append(img_array)
        labels.append(0)
    except Exception:
        pass
# -
# Stack the image list into one array and shuffle images and labels with the
# same random permutation so pairs stay aligned.
image_data = np.array(data)
labels = np.array(labels)
perm = np.random.permutation(image_data.shape[0])  # same RNG draws as shuffle(arange(n))
image_data = image_data[perm]
labels = labels[perm]

from sklearn.model_selection import train_test_split

# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(image_data, labels, test_size=0.2, random_state=42)

# One-hot encode the two classes for the softmax output layer.
y_train = np_utils.to_categorical(y_train, 2)
y_test = np_utils.to_categorical(y_test, 2)

print(f'Shape of training image : {x_train.shape}')
print(f'Shape of testing image : {x_test.shape}')
print(f'Shape of training labels : {y_train.shape}')
print(f'Shape of testing labels : {y_test.shape}')
# ## Architecture of the CNN model
# +
import keras
from keras.layers import Dense, Conv2D
from keras.layers import Flatten
from keras.layers import MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.models import Sequential
from keras import backend as K
from keras import optimizers
# +
# 50x50 RGB inputs, matching the resize in the loading loop above.
inputShape= (50,50,3)
model=Sequential()
# Block 1: conv -> pool -> batchnorm -> dropout
model.add(Conv2D(32, (3,3), activation = 'relu', input_shape = inputShape))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization(axis =-1))
model.add(Dropout(0.2))
# Block 2
model.add(Conv2D(32, (3,3), activation = 'relu'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.2))
# Block 3
model.add(Conv2D(32, (3,3), activation = 'relu'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.2))
# Classifier head: flatten -> dense -> 2-way softmax
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.5))
model.add(Dense(2, activation = 'softmax'))
# -
model.summary()
#compile the model
# NOTE(review): with a 2-unit softmax output, 'categorical_crossentropy' is
# the conventional pairing; 'binary_crossentropy' here applies element-wise
# crossentropy over the two outputs — confirm this is intended.
model.compile(loss = 'binary_crossentropy', optimizer = 'Adam', metrics = ['accuracy'])
# Train for 10 epochs, validating on the held-out test split each epoch.
H = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10)
print(H.history.keys())
# summarize history for accuracy
# NOTE(review): history key names depend on the Keras version — older Keras
# records 'acc'/'val_acc' (assumed here); tf.keras 2.x records
# 'accuracy'/'val_accuracy'. The print above shows the actual keys; adjust on
# a KeyError.
plt.plot(H.history['acc'])
plt.plot(H.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(H.history['loss'])
plt.plot(H.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper right')
plt.show()
# make predictions on the test set
preds = model.predict(x_test)  # (n_samples, 2) class probabilities from the softmax head
# +
from sklearn.metrics import accuracy_score
# argmax converts one-hot labels / probability rows back to class indices.
print(accuracy_score(y_test.argmax(axis=1), preds.argmax(axis=1)))
# -
from sklearn.metrics import classification_report
print(classification_report(y_test.argmax(axis=1), preds.argmax(axis=1)))
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Fix: the docstring above must be the FIRST statement of the function to
    # actually become its __doc__; the original placed it after these imports,
    # leaving it as a no-op string expression.
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels

    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalise so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax

# Render the confusion matrix for the two classes (0 = no spill, 1 = spill).
class_names = np.array((0, 1))
plot_confusion_matrix(y_test.argmax(axis=1), preds.argmax(axis=1), classes=class_names, title='Confusion Matrix')
| Oil Spill Classification using CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] editable=true
# # Part I. Pre-Processing the raw csv Files
# + [markdown] editable=true
# #### Import Python packages
# + editable=true
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# + [markdown] editable=true
# #### Get list of all raw csv files
# + editable=true
# Path to the folder containing the raw csv files
filepath = os.getcwd() + '/event_data'

# Collect every path under the folder. Fix: the original re-assigned
# file_path_list on each os.walk iteration, keeping only the paths from the
# LAST directory visited; extend() accumulates paths from every directory.
# (In the stock dataset event_data is flat, so glob('*') returns only files —
# confirm if subdirectories are ever added.)
file_path_list = []
for root, dirs, files in os.walk(filepath):
    file_path_list.extend(glob.glob(os.path.join(root, '*')))
print(file_path_list)
# + [markdown] editable=true
# #### Processing the files to create the data file csv that will be used for Apache Casssandra tables
# + editable=true
# List that will contain all the data rows from all the raw csv files
full_data_rows_list = []
# for every filepath in the file path list
for f in file_path_list:
    # reading csv file
    with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        #ignoring the first line of the file, which contains the column names
        next(csvreader)
        # extracting each data row one by one and appending to the list
        for line in csvreader:
            full_data_rows_list.append(line)
# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
                'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # Skip rows with an empty artist field (log lines without a song play).
        if (row[0] == ''):
            continue
        # Project only the columns needed by the Cassandra tables below.
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))
# + editable=true
# Number of rows in the newly created csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
    print(sum(1 for line in f))
# + [markdown] editable=true
# # Part II. Data Modeling using Apache Cassandra
#
# ## The newly created CSV file titled <font color=red>event_datafile_new.csv</font>, located within the current directory, contains the following columns:
# - artist
# - firstName of user
# - gender of user
# - item number in session
# - last name of user
# - length of the song
# - level (paid or free song)
# - location of the user
# - sessionId
# - song title
# - userId
#
# The image below is a screenshot of what the pre-processed data looks like in the <font color=red>**event_datafile_new.csv**</font> after the code above is run:<br>
#
# <img src="images/image_event_datafile_new.jpg">
# + [markdown] editable=true
# #### Creating a Cluster
# + editable=true
# Making a connection to a Cassandra instance on local machine
# (127.0.0.1)
from cassandra.cluster import Cluster
cluster = Cluster()
# To establish connection for executing queries, creating a session
session = cluster.connect()
# + [markdown] editable=true
# #### Create Keyspace
# + editable=true
try:
    # SimpleStrategy with replication_factor 1 is only appropriate for a
    # single-node local development cluster.
    session.execute("""CREATE KEYSPACE IF NOT EXISTS sparkifydb
                    WITH REPLICATION =
                    {'class' : 'SimpleStrategy', 'replication_factor' : 1}""")
except Exception as e:
    print(e)
# + [markdown] editable=true
# #### Set Keyspace
# + editable=true
try :
    session.set_keyspace('sparkifydb')
except Exception as e:
    print(e)
# + [markdown] editable=true
# ### With Apache Cassandra, database tables are modelled keeping in mind the queries we want to run. For this project, we have 3 types of queries. We will create separate tables for each type of query.
# + [markdown] editable=true
# #### Sample Query 1 : Give me the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 4
#
# #### For this type of query, we will create a table called <font color=blue>**"songsBySessions"**</font> where the PRIMARY KEY is <font color=blue>**(sessionId, itemInSession)**</font>.
# + editable=true
# Table for Query 1: partition key sessionId, clustering column itemInSession,
# so WHERE sessionId = ? AND itemInSession = ? is a direct key lookup.
query = """CREATE TABLE IF NOT EXISTS songsBySessions \
(artist text, song text, length float, sessionId int, itemInSession int, \
PRIMARY KEY (sessionId, itemInSession))"""
try:
    session.execute(query)
except Exception as e:
    print(e)
# + editable=true
#Inserting data to the songsBySessions table
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        #print(line)
        query = "INSERT INTO songsBySessions (artist, song, length, sessionId, itemInSession)"
        query = query + " VALUES (%s, %s, %s, %s, %s)"
        # Cast numeric columns explicitly; csv.reader yields strings only.
        session.execute(query, (line[0], line[9], float(line[5]), int(line[8]), int(line[3])))
# + editable=true
#SELECT statement to verify the data inserted into the table
query = """SELECT artist, song, length from songsBySessions where sessionId = 338 and itemInSession = 4"""
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.artist, row.song, row.length)
# + [markdown] editable=true
# #### 2. Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 182
#
# #### For this type of query, we will create a table called <font color=blue>**"songsByUsers"**</font> where the PRIMARY KEY is <font color=blue>**(userId, sessionId, itemInSession)**</font>. Here, we had to include <font color=blue>**itemInSession**</font> in our primary key as we need the songs sorted by itemInSession
# + editable=true
#Creating the songsByUsers table
# Partition key userId, clustering columns (sessionId, itemInSession): rows
# for a user's session come back sorted by itemInSession, as Query 2 requires.
query = """CREATE TABLE IF NOT EXISTS songsByUsers \
(artist text, song text, itemInSession int, user text, userId int, sessionId int, \
PRIMARY KEY (userId, sessionId, itemInSession))"""
try:
    session.execute(query)
except Exception as e:
    print(e)
# + editable=true
#Inserting data to the songsByUsers table
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        #print(line)
        query = "INSERT INTO songsByUsers (artist, song, itemInSession, user, userId, sessionId)"
        query = query + " VALUES (%s, %s, %s, %s, %s, %s)"
        # 'user' is stored denormalised as "firstName lastName".
        session.execute(query, (line[0], line[9], int(line[3]), line[1] + " " + line[4], int(line[10]), int(line[8])))
# + editable=true
#SELECT statement to verify the data inserted into the table
query = """SELECT artist, song, user from songsByUsers where userId = 10 and sessionId = 182"""
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.artist, row.song, row.user)
# + [markdown] editable=true
# #### 3. Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
#
# #### For this type of query, we will create a table called <font color=blue>**"userBySongs"**</font> where the PRIMARY KEY is <font color=blue>**(song, sessionId)**</font>. Here, we had to include <font color=blue>**sessionId**</font> in our primary key as song names might not be unique.
# + editable=true
# Table for Query 3: partition key song; sessionId as clustering column makes
# each (song, sessionId) row unique.
# NOTE(review): keyed on (song, sessionId), the same user can appear once per
# session in which they played the song; a (song, userId) key would return
# each listener exactly once — confirm which behaviour is intended.
query = """CREATE TABLE IF NOT EXISTS usersBySongs \
(user text, song text, sessionId int, \
PRIMARY KEY (song, sessionId))"""
try:
    session.execute(query)
except Exception as e:
    print(e)
# + editable=true
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        #print(line)
        query = "INSERT INTO usersBySongs (user, song, sessionId)"
        query = query + " VALUES (%s, %s, %s)"
        session.execute(query, (line[1] + " " + line[4], line[9], int(line[8]) ))
# + editable=true
query = """SELECT user from usersBySongs where song = 'All Hands Against His Own'"""
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.user)
# + [markdown] editable=true
# ### Drop the tables
# + editable=true
# Drop all three tables in one pass. The single try/except around the loop
# matches the original behaviour: stop at the first failure and print it.
drop_statements = (
    "DROP TABLE IF EXISTS songsBySessions",
    "DROP TABLE IF EXISTS songsByUsers",
    "DROP TABLE IF EXISTS usersBySongs",
)
try:
    for statement in drop_statements:
        session.execute(statement)
except Exception as e:
    print(e)
# + [markdown] editable=true
# ### Close the session and cluster connection¶
# + editable=true
session.shutdown()
cluster.shutdown()
| Apache_Cassandra_ETL_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#importing the libraries
import pandas as pd
import numpy as np
import math
import seaborn as sns
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Read the dataset
autism_data = pd.read_csv("Autism_Data.csv")
# Quick sanity check of the first rows
autism_data.head()
# -
# Check for missing values
autism_data.isnull().sum()
# Count duplicated rows
autism_data.duplicated().sum()
# Drop duplicated rows, keeping the last occurrence of each
autism_data = autism_data.drop_duplicates(keep='last')
# Re-count duplicates to confirm the removal
autism_data.duplicated().sum()
# Correlation requires every variable to be numeric; inspect current dtypes
autism_data.info()
# Rename the column holding the screening result
autism_data = autism_data.rename(columns={'Class/ASD': 'has_autism'})
# Drop the columns not used in the analysis
autism_data = autism_data.drop(['used_app_before','result', 'age_desc', 'relation', 'ethnicity', 'age', 'contry_of_res'], axis=1)
# +
# Encode the categorical yes/no columns as integers (1/0) so they can be used
# in correlations and models. Each column keeps its original name; the mapped
# values are exactly the ones the sequential replaces produced before.
binary_encodings = {
    "gender": {"f": 1, "m": 0},
    "austim": {"yes": 1, "no": 0},
    "has_autism": {"YES": 1, "NO": 0},
    "jundice": {"yes": 1, "no": 0},
}
for column, mapping in binary_encodings.items():
    autism_data[column] = autism_data[column].replace(mapping)
# -
# Convert 'has_autism' from object dtype to int.
# BUG FIX: the original called .astype(str).astype(int) without assigning the
# result, so the column silently stayed as object; assign it back.
autism_data['has_autism'] = autism_data['has_autism'].astype(str).astype(int)
# All columns should be numeric now.
autism_data.info()
# Box plots to look for outliers, one box per column.
autism_data.plot(kind='box')
plt.show()
# +
# Box plots again, styled with seaborn, to look for outliers.
# %matplotlib inline
sns.set(style="whitegrid", color_codes=True)
sns.boxplot(data=autism_data)
# -
# Pairwise correlation of every variable against the others.
autism_data.corr().style.format("{:.2}").background_gradient(cmap = plt.get_cmap('coolwarm'),axis=1)
# Cluster map showing the degree of relationship between the variables.
sns.clustermap(autism_data.corr(), metric="correlation", method="single", cmap="Blues", standard_scale=1)
# +
# 'has_autism' correlates most strongly with 'A9_Score',
# followed by A6_Score and A5_Score together with A4_Score.
# +
## PCA experiments — scratch work starts here (ignore from this point)
# +
# 01
# Generate reduced-dimensionality bases via PCA.
from sklearn.decomposition import PCA
#pca = PCA(n_components=2)
pca = PCA(n_components=None)  # keep all components
num_autism_data = autism_data.iloc[:,:13]  # first 13 (numeric) columns
print(pca.fit(num_autism_data))
T = pca.transform(num_autism_data)
print("____________________________________________________________________________")
print("O número de colunas exclusivas é:", T.shape[1])
# Singular values corresponding to each selected component.
# They equal the 2-norms of the n_components variables in the lower-dimensional space.
print("____________________________________________________________________________")
print(np.cumsum(pca.singular_values_))
print("____________________________________________________________________________")
print(pca.singular_values_)
print("____________________________________________________________________________")
plt.scatter(T[:,0], T[:,1])
# +
def cores_para_categorias(valores):
    """Map each distinct category in *valores* to an integer color index.

    The first category returned by ``Series.unique()`` gets 0, the next 1,
    and so on; returns a Series of those indices aligned with *valores*.
    """
    indice_por_categoria = {}
    for categoria in valores.unique():
        indice_por_categoria[categoria] = len(indice_por_categoria)
    return valores.apply(lambda categoria: indice_por_categoria[categoria])
# Scatter of the first two principal components, colored by class.
plt.scatter(T[:,0], T[:,1], c=cores_para_categorias(autism_data.has_autism), cmap='tab10')
# -
print(pca.singular_values_)
print("O número de colunas exclusivas é:", T.shape[1])
# Cumulative explained variance per component.
print(np.cumsum(pca.explained_variance_ratio_))
# +
# PCA after putting all features on the same scale.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
pca = make_pipeline(StandardScaler(), PCA(n_components=2))
pca.fit(num_autism_data)
r2 = pca.transform(num_autism_data)
# BUG FIX: the original plotted the old unscaled projection T, so r2 was
# computed but never shown; plot the scaled projection instead.
plt.scatter(r2[:,0], r2[:,1], c=cores_para_categorias(autism_data.has_autism), cmap='tab10')
# +
# Scaled PCA with 4 components plus a biplot of the feature loadings.
pca = PCA(n_components=4)
pipe = make_pipeline(StandardScaler(), pca)
pipe.fit(num_autism_data)
r2 = pipe.transform(num_autism_data)
# BUG FIX: the original plotted the old unscaled projection T while r2 went
# unused; plot the projection this pipeline actually produced.
plt.scatter(r2[:,0], r2[:,1], c=cores_para_categorias(autism_data.has_autism), cmap='tab10')
coeff = np.transpose(pca.components_)  # shape (13 features, 4 components)
for i in range(13):
    # Arrow per feature showing its loading on the first two components.
    plt.arrow(0, 0, coeff[i,0], coeff[i,1],color = 'red',alpha = 0.5)
    plt.text(coeff[i,0]* 1.5, coeff[i,1] * 1.5, autism_data.columns[i], color = 'red', ha = 'center', va = 'center')
plt.xlim(0,0.6)
plt.ylim(-0.8,0.6)
# +
# Apparently the most significant improvement for the regression model comes
# from adding the variables A9_Score, A6_Score, A5_Score and A4_Score.
# One could propose a new test weighting the answer values (a weighted AQ10),
# or report that the model with x/10 variables explains y% of the result.
# +
## PCA experiments — scratch work ends here
# +
# 03 — feature subsets used for the dimensionality-reduction comparisons below.
X_or1 = autism_data.loc[:,['A3_Score','A4_Score','A5_Score','A6_Score','A9_Score']]
X_or2 = autism_data.loc[:,['A1_Score','A2_Score','A7_Score','A8_Score','A10_Score']]
X_or3 = autism_data.loc[:,['A9_Score','A6_Score','A4_Score','A7_Score','A10_Score']]
X = autism_data.iloc[:,0:10]   # all ten AQ score columns
y = autism_data.has_autism     # target label
Xy_data = autism_data.drop(['gender', 'jundice', 'austim'], axis=1)
target_class = [0 ,1]
# +
# Correlation matrix for each feature-subset base.
# -
X_or1.corr().style.format("{:.2}").background_gradient(cmap = plt.get_cmap('coolwarm'),axis=1)
X_or2.corr().style.format("{:.2}").background_gradient(cmap = plt.get_cmap('coolwarm'),axis=1)
X_or3.corr().style.format("{:.2}").background_gradient(cmap = plt.get_cmap('coolwarm'),axis=1)
# +
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
pca = PCA(n_components = 5) # number of attributes kept
# Fit the model and apply the dimensionality reduction to each base.
# NOTE(review): the same `pca` object is re-fitted four times, so after this
# cell its attributes (e.g. explained_variance_ratio_) describe X_or3 only.
X_r = pca.fit(X).transform(X)
X_r1 = pca.fit(X_or1).transform(X_or1)
X_r2 = pca.fit(X_or2).transform(X_or2)
X_r3 = pca.fit(X_or3).transform(X_or3)
# +
# NOTE(review): this ratio comes from the LAST pca.fit above (X_or3), not X.
print("taxa de variação: ", pca.explained_variance_ratio_)
target_class = [0 ,1]
plt.figure()
colors = ['navy', 'turquoise']
lw = 2
# NOTE(review): the loop variable rebinds (shadows) the `target_class` list;
# it works because zip() captured the list first, but a different name would be clearer.
for color, i, target_class in zip(colors, [0, 1], target_class):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=target_class)
plt.legend(loc='best', shadow=False)
plt.title('PCA do Dataset')
# +
# Same scatter for the first feature subset.
target_class = [0 ,1]
plt.figure()
colors = ['navy', 'turquoise']
lw = 2
for color, i, target_class in zip(colors, [0, 1], target_class):
    plt.scatter(X_r1[y == i, 0], X_r1[y == i, 1], color=color, alpha=.8, lw=lw, label=target_class)
plt.legend(loc='best', shadow=False)
plt.title('Dataset 01')
# +
# Same scatter for the second feature subset.
target_class = [0 ,1]
plt.figure()
colors = ['navy', 'turquoise']
lw = 2
for color, i, target_class in zip(colors, [0, 1], target_class):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], color=color, alpha=.8, lw=lw, label=target_class)
plt.legend(loc='best', shadow=False)
plt.title('Dataset 02')
# +
# Same scatter for the third feature subset.
target_class = [0 ,1]
plt.figure()
colors = ['navy', 'turquoise']
lw = 2
for color, i, target_class in zip(colors, [0, 1], target_class):
    plt.scatter(X_r3[y == i, 0], X_r3[y == i, 1], color=color, alpha=.8, lw=lw, label=target_class)
plt.legend(loc='best', shadow=False)
plt.title('Dataset 03')
# +
# Cross validation using train_test_split on the whole base.
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=0)
svmlassifier = svm.SVC().fit(X_train, y_train)
#svmlassifier.fit(X_train, y_train)
svmlassifier.score(X_test, y_test)
# -
# Cross validation using cross_val_score on the whole base.
from sklearn.model_selection import cross_val_score
svmlassifier = svm.SVC(C=1)
scores_X = cross_val_score(svmlassifier, X, y, cv=5)
scores_X
# +
# SVM — with cross validation.
# +
# SVM over the whole dataset.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=0)
svmlassifier = svm.SVC(C=1)
svmlassifier.fit(X_train, y_train)
svmlassifier.score(X_test, y_test)
# +
# KNN — K-Nearest Neighbors.
# -
from sklearn.neighbors import KNeighborsClassifier
# +
# NOTE(review): no random_state here, so this split is not reproducible.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
classifier = KNeighborsClassifier(n_neighbors = 5)
classifier.fit(x_train, y_train)
# -
y_pred = classifier.predict(x_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Repeat with k=9 for comparison.
classifier2 = KNeighborsClassifier(n_neighbors = 9)
classifier2.fit(x_train, y_train)
y_pred2 = classifier2.predict(x_test)
print(confusion_matrix(y_test, y_pred2))
print(classification_report(y_test, y_pred2))
# +
# Decision Tree
# Based on the course slides.
# +
import graphviz
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Encode the target labels as integers.
le = LabelEncoder()
le.fit(Xy_data['has_autism'].values)
y = le.transform(Xy_data['has_autism'].values)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=10)
# -
# Entropy-based tree with minimum-leaf/split regularization.
tree = DecisionTreeClassifier(criterion='entropy',min_samples_leaf=5,min_samples_split=5,max_depth=None,random_state=10)
tree.fit(X_train, y_train)
y_pred = tree.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('DecisionTreeClassifier accuracy score: {}'.format(accuracy))
score=cross_val_score(tree, X, y, cv=10)
print("Accuracy: %0.5f (+/- %0.5f)" % (score.mean(), score.std() * 2))
# +
# Tree plot (disabled — kept for reference; requires graphviz on the PATH).
#def plot_tree(tree, dataframe, label_col, label_encoder, plot_title):
#    label_names = pd.unique(dataframe[label_col])
#    # Obtaining plot data.
#    graph_data = export_graphviz(tree,feature_names=dataframe.drop(label_col,axis=1).columns,class_names=label_names,filled=True,rounded=True,out_file=None)
#    # Generating plot.
#    graph = graphviz.Source(graph_data)
#    graph.render(plot_title)
#    return graph
#tree_graph = plot_tree(tree, Xy_data, 'has_autism', le, 'ASD Tree')
#tree_graph
# +
# Decision Tree
# Based on a web tutorial.
# -
# NOTE(review): fitted on the full X/y with default parameters and no
# train/test split, so any score on the same data would be optimistic.
dtree=DecisionTreeClassifier()
dtree.fit(X,y)
# +
# Tree plot (disabled — alternative pydotplus version, kept for reference).
#from sklearn.externals.six import StringIO
#from IPython.display import Image
#from sklearn.tree import export_graphviz
#import pydotplus
#dot_data = StringIO()
#export_graphviz(dtree, out_file=dot_data,
#                filled=True, rounded=True,
#                special_characters=True)
#graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
#Image(graph.create_png())
# +
# Naive Bayes.
import numpy as np
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X, y)
# Second model trained incrementally via partial_fit, for comparison.
clf_pf = GaussianNB()
clf_pf.partial_fit(X, y, np.unique(y))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.34,random_state=10)
cv_results = cross_val_score(clf_pf, X_train, y_train,cv = 10,scoring = "accuracy",verbose = 2)
print("media de acuracia",cv_results.mean())
print('desvio padrão medio:',cv_results.std())
# -
# Predict a single hand-crafted answer vector (10 AQ scores).
print(clf_pf.predict([[1,1,1,1,1,0,0,0,1,0]]))
# Neural network (MLP).
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
clf.fit(X, y)
# Predict two hand-crafted answer vectors.
print(clf.predict([[1,1,1,1,1,1,0,0,1,0]]))
print(clf.predict([[1,0,0,1,1,0,0,0,1,0]]))
# +
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_validate
scaler = StandardScaler()
#iris.data.reshape(-1,1)
scaler.fit(X)
baseX=scaler.transform(X)
model = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(15,), random_state=1,learning_rate='constant',max_iter=100)
# NOTE(review): `baseX` (the scaled data) is computed but the model is trained
# and scored on the unscaled X — confirm whether baseX was meant to be used.
model.fit(X,y)
scores = cross_val_score(model, X, y, cv=10)
print("media de acuracia:", scores.mean())
print("desvio padrão medio:", scores.std())
# +
# MLP used as a regressor on the same (binary) target, for comparison.
from sklearn.model_selection import cross_val_score
from sklearn import datasets
from sklearn.neural_network import MLPRegressor
est = MLPRegressor(hidden_layer_sizes=(15,2), max_iter=700, learning_rate_init=0.0001)
est.fit(X,y)
cross_val_score(est, X, y, cv=10)
est.predict([[1,0,0,1,1,0,0,0,1,0]])
# +
# K-means clustering.
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
kmeans.labels_
# -
kmeans.predict([[1,0,0,1,1,0,0,0,1,0], [1,0,1,1,1,0,0,0,1,0]])
kmeans.cluster_centers_
# +
# Elbow method: within-cluster sum of squares for k = 1..10.
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init= 10, random_state = 0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
#Plotting the results onto a line graph, allowing us to observe 'The elbow'
plt.plot(range(1, 11), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS') #within cluster sum of squares
plt.show()
# -
# Final clustering with the chosen k=2.
kmeans = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = kmeans.fit_predict(X)
# +
#Resultados Individuais
#SMV
#accuracy score: 0.9785714285714285
#KNN
# precision recall f1-score support
#
# 0 0.97 0.95 0.96 103
# 1 0.87 0.92 0.89 37
#
# accuracy 0.94 140
# macro avg 0.92 0.94 0.93 140
#weighted avg 0.94 0.94 0.94 140
#Árvore de Decisão
#accuracy score: 0.9214285714285714
#Rede Neural/MLP
#media de acuracia: 0.9985507246376812
#desvio padrão medio: 0.004347826086956508
#Naive Bayes
#media de acuracia 0.960815088909446
#desvio padrão medio: 0.03621285311643785
# +
# Homogeneous ensembles.
# +
# Bagging with ensemble size 5.
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
bagging = BaggingClassifier(KNeighborsClassifier(),max_samples=0.5, max_features=0.5)
from sklearn import model_selection
# NOTE(review): KFold(random_state=...) without shuffle=True raises in
# scikit-learn >= 0.24; `base_estimator` was renamed `estimator` in 1.2 —
# this notebook assumes an older sklearn.
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = KNeighborsClassifier() # classifier
num_trees = 5 # number of classifiers in the ensemble
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = DecisionTreeClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = MLPClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# Bagging with ensemble size 10 (same three base learners as above).
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = KNeighborsClassifier() # classifier
num_trees = 10 # number of classifiers in the ensemble
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = DecisionTreeClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = MLPClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# Bagging with ensemble size 20 (same three base learners as above).
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = KNeighborsClassifier() # classifier
num_trees = 20 # number of classifiers in the ensemble
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = DecisionTreeClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
kfold = model_selection.KFold(n_splits=10, random_state=0)
cart = MLPClassifier() # classifier
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=0)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# AdaBoost with decision trees, ensemble size 5.
from sklearn.ensemble import AdaBoostClassifier
seed = 7
num_trees = 5
model1 = DecisionTreeClassifier()
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = AdaBoostClassifier(model1,n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# AdaBoost with decision trees, ensemble size 10.
from sklearn.ensemble import AdaBoostClassifier
seed = 7
num_trees = 10
model1 = DecisionTreeClassifier()
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = AdaBoostClassifier(model1,n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# AdaBoost with decision trees, ensemble size 20.
from sklearn.ensemble import AdaBoostClassifier
seed = 7
num_trees = 20
model1 = DecisionTreeClassifier()
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = AdaBoostClassifier(model1,n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# MLP, KNN and Decision Tree combined by majority voting.
from sklearn.ensemble import VotingClassifier, BaggingClassifier, AdaBoostClassifier, RandomForestClassifier
from sklearn.metrics import accuracy_score
# `seed` was defined in the AdaBoost cells above.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.34, random_state=seed)
# Fitting a Decision Tree.
tree = DecisionTreeClassifier(min_samples_split=5, min_samples_leaf=3, random_state=seed)
tree.fit(X_train, y_train)
# Fitting a MLP.
mlp = MLPClassifier(hidden_layer_sizes=(10,), max_iter=10000, random_state=seed)
mlp.fit(X_train, y_train)
# Fitting a kNN.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
# Fitting a Voting Classifier by combining the three above classifiers.
voting_clf = VotingClassifier(estimators=[('Tree', tree), ('MLP', mlp), ('kNN', knn)], voting='hard')
voting_clf.fit(X_train, y_train)
results = model_selection.cross_val_score(voting_clf,X,y,cv=10)
results.mean()
# +
# Voting Ensemble for Classification
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
seed = 7
kfold = model_selection.KFold(n_splits=10, random_state=seed)
# create the sub models
estimators = []
# BUG FIX: LogisticRegression has no `n_estimators` parameter (that kwarg
# belongs to ensemble estimators) and passing it raises TypeError.
model1 = LogisticRegression()
estimators.append(('logistic', model1))
model2 = DecisionTreeClassifier()
estimators.append(('cart', model2))
model3 = SVC(gamma='auto')
estimators.append(('svm', model3))
model4 = KNeighborsClassifier()
estimators.append(('KNN', model4))
# create the ensemble model (hard majority vote over the four sub-models)
ensemble = VotingClassifier(estimators)
results = model_selection.cross_val_score(ensemble, X, y, cv=kfold)
print(results.mean())
print(results.std())
# +
# Heterogeneous ensembles.
# +
# Stacking: three base learners with a meta-classifier on top.
from mlxtend.classifier import StackingClassifier
# BUG FIX: the original split `iris_Ada.data`/`iris_Ada.target`, a name that
# is never defined anywhere in this notebook (NameError); use this
# notebook's X/y like every other experiment above.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.34, random_state=seed)
clf1 = KNeighborsClassifier()
clf2 = DecisionTreeClassifier()
clf3 = LinearSVC()
lr = LogisticRegression(solver='lbfgs', multi_class='ovr')
clf = StackingClassifier(classifiers=[clf1, clf2, clf3],meta_classifier=lr)
clf.fit(X_train, y_train).score(X_test, y_test)
results1 = model_selection.cross_val_score(clf,X,y,cv=10)
print("Meta Classificador: LR",results1.mean())
# Same stack, but with the linear SVC as the meta-classifier.
clf4 = StackingClassifier(classifiers=[clf1, clf2, clf3],meta_classifier=clf3)
results2 = model_selection.cross_val_score(clf4,X,y,cv=10)
clf4.fit(X_train, y_train).score(X_test, y_test)
print("Meta Classificador: SVC",results2.mean())
# +
from sklearn.linear_model import LinearRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
regr = LinearRegression() # create the model
regr.fit(X_train, y_train) # train the model
r2_train = regr.score(X_train, y_train)
r2_test = regr.score(X_test, y_test)
print('R2 no set de treino: %.2f' % r2_train)
print('R2 no set de teste: %.2f' % r2_test)
# BUG FIX: `y_hat` and `res` were never defined, so the residual plot below
# raised NameError; compute test-set predictions and residuals first.
y_hat = regr.predict(X_test)
res = y_test - y_hat
plt.scatter(y=res, x=y_hat, color='green', s=50, alpha=.6)
plt.hlines(y=0, xmin=-10, xmax=15, color='orange')
plt.ylabel('$\epsilon = y - \hat{y}$ - Resíduos')
plt.xlabel('$\hat{y}$ ou $E(y)$ - Predito')
plt.show()
| autism_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Perlin Noise
#
# ## Author: <NAME>
# This algorithm has many applications in computer graphics and can serve to demonstrate several things... and help us learn about math, algorithms and Python :).
# #### Noise
# Noise is just random values. We can generate noise by just calling a random generator. Note that these are actually called *pseudorandom generators*. We'll talk about this later in this course.
# We can generate noise in however many dimensions we want. For example, if we want to generate a single dimension, we just pick N random values and call it a day. If we want to generate a 2D noise space, we can take an approach which is similar to what we already did with `np.meshgrid()`.
#
# $$ \text{noise}(x, y) = N, N \in [n_{min}, n_{max}] $$
#
# This function takes two coordinates and returns a single number N between $n_{min}$ and $n_{max}$. (This is what we call a "scalar field").
#
# Random variables are always connected to **distributions**. We'll talk about these a great deal but now let's just say that these define what our noise will look like. In the most basic case, we can have "uniform noise" - that is, each point in our little noise space $[n_{min}, n_{max}]$ will have an equal chance (probability) of being selected.
#
# #### Perlin noise
# There are many more distributions but right now we'll want to have a look at a particular one. **Perlin noise** is a kind of noise which looks smooth. It looks cool, especially if it's colored. The output may be tweaked to look like clouds, fire, etc. 3D Perlin noise is most widely used to generate random terrain.
#
# #### Algorithm
# ... Now you're on your own :). Research how the algorithm is implemented (note that this will require that you understand some other basic concepts like vectors and gradients).
#
# #### Your task
# 1. Research about the problem. See what articles, papers, Python notebooks, demos, etc. other people have created
# 2. Create a new notebook and document your findings. Include any assumptions, models, formulas, etc. that you're using
# 3. Implement the algorithm. Try not to copy others' work, rather try to do it on your own using the model you've created
# 4. Test and improve the algorithm
# 5. (Optional) Create a cool demo :), e.g. using Perlin noise to simulate clouds. You can even do an animation (hint: you'll need gradients not only in space but also in time)
# 6. Communicate the results (e.g. in the Softuni forum)
#
# Hint: [This](http://flafla2.github.io/2014/08/09/perlinnoise.html) is a very good resource. It can show you both how to organize your notebook (which is important) and how to implement the algorithm.
# ### History
# Perlin noise is a type of gradient noise developed by <NAME> in 1983 as a result of his frustration with the "machine-like" look of computer graphics at the time. He formally described his findings in a SIGGRAPH paper in 1985 called An Image Synthesizer. In 1997, Perlin was awarded an Academy Award for Technical Achievement for creating the algorithm. The development of Perlin Noise has allowed computer graphics artists to better represent the complexity of natural phenomena in visual effects for the motion picture industry.
#
#
# Source: [Wikipedia](https://en.wikipedia.org/wiki/Perlin_noise)
# ### Problem definition
#
# Produce natural-appearing textures on computer-generated surfaces for motion picture visual effects. Generate fixed gradient noise to achieve better-looking textures.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def simulate_points(a, b, max_noise):
    """Sample the line y = a + b*x on 100 points over [-2, 3] and perturb
    each y with uniform noise drawn from [-max_noise, max_noise].

    Returns [x, y] as two NumPy arrays of length 100.
    """
    xs = np.linspace(-2, 3, 100)
    noise = np.random.uniform(-max_noise, max_noise, len(xs))
    ys = a + b * xs
    ys += noise
    return [xs, ys]
# Demo: a steep line (y = -10 + 10x) drowned in heavy uniform noise.
x, y = simulate_points(-10, 10, 100)
plt.scatter(x, y)
plt.show()
# ## Algorithm implementation
| CourseProject/Perlin Noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Make the sibling project checkouts importable without installing them.
import sys
sys.path.append('../') # or just install the module
sys.path.append('../../fuzzy-tools') # or just install the module
sys.path.append('../../astro-lightcurves-handler') # or just install the module
# +
import pandas as pd
# Inspect the FATS features of one object (ZTF19adcfsoc.1) from fold 0.
kf = '0'
method = 'linear-fstw'
rootdir = f'../save/fats/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={method}'
filedir = f'{rootdir}/{kf}@train.{method}.df'
df = pd.read_parquet(filedir) # parquet
df = df.loc[['ZTF19adcfsoc.1']]
df = df.transpose()  # one row per feature for easier reading
# `display` is provided by the IPython/Jupyter runtime.
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df)
# +
import pandas as pd
# Inspect a single feature column (SPM_gamma_r) across all fold-0 objects.
kf = 0
rootdir = '../save/fats/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method=linear-fstw'
filedir = f'{rootdir}/{kf}@train.df'
df = pd.read_parquet(filedir) # parquet
df = df['SPM_gamma_r']
# `display` is provided by the IPython/Jupyter runtime.
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df)
| experiments/check_fats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA as sklearnPCA
# Scratch cell: demonstrate NumPy column slicing.
a=np.array([[1,2],[3,4]])
a[:,1]
class Build_data:
    """Generate a random labelled dataset, split it into train/test sets,
    persist the pieces as CSV files and visualise the training points.

    Row layout: [index, feature_1 .. feature_n, class_label].
    """
    def __init__(self,n_data,features,classes,max_val,split):
        self.train_data=np.array([])
        self.test_data=np.array([])
        self.data=np.array([])
        self.n_data=n_data        # number of rows to generate
        self.features=features    # feature columns per row
        self.classes=classes      # number of class labels (0..classes-1)
        self.max_val=max_val      # features drawn uniformly from [0, max_val]
        self.split=split          # train percentage, e.g. 80
    def create_dataset(self):
        """Build self.data as an (n_data, features+2) matrix."""
        for i in range(self.n_data):
            self.data=np.append(self.data,[i+1]) # appending index
            for j in range(self.features): # appending features
                self.data=np.append(self.data,[np.random.randint(0,self.max_val+1)])
            self.data=np.append(self.data,[np.random.randint(0,self.classes)]) # appending class label
        self.data=np.reshape(self.data,(self.n_data,self.features+2)) # reshaping to make a matrix
    def process_dataset(self):
        """Shuffle the rows and split them into train/test partitions."""
        clone_data=np.random.permutation(self.data)
        self.train_data = clone_data[:int((len(self.data)+1)*self.split/100)] # split% to training set
        self.test_data = clone_data[int(len(self.data)*self.split/100):]
    def create_csv(self):
        """Persist the full dataset and both splits in the working directory."""
        np.savetxt("dataset.csv",self.data,delimiter=",")
        np.savetxt("train_data.csv",self.train_data, delimiter=",")
        np.savetxt("test_data.csv",self.test_data,delimiter=",")
    def plot_data(self):
        """Scatter each training point's feature-vector norm against its first
        feature, one colour per class.

        BUG FIX: the original appended to an undefined `test_dist` and plotted
        an undefined `train_dist`, raising NameError; it also hard-coded the
        label column as 7 instead of deriving it from self.features.
        """
        label_col = self.features + 1  # class label is the last column
        train_dist0=[]
        train_label0=[]
        train_dist1=[]
        train_label1=[]
        for train_dp in self.train_data:
            if train_dp[label_col]==0:
                train_label0.append(train_dp[1])
                train_dist0.append((np.sum(train_dp[1:label_col]**2))**0.5)
            else :
                train_label1.append(train_dp[1])
                train_dist1.append((np.sum(train_dp[1:label_col]**2))**0.5)
        plt.plot(train_label0, train_dist0, 'ro', train_label1, train_dist1, 'bo')
        plt.show()
# Build a 1000-row, 6-feature, 2-class dataset, split 80/20, save the CSVs
# (consumed by the Knn class below) and visualise the training points.
c1 = Build_data(1000,6,2,100,80)
c1.create_dataset()
c1.process_dataset()
c1.create_csv()
c1.plot_data()
# +
class Knn:
    """Minimal k-nearest-neighbours classifier over the CSV files written by
    Build_data, plus a scikit-learn reference implementation for comparison.
    """
    def __init__(self,k,features):
        self.k=k                  # number of neighbours that vote
        self.features=features    # number of feature columns per row
    def run(self):
        """Classify every row of test_data.csv against train_data.csv and
        return the accuracy in [0, 1].

        BUG FIX: the original sliced features as [1:self.features], silently
        dropping the last feature column ([1:self.features+1] is the correct
        span, matching scikit_run), and its tie-handling else-branch contained
        a bare ';', which is a SyntaxError.
        """
        positive=0
        train_data=np.genfromtxt('train_data.csv',delimiter=',')
        test_data=np.genfromtxt('test_data.csv',delimiter=',')
        label_col=self.features+1  # class label is the last column
        for test in test_data:
            x=test[1:label_col]
            y=test[label_col]
            # Squared Euclidean distance is enough for ranking neighbours;
            # column 1 records whether the training label matches y.
            dist=np.array([[np.sum((x-train[1:label_col])**2), int(y==train[label_col])]
                           for train in train_data])
            dist=dist[np.argsort(dist[:, 0])]
            true=int(np.sum(dist[:self.k, 1]))  # matching labels among the k nearest
            if true>self.k-true:
                positive=positive+1
            elif true==self.k-true:
                # Break exact voting ties with a fair coin, as before.
                if np.random.randint(0,2) == 1:
                    positive=positive+1
        accuracy=float(positive)/len(test_data)
        return accuracy
    def scikit_run(self):
        """Same task via sklearn's KNeighborsClassifier; returns accuracy."""
        train_data = np.genfromtxt('train_data.csv',delimiter=',')
        test_data = np.genfromtxt('test_data.csv',delimiter=',')
        x_train = train_data[:, 1:self.features+1]
        y_train = train_data[:,self.features+1]
        x_test = test_data[:, 1:self.features+1]
        y_test = test_data[:,self.features+1]
        knn = KNeighborsClassifier(n_neighbors=self.k)
        # fitting the model
        knn.fit(x_train, y_train)
        # evaluate accuracy on the held-out split
        accuracy=knn.score(x_test, y_test)
        return accuracy
# -
# Benchmark the hand-rolled Knn against scikit-learn for k = 1..21, recording
# accuracy and wall time for each. NOTE: this notebook declares a Python 2
# kernel, hence the statement-form `print` below; it will not run on Python 3.
import time
my_accuracy=[]
scikit_accuracy=[]
my_runtime=[]
scikit_runtime=[]
k1=np.array([i for i in range(1,22)])
for k in range(1,22):
    example1=Knn(k,6)
    print "k = ",k
    # time.clock() is deprecated (removed in Python 3.8); fine on Python 2.
    start=time.clock()
    accuracy = example1.run()
    print "My accuracy = ",accuracy
    my_accuracy+=[accuracy]
    end=time.clock()
    my_runtime+=[end-start]
    print "My time = ",end-start
    print "\n"
    start = time.clock()
    accuracy = example1.scikit_run()
    print "Scikit accuracy = ",accuracy
    scikit_accuracy += [accuracy]
    end = time.clock()
    scikit_runtime += [end-start]
    print "Scikit Time = ",end-start
    print "\n"
# +
# Accuracy (top) and runtime (bottom): red = hand-rolled, blue = scikit-learn.
plt.plot(k1,my_accuracy,'r',k1,scikit_accuracy,'b')
plt.show()
plt.plot(k1,my_runtime,'r',k1,scikit_runtime,'b')
plt.show()
| asn1/KNN/.ipynb_checkpoints/Knn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # UNIX Commands for Data Scientists
# In this reading we will go through the UNIX commands introduced in this week's video again so you can familiarize more with their syntax.
#
# At any point feel free to modify the code and explore yourself the functionality of the UNIX shell.
# ## How to execute the commands
#
# On **Windows** you need to open Git Bash and paste the command into the terminal, either using the mouse right click or right clicking on the top window border and select edit -> paste.
#
# On **Mac OS** or **Linux** you can choose to either execute commands through this Jupyter Notebook or copy paste them into a terminal.
# ## Declare Filename
#
# First we want to create a variable to hold the filename of the text file we want to analyze, so that if we want to change it later, we can change it only in this line.
#
# This is the only case where the syntax is different in the Jupyter Notebook and running directly in the shell.
#
# In the Notebook, each command is run on a separate shell process, therefore we need to store `filename` in an environment variable, which is a way to set a persistent variable. This is performed using the `%env` IPython Magic function, execute `%env?` to learn more.
# !ls ./unix
# %env filename=./unix/shakespeare.txt
# If you are instead running in a shell, you can just define a shell variable named filename with this syntax:
#
# filename=./unix/shakespeare.txt
#
# Make sure that there are **no spaces** around the equal sign.
# We can verify that the variable is now defined by printing it out with `echo`. For the rest of this reading we will use this variable to point to the filename.
# !echo $filename
# ## head
# `head` prints some lines from the top of the file, you can specify how many with `-n`, what happens if you don't specify a number of lines?
# !head -n 3 $filename
# ## tail
# !tail -n 10 $filename
# ## wc
# `wc`, which stands for wordcount, prints the number of lines, words and characters:
# !wc $filename
# you can specify `-l` to only print the number of lines. Execute (in Git Bash on Windows or on Linux):
#
# wc --help
#
# or (on Mac or on Linux):
#
# man wc
#
# to find out how to print only words instead. Or guess!
# !wc -l $filename
# ## cat
# You can use pipes with `|` to stream the output of a command to the input of another; this is useful for composing many tools together to achieve a more complicated output.
#
# For example `cat` dumps the content of a file, then we can pipe it to `wc`:
# !cat $filename | wc -l
# ## grep
# `grep` is an extremely powerful tool to look for text in one or more files. For example in the next command we are looking for all the lines that contain a word, we also specify with `-i` that we are interested in case insensitive matching, i.e. don't care about case.
# !grep -i 'parchment' $filename
# We can combine `grep` and `wc` to count the number of lines in a file that contain a specific word:
# !grep -i 'liberty' $filename | wc -l
# ## sed
# `sed` is a powerful stream editor, it works similarly to `grep`, but it also modifies the output text, it uses regular expressions, which are a language to define pattern matching and replacement.
#
# For example:
#
# s/from/to/g
#
# means:
#
# * `s` for substitution
# * `from` is the word to match
# * `to` is the replacement string
# * `g` specifies to apply this to all occurrences on a line, not just the first
#
# In the following we are replacing all instances of 'parchment' to 'manuscript'
#
# Also we are redirecting the output to a file with `>`. Therefore the output instead of being printed to screen is saved in the text file `temp.txt`.
# +
#replace all instances of 'parchment' to 'manuscript'
# !sed -e 's/parchment/manuscript/g' $filename > temp.txt
# -
# Then we are checking with `grep` that `temp.txt` contains the word "manuscript":
# !grep -i 'manuscript' temp.txt
# ## sort
# !head -n 5 $filename
# We can sort in alphabetical order the first 5 lines in the file, see that we are just ordering by the first letter in each line:
# !head -n 5 $filename | sort
# We can specify that we would like to sort on the second word of each line, we specify that the delimiter is space with `-t' '` and then specify we want to sort on column 2 `-k2`.
#
# Therefore we are sorting on "is, of, presented, releases"
# !head -n 5 $filename | sort -t' ' -k2
# `sort` is often used in combination with `uniq` to remove duplicated lines.
#
# `uniq -u` eliminates duplicated lines, but they need to be consecutive, therefore we first use `sort` to have equal lines consecutive and then we can filter them out easily with `uniq`:
# !sort $filename | wc -l
# !sort $filename | uniq -u | wc -l
# # Lets bring it all together
#
# The "UNIX philosophy" is "Do one thing, do it well" (https://en.wikipedia.org/wiki/Unix_philosophy). The point is to have specialized tools with just 1 well defined function and then compose them together with pipes.
# ## Count the most frequent words
#
# For example we want to find the 15 most frequent words with their count. We can achieve this combining the tools we learned in this reading.
#
# First try it yourself, copy/paste this line many times run it piece by piece and try to understand what each step is doing, read documentation with `--help` or `man`, then will go through it together:
# **Warning for MAC OS**: Mac OS has a different version of `sed` that has a special treatment of line feed `\n` and carriage return `\r`. Therefore on Mac we need to replace each occurrence of:
#
# sed -e 's/ /\n/g' -e 's/\r//g'
#
# with:
#
# sed -e 's/ /\'$'\n/g' -e $'s/\r//g'
# !sed -e 's/ /\n/g' -e 's/\r//g' $filename | sed '/^$/d'| sort | uniq -c | sort -nr | head -15
# **do not worry** about the Broken Pipe error, it is due to the fact that `head` is closing the pipe after the first 15 lines, and `sort` is complaining that it would have more text to write
# # !sed -e 's/ /\n/g' -e 's/\r//g' $filename
#
# `sed` is making 2 replacements. The first replaces each space with `\n`, which is the symbol for a newline character, basically this is splitting all of the words in a text on separate lines. See yourself below!
#
# The second replacement is more complicated, `shakespeare.txt` is using the Windows convention of using `\r\n` to indicate a new line. `\r` is carriage return, we want to get rid of it, so we are replacing it with nothing.
# !sed -e 's/ /\n/g' -e 's/\r//g' < $filename | head
# Next we are not interested in counting empty lines, so we can remove them with:
#
# sed '/^$/d'
#
# * `^` indicates the beginning of a line
# * `$` indicates the end of a line
#
# Therefore `/^$/` matches empty lines. `/d` instructs `sed` to delete them.
# Next we'd like to count the occurrence of each word, here we can use `uniq` with the `-c` option, but as with the `-u` option, it needs equal lines to be consecutive, so we do a sort first:
# !sed -e 's/ /\n/g' -e 's/\r//g' $filename | sed '/^$/d' | sort | uniq -c | head
# Good so we have counted the words, so we need to sort but we need to sort in numeric ordering instead of alphabetical so we specify `-n`, also we need reverse order `-r`, bigger first!
#
# And finally we take the first 15 lines:
# !sed -e 's/ /\n/g' -e 's/\r//g' $filename | sed '/^$/d' | sort | uniq -c | sort -nr | head -15
# ## Write the output to a file
#
# We can also do the same and save the output to a file for later usage:
# !sed -e 's/ /\n/g' -e 's/\r//g' < $filename | sed '/^$/d' | sort | sed '/^$/d' | uniq -c | sort -nr | head -15 > count_vs_words
# !cat count_vs_words
| Week-3-UNIX/UNIX-Jupyter-Notebook-Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tensorflow 2.1
# language: python
# name: tf2
# ---
# ## MNIST 数据集
#
# + 提供 $6$ 万张 $28\times28$ 像素点的 $0\sim9$ 手写数字图片和标签,用于训练。
# + 提供 $1$ 万张 $28\times28$ 像素点的 $0\sim9$ 手写数字图片和标签,用于测试。
import tensorflow as tf
import matplotlib.pyplot as plt
# Load the MNIST dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Visualize the first training sample x_train[0]
plt.imshow(x_train[0], cmap="gray")  # draw as a grayscale image
# Print the shape of the first sample's input features
x_train[0].shape
# Print the first sample's label
y_train[0]
# Print the shape of the training samples
x_train.shape
# Print the shape of the training labels
y_train.shape
# Print the shape of the test samples
x_test.shape
# Print the shape of the test labels
y_test.shape
# ### Practice: training on the MNIST dataset
# Step 1 (importing modules) and step 2 (loading train/test data) were completed above; here we extend step 2 by normalizing the inputs. The remaining steps are unchanged.
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
    # Flatten each image into a 1-D feature vector before feeding the dense layers
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# from_logits=False because the last layer already applies softmax
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
# #### 同样的,也可以使用类来实现网络结构
#
# 只需修改步骤三,其他步骤不变。
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras import Model
class MnistModel(Model):
    """Subclassed Keras model: Flatten -> Dense(128, relu) -> Dense(10, softmax).

    Mirrors the Sequential model built earlier in the notebook.
    """

    def __init__(self):
        super(MnistModel, self).__init__()
        # Layer stack; attribute names are kept stable for checkpoints/summary.
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        # Forward pass: flatten the image, hidden relu layer, softmax output.
        hidden = self.d1(self.flatten(x))
        return self.d2(hidden)
model = MnistModel()
# Same compile/fit configuration as the Sequential version above
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
| 08. MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="eVq5-7DHmLfb" colab_type="text"
# # XGBOOST BASELINE for Semeval - Restaurants 2016 joint
# + id="P-P-06ynmGnt" colab_type="code" outputId="86c03931-1c66-4f61-bf25-eae613d399f7" executionInfo={"status": "ok", "timestamp": 1564066941814, "user_tz": -120, "elapsed": 25659, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 129}
from google.colab import drive
drive.mount('/content/drive/', force_remount=True)
# + id="Witu-paWnGW8" colab_type="code" colab={}
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import multilabel_confusion_matrix, classification_report
# + [markdown] id="JwzREbQSmXZS" colab_type="text"
# ## Preprocessing
# + [markdown] id="9cxkLufhGZSF" colab_type="text"
# Function to add [CLS] and [SEP] as separator tokens at beginning/ending of each text item and to format aspect and polarity columns as lists:
# + id="KHhtaNffAz0x" colab_type="code" colab={}
def changeFormat(dataset):
    """Wrap each text in BERT [CLS]/[SEP] markers and split 'joint' labels.

    Expects columns 'text' (str) and 'joint' (comma-separated label string);
    returns a new DataFrame where 'joint' holds lists of labels.
    """
    frame = pd.DataFrame(dataset)
    wrapped = pd.DataFrame({
        'text': '[CLS] ' + frame['text'] + ' [SEP]',
        'joint': frame['joint'],
    })
    # Vectorized equivalent of [x.split(',') for x in ...]
    wrapped['joint'] = wrapped['joint'].str.split(',')
    return wrapped
# + [markdown] id="FHXOYzKStxsO" colab_type="text"
# Read csv files from Google Drive (directly from shared group drive "NLP Lab"):
# + id="t7deGyNmoRpm" colab_type="code" colab={}
# Load the preprocessed SemEval Restaurants-2016 joint training split from the shared Drive.
R16_train = pd.read_csv("/content/drive/My Drive/NLP Lab/Colab Notebooks/semeval/DataPreprocessing/Data_Final/Train data/semevalRestaurants_16_joint_train.csv")
# + id="Ctf9gX-30VXD" colab_type="code" outputId="714f61ad-1835-44b5-b34e-b9225462cd27" executionInfo={"status": "ok", "timestamp": 1564066944257, "user_tz": -120, "elapsed": 28026, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 206}
# Add [CLS]/[SEP] markers and split the joint labels into lists.
R16_train = changeFormat(R16_train)
R16_train.head()
# + id="vSzVssx5tXdE" colab_type="code" colab={}
# Load the matching test split.
R16_test = pd.read_csv("/content/drive/My Drive/NLP Lab/Colab Notebooks/semeval/DataPreprocessing/Data_Final/Test data/semevalRestaurants_16_joint_test.csv")
# + id="nmq5SO1KG7Y0" colab_type="code" outputId="1df770dc-42de-482c-90e8-f6fdfaaaabad" executionInfo={"status": "ok", "timestamp": 1564066944587, "user_tz": -120, "elapsed": 28316, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 206}
# Same formatting for the test split.
R16_test = changeFormat(R16_test)
R16_test.head()
# + [markdown] id="3Kb8LUA0uCk2" colab_type="text"
# Shape of the train and test data:
# + id="RiaLjJbJuGQw" colab_type="code" outputId="620c3ec7-70fe-4445-aba2-d27e2778d9cb" executionInfo={"status": "ok", "timestamp": 1564066945011, "user_tz": -120, "elapsed": 28709, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
R16_train.shape
# + id="iqYqd_fQuGXK" colab_type="code" outputId="13f3600d-d888-40cf-c11f-b76d7707bb97" executionInfo={"status": "ok", "timestamp": 1564066945015, "user_tz": -120, "elapsed": 28682, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
R16_test.shape
# + [markdown] id="K6CKYVlsxw18" colab_type="text"
# ##**BERT embeddings**
# + [markdown] id="pVCaiGC4x3ap" colab_type="text"
# Preprocessing to import BERT embeddings file of group in Colab
# + id="I7ng_1TiGgnc" colab_type="code" outputId="b580fd04-5119-470f-f9d3-629a44cfaea0" executionInfo={"status": "ok", "timestamp": 1564066965677, "user_tz": -120, "elapsed": 49306, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 536}
# !pip install pytorch-pretrained-bert
# + id="aPRYHJoBJiL2" colab_type="code" outputId="6d0abe55-23ea-4a60-ae54-e2f5b42edcb8" executionInfo={"status": "ok", "timestamp": 1564066967824, "user_tz": -120, "elapsed": 51421, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# !pwd
# + id="cDNwOhXqJ1Ta" colab_type="code" outputId="902a48b6-f4e8-4c18-d75b-d1fa3a365b40" executionInfo={"status": "ok", "timestamp": 1564066967838, "user_tz": -120, "elapsed": 51403, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# %cd "/content/drive/My Drive/NLP Lab/Colab Notebooks/semeval/baseline"
# + id="a8bERqRkw7ev" colab_type="code" outputId="170d2939-4afe-49e2-9c27-b423a07f217c" executionInfo={"status": "ok", "timestamp": 1564066969941, "user_tz": -120, "elapsed": 53472, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# !ls
# + id="vKNGSfHdKAL4" colab_type="code" colab={}
from embeddings import Embeddings
# + id="lUvbPAt-xoOS" colab_type="code" outputId="5cbb5fdb-fe11-4a40-ee33-62edef1d3776" executionInfo={"status": "ok", "timestamp": 1564066991529, "user_tz": -120, "elapsed": 75048, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
# Instantiate the project's embedding helper — presumably loads a pretrained
# BERT model (see embeddings.py); verify before relying on specifics.
embed = Embeddings()
# + [markdown] id="5mswUXifyJS-" colab_type="text"
# Get BERT embeddings for train and test data:
# + id="naMd7GNVyQOw" colab_type="code" colab={}
# Compute BERT embeddings for each text; semantics of all=False are defined
# in embeddings.Embeddings.get_embeddings — TODO confirm (reduced output?).
R16_train_embeddings = embed.get_embeddings(R16_train.text, all=False)
R16_test_embeddings = embed.get_embeddings(R16_test.text, all=False)
# + [markdown] id="VeZRa30iyhCk" colab_type="text"
# Plausibility check
# + id="kZtwoHxzwJmm" colab_type="code" outputId="da306512-0650-44e3-afd6-7cca784d3ef4" executionInfo={"status": "ok", "timestamp": 1564067212632, "user_tz": -120, "elapsed": 296117, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# number of vectors
len(R16_train_embeddings[0])
# + id="lbFmnQyT1zRs" colab_type="code" outputId="993a1d59-c620-4d73-d578-10470522742f" executionInfo={"status": "ok", "timestamp": 1564067212642, "user_tz": -120, "elapsed": 296092, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
#string is counted letter per letter
len(R16_train['text'][0])
# + id="OJLWm0dGyZ4M" colab_type="code" outputId="3466872e-c8ef-4b87-e662-075711aba066" executionInfo={"status": "ok", "timestamp": 1564067212648, "user_tz": -120, "elapsed": 296070, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# original text
R16_train['text'][0]
# + id="DNB4U4Mm1qOg" colab_type="code" outputId="0622a9a3-3bde-4c59-9066-05aece15caac" executionInfo={"status": "ok", "timestamp": 1564067212654, "user_tz": -120, "elapsed": 296040, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 146}
# vectors
R16_train_embeddings[0]
# + id="vnLxHsR_zTRb" colab_type="code" outputId="4abc5df8-fcdf-4c75-e63c-c61154060dd1" executionInfo={"status": "ok", "timestamp": 1564067212659, "user_tz": -120, "elapsed": 296016, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
len(R16_test_embeddings[0])
# + id="YSmSVQ9nzTgU" colab_type="code" outputId="06d52cc2-1002-4755-d880-b5456158126e" executionInfo={"status": "ok", "timestamp": 1564067212663, "user_tz": -120, "elapsed": 295991, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
R16_test['text'][0]
# + id="6JhXPHPrwJqb" colab_type="code" colab={}
# Keep only the first vector of each sentence embedding — presumably the
# [CLS] token representation; confirm against embeddings.Embeddings.
R16_train['embeddings'] = [e[0].numpy() for e in R16_train_embeddings]
R16_test['embeddings'] = [e[0].numpy() for e in R16_test_embeddings]
# + [markdown] id="SGi8Nm5Nza4M" colab_type="text"
# ##**XGBOOST Baseline Model for Joint aspect and polarity**
# + id="cIOqtVrGzwv2" colab_type="code" outputId="d6e7a74d-8a0c-48b9-b4dd-c2d823b18f9d" executionInfo={"status": "ok", "timestamp": 1564067212679, "user_tz": -120, "elapsed": 295978, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 350}
# Fit a multi-label binarizer on the training labels; classes_ lists every
# joint aspect#polarity label seen during training.
R16_joint = MultiLabelBinarizer()
R16_joint.fit(R16_train.joint)
R16_joint.classes_
# + id="D0jtFeGj0SMQ" colab_type="code" outputId="568e408f-90ea-4f1a-a289-9d8095d05532" executionInfo={"status": "ok", "timestamp": 1564067212683, "user_tz": -120, "elapsed": 295957, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
# Binarize train/test labels into multi-hot indicator matrices.
# NOTE(review): labels that occur only in the test split are dropped by this
# transform (binarizer was fit on train only) — verify the label sets match.
y_R16_train_joint = R16_joint.transform(R16_train.joint)
y_R16_test_joint = R16_joint.transform(R16_test.joint)
y_R16_train_joint[3]
# + id="NdDi2PHO0xz2" colab_type="code" outputId="1f72e3c5-4ece-4fd8-ee5d-8c79aa1b6e40" executionInfo={"status": "ok", "timestamp": 1564067598084, "user_tz": -120, "elapsed": 380546, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 294}
# One-vs-rest XGBoost over the multi-hot joint labels: one binary classifier
# per aspect#polarity class, trained on the [CLS] BERT embeddings.
# NOTE(review): scale_pos_weight=6 and base_score=0.2 look hand-tuned for
# class imbalance — confirm before reusing on other splits.
R16_xgb_joint = OneVsRestClassifier(XGBClassifier(objective='binary:logistic',
                                                  n_estimators=500,
                                                  max_depth=3,
                                                  learning_rate=0.05,
                                                  n_jobs=24,
                                                  verbosity=1,
                                                  min_child_weight=20,
                                                  scale_pos_weight=6,
                                                  base_score=0.2))
R16_xgb_joint.fit(np.array(list(R16_train.embeddings)), y_R16_train_joint)
# + id="FpvVARe-1PIQ" colab_type="code" outputId="c8f1129c-27db-46cf-8618-39f197a9eaa0" executionInfo={"status": "ok", "timestamp": 1563311806302, "user_tz": -120, "elapsed": 598018, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 147}
# Evaluate on the test split; output_dict=True so per-class scores can be plotted below.
R16_joint_report = classification_report(y_R16_test_joint, R16_xgb_joint.predict(np.array(list(R16_test.embeddings))),
                                         target_names=R16_joint.classes_, output_dict=True)
# + id="3Caqo8hJO8qq" colab_type="code" outputId="d1e6da14-867d-424c-c3f4-6d47948f1e5d" executionInfo={"status": "ok", "timestamp": 1563311807792, "user_tz": -120, "elapsed": 599489, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 755}
# Collect the per-class F1 scores from the report dict.
R16_f1s_joint = []
for key in R16_joint_report.keys():
    R16_f1s_joint.append(R16_joint_report.get(key).get('f1-score'))
# Bar plot of per-class F1. The [:-2] slice drops trailing aggregate rows;
# NOTE(review): classification_report can emit more than two aggregate keys
# (micro/macro/weighted/samples avg) — verify the slice width.
sns.set_style("whitegrid")
sns.set(rc={'figure.figsize':(20,8)})
sns.barplot(list(R16_joint_report.keys())[:-2], R16_f1s_joint[:-2], palette='deep')
plt.xticks(rotation=90)
plt.xlabel('Class')
plt.ylabel('F1-Score')
plt.title('Multi-label aspect classification performance')
# + id="9-zfTAPfVs5U" colab_type="code" outputId="40297cc4-4d77-4660-cc8a-837ba7b1941e" executionInfo={"status": "ok", "timestamp": 1563311811050, "user_tz": -120, "elapsed": 602728, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12574253503612976945"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Annotate each bar with its F1 value (list comprehension used for its side effect only).
[plt.text(v, i, '{:.2f}'.format(v)) for i, v in enumerate(R16_f1s_joint[:-2])]
| ColabNotebooks/semeval/baseline/Restaurant/Semeval baseline Restaurants 2016 joint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import osmnx as ox
import networkx as nx
from matplotlib import pyplot as plt
import pandas as pd
import time
# %matplotlib inline
# Fetch the drivable street network for the Kamppi district of Helsinki.
place_name = "Kamppi, Helsinki, Finland"
graph = ox.graph_from_place(place_name, network_type='drive')
# Convert the graph into separate edge and node GeoDataFrames.
edges = ox.graph_to_gdfs(graph, nodes=False, edges=True)
nodes = ox.graph_to_gdfs(graph, nodes=True, edges=False)
edges
fig, ax = ox.plot_graph(graph)
edges.columns
# Distribution of road types in the network.
edges['highway'].value_counts()
# Project the graph to a metric CRS so areas/lengths are meaningful.
graph_proj = ox.project_graph(graph)
fig, ax = ox.plot_graph(graph_proj, bgcolor='k', node_size=30, node_color='#999999', node_edgecolor='none', node_zorder=2,
                        edge_color='#555555', edge_linewidth=1.5, edge_alpha=1)
plt.tight_layout()
nodes_proj, edges_proj = ox.graph_to_gdfs(graph_proj, nodes=True, edges=True)
print("Coordinate system:", edges_proj.crs)
edges_proj.head()
# Basic network statistics; recomputed with the convex-hull area so the
# density-type measures are normalized by the network's footprint.
stats = ox.basic_stats(graph_proj)
area = edges_proj.unary_union.convex_hull.area
stats = ox.basic_stats(graph_proj, area=area)
# Extended stats: eccentricity, betweenness and closeness centrality.
extended_stats = ox.extended_stats(graph_proj, ecc=True, bc=True, cc=True)
for key, value in extended_stats.items():
    stats[key] = value
pd.Series(stats)
# NOTE(review): osmnx expects location points as (lat, lng); (24.921, 60.16)
# looks like (lng, lat) for Helsinki — verify the intended coordinate order.
location_point = (24.921, 60.16)
nearest_node, distance = ox.get_nearest_node(graph, location_point, method='haversine', return_dist=True)
# add_edge_lengths mutates the graph in place (adds edge 'length' attributes).
ox.add_edge_lengths(graph)
edges = ox.graph_to_gdfs(graph, nodes=False, edges=True)
nodes = ox.graph_to_gdfs(graph, nodes=True, edges=False)
| osmnx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="h0dkFwTrZoQD"
# # Inference in a discretized 1d SSM
#
# This script implements algorithms and examples for inference in a state space model with a real-valued scalar hidden state and a real-valued scalar observation. The basic method is based on discretization and the HMM smoother; a novel method is also derived based on KDE for the case where the likelihood cannot be evaluated pointwise. For details see this paper.
#
#
# "Computation and visualization of posterior densities
# in scalar nonlinear and non-Gaussian Bayesian filtering and smoothing problems",
# <NAME> and <NAME>
# Reference:
# https://liu.diva-portal.org/smash/get/diva2:1173608/FULLTEXT02.pdf
# Part of: 2017 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING (ICASSP),
# 2017, pp. 4686-4690. ISBN: 978-1-5090- 4117-6
# Book Series: International Conference on Acoustics Speech and Signal Processing ICASSP, 1520-6149.
#
# Original matlab code: http://users.isy.liu.se/en/rt/roth/rothICASSP2017.zip
#
# Converted to JAX by benlau6
# https://github.com/probml/pyprobml/pull/700
#
#
#
#
# + [markdown] id="bl3HrZyGcCrI"
# # Setup
# + id="bP46an7rZwA8"
from typing import Callable
import functools
import jax
from jax import lax
from jax import numpy as jnp
from jax import scipy as jsp
from jax import random
import matplotlib.pyplot as plt
plt.rcParams.update({"font.size": 16})
# + id="kQq6beNqZdbS"
def generate_x_true(
rng_key: jnp.DeviceArray, max_iter: int, x0_rvs: Callable,
v_rvs: Callable, f: Callable):
def get_next_x_true(x_prev, k, v):
x_true = f(x_prev, v[k - 1], k=k - 1)
return x_true, x_true
rng_keys = random.split(rng_key, num=2)
x0 = x0_rvs(rng_keys[0], shape=())
v = v_rvs(rng_keys[1], shape=(max_iter + 1,))
get_next_x_true_func = functools.partial(get_next_x_true, v=v)
_, x_true = lax.scan(get_next_x_true_func, init=x0, xs=jnp.arange(1, max_iter + 1))
return jnp.array([x0, *x_true])
def generate_y(
rng_key: jnp.DeviceArray, x_true: jnp.DeviceArray, e_rvs: Callable,
h: Callable):
shape = x_true.shape
e = e_rvs(rng_key, shape=shape)
y = h(x_true, e)
y = y.at[0].set(jnp.inf)
return y
def x_pdf(x_new, x, k, v_pdf, f):
    """Transition density p(x_new | x) at step k via the process-noise density."""
    predicted = f(x=x, v=0, k=k)
    return v_pdf(x_new - predicted)
def y_likelihood(y, x, e_pdf, h):
    """Observation likelihood p(y | x) via the measurement-noise density."""
    predicted = h(x=x, e=0)
    return e_pdf(y - predicted)
def point_mass_density(
        y: jnp.DeviceArray, x_grid: jnp.DeviceArray, x0_pdf: Callable,
        x_pdf: Callable, v_pdf: Callable, e_pdf: Callable,
        f: Callable, h: Callable):
    """Grid-based (point-mass) Bayesian filtering and smoothing.

    Discretizes the scalar state onto x_grid and runs HMM-style forward
    (prediction/filtering) and backward (smoothing) recursions.

    Args:
        y: measurements y_0..y_K; y[0] is an unused placeholder.
        x_grid: uniform grid of candidate state values.
        x0_pdf: prior density of the initial state.
        x_pdf: transition density p(x_new | x) (module-level x_pdf).
        v_pdf, e_pdf: process / measurement noise densities.
        f, h: state transition and measurement functions.

    Returns:
        (p_filter, p_pred, p_smooth), each of shape (K+1, len(x_grid));
        rows are divided by the grid spacing so they approximate
        continuous densities.
    """
    num_grid_points = x_grid.shape[0]
    max_iter = len(y) - 1
    delta = x_grid[1] - x_grid[0]  # uniform grid spacing
    # X[i, j] = x_grid[j]; lets the transition density be evaluated on all pairs.
    X = jnp.tile(x_grid, (num_grid_points, 1))
    # Initial filtering density = normalized prior on the grid.
    p_filter0 = x0_pdf(x_grid)
    p_filter0 /= jnp.sum(p_filter0)
    # No prediction exists at k=0; use inf placeholders.
    p_pred0 = [jnp.inf]*num_grid_points

    def get_next_filter_pred_densities(p_filter_prev, k, x_grid, X, y):
        # p(xk, xk-1 | y(1:k-1))
        px = x_pdf(k=k - 1, x_new=X.T, x=X, v_pdf=v_pdf, f=f)
        p_joint = px * p_filter_prev
        # p(xk | y(1:k-1))
        p_pred_k = jnp.sum(p_joint, axis=1)
        p_pred_k /= jnp.sum(p_pred_k)
        # p(xk | y(1:k))
        p_filter_k = p_pred_k * y_likelihood(y[k], x_grid, e_pdf, h)
        p_filter_k /= jnp.sum(p_filter_k)
        return p_filter_k, [p_filter_k, p_pred_k]

    get_next_filter_pred_densities_func = functools.partial(
        get_next_filter_pred_densities, x_grid=x_grid, X=X, y=y
    )
    # Forward pass over k = 1..max_iter.
    _, (p_filter, p_pred) = lax.scan(
        get_next_filter_pred_densities_func,
        init=p_filter0, xs=jnp.arange(1, max_iter + 1),
    )
    p_filter = jnp.array([p_filter0, *p_filter])
    p_pred = jnp.array([p_pred0, *p_pred])
    # Smoothing is initialized with the final filtering density.
    p_smooth_max_iter = jnp.array(p_filter[max_iter].copy())

    def get_next_smooth_density(p_smooth_prev, k, X, p_filter, p_pred):
        # p(xk, xk-1 | y(1:k-1))
        px = x_pdf(k=k, x_new=X, x=X.T, v_pdf=v_pdf, f=f)
        px = px * p_smooth_prev / p_pred[k + 1, :]
        # Guard against 0/0 where the predictive density vanishes.
        px = jnp.nan_to_num(px)
        p_smooth_k = jnp.sum(px, axis=1)  # marginalize
        p_smooth_k *= p_filter[k, :]  # multiply p(xk|yk)
        p_smooth_k /= jnp.sum(p_smooth_k)
        return p_smooth_k, p_smooth_k

    get_next_smooth_density_func = functools.partial(
        get_next_smooth_density, X=X, p_filter=p_filter, p_pred=p_pred,
    )
    # Backward pass over k = max_iter-1..0.
    _, p_smooth = lax.scan(
        get_next_smooth_density_func,
        init=p_smooth_max_iter, xs=jnp.arange(0, max_iter),
        reverse=True
    )
    p_smooth = jnp.array([*p_smooth, p_smooth_max_iter])
    # Convert grid probability masses into density values.
    return p_filter / delta, p_pred / delta, p_smooth / delta
def plot_density(
        x_true, y, inv_h,
        x_grid, p_pred, p_filter,
        p_smooth=None, k=1, legend=True,
        ax=None, vfill=None, title="",
        linewidth=4.5):
    """Plot prediction/filtering (and optionally smoothing) densities at step k.

    The true state and the state(s) consistent with the measurement
    (obtained through inv_h) are overlaid as vertical lines.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(x_grid, p_pred[k], label="Prediction", linewidth=linewidth)
    ax.plot(x_grid, p_filter[k], label="Filtering", color="k", linewidth=linewidth)
    if p_smooth is not None:
        ax.plot(
            x_grid, p_smooth[k], label="Smoothing", color="orange", linewidth=linewidth
        )
    # Scale the vertical lines to the tallest plotted density (+5% headroom).
    y_max = max(p_pred[k].max(), p_filter[k].max()) * 1.05
    if p_smooth is not None:
        y_max = max(y_max, p_smooth[k].max()) * 1.05
    ax.vlines([x_true[k]], ymin=0, ymax=y_max, label="True state", color="k")
    # inv_h may return several candidate states for a single measurement.
    ax.vlines(
        inv_h(y[k]),
        ymin=0,
        ymax=y_max,
        color="r",
        label="Measurement",
    )
    if vfill is not None:
        ax.axvspan(*vfill, color="lightgrey", alpha=0.4, label="Measurement range")
    ax.set_ylim(0)
    ax.set_ylabel(f"$p(x_{{{k}}}|y_{{1:{k}}})$")
    ax.set_xlabel("x")
    if legend:
        ax.legend(prop={"size": 16})
    if title:
        ax.set_title(title)
def plot_densities(
        x_true, y, inv_h, x_grid,
        p_pred, p_filter, p_smooth, max_iter, legend=True,
        nrow=None, ncol=None):
    """Plot per-step densities for k = 1..nrow*ncol-1 on a grid of shared axes.

    Raises:
        ValueError: if nrow or ncol is not provided.
    """
    if (nrow is None) or (ncol is None):
        raise ValueError("Please provide nrow and ncol arguments")
    fig, axes = plt.subplots(
        nrow, ncol, figsize=(12, 6),
        sharex=True, sharey=True, constrained_layout=True
    )
    axes = axes.ravel()
    plt.suptitle("All density plots to look for weird pattern")
    for k in range(1, nrow*ncol):
        plot_density(
            x_true, y, inv_h, x_grid=x_grid,
            p_pred=p_pred, p_filter=p_filter, p_smooth=p_smooth,
            k=k, ax=axes[k], legend=False,
            linewidth=1.5,
        )
    # set off k=0 empty plot and attach a single shared legend
    axes[0].axis('off')
    handles, labels = axes[1].get_legend_handles_labels()
    fig.legend(handles, labels, loc='upper left')
def experiment_setup(
        rng_key, grid_minval, grid_maxval,
        num_grid_points, x0_rvs, v_rvs,
        e_rvs, f, h,
        max_iter, plot_xy=False):
    """Build the state grid, simulate a trajectory, and generate measurements.

    Returns:
        (x_grid, x_true, y): uniform state grid, true states x_0..x_max_iter,
        and measurements (y[0] is an inf placeholder).
    """
    # create 1d grid
    x_grid = jnp.linspace(grid_minval, grid_maxval, num_grid_points)
    # generate true states
    rng_key, rng_subkey = random.split(rng_key)
    x_true = generate_x_true(
        rng_subkey, max_iter=max_iter, x0_rvs=x0_rvs, v_rvs=v_rvs, f=f,
    )
    # generate measurement
    rng_key, rng_subkey = random.split(rng_key)
    y = generate_y(rng_subkey, x_true, e_rvs=e_rvs, h=h)
    if plot_xy:
        # plot the trajectory and the measurements against time index k
        fig, ax = plt.subplots(figsize=(12, 8))
        ax.set_title("Trajectory and Measurement versus k")
        ax.plot(range(max_iter + 1), x_true, label="True state", color="k")
        ax.plot(range(max_iter + 1), y, label="Measurements", color="r")
        ax.set_ylabel("$x_k, y_k$")
        ax.set_xlabel("k")
        ax.legend(prop={"size": 16})
    return x_grid, x_true, y
def mean_point_mass(xs, ps):
    """First moment of each row of ps treated as a density on the uniform grid xs."""
    spacing = xs[1] - xs[0]
    return jnp.sum(xs * ps * spacing, axis=1)


def variance_point_mass(xs, ps):
    """Variance of each row of ps as a density on the uniform grid xs."""
    spacing = xs[1] - xs[0]
    second_moment = jnp.sum((xs**2) * ps * spacing, axis=1)
    return second_moment - mean_point_mass(xs, ps)**2
def plot_line(
        x_true, y, mean_x_filter, variance_x_filter, max_iter,
        mean_x_smooth=None, variance_x_smooth=None):
    """Plot true states, measurements, and filtered/smoothed means over time.

    Error bars show one standard deviation (sqrt of the supplied variances).
    """
    # To plot x, y, and the moment summaries on one figure
    plt.figure(figsize=(12, 8))
    plt.plot(x_true, label='true', color='k')
    plt.plot(y, label='observed', color='r')
    plt.errorbar(
        range(max_iter+1),
        mean_x_filter,
        yerr=jnp.sqrt(variance_x_filter),
        label='filtered mean',
    )
    if (mean_x_smooth is not None) and (variance_x_smooth is not None):
        plt.errorbar(
            range(max_iter+1),
            mean_x_smooth,
            yerr=jnp.sqrt(variance_x_smooth),
            label='smoothed mean',
        )
    plt.legend()
    plt.title('States versus time')
    plt.xlabel('time')
    plt.ylabel('value')
    plt.xticks(range(max_iter+1))
def val2grid(x, grid_minval, grid_maxval, num_grid_points):
    """Map a state value x onto its (fractional) index on the uniform grid."""
    span = grid_maxval - grid_minval
    return (x - grid_minval) / span * num_grid_points
def plot_heatmap(
        density, x_true, grid_minval, grid_maxval,
        num_grid_points, max_iter, title=""):
    """Plot a time-vs-state heatmap of per-step grid densities.

    The true state is overlaid as red 'X' markers so multi-modality and
    tracking errors are easy to spot.

    Args:
        density: per-step densities, shape (max_iter+1, num_grid_points).
        x_true: true state trajectory, length max_iter+1.
        grid_minval, grid_maxval: bounds of the grid the densities live on.
        num_grid_points: number of grid cells per time step.
        max_iter: last time index.
        title: optional plot title.
    """
    fig, ax = plt.subplots(figsize=(12, 8))
    heatmap = ax.imshow(density.T, aspect='auto', interpolation='none')
    ax.title.set_text(title)
    xticks = range(max_iter+1)
    yticks = jnp.arange(0, num_grid_points, num_grid_points/6)
    # Rebuild the grid locally from the function's own parameters instead of
    # reading the module-level `x_grid` (the original depended on that global,
    # which breaks if the caller's grid differs from the last one defined).
    x_grid = jnp.linspace(grid_minval, grid_maxval, num_grid_points)
    ytick_labels = x_grid[yticks.astype(int)].round().astype(int)
    plt.xticks(xticks)
    plt.yticks(yticks, labels=ytick_labels)
    # Fractional grid positions of the true states, for the overlay markers.
    x_true_ticks = val2grid(x_true, grid_minval, grid_maxval, num_grid_points)
    # (Removed an unused one-hot `p_actual` array that was computed via
    # jax.vmap and then discarded.)
    ax.set_xticks(jnp.arange(-.5, max_iter+1, 1), minor=True)
    ax.grid(which='minor', color='w', linewidth=2)
    plt.colorbar(heatmap, ax=ax, fraction=0.04, pad=0.04)
    for x, y in zip(xticks, x_true_ticks.round().astype(int)):
        # Mark the true state's grid cell at each time step.
        ax.text(x, y, 'X',
                ha="center", va="center", color="red", fontsize=18)
# + [markdown] id="uui8nhqab9Lh"
# # Non-linear, Gaussian example (the "Particle filter" example in 5.1)
# + id="qXzfrk_nf-5T" colab={"base_uri": "https://localhost:8080/"} outputId="51c41fbe-6721-4f48-c327-f7be3fa43d44"
# Model functions for the classic "particle filter" benchmark (Sec. 5.1).


def state_trans_func1(x, v, k):
    """Nonlinear, time-varying state transition x_{k+1} = f(x_k, v_k, k)."""
    nonlinear_term = 25 * x / (1 + x**2)
    forcing = 8 * jnp.cos(1.2 * (k + 1))
    return x / 2 + nonlinear_term + forcing + v


def measure_func1(x, e):
    """Quadratic measurement y = x^2/20 + e."""
    squared = x**2
    return squared / 20 + e


def inv_measure_func1(y):
    """Both state values consistent with a noise-free measurement y."""
    magnitude = jnp.sqrt(20 * y)
    return [magnitude, -magnitude]


def v_rvs1(rng_key, shape):
    """Sample process noise v ~ N(0, 10)."""
    return jnp.sqrt(10) * random.normal(rng_key, shape=shape)


def e_rvs1(rng_key, shape):
    """Sample measurement noise e ~ N(0, 1)."""
    return random.normal(rng_key, shape=shape)


def x0_rvs1(rng_key, shape):
    """Sample the initial state x0 ~ N(0, 1)."""
    return random.normal(rng_key, shape=shape)


# Densities matching the samplers above.
v_pdf1 = functools.partial(jsp.stats.norm.pdf, scale=jnp.sqrt(10))
e_pdf1 = functools.partial(jsp.stats.norm.pdf, scale=1)
x0_pdf1 = jsp.stats.norm.pdf
# + id="2sqSkxBBb8it"
def the_particle_filter_example(
        rng_key=random.PRNGKey(4),
        grid_minval=-30,
        grid_maxval=30,
        num_grid_points=500,
        max_iter=20,
        iter_=14):
    """Run the point-mass filter/smoother on the particle-filter benchmark.

    Note: iter_ is accepted but not used inside this function; the plotting
    step index is chosen by the caller.

    Returns:
        (x_grid, x_true, y, p_filter, p_pred, p_smooth).
    """
    # generate data points and densities
    x_grid, x_true, y = experiment_setup(
        rng_key=rng_key, grid_minval=grid_minval, grid_maxval=grid_maxval,
        num_grid_points=num_grid_points, x0_rvs=x0_rvs1, v_rvs=v_rvs1,
        e_rvs=e_rvs1, f=state_trans_func1, h=measure_func1,
        max_iter=max_iter,
    )
    p_filter, p_pred, p_smooth = point_mass_density(
        y, x_grid, x0_pdf1,
        x_pdf=x_pdf, v_pdf=v_pdf1, e_pdf=e_pdf1,
        f=state_trans_func1, h=measure_func1,
    )
    return x_grid, x_true, y, p_filter, p_pred, p_smooth
# + id="smNsTu_8FRBp"
# Experiment configuration for the particle-filter example; iter_ only
# selects the step used in the plots below.
rng_key = random.PRNGKey(8)
grid_minval = -30
grid_maxval = 30
num_grid_points = 500
max_iter = 20
iter_ = 17
x_grid, x_true, y, p_filter, p_pred, p_smooth = the_particle_filter_example(
    rng_key=rng_key,
    grid_minval=grid_minval,
    grid_maxval=grid_maxval,
    num_grid_points=num_grid_points,
    max_iter=max_iter,
    iter_=iter_,
)
# + colab={"base_uri": "https://localhost:8080/"} id="CCIJY-SYFRaG" outputId="4437196a-d7a0-4861-f19c-3d0285a9edce"
# Sanity checks: measurements, true states, and density array shape.
print(y)
print(x_true)
print(p_filter.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="FXOnlWPN0OyP" outputId="2679fac4-3381-4e44-83b2-3569ab9ab375"
# plot the kth density
# NOTE(review): k=17 is hard-coded while the title uses iter_ (also 17 here);
# keep the two in sync if the step is changed.
plot_density(
    x_true, y, inv_measure_func1,
    x_grid, p_pred, p_filter,
    p_smooth, k=17, legend=True,
    ax=None, title=f"Particle filter example densities at $x_{{{iter_}}}$",
)
# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="mVV5bZtEEXeT" outputId="61f195e7-74ef-43c3-e012-b454e9f41453"
# Summarize the grid densities by their first two moments and plot them.
# filtered mean E[x(k) | y(1:k)]
mean_x_filter = mean_point_mass(x_grid, p_filter)
# variance +- sqrt{Var[x(k)|y(1:k)]}
variance_x_filter = variance_point_mass(x_grid, p_filter)
# smoothed mean E[x(k) | y(1:T)]
mean_x_smooth = mean_point_mass(x_grid, p_smooth)
# variance +- sqrt{Var[x(k)|y(1:T)]}, as a line plot
variance_x_smooth = variance_point_mass(x_grid, p_smooth)
# To plot x, y, and variance
plot_line(
    x_true, y, mean_x_filter, variance_x_filter, max_iter,
    mean_x_smooth=mean_x_smooth, variance_x_smooth=variance_x_smooth,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 981} id="MpOq8eQWETCc" outputId="2a1c8b9b-161a-48b8-92fb-f7ceac2644a6"
# Heatmaps show the full densities over time, revealing multi-modality that
# the moment summaries above hide.
plot_heatmap(
    p_filter, x_true, grid_minval, grid_maxval,
    num_grid_points, max_iter, title='Filtered density heatmap'
)
plot_heatmap(
    p_smooth, x_true, grid_minval, grid_maxval,
    num_grid_points, max_iter, title='Smoothed density heatmap'
)
# + colab={"base_uri": "https://localhost:8080/", "height": 457} id="iOYaIC4xhUmV" outputId="94bd8c74-b86e-4092-cfee-2123b15c93f8"
# Inspect all per-step densities for anomalies.
# NOTE(review): a 4x5 grid plots k = 1..19; with max_iter=20 the last step
# is not shown — widen the grid if every step is needed.
plot_densities(
    x_true, y, inv_measure_func1,
    x_grid, p_pred, p_filter,
    p_smooth, max_iter,
    nrow=4, ncol=5,
)
# + [markdown] id="N6x_D14ZcLXL"
# # Linear Gaussian dynamics, Student-t observations (5.2)
# + id="fTzOkyOScKM3"
# functions for student t random walk example
# state transition function
def state_trans_func2(x, v, k=None):
    """Random-walk transition: next state is the current state plus noise v.

    The step index k is accepted for interface compatibility but unused.
    """
    next_state = x + v
    return next_state
# measurement function
def measure_func2(x, e):
    """Direct observation of the state corrupted by additive noise e."""
    observed = x + e
    return observed
# to get x from measurement without noise
def inv_measure_func2(y):
    """Invert the noise-free measurement: y = x, so simply return y."""
    return y
# functions to get sample
# Samplers draw Student-t(df=2) noise for the process (v), measurement (e),
# and initial state (x0); the heavy tails make outliers common.
def v_rvs2(rng_key, shape):
    return random.t(rng_key, df=2, shape=shape)
def e_rvs2(rng_key, shape):
    return random.t(rng_key, df=2, shape=shape)
def x0_rvs2(rng_key, shape):
    return random.t(rng_key, df=2, shape=shape)
# functions to get density
# Matching densities: all three noise sources share the same t(df=2) pdf.
pdf2 = functools.partial(jsp.stats.t.pdf, df=2)
v_pdf2 = pdf2
e_pdf2 = pdf2
x0_pdf2 = pdf2
def student_t_random_walk_example(
        rng_key=random.PRNGKey(0),
        grid_minval=-60,
        grid_maxval=30,
        num_grid_points=500,
        max_iter=25,
        iter_=22):
    """Run the Student-t random-walk example (sec 5.2): simulate a trajectory
    and compute grid-based filtered / predictive / smoothed densities.

    Returns (x_grid, x_true, y, p_filter, p_pred, p_smooth).
    NOTE(review): iter_ is accepted but never used inside this function.
    """
    # generate data points and densities
    x_grid, x_true, y = experiment_setup(
        rng_key=rng_key, grid_minval=grid_minval, grid_maxval=grid_maxval,
        num_grid_points=num_grid_points, x0_rvs=x0_rvs2, v_rvs=v_rvs2,
        e_rvs=e_rvs2, f=state_trans_func2, h=measure_func2,
        max_iter=max_iter,
    )
    # NOTE(review): x_pdf is read from module scope, not defined in this
    # example -- possibly it should be the local pdf2; confirm.
    p_filter, p_pred, p_smooth = point_mass_density(
        y, x_grid, x0_pdf2,
        x_pdf=x_pdf, v_pdf=v_pdf2, e_pdf=e_pdf2,
        f=state_trans_func2, h=measure_func2,
    )
    return x_grid, x_true, y, p_filter, p_pred, p_smooth
# + id="w6ahRNnIdQ5a"
# Experiment configuration; note the grid range (-20, 20) overrides the
# function defaults of (-60, 30).
rng_key=random.PRNGKey(1)
grid_minval=-20
grid_maxval=20
num_grid_points=500
max_iter=25
iter_=20
x_grid, x_true, y, p_filter, p_pred, p_smooth = student_t_random_walk_example(
    rng_key=rng_key,
    grid_minval=grid_minval,
    grid_maxval=grid_maxval,
    num_grid_points=num_grid_points,
    max_iter=max_iter,
    iter_=iter_,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="Ps4sogG91sAd" outputId="4e078c7b-415c-40f6-dc03-53b897f4c4cb"
# Density slice at step iter_ for the Student-t random-walk example.
plot_density(
    x_true, y, inv_measure_func2,
    x_grid, p_pred, p_filter,
    p_smooth, k=iter_, legend=True,
    ax=None, title=f"Student's t random walk example densities at $x_{{{iter_}}}$",
)
# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="yDOhovdLEa40" outputId="4c6bc8e2-31f2-4ef5-eba7-df627d6bb488"
# filtered mean E[x(k) | y(1:k)]
mean_x_filter = mean_point_mass(x_grid, p_filter)
# variance +- sqrt{Var[x(k)|y(1:k)]}
variance_x_filter = variance_point_mass(x_grid, p_filter)
# smoothed mean E[x(k) | y(1:T)]
mean_x_smooth = mean_point_mass(x_grid, p_smooth)
# variance +- sqrt{Var[x(k)|y(1:T)]}, as a line plot
variance_x_smooth = variance_point_mass(x_grid, p_smooth)
# To plot x, y, and variance
plot_line(
    x_true, y, mean_x_filter, variance_x_filter, max_iter,
    mean_x_smooth=mean_x_smooth, variance_x_smooth=variance_x_smooth,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 981} id="9DNZBhTG6Aft" outputId="077fd459-0c50-4172-d341-ca43b7d30e99"
# Plot heatmap to capture multi-modality
plot_heatmap(
    p_filter, x_true, grid_minval, grid_maxval,
    num_grid_points, max_iter, title='Filtered density heatmap'
)
plot_heatmap(
    p_smooth, x_true, grid_minval, grid_maxval,
    num_grid_points, max_iter, title='Smoothed density heatmap'
)
# + [markdown] id="NYTRLHgxcNbU"
# # Saturated measurement with intractable likelihood (sec 5.3)
# + id="0jHS0ezjc-vP"
def inversion_sampling(rng_key, x_grid, px_grid, num_samples):
    """Draw num_samples from a point-mass density px_grid via inverse-CDF.

    Uniform jitter of +/- half a grid cell de-discretises the draws so the
    returned samples are continuous-valued.
    """
    rng_keys = random.split(rng_key, num=2)
    # Column shape (num_samples, 1) so the comparison below broadcasts
    # against the (num_grid_points,) CDF into (num_samples, num_grid_points).
    u = random.uniform(rng_keys[0], shape=(num_samples, 1))
    delta = x_grid[1] - x_grid[0]
    noise = random.uniform(
        rng_keys[1], minval=-delta / 2, maxval=delta / 2, shape=(num_samples,)
    )
    # It only works for a sufficiently dense, uniformly spaced grid.
    point_mass = px_grid
    cdf = jnp.cumsum(point_mass)
    # Inverse-CDF lookup: keep CDF values below each u and take the largest
    # such value's index (CDF is non-decreasing, so argmax finds it).
    bound_cdf = jnp.where(cdf < u, cdf, 0)
    idx = jnp.argmax(bound_cdf, axis=1)
    x = x_grid[idx]
    return x + noise
def kde(x_grid, x, kernel_variance):
    """Gaussian kernel density estimate of samples x, evaluated on x_grid.

    NOTE(review): kernel_variance is passed to norm.pdf as `scale`, i.e. a
    standard deviation, despite the name suggesting a variance -- confirm.
    """
    delta = x_grid[1] - x_grid[0]
    # broadcast it into (n_x_grid, nx)
    x_grid = jnp.tile(x_grid[..., jnp.newaxis], (1, x.shape[0]))
    px = jsp.stats.norm.pdf(x_grid, loc=x, scale=kernel_variance)
    # Sum the per-sample kernels, then normalise so the grid integrates to 1.
    px = jnp.sum(px, axis=1)
    px = px / jnp.sum(px) / delta
    return px
def novel_density(
        rng_key: jnp.ndarray, y: jnp.ndarray, x_grid: jnp.ndarray,
        x0_pdf: Callable, v_rvs: Callable, e_rvs: Callable,
        f: Callable, h: Callable, num_samples: int,
        max_iter: int, kernel_variance: float):
    """Sampling-based density filter for intractable likelihoods (sec 5.3).

    Each step resamples from the previous filtered density (inversion
    sampling), propagates the samples through f, rebuilds the predictive
    density with a Gaussian KDE, and reweights by keeping only samples whose
    simulated measurement lands near the observed one.

    Returns (p_filter, p_pred), each (max_iter + 1, num_grid_points),
    converted to densities by dividing by the grid spacing.
    """
    num_grid_points = x_grid.shape[0]
    delta = x_grid[1] - x_grid[0]
    rng_keys = random.split(rng_key, num=3)
    # Pre-draw all process / measurement noise for every step and sample.
    v = v_rvs(rng_keys[0], shape=(max_iter + 1, num_samples))
    e = e_rvs(rng_keys[1], shape=(max_iter + 1, num_samples))
    p_filter0 = x0_pdf(x_grid)
    p_filter0 /= jnp.sum(p_filter0)
    # Row 0 of the predictive density is undefined; filled with inf sentinels.
    p_pred0 = [jnp.inf]*num_grid_points
    def get_next_novel_density(
            p_filter_prev, k, x_grid, v, e, y_measured, num_samples, kernel_variance, rng_key):
        # One filter step: resample from p(x(k-1)|y(1:k-1)), propagate, reweight.
        x = inversion_sampling(rng_key, x_grid, p_filter_prev, num_samples)
        x = f(x, v[k], k - 1)
        # p(xk | y(1:k-1))
        p_pred_k = kde(x_grid, x, kernel_variance)
        p_pred_k /= jnp.sum(p_pred_k)
        # measurement
        y = h(x, e[k])
        # p(xk | y(1:k))
        threshold = 3 * jnp.sqrt(kernel_variance)
        distance = jnp.abs(y_measured[k] - y)
        def update(xi, yi, distance_i):
            # NOTE(review): `y[k]` indexes the per-sample simulated
            # measurements by step number rather than by sample; it looks
            # like it should be `y_measured[k]` (or simply `yi`) -- confirm.
            return jnp.where(
                distance_i < threshold,
                jsp.stats.norm.pdf(x_grid, xi, kernel_variance)
                * jsp.stats.norm.pdf(y[k], yi, kernel_variance),
                0,
            )
        update_vals = jax.vmap(update)(x, y, distance)
        p_filter_k = jnp.sum(update_vals, axis=0)
        p_filter_k /= jnp.sum(p_filter_k)
        return p_filter_k, [p_filter_k, p_pred_k]
    # NOTE(review): rng_key=rng_keys[2] is frozen into the partial, so every
    # scan step reuses the same key for inversion_sampling -- confirm this is
    # intended rather than splitting a fresh key per iteration.
    get_next_novel_density_func = functools.partial(
        get_next_novel_density,
        x_grid=x_grid, v=v, e=e, y_measured=y, num_samples=num_samples,
        kernel_variance=kernel_variance, rng_key=rng_keys[2]
    )
    _, (p_filter, p_pred) = lax.scan(
        get_next_novel_density_func, init=p_filter0, xs=jnp.arange(1, max_iter + 1)
    )
    p_filter = jnp.array([p_filter0, *p_filter])
    p_pred = jnp.array([p_pred0, *p_pred])
    return p_filter / delta, p_pred / delta
# + id="ckfNNppSb1dI"
# functions for saturated measurements example
# state transition function
def state_trans_func3(x, v, k=None):
    """AR(1) transition: decay the state by 0.7 and add process noise v.

    The step index k is accepted for interface compatibility but unused.
    """
    decayed = 0.7 * x
    return decayed + v
# measurement function
def saturate(x, minval, maxval):
    """Clamp x elementwise to [minval, maxval] (upper bound applied first)."""
    capped = jnp.minimum(x, maxval)
    return jnp.maximum(capped, minval)
def measure_func3(x, e, minval=-1.5, maxval=1.5):
    """Noisy measurement of x, clipped to the sensor range [minval, maxval]."""
    noisy = x + e
    return saturate(noisy, minval=minval, maxval=maxval)
# to get x from measurement without noise
def inv_measure_func3(y):
    """Treat the measurement as a direct state estimate (identity map)."""
    return y
# functions to get sample
# Process noise ~ N(0, 1); measurement noise ~ N(0, 0.5);
# initial state ~ N(0, 0.1) (scaled standard normals).
def v_rvs3(rng_key, shape):
    return random.normal(rng_key, shape=shape)
def e_rvs3(rng_key, shape):
    return random.normal(rng_key, shape=shape) * jnp.sqrt(0.5)
def x0_rvs3(rng_key, shape):
    return random.normal(rng_key, shape=shape) * jnp.sqrt(0.1)
# functions to get density
# Only the initial-state pdf is needed; the measurement likelihood is
# intractable here and handled by sampling in novel_density.
x0_pdf3 = functools.partial(jsp.stats.norm.pdf, scale=jnp.sqrt(0.1))
def saturated_measurements_example(
        rng_key=random.PRNGKey(0),
        num_samples=10000,
        grid_minval=-6,
        grid_maxval=6,
        num_grid_points=500,
        max_iter=24,
        iter_=18):
    """Run the saturated-measurements example (sec 5.3): simulate data and
    compute the sampling-based filtered / predictive densities.

    Returns (x_grid, x_true, y, p_filter, p_pred, p_smooth); p_smooth is
    always None because no smoother is implemented for this model.
    NOTE(review): iter_ is accepted but never used inside this function.
    """
    # generate data points and densities
    rng_key, subkey = random.split(rng_key, num=2)
    x_grid, x_true, y = experiment_setup(
        rng_key=rng_key, grid_minval=grid_minval, grid_maxval=grid_maxval,
        num_grid_points=num_grid_points, x0_rvs=x0_rvs3, v_rvs=v_rvs3,
        e_rvs=e_rvs3, f=state_trans_func3, h=measure_func3,
        max_iter=max_iter,
    )
    p_filter, p_pred = novel_density(
        subkey, y, x_grid,
        x0_pdf3, v_rvs3, e_rvs3,
        state_trans_func3, measure_func3, num_samples,
        max_iter, kernel_variance=0.15,
    )
    p_smooth = None
    return x_grid, x_true, y, p_filter, p_pred, p_smooth
# + id="iWbxHl2gZtnT"
# Experiment configuration (mirrors the defaults of
# saturated_measurements_example). Fixed: the original line was a redundant
# chained assignment `rng_key = rng_key=random.PRNGKey(0)`.
rng_key = random.PRNGKey(0)
num_samples = 10000
grid_minval = -6
grid_maxval = 6
num_grid_points = 500
max_iter = 24
iter_ = 18
# Run the example, then plot a density slice, the mean/std line plot,
# and the filtered-density heatmap.
x_grid, x_true, y, p_filter, p_pred, p_smooth = saturated_measurements_example(
    rng_key=rng_key,
    num_samples=num_samples,
    grid_minval=grid_minval,
    grid_maxval=grid_maxval,
    num_grid_points=num_grid_points,
    max_iter=max_iter,
    iter_=iter_,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="C4kSPJrt4Wk5" outputId="faf61d66-4dd6-4049-fa52-311299463228"
plot_density(
    x_true, y, inv_measure_func3,
    x_grid, p_pred, p_filter,
    p_smooth, k=iter_, legend=True,
    ax=None, title=f"Saturated measurements example densities at $x_{{{iter_}}}$",
)
# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="g9Todc7rEkIp" outputId="a9b1cfaa-4eff-414d-d889-ba546d028e90"
# filtered mean E[x(k) | y(1:k)]
mean_x_filter = mean_point_mass(x_grid, p_filter)
# variance +- sqrt{Var[x(k)|y(1:k)]}
variance_x_filter = variance_point_mass(x_grid, p_filter)
# To plot x, y, and variance (no smoothed series for this model)
plot_line(
    x_true, y, mean_x_filter, variance_x_filter, max_iter,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="yHu3AnOB6txH" outputId="1fb3e492-2da2-4869-d091-d786e0d178dd"
# Plot heatmap to capture multi-modality
plot_heatmap(
    p_filter, x_true, grid_minval, grid_maxval,
    num_grid_points, max_iter, title='Filtered density heatmap'
)
| notebooks/discretized_ssm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
impressions = pd.read_csv("impressions_one_hour.csv")
# drops "unnecessary" columns
# Keep only the columns needed for the geographic / device / browser / OS
# distribution comparisons below.
impressions_cleaned = impressions.drop(columns=['Unnamed: 0', 'log_entry_time', 'partner_id',
    'private_contract_id', 'creative_id', 'ad_format',
    'frequency', 'supply_vendor_publisher_id', 'deal_id',
    'site', 'referrer_categories_list', 'fold_position',
    'user_hour_of_week', 'ip_address', 'td_id', 'metro',
    'recency', 'language_code', 'media_cost',
    'fee_feature_cost', 'data_usage_total_cost',
    'ttd_cost_in_usd', 'partner_cost_in_usd',
    'advertiser_cost_in_usd', 'device_id',
    'processed_time', 'rendering_context',
    'temperature_in_celsius_name',
    'temperature_bucket_start_in_celsius_name',
    'temperature_bucket_end_in_celsius_name',
    'impression_placement_id', 'file_date',
    'advertiser_id'])
impressions_cleaned.head()
# Restrict to Brazil FIRST and copy the filtered result; the original
# `.copy()[mask]` duplicated the entire frame before filtering, wasting
# memory and still leaving a chained-indexing risk.
brazil_impressions = impressions_cleaned[impressions_cleaned['country'] == 'Brazil'].copy()
# City is required for the population comparison; drop rows without it.
brazil_impressions.dropna(subset=['city'], inplace=True)
brazil_impressions["city"].value_counts()
# ## Population Distribution Comparison
# Share of ad impressions per Brazilian city versus true population share.
# NOTE(review): the reset_index()/rename(columns={"index": ...}) pattern
# relies on pandas < 2.0 value_counts column naming -- confirm version.
ad_brazil_cities = pd.DataFrame(brazil_impressions['city'].value_counts(normalize=True)).reset_index()
ad_brazil_cities.rename(columns={"index": "name", "city": "ad_frequency"}, inplace=True)
# +
brazil_city_pops = pd.read_csv('brazil_pop_by_city.csv')
brazil_city_pops = brazil_city_pops.drop(columns=['location'])
# Convert absolute populations to proportions of the national total.
total_pop = brazil_city_pops['pop'].sum()
brazil_city_pops['proportion'] = brazil_city_pops['pop']/total_pop
brazil_city_pops = brazil_city_pops.drop(columns=['pop'])
ad_brazil_cities.set_index('name', inplace=True)
brazil_city_pops.set_index('name', inplace=True)
ad_brazil_cities
joined = brazil_city_pops.join(ad_brazil_cities)
joined.sort_values(by='ad_frequency', ascending=False)[:100]
# -
# ## Device Comparison
# i.e. samsung, motorola, apple, etc.
# +
brazil_impressions["device_make"].value_counts()
ad_device_data_brazil = pd.DataFrame(brazil_impressions["device_make"].value_counts(normalize=True)).reset_index()
ad_device_data_brazil.rename(columns={"index": "device_make", "device_make": "ad_frequency"}, inplace=True)
ad_device_data_brazil = ad_device_data_brazil.set_index("device_make")
len(ad_device_data_brazil)
#https://gs.statcounter.com/vendor-market-share/mobile-tablet-console/brazil/#monthly-201909-202009
#make sure its all devices, not just mobile
brazil_device_dist = pd.read_csv("brazil_all_device.csv")
# Take the 2020-09 row; columns after "Date" hold per-vendor percentages.
distribution_2020 = brazil_device_dist[brazil_device_dist["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_mobile_devices = pd.DataFrame(distribution_2020).rename(columns={12:'true frequency'})
brazil_mobile_devices.index = brazil_mobile_devices.index.rename("device_make")
len(brazil_mobile_devices)
joined_devices = brazil_mobile_devices.join(ad_device_data_brazil)
joined_devices
# -
brazil_impressions['device_type'].value_counts()
# # Browsers
# ### i.e. Chrome, Safari, Firefox
# ## Browsers: All Devices
# +
#https://gs.statcounter.com/browser-market-share/all/brazil
#distribution of all browsers
brazil_browser = pd.read_csv('brazil_browser.csv')
brazil_browser_2020 = brazil_browser[brazil_browser["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_browser_2020 = pd.DataFrame(brazil_browser_2020).rename(columns={12:'actual dist'})
brazil_browser_2020.index.name = 'browser_name'
ad_brazil_browser = pd.DataFrame(brazil_impressions['browser'].value_counts(normalize=True)).reset_index()
ad_brazil_browser.rename(columns={"index": "browser_name", "browser": "browser_frequency"}, inplace=True)
# Map raw browser ids to names; ids absent from the map become NaN.
browser_map = {1: 'other', 7: 'Safari', 8: 'Opera', 6: 'Chrome', 15:'other' }
ad_brazil_browser['browser_name'] = ad_brazil_browser['browser_name'].map(browser_map)
ad_brazil_browser = ad_brazil_browser.set_index('browser_name')
ad_brazil_browser.join(brazil_browser_2020)
# -
# ## Browser: Mobile
# +
#https://gs.statcounter.com/browser-market-share/mobile/brazil/#monthly-201909-202009
#browser on mobile devices
browser_map = {1: 'other', 7: 'Safari', 8: 'Opera', 6: 'Chrome', 15:'other' }
brazil_mobile_browser = pd.read_csv('brazil_mobile_browser.csv')
brazil_mobile_browser = brazil_mobile_browser[brazil_mobile_browser["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_mobile_browser = pd.DataFrame(brazil_mobile_browser).rename(columns={12:'actual dist'})
brazil_mobile_browser.index.name = 'browser_name'
# device_type == 4 means mobile (see device_type_map below).
mobile = brazil_impressions[brazil_impressions['device_type']==4]
ad_brazil_mobile_browser = pd.DataFrame(mobile['browser'].value_counts(normalize=True)).reset_index()
ad_brazil_mobile_browser.rename(columns={"index": "browser_name", "browser": "browser_frequency"}, inplace=True)
ad_brazil_mobile_browser['browser_name'] = ad_brazil_mobile_browser['browser_name'].map(browser_map)
ad_brazil_mobile_browser = ad_brazil_mobile_browser.set_index('browser_name')
ad_brazil_mobile_browser.join(brazil_mobile_browser)
# -
# ## Browser: Tablet
# +
# https://gs.statcounter.com/browser-market-share/tablet/brazil/#monthly-201909-202009
# browser-on-tablet distribution comparison
browser_map = {1: 'other', 7: 'Safari', 8: 'Opera', 6: 'Chrome', 15:'other' }
brazil_tablet_browser = pd.read_csv('brazil_tablet_browser.csv')
brazil_tablet_browser = brazil_tablet_browser[brazil_tablet_browser["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_tablet_browser = pd.DataFrame(brazil_tablet_browser).rename(columns={12:'actual dist'})
brazil_tablet_browser.index.name = 'browser_name'
# device_type == 3 means tablet (see device_type_map below).
tablet = brazil_impressions[brazil_impressions['device_type']==3]
ad_brazil_tablet_browser = pd.DataFrame(tablet['browser'].value_counts(normalize=True)).reset_index()
ad_brazil_tablet_browser.rename(columns={"index": "browser_name", "browser": "browser_frequency"}, inplace=True)
ad_brazil_tablet_browser['browser_name'] = ad_brazil_tablet_browser['browser_name'].map(browser_map)
ad_brazil_tablet_browser = ad_brazil_tablet_browser.set_index('browser_name')
ad_brazil_tablet_browser.join(brazil_tablet_browser)
# -
# ## Device Type: General
# ### i.e. Desktop vs mobile vs Tablet Distribution
# +
#https://gs.statcounter.com/platform-market-share/desktop-mobile-tablet/brazil
#distribution of device types
device_type_map = {1: 'Other', 2: 'PC', 3: 'Tablet', 4:'Mobile', 5:'Roku', 6:'ConnectedTV'}
brazil_impressions['device_type'].map(device_type_map)
ad_device_type = brazil_impressions['device_type'].map(device_type_map).value_counts(normalize=True)
device_type = pd.read_csv('brazil_desktop_v_mobile.csv')
device_type_distribution_2020 = device_type[device_type["Date"]=='2020-09'].iloc[0, 1:]/100
device_type_distribution_2020 = pd.DataFrame(device_type_distribution_2020)
device_type_distribution_2020 = device_type_distribution_2020.rename(columns={12: 'actual_device_dist'})
device_type_distribution_2020.join(ad_device_type)
# -
# # Operating Systems
# ## Operating System: All
#
# +
#https://gs.statcounter.com/os-market-share/all/brazil/#monthly-201909-202009
#all operating system comparisons
os_map = {1: 'other', 2:'Windows', 3:'OS X', 4: 'Linux', 5:'iOS', 6:'Android', 7:'Windows Phone'}
ad_os = pd.DataFrame(brazil_impressions['os_family'].map(os_map).value_counts(normalize=True))
ad_os.index.name = 'name'
ad_os = ad_os.rename(columns={'os_family':'ad_mobile_os'})
brazil_os = pd.read_csv('brazil_os_general.csv')
brazil_os = brazil_os[brazil_os["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_os = pd.DataFrame(brazil_os).rename(columns={12:'actual dist'})
brazil_os.index.name = 'name'
brazil_os.join(ad_os)
# -
# ## Operating System: Mobile Device
# +
#https://gs.statcounter.com/os-market-share/mobile/brazil/#monthly-201909-202009
#operating system broken down by mobile device
os_map = {1: 'other', 2:'Windows', 3:'OS X', 4: 'Linux', 5:'iOS', 6:'Android', 7:'Windows Phone'}
mobile = brazil_impressions[brazil_impressions['device_type']==4]
ad_mobile_os = pd.DataFrame(mobile['os_family'].map(os_map).value_counts(normalize=True))
ad_mobile_os.index.name = 'name'
ad_mobile_os = ad_mobile_os.rename(columns={'os_family':'ad_mobile_os'})
brazil_os_mobile = pd.read_csv('brazil_os_for_mobile.csv')
brazil_os_mobile = brazil_os_mobile[brazil_os_mobile["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_os_mobile = pd.DataFrame(brazil_os_mobile).rename(columns={12:'actual dist'})
brazil_os_mobile.index.name = 'name'
brazil_os_mobile.join(ad_mobile_os)
# -
# ## Operating System: Tablet
# +
#https://gs.statcounter.com/os-market-share/tablet/brazil/#monthly-201909-202009
#operating system broken down by tablet
# NOTE(review): this cell reuses the *_mobile_* variable names from the
# mobile cell above even though the data here is for tablets -- the names
# are misleading and the mobile results are overwritten.
os_map = {1: 'other', 2:'Windows', 3:'OS X', 4: 'Linux', 5:'iOS', 6:'Android', 7:'Windows Phone'}
tablet = brazil_impressions[brazil_impressions['device_type']==3]
ad_mobile_os = pd.DataFrame(tablet['os_family'].map(os_map).value_counts(normalize=True))
ad_mobile_os.index.name = 'name'
ad_mobile_os = ad_mobile_os.rename(columns={'os_family':'ad_mobile_os'})
brazil_os_mobile = pd.read_csv('brazil_tablet_os.csv')
brazil_os_mobile = brazil_os_mobile[brazil_os_mobile["Date"]=='2020-09'].iloc[0, 1:]/100
brazil_os_mobile = pd.DataFrame(brazil_os_mobile).rename(columns={12:'actual dist'})
brazil_os_mobile.index.name = 'name'
brazil_os_mobile.join(ad_mobile_os)
| student-projects/fall-2020/Kinesso-AdShift-Diversifies-Marketing-Audiences/eda/[DEPRECATED] international_eda/brazil/brazil_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Heading Text used by Hash symbol
# By: <NAME>
#
# separate written text with a blank line (double enter)
#
# Date: bla bla bla...
# Beginner demo: expressions, variables, and printing.
3+(5*4)
weight_kg = 60
weight_kg
weight_kg = 60.0
weight_kg
weight_kg_text = 'weight in kilograms'
print(weight_kg)
print(weight_kg, weight_kg_text)
# # There are 2.2 pounds per kilogram
# ### the more hash symbols, the smaller the text
print('weight in lbs.', 2.2*weight_kg)
x0 = 2.4 # where x0 is weight
x1 = 1.5; x2 = 3.0; # other variables
x1+x2*x0 # playing around in code.
weight_kg = 60.0
weight_kg_text = 'weight in kilograms:'
print(weight_kg_text,weight_kg)
print('weight in lbs.:',weight_kg*2.2)
weight_kg
| My+First+Jupyter+Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import sys
MODULES_PATH = '../code/'
if MODULES_PATH not in sys.path:
sys.path.append(MODULES_PATH)
import mfuncs
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas()
pd.options.display.max_columns = 1000
import lightgbm as lgb
from sklearn.neighbors import NearestNeighbors
# %pylab inline
# -
df_train = pd.read_csv('../data/train_set.csv')
df_test = pd.read_csv('../data/test_set.csv')
# Shorten the coordinate column names; 'pos_adress_*' misspelling matches
# the raw data and must be kept as the rename key.
rnm = {
    'atm_address_lat': 'atm_lat',
    'atm_address_lon': 'atm_lon',
    'pos_adress_lat': 'pos_lat',
    'pos_adress_lon': 'pos_lon',
    'home_add_lat': 'home_lat',
    'home_add_lon': 'home_lon',
    'work_add_lat': 'work_lat',
    'work_add_lon': 'work_lon',
}
df_train.rename(columns=rnm, inplace=True)
df_test.rename(columns=rnm, inplace=True)
# Binary targets: does the POS transaction fall near work / home?
df_train['target_work'] = df_train.progress_apply(mfuncs.add_poswork_target, axis=1)
df_train['target_home'] = df_train.progress_apply(mfuncs.add_poshome_target, axis=1)
df_train.to_csv('../data/train_1.csv', index=None)
df_train.info()
df_train.head()
# # Garbage cleaning
# Narrow down to Russia
df_train.country.value_counts(normalize=True)[:10]
print(df_train.shape, df_test.shape)
df_train = df_train[df_train.country.isin(['RUS', 'RU'])]
df_test = df_test[df_test.country.isin(['RUS', 'RU'])]
print(df_train.shape, df_test.shape)
del df_train['country'], df_test['country']
# Narrow down to rubles (ISO 4217 code 643 = RUB)
print(df_train.shape, df_train.currency.value_counts(normalize=True))
df_train = df_train[df_train.currency == 643]
print(df_train.shape)
del df_train['currency']
# Remove customers with multiple (conflicting) addresses.
# NOTE(review): gb == 2 drops only customers with exactly two distinct
# values; customers with 3+ distinct addresses would survive -- confirm.
print(df_train.shape)
gb = df_train.groupby('customer_id')['work_lat'].agg('nunique')
cid_incorrect = gb[gb == 2].index
df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]
print(df_train.shape)
gb = df_train.groupby('customer_id')['home_lat'].agg('nunique')
cid_incorrect = gb[gb == 2].index
df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]
print(df_train.shape)
# Keep rows with exactly one of (atm, pos) coordinates missing,
# i.e. exactly one transaction location present.
print(df_train.shape)
df_train = df_train[df_train[['atm_lat', 'pos_lat']].isnull().sum(axis=1) == 1]
print(df_train.shape)
df_train['type'] = 'atm'
df_train.loc[~df_train['pos_lat'].isnull(), 'type'] = 'pos'
df_train['type'].value_counts()
# # Features from clustering
# +
# Prototype on a single random customer: count how often each exact POS
# coordinate recurs for that customer.
cid = df_train.sample(1)['customer_id'].values[0]
df_an = df_train[df_train.customer_id == cid]
df_point_dup = df_an.groupby(['pos_lat', 'pos_lon']).agg('size').reset_index()
df_point_dup.columns = ['pos_lat', 'pos_lon', 'pos_customer_freq']
df_an = pd.merge(df_an, df_point_dup, on=['pos_lat', 'pos_lon'], how='left')
df_an.head()
# -
# # Terminal location visualizations
df_train.head()
# How many distinct coordinates does each terminal have?
df_train[df_train.type=='pos'].drop_duplicates(['pos_lat',
    'pos_lon']).groupby(['terminal_id']).agg('size').value_counts()
df_train[df_train.type=='atm'].drop_duplicates(['atm_lat',
    'atm_lon']).groupby(['terminal_id']).agg('size').value_counts()
df_train[df_train.terminal_id=='1e15d02895068c3a864432f0c06f5ece']['atm_address'].unique()
df_train[df_train.type=='atm'].drop_duplicates(['atm_lat',
    'atm_lon']).groupby(['terminal_id']).agg('size')
import gmaps
API_KEY = '<KEY>'
gmaps.configure(api_key=API_KEY)  # Your Google API key
# Pick one customer and gather their known locations for the map below.
cid = '0dc0137d280a2a82d2dc89282450ff1b'
cid = df_train.sample(1)['customer_id'].values[0]
df_an = df_train[df_train.customer_id == cid]
center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values
center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values
points_pos = df_an[['pos_lat', 'pos_lon']].dropna().values
points_atm = df_an[['atm_lat', 'atm_lon']].dropna().values
print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)
# +
# Map one customer: POS points in blue, ATM points in red, plus home/work
# markers when the coordinates are known.
gmap = gmaps.Map()
if len(points_pos) > 0:
    gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',
        fill_color="blue", stroke_color="blue", scale=3))
if len(points_atm) > 0:
    gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',
        fill_color="red", stroke_color="red", scale=3))
if not np.isnan(center_home)[0][0]:
    gmap.add_layer(gmaps.marker_layer(center_home, label='home'))
if not np.isnan(center_work)[0][0]:
    gmap.add_layer(gmaps.marker_layer(center_work, label='work'))
gmap
# -
# Density of work/home locations across all customers
center_home = df_train[['home_lat', 'home_lon']].dropna().values
center_work = df_train[['work_lat', 'work_lon']].dropna().values
gmap = gmaps.Map()
gmap.add_layer(gmaps.symbol_layer(center_home, fill_color="red", stroke_color="red"))
gmap
np.isnan(center_home)
df_train.groupby(['customer_id']).agg('size').sort_values().value_counts()
# Share of test customers that also appear in train.
df_test.customer_id.drop_duplicates().isin(df_train.customer_id.unique()).mean()
# # Classifier
# ## Does the point land on work / home?
df_train['duplicated'] = df_train.duplicated()
df_pos = df_train[df_train['type'] == 'pos']
# target == pos in
df_pos['target_work'] = df_pos.progress_apply(mfuncs.add_poswork_target, axis=1)
df_pos['target_home'] = df_pos.progress_apply(mfuncs.add_poshome_target, axis=1)
df_pos['target_work'].mean(), df_pos['target_home'].mean()
# Cache the targets to disk, then reload (restores a clean RangeIndex).
df_pos.to_csv('../data/df_pos.csv', index=None)
df_pos = pd.read_csv('../data/df_pos.csv')
# Per-customer frequency of each exact POS coordinate.
df_point_dup = df_pos.groupby(['customer_id', 'pos_lat', 'pos_lon']).agg('size').reset_index()
df_point_dup.columns = ['customer_id', 'pos_lat', 'pos_lon', 'pos_customer_freq']
df_pos = pd.merge(df_pos, df_point_dup, on=['customer_id', 'pos_lat', 'pos_lon'], how='left')
# NOTE(review): dfs is built here but never used below in this notebook --
# possibly a pd.concat(dfs) step was dropped; confirm.
dfs = []
for cid in tqdm(df_pos.customer_id.unique()):
    df_an = df_pos[df_pos.customer_id == cid]
    df_an = mfuncs.add_dist_to_neighbours(df_an)
    dfs.append(df_an)
# Calendar features from the transaction date.
df_pos['transaction_date'] = pd.to_datetime(df_pos['transaction_date'], format='%Y-%m-%d')
df_pos['month'] = df_pos.transaction_date.dt.month
df_pos['day'] = df_pos.transaction_date.dt.day
df_pos['dayofyear'] = df_pos.transaction_date.dt.dayofyear
df_pos['dayofweek'] = df_pos.transaction_date.dt.dayofweek
df_pos.transaction_date.dtype
# Add per-customer aggregate features after the groupby
df_gb = df_pos.groupby('customer_id')
coord_stat_df = df_gb[['amount', 'pos_lat', 'pos_lon']].agg(['mean', 'max', 'min'])
coord_stat_df['transactions_per_user'] = df_gb.agg('size')
coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]
coord_stat_df.reset_index(inplace=True)
df_pos = pd.merge(df_pos, coord_stat_df, on='customer_id', how='left')
# Differences from the per-customer aggregate values
cols = ['pos_lat', 'pos_lon']
types = ['min', 'max', 'mean']
for c in cols:
    for t in types:
        df_pos['{}_diff_{}'.format(c, t)] = np.abs(df_pos[c] - df_pos['{}_{}'.format(c, t)])
# One-hot encode the merchant category code.
df_pos = pd.concat([df_pos, pd.get_dummies(df_pos['mcc'], prefix='mcc')], axis=1)
del df_pos['mcc']
df_pos.head()
# Columns excluded from model features (ids, raw addresses, leakage targets).
# NOTE(review): 'atm_address' appears twice (also added on the next line);
# harmless with errors='ignore' but redundant.
drop_cols = ['customer_id', 'terminal_id', 'target_home', 'target_work', 'atm_address',
    'pos_address', 'work_add_lat', 'work_add_lon', 'home_add_lat', 'home_add_lon',
    'city', 'type', 'transaction_date']
drop_cols += ['atm_address', 'atm_address_lat', 'atm_address_lon']
df_pos.drop(drop_cols, 1, errors='ignore').head()
# drop_cols = ['pos_address', 'pos_address_lat', 'pos_address_lon']
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
# Group-aware folds: split on unique customers, then map the split back to
# row indices so no customer straddles train and validation.
df_pos_id = df_pos.customer_id.drop_duplicates().reset_index(drop=True)
skf_id = list(KFold(n_splits=5, shuffle=True, random_state=15).split(df_pos_id))
skf = []
for train_ind, test_ind in skf_id:
    train_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[train_ind].values)].index.values
    test_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[test_ind].values)].index.values
    skf.append([train_ind_, test_ind_])
df_pos['target_work'].mean()
df_pos.head()
cid = '442fd7e3af4d8c3acd7807aa65bb5e85'
df_an = df_pos[df_pos.customer_id == cid]
df_an = mfuncs.add_dist_to_neighbours(df_an)
# NOTE(review): missing call parentheses -- this displays the bound method,
# not the unique values; likely meant .unique().
df_pos.customer_id.unique
if np.array([1]).size:
    print(1)
# +
# Cross-validated LightGBM on the home target using the customer-grouped folds.
lgb_train = lgb.Dataset(df_pos.drop(drop_cols, 1, errors='ignore'), df_pos['target_home'])
params = {
    'objective': 'binary',
    'num_leaves': 511,
    'learning_rate': 0.05,
    # 'metric' : 'error',
    'feature_fraction': 0.8,
    'bagging_fraction': 0.8,
    'bagging_freq': 1,
    'num_threads': 12,
    'verbose': 0,
}
gbm = lgb.cv(params,
    lgb_train,
    num_boost_round=2000,
    folds=skf,
    verbose_eval=10,
    early_stopping_rounds=500)
# -
# Exploratory scratch cells below.
# NOTE(review): i2 is used on the next line before it is assigned -- this
# only works with out-of-order notebook execution; on a fresh run it raises
# NameError. Also df_pos[df_pos.loc[i1]] indexes by a DataFrame and is
# almost certainly broken; likely meant df_pos.loc[i1]['customer_id'].
df_pos.loc[i2].shape
i1, i2 = skf[0]
df_pos[df_pos.loc[i1]]['customer_id'].unique
# df_pos[df_pos.loc[i2]]['customer_id']
df_pos.loc[i1]
df_pos.dtypes
# # submission
# Inspect the expected submission format.
df_sample = pd.read_csv('../submissions/sample.csv')
print(df_sample.shape)
df_sample.head()
| Raif/notebooks/1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib
# %matplotlib inline
import sys
sys.path.insert(0, '../python')
import post_process_hyperparameters
import plot_info
import print_table
import intersections
# Record provenance: print the git commit/branch this notebook ran against.
print(plot_info.get_git_metadata())
# -
# # Intersections where speedup >= 1.8 and prediction < 0.1
# ## Sobol
# Sobol (quasi-Monte Carlo) runs for the Lax-Sod functionals Q1-Q3.
data_source = 'QMC_from_data'
convergence_rate = 0.75  # measured empirically
filenames = {
    'Q1' : '../data/laxsod_es_Q1.json.bz2',
    'Q2' : '../data/laxsod_es_Q2.json.bz2',
    'Q3' : '../data/laxsod_es_Q3.json.bz2',
}
intersections.find_intersections_acceptable(filenames, data_source, convergence_rate,
    min_speedup=1.8, max_prediction=0.1,
    print_filename='acceptable_laxsod.json',
    table_filename='acceptable_laxsod')
# # Monte Carlo
# Plain Monte Carlo runs for the same functionals (slower 0.5 rate).
data_source = 'MC_from_data'
convergence_rate = 0.5  # measured empirically
filenames = {
    'Q1' : '../data/laxsod_mc_es_Q1.json.bz2',
    'Q2' : '../data/laxsod_mc_es_Q2.json.bz2',
    'Q3' : '../data/laxsod_mc_es_Q3.json.bz2',
}
intersections.find_intersections_acceptable(filenames, data_source, convergence_rate,
    min_speedup=1.8, max_prediction=0.1,
    print_filename='acceptable_laxsodmc.json',
    table_filename='acceptable_laxsodmc')
| notebooks/Intersections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter Lab - Web IDE
# * By <NAME>
# * Copyright: Creative Commons Attribution 3.0 Unported License. To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/
print("hello")
# ## Live Demo
# * Files
# * Notebooks
# * Terminal
# * Kernels
# * Keyboard Shortcuts
# * Save and Export
# ## Cells
# * Code vs Markup
# * Execute Cells
# ## Markup Cells
# ### Jupyter Markup
#
# #### Headings
# ```
# # Heading 1
# # Heading 2
# ## Heading 2.1
# ## Heading 2.2
# ```
#
# #### Lists
# * a
# * bullet point list
# 1. numbered
# 2. list
# 3. is possible
# #### Text Formating
# * *Italic*
# * **bold**
# * ***bold and italic***
# #### Tables
# | This | is |
# |------|------|
# | a | table|
# #### LaTeX Formulas
# You can write formulas in text, like $e^{i\pi} + 1 = 0$.
#
# Or block equations
#
# $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$
# #### Links
# Add links, like one to more information on latex [link](https://www.overleaf.com/learn/latex/Free_online_introduction_to_LaTeX_(part_1))
# #### HTML
# or simply write HTML code:
#
# <H3> HTML test</H3>
# <img src="logo.png" width=300>
# #### Source Code
# Python:
# ```python
# print "Hello World"
# ```
#
# Java Script
# ```javascript
# console.log("Hello World")
# ```
#
# ## Code Cells
print("this is python code")
# #### Autocompletion
a = "This is a STRING"
# press tab to see the available methods
# NOTE: `a.l` is a deliberately incomplete expression for the tab-completion
# demo; executing the cell without completing it raises AttributeError.
a.l
# #### API Documentation and Help
# add '?' in front of call
# ## System Calls
# Call OS cmd-line tools via the "!" operator:
# !ls -lha
# ## Magic-Commands
# Jupyter has built-in, so-called "magic commands" (full list [here](https://ipython.readthedocs.io/en/stable/interactive/magics.html))
# single runtime
# NOTE(review): as written, %time sits on its own line and therefore times
# nothing; to time the loop below it would need to prefix the statement.
# %time
for i in range(1000):
    pass
# runtime test (repeated timing of a single expression)
# %timeit a=34*56
# load the autoreload extension
# %load_ext autoreload
# Set extension to reload modules every time before executing code
# %autoreload 2
a=5
f="test"
# Outputs a list of all interactive variables in your environment
# %who_ls
# Reduces the output to interactive variables of type "function"
# %who_ls function
| Day_1_Intro/Short_Python_Intro/01_Jupyter-Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
import statsmodels.api as sm
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pylab as plt
matplotlib.rcParams['axes.unicode_minus'] = False
# -
# ## ARMA(1, 1)
# ___
# α = 0.5 and β = −0.5로 정하고 시계열을 생성해보자.
# $${ x }_{ t }=0.5{ x }_{ t-1 }+{ w }_{ t }-0.5{ w }_{ t-1 }$$
# +
np.random.seed(1)  # fixed seed for a reproducible realisation
# ArmaProcess takes the AR and MA lag polynomials: [1, -0.5] on the AR
# side encodes alpha = 0.5 (AR coefficients enter with flipped sign),
# and [1, -0.5] on the MA side encodes beta = -0.5.
proc = sm.tsa.ArmaProcess([1, -0.5], [1, -0.5])
samples = proc.generate_sample(1000)
plt.plot(samples, 'o-')
plt.title("Realisation of an ARMA(1,1) Model, with α = 0.5 and β = −0.5")
plt.xlabel("index")
plt.ylabel("x")
plt.show()
# Correlogram: how quickly the autocorrelation decays with lag.
sm.graphics.tsa.plot_acf(samples, lags=20)
plt.title("Correlogram")
plt.xlabel("Lag")
plt.ylabel("ACF")
plt.show()
# -
# 위의 시계열로 α, β을 추정해보자.
# Estimate alpha and beta back from the generated series.
# NOTE(review): sm.tsa.ARMA was removed in statsmodels >= 0.13; newer
# code should use sm.tsa.ARIMA(samples, order=(1, 0, 1)) -- confirm the
# installed statsmodels version before running.
arma = sm.tsa.ARMA(samples, (1, 1))
ret = arma.fit(disp=False)
print(ret.summary())
print("\n========== 신뢰구간(95%) ==========")
print(ret.conf_int(alpha=0.05))
# ## ARMA(2, 2)
# ___
# ${ \alpha }_{ 1 }=0.5,{ \alpha }_{ 2 }=-0.25,{ \beta }_{ 1 }=0.5,{ \beta }_{ 2 }=-0.3$로 정하고 시계열을 생성해보자.
# $${ x }_{ t }=0.5{ x }_{ t-1 }-0.25{ x }_{ t-2 }+\dots +0.5{ w }_{ t-1 }-0.3{ w }_{ t-2 }$$
# +
np.random.seed(1)  # fixed seed for a reproducible realisation
# ARMA(2,2): the lag polynomials encode alpha1=0.5, alpha2=-0.25 (AR
# side, coefficients enter with flipped sign) and beta1=0.5, beta2=-0.3
# (MA side).
proc = sm.tsa.ArmaProcess([1, -0.5, 0.25], [1, 0.5, -0.3])
samples = proc.generate_sample(1000)
plt.plot(samples, 'o-')
# Bug fix: the title previously repeated the ARMA(1,1) parameters
# ("α = 0.5 and β = −0.5") instead of this model's coefficients.
plt.title("Realisation of an ARMA(2,2) Model, with α1 = 0.5, α2 = −0.25, β1 = 0.5 and β2 = −0.3")
plt.xlabel("index")
plt.ylabel("x")
plt.show()
sm.graphics.tsa.plot_acf(samples, lags=20)
plt.title("Correlogram")
plt.xlabel("Lag")
plt.ylabel("ACF")
plt.show()
# Fit an ARMA(2,2) to recover the coefficients and report the 95% CIs.
arma = sm.tsa.ARMA(samples, (2, 2))
ret = arma.fit(disp=False)
print(ret.summary())
print("\n========== 신뢰구간(95%) ==========")
print(ret.conf_int(alpha=0.05))
# -
# ## Choosing the Best ARMA(p,q) Model
# ___
#
# ARMA(3, 2)로 시계열을 생성
# +
np.random.seed(1)
# Generate a sample from a known ARMA(3, 2) process, then grid-search
# the (p, q) order that minimizes the AIC over 0 <= p, q <= 3.
proc = sm.tsa.ArmaProcess([1, -0.5, 0.25, -0.4], [1, 0.5, -0.3])
samples = proc.generate_sample(1000)
aics = np.zeros((4, 4))
# Bug fix: np.infty was a deprecated alias removed in NumPy 2.0; use
# np.inf so failed fits can never win the argmin below.
aics.fill(np.inf)
for i in range(4):
    for j in range(4):
        try:
            arma = sm.tsa.ARMA(samples, (i, j))
            ret = arma.fit(disp=False)
            aics[i][j] = ret.aic
        except Exception:
            # Some (p, q) combinations fail to converge; leave them at inf.
            pass
print(aics)
min_idx = np.unravel_index(np.argmin(aics), aics.shape)
print("\nMinimum AIC: {0}\nIdx: {1}\n".format(np.min(aics), min_idx))
# Refit the winning order and show the full estimation summary.
arma = sm.tsa.ARMA(samples, (min_idx[0], min_idx[1]))
ret = arma.fit(disp=False)
print(ret.summary())
# -
# ## Ljung-Box Test
# +
# Residual ACF: a well-specified model should leave white-noise residuals.
sm.graphics.tsa.plot_acf(ret.resid, lags=30)
plt.title("Series resid")
plt.xlabel("Lag")
plt.ylabel("ACF")
plt.show()
# Ljung-Box test of residual autocorrelation up to lag 20.
# NOTE(review): in statsmodels >= 0.12 acorr_ljungbox returns a DataFrame
# by default rather than a (qvalue, pvalue) tuple -- confirm the
# installed version before running this unpacking.
qvalue, pvalue = sm.stats.acorr_ljungbox(ret.resid, 20)
plt.stem(pvalue)
plt.title("Ljung-Box Test")
plt.xlabel("Lag")
plt.ylabel("p-value")
plt.show()
for i in range(len(pvalue)):
    print("pvalue (Lag: {0}): {1}".format(i, pvalue[i]))
# -
# ## Financial Data
# +
# Load S&P 500 daily data and model the log-returns with the best ARMA
# order found by AIC grid search, then check the residuals.
day_data = pd.read_csv("GSPC.csv")
day_data["Date"] = pd.to_datetime(day_data["Date"], format='%Y-%m-%d')
day_data = day_data.set_index("Date", inplace=False)
day_data["Close"].plot()
plt.title("Close of S&P 500")
plt.xlabel("date")
plt.ylabel("price")
plt.show()
# Log-returns: diff of log prices; the first row becomes NaN and is dropped.
day_data['log_return'] = np.log(day_data['Close']).diff()
day_data = day_data.dropna()
aics = np.zeros((4, 4))
# Bug fix: np.infty was a deprecated alias removed in NumPy 2.0; np.inf
# keeps failed fits out of the argmin.
aics.fill(np.inf)
for i in range(4):
    for j in range(4):
        try:
            arma = sm.tsa.ARMA(day_data['log_return'], (i, j))
            ret = arma.fit(disp=False)
            aics[i][j] = ret.aic
        except Exception:
            # Non-converging orders are skipped (stay at inf).
            pass
print(aics)
min_idx = np.unravel_index(np.argmin(aics), aics.shape)
print("\nMinimum AIC: {0}\nIdx: {1}\n".format(np.min(aics), min_idx))
arma = sm.tsa.ARMA(day_data['log_return'], (min_idx[0], min_idx[1]))
ret = arma.fit(disp=False)
print(ret.summary())
# Residual diagnostics: ACF plot plus Ljung-Box p-values up to lag 20.
sm.graphics.tsa.plot_acf(ret.resid, lags=30)
plt.title("Series resid")
plt.xlabel("Lag")
plt.ylabel("ACF")
plt.show()
qvalue, pvalue = sm.stats.acorr_ljungbox(ret.resid, 20)
plt.stem(pvalue)
plt.title("Ljung-Box Test")
plt.xlabel("Lag")
plt.ylabel("p-value")
plt.show()
for i in range(len(pvalue)):
    print("pvalue (Lag: {0}): {1}".format(i, pvalue[i]))
# -
| 20180908_AdvancedAlgorithmicTrading_ch10/ARMA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ravi-kr/Universe/blob/main/Time_Profiling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="-H314HNm7ZK5" outputId="867950a8-51b4-4299-8392-547c9bcbae15"
# %timeit new_list = list()
# + colab={"base_uri": "https://localhost:8080/"} id="zMLeorT31kUi" outputId="ccb0c92c-27ac-4d7d-c702-7aa45aed9532"
# %timeit new_list = []
# + colab={"base_uri": "https://localhost:8080/"} id="ZBqYm6ud1prE" outputId="de727766-720b-40d5-b826-b43677cf2ae9"
# %timeit new_dict = dict()
# + colab={"base_uri": "https://localhost:8080/"} id="7n9eLDUf1v1B" outputId="42f03b0c-2cbc-4961-d238-68be34e5155d"
# %timeit new_dict = {}
# + colab={"base_uri": "https://localhost:8080/"} id="6TDXVabB1ysZ" outputId="277b38a7-e1ca-440b-83c6-c44a23b1b59a"
# %timeit sorted_list = list(set(sorted([1, 3, 4, 1, 10, 4])))
# + colab={"base_uri": "https://localhost:8080/"} id="MpIg0g0e17T-" outputId="af06e57e-1ec1-4639-f459-cf6e2c615eb1"
# %timeit -r2 -n10 new_list = list()
# + colab={"base_uri": "https://localhost:8080/"} id="EjkBnOn52B31" outputId="d4b554e3-fdd8-4764-e167-e81980805136"
# %timeit -r2 -n10 new_list = []
# + colab={"base_uri": "https://localhost:8080/"} id="3q_BfpX82DrY" outputId="8eebb131-1272-411a-8008-eee1959d9107"
# %%timeit
new_list = list()
for num in range(0, 51):
new_list.append(num)
# + colab={"base_uri": "https://localhost:8080/"} id="-oJqGg0o44Pw" outputId="a68b9fb4-3114-40b5-9375-6a1b3064fa8e"
# %%timeit
new_list = [num for num in range(0, 51)]
# + colab={"base_uri": "https://localhost:8080/"} id="IsvZ8O8l4_0x" outputId="4f7cf223-0e51-47a8-feb0-3d23e411c625"
# %%timeit
new_list = [*range(0, 51)]
# + id="-hLWXAw15Fvy"
# + id="56qJuGul5Vo5"
| Time_Profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="TA21Jo5d9SVq"
#
#
# 
#
# [](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_IT.ipynb)
#
#
#
# + [markdown] id="CzIdjHkAW8TB"
# # **Detect entities in Italian text**
# + [markdown] id="wIeCOiJNW-88"
# ## 1. Colab Setup
# + id="CGJktFHdHL1n" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="77cd7d09-b7a8-4c6c-b04a-ffa1f104eb6d"
# !wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
# # !bash colab.sh
# -p is for pyspark
# -s is for spark-nlp
# # !bash colab.sh -p 3.1.1 -s 3.0.1
# by default they are set to the latest
# Install Spark NLP Display for visualization
# !pip install --ignore-installed spark-nlp-display
# + [markdown] id="eCIT5VLxS3I1"
# ## 2. Start the Spark session
# + id="sw-t1zxlHTB7"
import json
import pandas as pd
import numpy as np
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
spark = sparknlp.start()
# + [markdown] id="9RgiqfX5XDqb"
# ## 3. Select the DL model
# + id="LLuDz_t40be4"
# If you change the model, re-run all the cells below.
# Applicable models: wikiner_840B_300
MODEL_NAME = "wikiner_840B_300"
# + [markdown] id="2Y9GpdJhXIpD"
# ## 4. Some sample examples
# + id="vBOKkB2THdGI"
# Enter examples to be transformed as strings in this list
text_list = [
"""<NAME> III (nato il 28 ottobre 1955) è un magnate d'affari americano, sviluppatore di software, investitore e filantropo. È noto soprattutto come co-fondatore di Microsoft Corporation. Durante la sua carriera in Microsoft, Gates ha ricoperto le posizioni di presidente, amministratore delegato (CEO), presidente e capo architetto del software, pur essendo il principale azionista individuale fino a maggio 2014. È uno dei più noti imprenditori e pionieri del rivoluzione dei microcomputer degli anni '70 e '80. Nato e cresciuto a Seattle, Washington, Gates ha co-fondato Microsoft con l'amico d'infanzia Paul Allen nel 1975, ad Albuquerque, nel New Mexico; divenne la più grande azienda di software per personal computer al mondo. Gates ha guidato l'azienda come presidente e CEO fino a quando non si è dimesso da CEO nel gennaio 2000, ma è rimasto presidente e divenne capo architetto del software. Alla fine degli anni '90, Gates era stato criticato per le sue tattiche commerciali, che erano state considerate anticoncorrenziali. Questa opinione è stata confermata da numerose sentenze giudiziarie. Nel giugno 2006, Gates ha annunciato che sarebbe passato a un ruolo part-time presso Microsoft e un lavoro a tempo pieno presso la Bill & Melinda Gates Foundation, la fondazione di beneficenza privata che lui e sua moglie, <NAME>, hanno fondato nel 2000. [ 9] A poco a poco trasferì i suoi doveri a <NAME> e <NAME>. Si è dimesso da presidente di Microsoft nel febbraio 2014 e ha assunto un nuovo incarico come consulente tecnologico per supportare il neo nominato CEO <NAME>.""",
"""La Gioconda è un dipinto ad olio del XVI secolo creato da Leonardo. Si tiene al Louvre di Parigi."""
]
# + [markdown] id="XftYgju4XOw_"
# ## 5. Define Spark NLP pipeline
# + id="lBggF5P8J1gc" colab={"base_uri": "https://localhost:8080/"} outputId="4e848fdb-2e69-4a39-9b55-2271692b71a9"
# Convert the raw 'text' column into Spark NLP's document annotation.
document_assembler = DocumentAssembler() \
    .setInputCol('text') \
    .setOutputCol('document')
# Split each document into tokens.
tokenizer = Tokenizer() \
    .setInputCols(['document']) \
    .setOutputCol('token')
# The wikiner_840B_300 is trained with glove_840B_300, so the embeddings in the
# pipeline should match. Same applies for the other available models.
# NOTE(review): if MODEL_NAME matches none of these branches, `embeddings`
# is never assigned and the Pipeline below raises NameError.
if MODEL_NAME == "wikiner_840B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_840B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "wikiner_6B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_6B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "wikiner_6B_100":
    embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
# Pretrained Italian NER model: emits one IOB tag per token.
ner_model = NerDLModel.pretrained(MODEL_NAME, 'it') \
    .setInputCols(['document', 'token', 'embeddings']) \
    .setOutputCol('ner')
# Groups consecutive IOB tags into whole named-entity chunks.
ner_converter = NerConverter() \
    .setInputCols(['document', 'token', 'ner']) \
    .setOutputCol('ner_chunk')
nlp_pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    embeddings,
    ner_model,
    ner_converter
])
# + [markdown] id="mv0abcwhXWC-"
# ## 6. Run the pipeline
# + id="EYf_9sXDXR4t"
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({'text': text_list}))
result = pipeline_model.transform(df)
# + [markdown] id="UQY8tAP6XZJL"
# ## 7. Visualize results
# + id="Ar32BZu7J79X" colab={"base_uri": "https://localhost:8080/", "height": 585} outputId="c387f87d-fc8f-4559-ce80-ae6cbe26f990"
from sparknlp_display import NerVisualizer
NerVisualizer().display(
result = result.collect()[0],
label_col = 'ner_chunk',
document_col = 'document'
)
| tutorials/streamlit_notebooks/NER_IT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook shows a simplified application of the gradient descent algorithm, to fit a regression model with one parameter (i.e. finding the slope of the regression line to get the best estimation).
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from IPython.display import display
# %matplotlib inline
# -
# The data consists in a sample of 20 observations. For each individual, we have the age, the weight and the systolic pressure. Column "age" is removed from dataframe for simplicity
# Load the 20-observation sample; drop 'age' to keep a single predictor
# (weight) for this one-parameter gradient-descent example.
df=pd.read_csv("systolic_blood_press.csv")
del df['age']
display(df.head())
display(df.describe())
m = len(df) # number of observations
# A scatter plot between weight and systolic pressure shows a strong correlation. This can be modelized with a regression line. In this example, the intercept is deliberately null to keep the example simple. The instruction <i>sm.OLS</i> finds the best regression line slope so that the SSR (sum of squared residuals) is the lowest possible.
# +
# Fit regression model with statsmodels OLS -- this gives the reference
# slope that gradient descent should converge to later.
plt.scatter(x=df.weight, y=df.systolic_press)
model=sm.OLS(df.systolic_press, df.weight) #no intercept
res=model.fit()
slope=res.params['weight']
# Plot the regression line
abline_values = [slope*i for i in df.weight]
plt.plot(df.weight, abline_values, color="red")
plt.xlabel("Weight")
plt.ylabel("Systolic Pressure")
# Values found by stats_model
print("Slope from statsmodels OLS:", slope)
print("SSR from statsmodels", res.ssr)
# -
# The more the line fits, the less SSR is, and vice versa. This can be expressed as a cost function. This last takes domain of parameter to find in input (here the slope of the regression line), and outputs the corresponding SSR. This function has a minimum where x is the best parameter, and y the minimum corresponding SSR.
# +
def cost_function(coefficient):
    """Mean squared residual of the no-intercept line y = coefficient * weight.

    Sums, over every observation in the module-level dataframe ``df``,
    the squared distance between the measured systolic pressure and the
    value predicted from the weight, then averages over the sample size.
    """
    total_sq_error = 0
    # Walk the sample row by row and accumulate squared residuals.
    for row in df.itertuples():
        _, systolic_press, weight = row
        residual = systolic_press - coefficient * weight
        total_sq_error += np.square(residual)
    return total_sq_error / len(df)
# Visualize the cost function over a window of +/- 0.5 around the OLS
# slope; the curve is convex with its minimum at the optimal slope.
cost_x = np.arange(slope-0.5, slope+0.55, 0.05)
cost_y = [cost_function(i) for i in cost_x]
plt.plot(cost_x, cost_y)
plt.xlabel("variable to find")
plt.ylabel("SSR")
# Sanity check: cost at the OLS slope, rescaled by m, matches the OLS SSR.
print("SSR returned by the cost function:", cost_function(slope)*m)
print("SSR from statsmodels:", res.ssr)
# -
# All the point of gradient descent is to find this minimum. Because the cost function is convex, it has a unique minimum which is local and global. Thus, one could use its derivative to find its minimum. Gradient descent starts with an initial guess and improves it at each iteration, so that it tends to the value minimizing the cost function. While approaching the minimum, the slope tends to null, and gradients are smaller and smaller (convergence).
def gradient_descent_iter(min_x):
    """Minimize the SSR cost by gradient descent, starting from ``min_x``.

    Repeats x <- x - alpha * dC/dx until the gradient magnitude drops
    below ``epsilon`` or ``max_iteration`` steps have run (safety stop in
    case ``alpha`` is too large to converge). Relies on the module-level
    sample ``df`` (columns ``weight`` and ``systolic_press``) and its
    length ``m``.
    """
    # if alpha is too big, the algorithm will not converge and "jump" above the minimum
    alpha = 0.0001
    epsilon = 0.00001
    max_iteration = 100  # in case of no convergence (alpha too big)
    # Fix: renamed the counter from `iter` to avoid shadowing the builtin.
    iteration = 0
    while True:
        iteration += 1
        # Analytic gradient of the mean squared error; at each step it
        # iterates through the whole sample (not efficient on large samples).
        derivative = sum([(min_x*df.weight[i] - df.systolic_press[i])*df.weight[i] for i in range(m)]) / m
        min_x = min_x - (alpha*derivative)
        if (abs(derivative) < epsilon) or (iteration > max_iteration):
            return min_x
# Run the descent from an initial guess of 0 and compare with the OLS slope.
min_x = gradient_descent_iter(0)
print("Found by gradient descent:", min_x)
print("From Statsmodels:", slope)
| GradientDescent_LinearRegression/GradientDescent_LR_1variable.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exoplanets - Data Modelling
#
# In this notebook we will model the data provided in the dataset and evaluate the results. After the implementation of data-loading functions, two models will be used to evaluate how different algorithms classify the dataset.
#
# +
import numpy as np
import datetime, os
import matplotlib.pyplot as plt
# custom code
from utils import data_loader_txt, plot_confusion_matrix
# -
# ### Import data in train and test set
# Bug fix: the train path was absolute ("/data/...") while the test path
# was relative ("data/..."); both now use the same relative location.
TRAIN_SET_PATH = "data/Exoplanets/exoTrain.csv"
TEST_SET_PATH = "data/Exoplanets/exoTest.csv"
# index of the CSV column holding the class label
LABEL_COLUMN_INDEX = 0
# loading train set
x_train, y_train = data_loader_txt(path=TRAIN_SET_PATH, label_column_index=LABEL_COLUMN_INDEX)
# loading test set
x_test, y_test = data_loader_txt(path=TEST_SET_PATH, label_column_index=LABEL_COLUMN_INDEX)
# ### Baseline model
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score

# Linear SVM baseline; class_weight='balanced' compensates for the heavy
# class imbalance between exoplanet and non-exoplanet stars.
svc = LinearSVC(C=0.5, max_iter=3000, verbose=0, class_weight='balanced')
print("SVC - baseline training...")
svc.fit(x_train, np.squeeze(y_train))
y_pred = svc.predict(x_test)
print("SVC - training and evaluation completed")
# calculate the confusion matrix (fixed local-name typo: scv_cm -> svc_cm)
svc_cm = confusion_matrix(y_true=np.squeeze(y_test), y_pred=y_pred)
plot_confusion_matrix(svc_cm, ["Non-Exoplanet", "Exoplanet"], normalize=False)
print("Recall score:", recall_score(y_test, y_pred))
# ### Tensorflow CNN model
import tensorflow as tf
# %load_ext tensorboard
OUTDIR = "logs"
# + language="bash"
# python gcp_ai_platform_job/task.py \
# --train_data_path=${TRAIN_SET_PATH} \
# --eval_data_path=${TEST_SET_PATH} \
# --output_dir=${OUTDIR} \
# --num_epochs=5 \
# --batch_size=32
# -
# %tensorboard --logdir logs
# ### Final notes
# Two models have been implemented in this notebook: an SVC and a (small) CNN. The results show that the CNN worked better than the SVC. Nevertheless, some remarks are reported below:
#
# - The SVC could improve its performance by working on a smaller set of engineered features.
# - The CNN achieved respectively 81% and 100% recall on the train and test set. Since the test set is actually quite small, it might make sense to re-evaluate the results with a different split, i.e. cross-validation.
# - No specific HPO has been performed. That could improve the results/robustness of both algorithms.
| training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Exercise: a dict keyed by heterogeneous, hashable values (int, str,
# tuple), with in-place updates of each stored value.
a = {1: 10, 'abc': 'def'}
a[0] = [0, 1, 2]
a[(0, 1)] = 'a'          # tuples are hashable, so they work as keys
a[1] = a[1] + 5          # bump the int value
a['abc'] = a['abc'] * 2  # double the string (same effect as s += s)
a[0] += [3]              # extend the list value in place
a[0] = a[0][1:]          # drop the first element
del a[(0, 1)]            # remove the tuple-keyed entry
print(a)
# -
| 2021 Осенний семестр/Практическое задание 3_5/Фахрутдинов_Задание 3_5_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
from astropy.io import fits
import os
import glob
from astropy import units as u
from astropy.coordinates import SkyCoord
# -
def filesorter(filename, foldername, fitskeyword_to_check, keyword, verbose, science):
    '''
    Sort one HDI image into the night's directory structure, either by a
    FITS header keyword (bias/flat/filter sorting) or by sky coordinates
    (science-target sorting).

    Example directory structure built up over a night:
        20180116/
        ├── bias/
        ├── flats/
        │   ├── dome/  (with V/R/I/Haon/Haoff subfolders)
        │   └── twilight/  (same filter substructure)
        └── science/
            └── <target>/  (same filter substructure)

    Parameters
    ----------
    filename : str
        Full path to the file, e.g.
        /Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight4/foo.fits.gz
    foldername : str
        Destination subdirectory name ('bias', 'flats', a filter name,
        or a science-target name such as 'Taurus1').
    fitskeyword_to_check : str
        Header value that must match for keyword-based sorting
        (ignored when ``science`` is given).
    keyword : str
        FITS header keyword to read (e.g. 'OBSTYPE', 'FILTER1').
    verbose : bool
        If True, print every directory creation and file move.
    science : str or None
        If an RA/Dec string, use a more sophisticated sorting approach
        based on RA/Dec (header keys RASTRNG, DECSTRNG); the file is
        moved when its coordinates fall within 15 arcmin of the target.
        Format of desired target match should be '+04:19:57.9,+28:24:56'.
        If None, perform the usual keyword-based sorting.
    '''
    # Skip silently-missing files: a previous run may have moved them already.
    if os.path.exists(filename):
        pass
    else:
        print(filename + " does not exist or has already been moved.")
        return
    header = fits.getheader(filename)
    fits_type = header[keyword]
    # Split the path into the night's directory and the bare file name.
    datadir = '/'.join(filename.split('/')[:-1])
    destination = datadir + '/' + foldername
    shortfilename = filename.split('/')[-1]
    if science is None:
        # Keyword-based sorting: move the file when the header value matches.
        if os.path.exists(destination):
            pass
        else:
            if verbose == True:
                print("Making new directory: " + destination)
            os.mkdir(destination)
        if fits_type == fitskeyword_to_check:
            if verbose == True:
                print("Moving " + filename + " to: " + destination + '/' + shortfilename)
            os.rename(filename, destination + '/' + shortfilename)
    else:
        # Coordinate-based sorting: everything lands under science/<target>/.
        if os.path.exists(datadir + '/science'):
            pass
        else:
            if verbose == True:
                print("Making new directory: " + datadir + '/science')
            os.mkdir(datadir + '/science')
        destination = datadir + '/science/' + foldername
        if os.path.exists(destination):
            pass
        else:
            if verbose == True:
                print("Making new directory: " + destination)
            os.mkdir(destination)
        # Compare the requested target position with the pointing recorded
        # in the image header.
        desired_ra = science.split(',')[0]
        desired_dec = science.split(',')[1]
        image_ra = header['RASTRNG']
        image_dec = header['DECSTRNG']
        c1 = SkyCoord(desired_ra, desired_dec, unit=(u.hourangle, u.deg), frame='icrs')
        c2 = SkyCoord(image_ra, image_dec, unit=(u.hourangle, u.deg), frame='icrs')
        sep = c1.separation(c2)
        # 15-arcmin matching radius (roughly the field scale of interest here).
        if sep.arcminute < 15.0:
            if verbose == True:
                print("Match Coords: ", desired_ra, desired_dec, ", Image Coords: ", image_ra, image_dec, ", Sep (arcmin): ", sep.arcminute)
                print("Moving " + filename + " to: " + destination + '/' + shortfilename)
            os.rename(filename, destination + '/' + shortfilename)
        else:
            if verbose == True:
                print("Image coords. do not match criteria.")
    return
# +
# use MP's code to check filters:
def verify_hdi_filter(infile,des_filter,verbose=False):
    '''
    verify_hdi_filter
    -----------------
    Check that the filter we think was used for an HDI image really was
    the one in the beam.

    inputs
    ---------
    infile     : (string) filename of image to check
    des_filter : (string) common filter name (key of filter_dict below)
    verbose    : (boolean, default=False) if True, print reason for failure

    returns
    ---------
    0,1,2 : (int code) 0=filter matches; 1=filter does not match;
            2=one wheel slot not empty
    '''
    # Wheel-position codes are hardware-fixed, so they are hardcoded here.
    filter_dict = {
        'V': '102',
        'R': '103',
        'I': '104',
        'Ha': '200',
        'Haoff': '204',
        'empty': ['105', '106', '107', '205', '206', '207'],
    }
    header = fits.getheader(infile, 0)
    wheel_positions = (header['FILTER1'], header['FILTER2'])
    # One of the two wheels must sit in an empty slot; otherwise two
    # filters are stacked in the light path.
    if not any(pos in filter_dict['empty'] for pos in wheel_positions):
        if verbose:
            print('verify_filter: failed with multiple filters.')
        return 2
    # Either wheel may hold the requested filter.
    if filter_dict[des_filter] in wheel_positions:
        return 0
    # Default failure mode: a single, but wrong, filter.
    if verbose:
        print('verify_filter: failed with non-matching filter.')
    return 1
# -
# # some random testing stuff.
# NOTE(review): these scratch cells reference names (``bias_frames``,
# ``image_ra``) that are only defined in cells further down the notebook
# or in a previous session -- they run correctly only after those cells
# have been executed at least once.
hdr = fits.getheader(bias_frames[0])
hdr['OBJECT']
science = '+04:19:57.9,+28:24:56'
desired_ra = science.split(',')[0]
desired_dec = science.split(',')[1]
image_dec = hdr['DECSTRNG']
image_ra, image_dec
c1 = SkyCoord(desired_ra, desired_dec, frame='icrs', unit=(u.hourangle, u.deg))
c2 = SkyCoord(image_ra, image_dec, frame='icrs', unit=(u.hourangle, u.deg))
c1
# test on one image
testimage = '/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight4/testbias.fits.gz'
filesorter(testimage, 'testbias', 'BIAS', 'OBSTYPE', verbose=True, science=None)
# +
# test on single science file.
filesorter('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight4/test_sci.fits.gz', \
           'test', 'foo', 'OBSTYPE', verbose=True, science='+12:27:04.3,+26:49:07')
# -
# # Sort all biases and flats!
# +
# run on all the bias frames (keyword-based sorting on OBSTYPE == 'BIAS')
bias_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*b00.fits.gz')
for fitsfile in bias_frames:
    filesorter(fitsfile, 'bias', 'BIAS', 'OBSTYPE', verbose=True, science=None)
# +
# run on all the flat frames (keyword-based sorting on OBSTYPE == 'FLAT')
flat_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*f00.fits.gz')
for fitsfile in flat_frames:
    filesorter(fitsfile, 'flats', 'FLAT', 'OBSTYPE', verbose=True, science=None)
# -
# # Sort all science files!
# Science frames are matched by sky coordinates, so the keyword value
# 'foo' below is a placeholder that is never compared.
# +
# run on all the "ComaBer" frames
sci_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*o00.fits.gz')
for fitsfile in sci_frames:
    filesorter(fitsfile, 'ComaBer', 'foo', 'OBSTYPE', verbose=True, science='+12:27:04.3,+26:49:07')
# +
# run on all the "Praesepe" frames
sci_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*o00.fits.gz')
for fitsfile in sci_frames:
    filesorter(fitsfile, 'Praesepe', 'foo', 'OBSTYPE', verbose=True, science='+08:40:51.3,+19:34:07')
# +
# run on all the "Taurus2" frames
sci_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*o00.fits.gz')
for fitsfile in sci_frames:
    filesorter(fitsfile, 'Taurus2', 'foo', 'OBSTYPE', verbose=True, science='+04:19:59.6,+28:25:14')
# +
# run on all the "Taurus1" frames
sci_frames = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/c*o00.fits.gz')
for fitsfile in sci_frames:
    filesorter(fitsfile, 'Taurus1', 'foo', 'OBSTYPE', verbose=True, science='+04:32:03.4,+18:14:32')
# -
# # Sort by filters!
#
# +
'''
Note to self:
0,1,2 : (int code) 0=filter matches; 1=filter does not match; 2=one wheel slot not empty
'''
# Bug fix: both wheel tables were assigned to the same name
# (filter2names), so the first dictionary was silently overwritten.
# FILTER1 holds the broadband Harris set...
filter1names = {'100': "HarrisU",
                '101': "HarrisB",
                '102': "HarrisV",
                '103': "HarrisR",
                '104': "HarrisI",
                '105': "empty",
                '106': "empty",
                '107': "empty"}
# ...and FILTER2 holds the narrowband H-alpha set.
filter2names = {'200': "6580",
                '201': "6620",
                '202': "6660",
                '203': "6700",
                '204': "6740",
                '205': "empty",
                '206': "empty",
                '207': "empty"}
# smarter: one lookup from common filter name to wheel-position code.
filter_dict = {'V': '102',
               'R': '103',
               'I': '104',
               'Ha': '200',
               'Haoff': '204',
               'empty': ['105', '106', '107', '205', '206', '207']}
# +
# run through the various filters for different fields:
# for each (field, filter) pair, verify the wheel positions in the header
# and only then move the image into its filter subfolder.
fields = ['Taurus1', 'Taurus2', 'ComaBer', 'Praesepe']
filters = ['V', 'R', 'I', "Ha", "Haoff"]
for field in fields:
    for filtername in filters:
        imframes = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight3/science/' \
                             + field + '/*fits.gz')
        filterval = filter_dict[filtername]
        for fitsfile in imframes:
            verify = verify_hdi_filter(fitsfile, filtername, verbose=False)
            if verify == 0:
                # H-alpha filters live in the second wheel, broadband in the first.
                if filtername == "Ha" or filtername == "Haoff":
                    filesorter(fitsfile, filtername, filterval, 'FILTER2', verbose=True, science=None)
                else:
                    filesorter(fitsfile, filtername, filterval, 'FILTER1', verbose=True, science=None)
# +
# run through the filters for the flats
# N.B. - this requires you to sort the flats into dome or twilight manually based on the logs!
flats = ['dome', 'twilight']
filters = ['V', 'R', 'I', "Ha", "Haoff"]
for flattype in flats:
for filtername in filters:
imframes = glob.glob('/Volumes/mybookduo/Teaching/AST341_Spring2018/WIYNdata/WorkingNight4/flats/' \
+ flattype + '/*fits.gz')
filterval = filter_dict[filtername]
for fitsfile in imframes:
verify = verify_hdi_filter(fitsfile, filtername, verbose=False)
if verify == 0:
if filtername == "Ha" or filtername == "Haoff":
filesorter(fitsfile, filtername, filterval, 'FILTER2', verbose=True, science=None)
else:
filesorter(fitsfile, filtername, filterval, 'FILTER1', verbose=True, science=None)
# -
| HDIFileSorting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="klsFNbTF8j62" colab_type="text"
# # Install Dependencies
# + id="878QD18d7JDC" colab_type="code" outputId="ea14447f-cf00-470f-eb7a-8fb883e789d1" colab={"base_uri": "https://localhost:8080/", "height": 4555}
# !apt install swig cmake libopenmpi-dev zlib1g-dev
# !pip install gym
# !pip install box2d_py
# + [markdown] id="jAJVYB0fmsly" colab_type="text"
# # Check if we are allocated a GPU
#
#
# + id="dlFeN7DQDovH" colab_type="code" outputId="2ab28607-877f-46cd-ea34-c6c3f5b574ad" colab={"base_uri": "https://localhost:8080/", "height": 36}
import tensorflow as tf
tf.test.gpu_device_name()
# + [markdown] id="lN5wW0Y18oMR" colab_type="text"
# # Connect to Google Drive
# + id="1W29us8L6-Go" colab_type="code" outputId="a8b7ddc7-45e2-4f33-f6f4-9f1c64855295" colab={"base_uri": "https://localhost:8080/", "height": 56}
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="2AooYTa76TDn" colab_type="text"
# # Discrete DQN
#
# In this implementation, the actions of the BipedalWalker are discretized into 81 actions, each action being a permutation of {-1,0,1} for each of the four outputs.
# + [markdown] id="UTrPpfVa6TDr" colab_type="text"
# ## Import Modules
# + id="zIQ2JJ_p6TDv" colab_type="code" colab={}
import keras
import gym
import os
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import tensorflow as tf
import pickle # for saving episodes -> rewards
import numpy as np
from collections import deque
import random
# + [markdown] id="cWVPsYUt6TD8" colab_type="text"
# ## Build the Model
# + [markdown] id="TCOvLXLK6TD-" colab_type="text"
# ### Replay Buffer
# + id="Nx5ibJ9n6TEA" colab_type="code" colab={}
class ReplayBuffer:
    """Fixed-capacity experience replay buffer for off-policy RL.

    Stores (state, action, reward, next_state) transitions in a deque;
    once capacity is reached, the deque's maxlen automatically evicts
    the oldest transition.
    """

    def __init__(self, buffer_size):
        # deque(maxlen=...) drops the oldest entry on overflow, so no
        # manual eviction logic is needed.
        self.buffer = deque(maxlen=buffer_size)
        self.capacity = buffer_size
        self.len = 0  # number of transitions currently stored

    def sample(self, n_samples):
        """Return a random batch as (states, actions, rewards, next_states) arrays.

        If fewer than n_samples transitions are stored, the whole buffer
        population is sampled (without replacement).
        """
        # Fix: removed the dead `batch = []` assignment that was
        # immediately overwritten.
        n_samples = min(self.len, n_samples)
        batch = random.sample(self.buffer, n_samples)
        curr_states = np.array([arr[0] for arr in batch], dtype=np.float32)
        actions = np.array([arr[1] for arr in batch], dtype=np.int32)
        rewards = np.array([arr[2] for arr in batch], dtype=np.float32)
        next_states = np.array([arr[3] for arr in batch], dtype=np.float32)
        return curr_states, actions, rewards, next_states

    def add(self, curr_state, action, reward, next_state):
        """Append one transition, evicting the oldest when full."""
        self.buffer.append([curr_state, action, reward, next_state])
        # The deque already caps its own length; mirror it exactly instead
        # of incrementing and re-clamping by hand.
        self.len = len(self.buffer)
# + [markdown] id="qauv6GHB6TEJ" colab_type="text"
# ## Q Network
# + id="VadZr9_N6TEL" colab_type="code" colab={}
class DQN:
    """Thin wrapper around a Keras MLP mapping a state vector to one
    Q-value per discretized action (3**n_output_dim outputs).
    """

    def __init__(self, n_inputs, n_output_dim, learning_rate):
        self.learning_rate = learning_rate
        self.model = self.get_model(n_inputs, n_output_dim)

    def get_model(self, n_input_dim, n_output_dim):
        """Build and compile the Q-network MLP."""
        # NOTE(review): a 'relu' output layer clamps Q-values at zero, so
        # negative expected returns cannot be represented; 'linear' is the
        # usual choice for Q-value regression. Left unchanged to stay
        # compatible with previously trained checkpoints -- confirm before
        # retraining.
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Dense(32, input_dim=n_input_dim, activation='relu'))
        model.add(tf.keras.layers.Dense(64, activation='relu'))
        model.add(tf.keras.layers.Dense(128, activation='relu'))
        model.add(tf.keras.layers.Dense(256, activation='relu'))
        model.add(tf.keras.layers.Dense(3**n_output_dim, activation='relu'))
        model.summary()
        model.compile(
            optimizer=tf.train.AdamOptimizer(learning_rate=self.learning_rate, ),
            loss=tf.keras.losses.MSE
        )
        return model

    def predict(self, states):
        """Return Q-values for a batch of states."""
        return self.model.predict(states)

    def fit(self, states, targets, epochs=1, verbose=0):
        """Run supervised training toward the given Q-targets.

        Bug fix: the ``epochs``/``verbose`` arguments were previously
        ignored (hard-coded to 1 and 0); they are now forwarded to Keras.
        """
        self.model.fit(states, targets, epochs=epochs, verbose=verbose)
# + [markdown] id="g-MSGXm06TES" colab_type="text"
# ## Create the Model
# + id="S6tv0vTh6TEU" colab_type="code" colab={}
class DQNAgent:
    """Epsilon-greedy DQN agent over a discretized action space.

    Each of the ``action_dim`` continuous outputs is discretized to
    {-1, 0, 1}, giving 3**action_dim discrete actions overall.
    """

    def __init__(self, state_dim, action_dim, buffer_size=30000,
                 learning_rate=0.001, batch_size=64, gamma=0.9,
                 epsilon=1.00, epsilon_decay=0.99999, epsilon_min=0.001,
                 name='discreteDQN'):
        # Environment dimensions.
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Training hyperparameters.
        self.batch_size = batch_size
        self.gamma = gamma  # discount factor for future rewards
        # Epsilon-greedy exploration schedule (multiplicative decay
        # applied after every training step, floored at epsilon_min).
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.name = name;
        # Total number of discrete actions: 3 choices per output dimension.
        self.n_actions = 3**action_dim
        self.model = DQN(state_dim, action_dim, learning_rate)
        self.buffer = ReplayBuffer(buffer_size)
        self.actions = self.init_actions()

    def init_actions(self):
        """Enumerate all 3**action_dim action vectors with components in {-1, 0, 1}.

        Each index is decoded as a base-3 number, one digit per action
        dimension, with digits shifted from {0, 1, 2} to {-1, 0, 1}.
        """
        actions = []
        for action_idx in range(self.n_actions):
            prev_divisor = self.n_actions
            action = []
            for _ in range(self.action_dim):
                next_divisor = prev_divisor / 3
                val = int((action_idx % prev_divisor) / next_divisor) - 1
                action.append(val)
                prev_divisor = next_divisor
            actions.append(action)
        return actions

    def get_action_idx(self, state):
        """Pick an action index: uniform-random with prob. epsilon, else argmax-Q."""
        if (np.random.rand() < self.epsilon):
            return int(random.randrange(self.n_actions))
        else:
            # NOTE(review): assumes ``state`` is already batched for Keras
            # predict (shape (1, state_dim)) -- confirm at the call site.
            qvalues = self.model.predict(state);
            return np.argmax(qvalues)

    def get_action(self, action_idx):
        """Decode an action index into a 4-component action in {-1, 0, 1}.

        NOTE(review): this hard-codes action_dim == 4 (divisors 27/9/3)
        and duplicates the decoding already done in init_actions();
        ``self.actions[action_idx]`` would keep the two consistent.
        """
        action = []
        #1
        output = int(action_idx / 27) - 1
        rest = action_idx - 27 * int(action_idx / 27)
        action.append(output)
        #2
        output = int(rest / 9) - 1
        rest = rest - 9*int(rest / 9)
        action.append(output)
        #3
        output = int(rest / 3) - 1
        rest = rest - 3*int(rest / 3)
        action.append(output)
        #4
        action.append(rest -1)
        return action

    def train_model(self):
        """One gradient step on a minibatch from the replay buffer.

        Uses the one-step TD target r + gamma * max_a' Q(s', a'). Note
        there is no separate target network and no terminal-state
        masking: the bootstrap term is added for every transition.
        NOTE(review): ``buffer.sample`` may return fewer than batch_size
        rows early in training, in which case the range(self.batch_size)
        loop below would index past the batch -- confirm the training
        loop only calls this once the buffer holds >= batch_size items.
        """
        states, actions, rewards, next_states = self.buffer.sample(self.batch_size)
        qvalues = self.model.predict(next_states)
        qvalues = np.float32([np.amax(qvalue) for qvalue in qvalues])
        #print(qvalues.shape)
        targets = rewards + self.gamma * qvalues
        # Only the taken action's Q-value is moved toward the target; the
        # other outputs keep their current predictions (zero error).
        training_targets = self.model.predict(states)
        for i in range(self.batch_size):
            #print(actions[i])
            training_targets[i][actions[i]] = targets[i]
        self.model.fit(states, training_targets, epochs=1, verbose=0)
        # Decay exploration after every training step.
        if (self.epsilon > self.epsilon_min):
            self.epsilon = self.epsilon * self.epsilon_decay

    def store_transition(self, state, action, reward, next_state):
        """Add one (s, a, r, s') transition to the replay buffer."""
        self.buffer.add(state, action, reward, next_state)

    def save_model(self, n_episodes):
        """Save the Keras model to Google Drive, tagged with the episode count."""
        self.model.model.save('/content/gdrive/My Drive/cs4246_project/models/discrete_dqn/trained_models/' + self.name + '_ep' + str(n_episodes) + '.h5')
        pass

    def load_model(self, model_name):
        """Replace the wrapped model with one loaded from disk.

        NOTE(review): this assigns a raw Keras model to ``self.model``,
        while the rest of the class expects the DQN wrapper -- subsequent
        ``self.model.model`` accesses (e.g. save_model) would fail.
        """
        self.model = keras.models.load_model(model_name)
        pass
# + [markdown] id="gHEuO73V6TEd" colab_type="text"
# ## Setup Gym Environment and Initialize Model
# + id="JVp6RZN76TEg" colab_type="code" outputId="89859e68-f423-4db8-eeda-704f18661929" colab={"base_uri": "https://localhost:8080/", "height": 426}
# Build the environment and the agent; DQNAgent discretizes each continuous
# action dimension into {-1, 0, 1}.
env = gym.make('BipedalWalker-v2')
n_state_params = env.observation_space.shape[0]
# Number of continuous action dimensions (joints), NOT the discrete action count.
n_actions = env.action_space.shape[0]
agent = DQNAgent(n_state_params, n_actions)
BATCH_SIZE = 64  # replay-buffer size threshold before training starts
MAX_EPISODES = 100000
MAX_REWARD = 300  # presumably the solved-score target — not referenced below; TODO confirm
MAX_STEPS = env._max_episode_steps  # per-episode step cap taken from the gym env
# + [markdown] id="nRZVGKeT6TEu" colab_type="text"
# ## Run Model
# + id="dU6wltrp6TEv" colab_type="code" outputId="466bc9c1-a491-4537-f65e-cfff602bb02e" colab={"base_uri": "https://localhost:8080/", "height": 222}
# Episode record file: one [episode, total_reward] entry is appended per
# episode (hoisted out of the loop — it is a constant).
record_filename = '/content/gdrive/My Drive/cs4246_project/models/discrete_dqn/record.dat'

for ep in range(MAX_EPISODES):
    state = env.reset()
    total_reward = 0
    for t in range(MAX_STEPS):
        # The model expects a leading batch dimension for prediction.
        state = np.reshape(state, [1, n_state_params])
        action_idx = agent.get_action_idx(state)
        action = agent.get_action(action_idx)
        state = np.reshape(state, [n_state_params])
        next_state, reward, isDone, _ = env.step(action)
        # Store the discrete action INDEX (not the joint values) so the
        # TD update can index into the Q-value vector.
        agent.store_transition(state, action_idx, reward, next_state)
        state = next_state
        total_reward += reward
        if isDone:
            print("episode: {}/{}, score: {}, e: {:.2}".format(ep, MAX_EPISODES, total_reward, agent.epsilon))
            break
        # Train once enough transitions have accumulated.
        if agent.buffer.len > BATCH_SIZE:
            agent.train_model()
    # record rewards dynamically
    data = [ep, total_reward]
    with open(record_filename, "ab") as f:
        pickle.dump(data, f)
    if total_reward > 200:
        agent.save_model(ep)
        break
    # save model every 100 episodes
    if (ep % 100) == 0:
        agent.save_model(ep)
# Bug fix: this previously read `ienv.close()`, a NameError.
env.close()
# + id="gb57315V6TE-" colab_type="code" colab={}
import pandas as pd


def _load_records(path):
    """Yield pickled [episode, reward] entries from *path* until EOF."""
    with open(path, 'rb') as fh:
        while True:
            try:
                yield pickle.load(fh)
            except EOFError:
                return


# Materialize the whole reward history as a DataFrame for plotting/analysis.
data = pd.DataFrame(np.array(list(_load_records(record_filename))))
# + id="GVShNuG46TFG" colab_type="code" colab={}
| models/discrete_dqn/discrete_dqn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# importing libraries
from ipywidgets import *
from IPython.display import clear_output, display, Javascript
from tkinter import Tk, filedialog
import pandas as pd
import ipywidgets as widgets
import plotly.express as px
import random
import datetime
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, hamming_loss, classification_report, f1_score
from sklearn.ensemble import RandomForestClassifier
from skmultilearn.adapt import BRkNNaClassifier
from sklearn.model_selection import train_test_split
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import LabelPowerset
from skmultilearn.adapt import MLkNN
import seaborn as sns
import numpy as np
def loginLogout():
    """Stub authentication hook; always reports success."""
    return True
# +
# Output area used to show the rows containing nulls for the attribute
# currently selected in `columnNullList`.
out11 = widgets.Output()


def on_select_Column_Change(event):
    # Dropdown observer: value 0 is the blank entry; any other value is an
    # index into the null-column dict built by read_file (global nullDict).
    if (columnNullList.value !=0):
        dic = getNullDict()
        key_list = list(dic.keys())
        value_list = list(dic.values())
        # Map the selected numeric value back to its column name.
        position = value_list.index(columnNullList.value)
        s = key_list[position]
        getNullValuesfor(getGeneralDataset(),s)
    else:
        out11.clear_output()


# NOTE(review): `options={'',0}` is a *set* literal, not a dict — it relies on
# ipywidgets accepting an arbitrary iterable of options; a mapping like
# {'': 0} was probably intended.  Confirm before changing.
columnNullList = Dropdown(
    options={'',0},
    value=0,
    description='Choose attribute:',
    style= {'description_width':'auto'}
)
columnNullList.observe(on_select_Column_Change, names=['value'])
# +
# Functions
# 1 - Showing file chooser dialog -> Returns file selected
def select_files(b):
    """Open a native file-chooser dialog and return the selected path.

    ``b`` is the clicked ipywidgets Button; the chosen path is also stored
    on ``b.files``.  Requires a local display (uses tkinter).
    """
    #clear_output() # Button is deleted after it is clicked.
    root = Tk()
    root.withdraw() # Hide the main window.
    root.call('wm', 'attributes', '.', '-topmost', True) # Raise the root to the top of all windows.
    b.files = filedialog.askopenfilename(multiple=False) # List of selected files will be set button's file attribute.
    return b.files # Print the list of files selected.
# -------------------------------------------------------------------------------------------------------------------------
# 2 - Read dataset -> This function is so specific for "PersonTasks" dataset -> Returns dataset
def read_file(file_path):
    """Load a PersonTasks JSON export, reduce it to one volunteer and render
    dataset / visualization / statistics widgets into the global `output`.

    Reads globals: withConsole, outputText, columnNullList, output, out11.
    Writes global: nullDict.  Returns the filtered DataFrame on success, or
    False if the file does not look like a PersonTasks dataset.

    NOTE(review): block nesting below was reconstructed from control flow
    while documenting — verify against the original notebook.
    """
    # Reading dataset:
    if withConsole==True:
        outputText.append('>> Reading dataset file : '+str(file_path))
    dataset = pd.read_json(file_path,orient="columns");
    if checkIfPersonTasksDataset(dataset) == True:
        # Flattening dataset
        if withConsole==True:
            outputText.append('>> Flattening dataset file : '+str(file_path))
        dataset = showJSONDataset(file_path)
        # Check how many people in the dataset
        numberOfPersons = dataset.personID.unique();
        if withConsole==True:
            outputText.append('>> Found '+str(len(numberOfPersons))+" volunteer(s) in dataset")
        # Takes the first person
        selectedPersonID = numberOfPersons[0]
        if withConsole==True:
            if (len(numberOfPersons)>1):
                outputText.append('>> Filtering dataset to one volunteer which personID = '+str(selectedPersonID))
        # Filter dataset
        dataset = dataset[dataset.personID==selectedPersonID]
        # Replacing personID by 1 and adding personName with a random number from namesList list
        # dataset = dataset.drop(["personID"],axis=1)
        dataset.personID = dataset.personID.replace(selectedPersonID,1)
        # NOTE(review): unlike the other console messages, this append is not
        # guarded by withConsole — confirm whether that is intended.
        outputText.append('>> Changing personID from '+str(selectedPersonID)+' to 1')
        namesList = ['Jack','Jordan','Axel','Ian','Luis','Nicolas',
                     'Sophia','Charlotte','Scarlett','Ivy','Lucy','Ariana']
        name = random.choice(namesList)
        dataset['personName'] = name
        if withConsole==True:
            outputText.append('>> Adding personName attribute with name : '+str(name))
        # Visualizing personID, taskType1, taskType2 and taskDuration
        if withConsole==True:
            outputText.append('>> Making a Sunburst Visualization for taskType1, taskType2 and taskDuration for : '+str(name))
        datasetVisualization = dataset[['personName','taskType1','taskType2','taskDuration']]
        fig = px.sunburst(datasetVisualization, path=[datasetVisualization["personName"].tolist(),
                                                      datasetVisualization["taskType1"].tolist(),
                                                      datasetVisualization["taskType2"].tolist()],
                          values=datasetVisualization["taskDuration"].tolist())
        fig.update_layout(showlegend=True,
                          title_text="Sunburst chart for: "+ name)
        # Statistics for all attributes
        if withConsole==True:
            outputText.append('>> Making some statistics for: '+str(name))
        datasetNumericStatistics = dataset.describe()
        datasetStatistics = dataset.describe(include='all')
        # Getting all Null/NaN columns which null/nan values != 0 and show them in columnNullList drop down list
        allNaNNullValues = showNullNaNValues(dataset)
        dropDownValue = 1;
        allNaNNullValuesNotZero = allNaNNullValues[allNaNNullValues["# of null/nan values"]!=0].reset_index(drop=True)
        global nullDict
        nullDict = {'':0}
        # Map each null-bearing column name to a 1-based dropdown value.
        for i in range(len(allNaNNullValuesNotZero)):
            nullDict[str(allNaNNullValuesNotZero["Columns"][i])] = dropDownValue;
            dropDownValue = dropDownValue + 1
        columnNullList.options = nullDict
        columnNullList.value = 0
        with output:
            out1 = widgets.Output()
            out2 = widgets.Output()
            out3 = widgets.Output()
            out4 = widgets.Output()
            out5 = widgets.Output()
            out7 = widgets.Output()
            out10 = widgets.Output()
            if withConsole==True:
                # Console variant: an extra accordion pane with the log text.
                out6 = widgets.Output()
                finalResult = Accordion(children=[out6,out1,out2,out3], selected_index=None)
                finalResult.set_title(0,"Console:")
                finalResult.set_title(1,"Dataset View:")
                finalResult.set_title(2,"Visulaization:")
                finalResult.set_title(3,"Statistics:")
                # Tab inside Statistics accordion
                tabChildren = [out4,out5,out7,out10]
                tab = Tab(children=tabChildren)
                tab.set_title(0,'Numeric data:')
                tab.set_title(1,'All data:')
                tab.set_title(2,'Null/NaN values:')
                tab.set_title(3,'Null/NaN rows:')
                consoleText = Textarea(value="Console:", rows=10, disabled=True, layout=Layout(width="auto"))
                for text in outputText:
                    consoleText.value = consoleText.value + "\r" + text
            else:
                finalResult = Accordion(children=[out1,out2,out3], selected_index=None)
                finalResult.set_title(0,"Dataset View:")
                finalResult.set_title(1,"Visulaization:")
                finalResult.set_title(2,"Statistics:")
                # Tab inside Statistics accordion
                tabChildren = [out4,out5,out7,out10]
                tab = widgets.Tab(children=tabChildren)
                tab.set_title(0,'Numeric data:')
                tab.set_title(1,'All data:')
                tab.set_title(2,'Null/NaN values:')
                tab.set_title(3,'Null/NaN rows:')
            display(finalResult)
            with out1:
                display(dataset)
            with out2:
                display(fig)
            with out3:
                display(tab)
            with out4:
                display(datasetNumericStatistics)
            with out5:
                display(datasetStatistics)
            with out7:
                display(allNaNNullValues)
            with out10:
                # out11 is global
                display(VBox([columnNullList,out11]))
            if withConsole==True:
                with out6:
                    display(consoleText)
        '''
        display(title1)
        display(dataset)
        display(title2)
        display(fig)
        display(title3)
        display(datasetStatistics)
        '''
        # Reveal the preprocessing button now that a valid dataset is loaded.
        enableDisableButton(preprocessingButton,1)
        return dataset
    else:
        errorMessage = getMessage('error','You select a wrong dataset! Please select a suitable one!')
        enableDisableButton(preprocessingButton,0)
        with output:
            display(errorMessage)
        return False
def getNullDict():
    """Accessor for the module-level null-column dict built by read_file."""
    current = nullDict
    return current
def getNullValuesfor(dataset,columnName):
    """Render (in the global out11 area) the rows where *columnName* is null."""
    out11.clear_output()
    rowsWithNulls = dataset[dataset[columnName].isna()]
    with out11:
        display(rowsWithNulls)
# -------------------------------------------------------------------------------------------------------------------------
# 3 - After reading a dataset -> ordering events
def order_events(b):
    """Button handler: pick a .json file and load it WITHOUT console output.

    Sets globals outputText, withConsole and (on success) finalDataset.
    The button icon is swapped to a spinner while the dialog/load runs.
    """
    fileselect1.icon = "spinner"
    global outputText;
    outputText = [];
    # Checking if we want it with console or not
    global withConsole
    withConsole = False
    file = select_files(b)
    if '.json' in file:
        output.clear_output()
        # defining global variable in order accessing it in the next cells.
        global finalDataset
        finalDataset = read_file(file)
        fileselect1.icon = "file"
        return finalDataset
    else:
        output.clear_output()
        fileselect1.icon = "file"
        with output:
            informationMessage = getMessage('information','You have to choose a dataset with a JSON format!!')
            enableDisableButton(preprocessingButton,0)
            display(informationMessage)
# ------------------------------------------------------------------------------------------------------------------------
def order_eventsWithOutput(b):
    """Button handler: pick a .json file and load it WITH the step-by-step
    console (withConsole = True).

    Sets globals outputText, withConsole and (on success) finalDataset.
    """
    fileselect2.icon = "spinner"
    global outputText;
    outputText = [];
    # Checking if we want it with console or not
    global withConsole
    withConsole = True
    file = select_files(b)
    if '.json' in file:
        output.clear_output()
        # defining global variable in order accessing it in the next cells.
        global finalDataset
        finalDataset = read_file(file)
        fileselect2.icon = "info-circle"
        return finalDataset
    else:
        fileselect2.icon = "info-circle"
        output.clear_output()
        with output:
            informationMessage = getMessage('information','You have to choose a dataset with a JSON format!!')
            enableDisableButton(preprocessingButton,0)
            display(informationMessage)
# ------------------------------------------------------------------------------------------------------------------------
# 4 - Check if this is the target dataset "PersonTasks" dataset
def checkIfPersonTasksDataset(dataset):
    """Heuristic check that *dataset* is the expected "PersonTasks" table:
    exactly 13 columns, including taskType1, taskType2 and taskDuration."""
    cols = dataset.columns
    required = ('taskType1', 'taskType2', 'taskDuration')
    return len(cols) == 13 and all(name in cols for name in required)
# ------------------------------------------------------------------------------------------------------------------------
# 5 - Messages
def getMessage(messsageType,messageText):
    """Build a disabled full-width button styled as an error or info banner."""
    if messsageType == 'error':
        icon, style = "remove", "danger"
    else:
        icon, style = "info-circle", "info"
    return Button(description=str(messageText), icon=icon,
                  layout=Layout(width='auto'), button_style=style,
                  disabled=True)
# ------------------------------------------------------------------------------------------------------------------------
# 6 - These three functions to flatten dataset if it has nested JSON code:
# Knowing nested JSON columns in dataset
def extractNestedColumns(dataframe):
    """Return the columns whose first value looks like a nested JSON object
    (i.e. its string form contains '{'), matching the original heuristic."""
    return [col for col in dataframe.columns
            if "{" in str(dataframe[col][0])]
def splittingNestedColumns(dataframe):
    """Flatten one level of nested-dict columns into scalar columns.

    Each nested column ``c`` (as detected by extractNestedColumns) holding
    per-row dicts is expanded into columns named ``c_<key>``; the nested
    columns themselves are then dropped.

    Bug fix: when there were NO nested columns the function fell through to
    ``return newDataFrame`` with that name unbound and raised NameError; it
    now returns the input unchanged.  A duplicated column-copy loop (each
    new column was assigned twice) was also removed.
    """
    nestedColumns = extractNestedColumns(dataframe)
    if len(nestedColumns) == 0:
        return dataframe
    for col in nestedColumns:
        # Expand the per-row dicts of `col` into their own DataFrame.
        expanded = pd.DataFrame(list(dataframe[col]))
        # Prefix the new columns with the parent column name.
        expanded = expanded.rename(columns={k: "{}{}{}".format(col, "_", k)
                                            for k in expanded.columns})
        for newCol in expanded.columns:
            dataframe[newCol] = expanded[newCol]
    return dataframe.drop(nestedColumns, axis=1)
# A function to show [flattened] dataset
def showJSONDataset(datasetPath):
    """Read a JSON file into a DataFrame and flatten its nested columns."""
    raw = pd.read_json(datasetPath, orient='columns')
    return splittingNestedColumns(raw)
# ------------------------------------------------------------------------------------------------------------------------
# 7 - Function to enable/disable a button 0-> disable , 1->enable
def enableDisableButton(buttonName,value):
    """Show (value == 1) or visually hide (value == 0) a button.

    "Disabling" only collapses the layout to 0x0 — NOTE(review): BOTH
    branches set ``disabled=False``; the commented-out code suggests
    ``disabled=True`` was once intended for the value==0 path.  Confirm
    before relying on the ``disabled`` attribute elsewhere.
    """
    if value == 0:
        buttonName.layout = Layout(width="0",height="0")
        output2.clear_output()
        buttonName.disabled=False
        '''
        buttonName.button_style="danger"
        buttonName.disabled=True
        '''
    else:
        buttonName.layout = Layout(width="auto",height="auto")
        output2.clear_output()
        buttonName.disabled=False
        '''
        buttonName.button_style="success"
        buttonName.disabled=False
        '''
# -----------------------------------------------------------------------------------------------------------------------
# 8 - Function to show categorical/numeric attributes with null/nan values
def showNullNaNValues(dataset):
    """Summarize each column's kind and null count.

    Returns a DataFrame with columns "Columns" (name), "Types"
    ("Categorical" for object/bool dtypes, otherwise "Numeric") and
    "# of null/nan values".

    Fix: the original called ``dataset.convert_dtypes().dtypes`` and
    discarded the result — a no-op that was removed.
    """
    nullValues = list(dataset.isnull().sum())
    colType = ["Categorical" if (dataset[c].dtype == "O") | (dataset[c].dtype == "bool")
               else "Numeric"
               for c in dataset.columns]
    return pd.DataFrame({"Columns": list(dataset.columns),
                         "Types": colType,
                         "# of null/nan values": nullValues})
# -----------------------------------------------------------------------------------------------------------------------
# 9 - Function to get general dataset
def getGeneralDataset():
    """Return a defensive copy of the globally loaded dataset."""
    snapshot = finalDataset.copy()
    return snapshot
# -
# # <font color="blue"><center>Learning Volunteer Competencies</center></font>
# ***
# ## <img style="float: left" src="./icons/mode.png" width="25px" height="25px"/> Assessment Mode:
# +
# Assessment-mode selector; only one mode ('Learning Method') is defined.
assessmentMode = Dropdown(
    options={'Learning Method': 1},
    value=1,
    description='Assessment Mode : ',
    style= {'description_width':'auto'}
)
display(assessmentMode)
# -
# ***
#
# ## <img style="float: left" src="./icons/dataset.png" width="25px" height="25px"/> Dataset:
# +
# For printing widgets
output = widgets.Output()
# Buttons
layout = widgets.Layout(width='auto')
# Loads the selected dataset silently.
fileselect1 = Button(description="Select Dataset", icon="file", layout=layout, tooltip="Select dataset and show the result")
fileselect1.on_click(order_events)
# Same, but also shows a step-by-step console pane.
fileselect2 = Button(description="Select Dataset with console",
                     icon="info-circle", layout=layout, tooltip="Select dataset and show the result step by step")
fileselect2.on_click(order_eventsWithOutput)
buttons = HBox([fileselect1,fileselect2])
display(buttons)
display(output)
# -
# ***
# ## <img style="float: left" src="./icons/pre-processing.png" width="25px" height="25px"/> Pre-processing:
# +
# functions for preprocessing
def extractingDateTimeFeatures(dataset,column1,column2):
    """Parse two timestamp columns and insert split date/time columns.

    Inserts dateFrom/dateTo/timeFrom/timeTo at positions 7-10 and returns
    the (mutated) dataset.
    """
    dataset[column1] = pd.to_datetime(dataset[column1])
    dataset[column2] = pd.to_datetime(dataset[column2])
    # Split each timestamp into its date and time components via the
    # .dt accessor (same objects as calling .date()/.time() per row).
    dataset.insert(7, "dateFrom", dataset[column1].dt.date.tolist())
    dataset.insert(8, "dateTo", dataset[column2].dt.date.tolist())
    dataset.insert(9, "timeFrom", dataset[column1].dt.time.tolist())
    dataset.insert(10, "timeTo", dataset[column2].dt.time.tolist())
    return dataset
def deleteAttributes(dataset,listOfAttributes):
    """Return *dataset* without the given columns (original left untouched)."""
    return dataset.drop(columns=listOfAttributes)
def getTaskDuration(t1,t2):
    """Return the duration t2 - t1 in hours, with the sub-hour remainder
    expressed as a fraction rounded to two decimals."""
    total_seconds = (t2 - t1).total_seconds()
    whole_hours, leftover_seconds = divmod(total_seconds, 3600)
    fractional_hours = round(leftover_seconds / 3600, 2)
    return whole_hours + fractional_hours
# This function is specific for PersonTasks dataset
def handlingTaskDuration(dataset):
    """Fill missing taskDuration values from taskDateFrom/taskDateTo.

    Returns the mutated dataset and the number of rows that were filled.
    Assumes a default RangeIndex (rows are addressed as
    ``dataset.taskDuration[i]``) — TODO confirm callers reset the index.
    """
    numberOfRows = len(dataset);
    numberOfAffectedRows = 0;
    for i in range(numberOfRows):
        taskD = dataset.taskDuration[i];
        if (pd.isna(taskD)):
            # Recompute the duration (in fractional hours) from the two timestamps.
            t1 = pd.to_datetime(dataset.taskDateFrom[i]);
            t2 = pd.to_datetime(dataset.taskDateTo[i]);
            taskDuration = getTaskDuration(t1,t2)
            dataset.loc[i, "taskDuration"] = taskDuration;
            numberOfAffectedRows = numberOfAffectedRows + 1;
    return dataset, numberOfAffectedRows
# A function to return number of tasks that are accomplished between two specific hours
# Parameters hour1, hour2 from string datatype
def getNumberOfTasksBetweenTwoHours(dataset,column,personID,hour1,hour2):
    """Count rows of *personID* whose *column* time lies within [hour1, hour2].

    hour1/hour2 are "HH:MM:SS" strings; the window is sampled at 1-minute
    resolution (both endpoints inclusive), so only times landing exactly on
    a sampled minute are counted — unchanged from the original behavior.

    Perf fix: membership is now tested against a set (O(1) per row) instead
    of a list (O(window size) per row).
    """
    personRows = dataset[dataset.personID == personID]
    # All 1-minute ticks between the two bounds, as datetime.time objects.
    window = {ts.time() for ts in pd.date_range(hour1, hour2, freq="1min")}
    return len([t for t in personRows[column] if t in window])
def getGeneratedDataset(dataset):
    """Aggregate per-task rows into one feature row per volunteer.

    Each proportion column is a 0-1 share of that volunteer's tasks; the
    inline comments below describe the taskType1 groupings behind each
    feature.  Returns a DataFrame with one row per unique personID.
    """
    # Column of generated dataset
    personName = [];
    NumberOfAccomplishedTasks = [];
    OrganizationManagementProportion = [];
    TrainingLearningProportion = [];
    CoorporationProportion = [];
    VarietyWorkProportion = [];
    StartWorkingMorning = [];
    WorkingLongTime = [];
    taskLocation = [];
    numberOfVolunteers = list(dataset.personID.unique());
    for pId in numberOfVolunteers:
        # Taking tasks for each volunteer
        volunteerTasks = dataset[dataset.personID==pId]
        # Begin extracting new attributes
        # 1 - personName
        volunteerName = volunteerTasks.personName.unique()
        personName.append(volunteerName[0]);
        # 2- Knowing the number of tasks:
        totalNumberOfTasks = len(volunteerTasks.taskType1);
        NumberOfAccomplishedTasks.append(totalNumberOfTasks)
        # 3 - OrganizationManagementProportion -> Number of tasks that from
        # taskType1='Organisation | Verwaltung', 'Bewerb | Leistungsprüfung (inkl. Vorbereitung)' and 'Einsatz'
        # Number between 0-1.
        OrganizationManagementProportion.append((round(len(volunteerTasks[((volunteerTasks.taskType1=='Organisation | Verwaltung') |
                                                                           (volunteerTasks.taskType1=='Einsatz'))]) / totalNumberOfTasks,2)))
        # 4 - TrainingLearningProportion -> Number of tasks that from
        # taskType1='Schulung', 'Übung' and 'Kurs | Weiterbildung'
        # Number between 0-1.
        TrainingLearningProportion.append((round(len(volunteerTasks[((volunteerTasks.taskType1=='Schulung') |
                                                                     (volunteerTasks.taskType1=='Übung') |
                                                                     (volunteerTasks.taskType1=='Kurs | Weiterbildung'))]) / totalNumberOfTasks,2)))
        # 5 - CoorporationProportion -> Number of tasks that from
        # taskType1='Jugend'
        # Number between 0-1.
        CoorporationProportion.append(round(len(volunteerTasks[(volunteerTasks.taskType1=='Jugend')])/ totalNumberOfTasks,2))
        # 6 - VarietyWorkProportion -> Number of tasks that from
        # taskType1='Sonstiges'
        # Number between 0-1.
        VarietyWorkProportion.append(round(len(volunteerTasks[(volunteerTasks.taskType1=='Sonstiges')|
                                                              (volunteerTasks.taskType1=='Bewerb | Leistungsprüfung (inkl. Vorbereitung)') |
                                                              (volunteerTasks.taskType1=='Arbeiten in der FW')])/ totalNumberOfTasks,2))
        # 7 - StartWorkingMorning -> Number of tasks that from
        # timeFrom and using getNumberOfTasksBetweenTwoHours function "08:00:00" and "12:00:00"
        # Number between 0-1.
        StartWorkingMorning.append(round(getNumberOfTasksBetweenTwoHours(volunteerTasks,'timeFrom',pId,'08:00:00','12:00:00')/totalNumberOfTasks,2))
        # 8 - WorkingLongTime -> Number of tasks in which
        # Taking the mean of all volunteer's taskDuration and compare each task to it and take all them that >= mean
        # Number between 0-1 and Expresses about telorate work prassure.
        taskDurationMean = volunteerTasks.taskDuration.mean();
        WorkingLongTime.append(round(len(volunteerTasks[volunteerTasks.taskDuration >= taskDurationMean])/totalNumberOfTasks,2))
        # 9 - taskLocation -> Number of unique locations
        # Number between 0-1.
        # -> Mobility.
        taskLocation.append(round(len(volunteerTasks.taskLocation.unique())/totalNumberOfTasks,2))
    # ---------------------------------------------------------------------------------------------------------
    # -------------------------------------------GETTING DICTIONARY--------------------------------------------
    # ---------------------------------------------------------------------------------------------------------
    generatedDatasetDict = {
        'personName':personName,
        'NumberOfAccomplishedTasks':NumberOfAccomplishedTasks,
        'OrganizationManagementProportion':OrganizationManagementProportion,
        'TrainingLearningProportion':TrainingLearningProportion,
        'CoorporationProportion':CoorporationProportion,
        'VarietyWorkProportion': VarietyWorkProportion,
        'StartWorkingMorning': StartWorkingMorning,
        'WorkingLongTime': WorkingLongTime,
        'taskLocation':taskLocation
    }
    # ---------------------------------------------------------------------------------------------------------
    # -------------------------------------------GETTING DATAFRAME---------------------------------------------
    # ---------------------------------------------------------------------------------------------------------
    generatedDatasetDataFrame = pd.DataFrame(generatedDatasetDict)
    return generatedDatasetDataFrame
def getFinalPreprocessedDataset():
    """Return a defensive copy of the globally stored preprocessed dataset."""
    snapshot = finalPreprocessedDataset.copy()
    return snapshot
# ------------------------------------------------------------------------------------------------------------------------------
# activating prediction button
def activatePredictionButton():
    # Prediction is only allowed once preprocessing has run (its button is
    # disabled) AND the model run button is enabled.
    if (runModelButton.disabled==False) & (preprocessingButton.disabled==True):
        predictionButton.disabled=False
    else:
        predictionButton.disabled=True
def preprocessingOnClickEvent(event):
    """Run the full preprocessing pipeline on the loaded dataset and show a
    console log plus the resulting feature table in the `output2` area.

    Produces the global `finalPreprocessedDataset`, then disables the
    preprocessing button and (possibly) enables prediction.
    """
    preprocessedDataset = getGeneralDataset()
    preprocessingOutput = [];
    preprocessingOutput.append(">> Extracting time and date from taskDateFrom and taskDateTo attributes.")
    preprocessedDataset = extractingDateTimeFeatures(preprocessedDataset,'taskDateFrom','taskDateTo')
    listOfDeletedAttributes = ['taskName', 'iVolunteerSource', 'iVolunteerUUID', 'taskGeoInformation_latitude', 'taskGeoInformation_longitude','taskGeoInformation_gridID']
    preprocessedDataset = deleteAttributes(preprocessedDataset,listOfDeletedAttributes)
    preprocessingOutput.append(">> Deleting unnecessary "+str(len(listOfDeletedAttributes))+" attributes:")
    for attribute in listOfDeletedAttributes:
        preprocessingOutput.append(">> Deleting "+str(attribute)+".")
    nullValuesList = showNullNaNValues(preprocessedDataset)
    nullValuesList = nullValuesList[nullValuesList["# of null/nan values"] != 0].reset_index(drop=True)
    for i in range(len(nullValuesList)):
        # Singular/plural wording for the console message.
        if (nullValuesList["# of null/nan values"][i] > 1):
            preprocessingOutput.append('>> Found '+str(nullValuesList["# of null/nan values"][i])+' Null/NaN values in '+str(nullValuesList['Columns'][i])+' attribute')
        else:
            preprocessingOutput.append('>> Found '+str(nullValuesList["# of null/nan values"][i])+' Null/NaN value in '+str(nullValuesList['Columns'][i])+' attribute')
    preprocessingOutput.append('>> Handling Null/NaN values.')
    preprocessedDataset, numberOfAffectedRows = handlingTaskDuration(preprocessedDataset)
    preprocessingOutput.append('>> '+str(numberOfAffectedRows)+' rows has affected and handled!!')
    preprocessingOutput.append('>> Making dataset fit the learning model......')
    global finalPreprocessedDataset
    finalPreprocessedDataset = getGeneratedDataset(preprocessedDataset)
    with output2:
        out9 = widgets.Output()
        out12 = widgets.Output()
        preporcessingResult = Accordion(children=[out9,out12], selected_index=None)
        preporcessingResult.set_title(0,"Console for pre-processing step:")
        preporcessingResult.set_title(1,"Pre-processed Dataset:")
        preprocessingConsoleText = Textarea(value="Preprocessing Console:", rows=10, disabled=True, layout=Layout(width="auto"))
        for text in preprocessingOutput:
            preprocessingConsoleText.value = preprocessingConsoleText.value + "\r" + text
        display(preporcessingResult)
        with out9:
            display(preprocessingConsoleText)
        with out12:
            display(finalPreprocessedDataset)
    # One-shot: preprocessing cannot be re-run on the same dataset.
    preprocessingButton.disabled = True
    activatePredictionButton()
# +
output2 = widgets.Output()
# Starts hidden (0x0 layout); read_file calls enableDisableButton(..., 1)
# to reveal it once a valid dataset has been loaded.
preprocessingButton = Button(description="Make preprocessing", layout=Layout(width="0", height="0"), icon='gears')
preprocessingButton.on_click(preprocessingOnClickEvent)
display(preprocessingButton)
display(output2)
# -
# ***
# ## <img style="float: left" src="./icons/learningModel.png" width="25px" height="25px"/> Learning Model:
# +
# Functions for learning model
# When clicking EACH Classifier button
def check_event(clickEvent):
    """Toggle a classifier button between selected and unselected, keeping
    the global `choosenClassifier` list in sync with the highlighted state.
    """
    tempButton = clickEvent
    if (tempButton.icon == 'plus-square'):
        # Select: add to the chosen list (once) and highlight the button.
        if tempButton.description not in choosenClassifier:
            choosenClassifier.append(tempButton.description)
        tempButton.icon = 'minus-square'
        tempButton.button_style='primary'
    else:
        # Unselect: remove from the chosen list and clear the highlight.
        # NOTE(review): removing while iterating is only safe because each
        # description appears at most once in the list.
        tempButton.icon = 'plus-square'
        for i in choosenClassifier:
            if i == tempButton.description:
                choosenClassifier.remove(i)
        tempButton.button_style=''
    checkForStepper()
    checkIfOneSelected(listOfButtons)
def checkForStepper():
    # The k-NN "k" stepper widget is only relevant when one of the kNN-based
    # classifiers (MLkNN / BRkNN) is currently selected (highlighted).
    if (MLkNNButton.button_style == '') & (BRMLkNNButton.button_style == ''):
        k.disabled = True
    else:
        k.disabled = False
# ------------------------------------------------------------------------------------------------------------------------------
# When selecting "select all unselected" and "unselect all selected" buttons
def selectAllButtonOnClick(eventClick):
    """Bulk-toggle classifier buttons by simulating clicks on them, so the
    normal per-button handler (check_event) keeps all state consistent."""
    tempButton = eventClick
    if tempButton.description=='Select All Unselected':
        # Click every button that is not currently highlighted.
        for button in listOfButtons:
            if button.button_style == '':
                button.click()
    else:
        # "Unselect all selected": click every highlighted button.
        for button in listOfButtons:
            if button.button_style == 'primary':
                button.click()
    checkIfOneSelected(listOfButtons)
# ------------------------------------------------------------------------------------------------------------------------------
# Activating "Run Model" button
def checkIfOneSelected(listOfButtons):
    """Enable the global Run Model button iff at least one classifier button
    is selected (button_style == 'primary')."""
    has_selection = any(b.button_style == 'primary' for b in listOfButtons)
    runModelButton.disabled = not has_selection
# ------------------------------------------------------------------------------------------------------------------------------
# Filling dropListClassifier drop down list according to choosen classifiers
def fillDropListClassifier(dropList,choosenClassifier):
    """Rebuild a dropdown's options as {classifier name: index}.

    Fix: the original wrapped ``dropList.options = {}`` in a try/except
    whose handler repeated the exact same statement — a no-op guard that
    was removed.  The manual index counter is replaced by enumerate.
    """
    # Clear first (kept: assigning .options may have widget side effects).
    dropList.options = {}
    dropList.options = {str(name): idx for idx, name in enumerate(choosenClassifier)}
# ------------------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------LEARNING MODEL FUNCTIONS-------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------
# For determining quality for each classifier in order to give an advice
def determineQuality(accuracyValue):
    """Append a verbal quality label for *accuracyValue* to the global
    Quality list.

    Fix: the original bands (0.5 <= v <= 0.7 and 0.71 <= v <= 0.88) left
    the interval (0.70, 0.71) uncovered, so e.g. 0.705 fell through to the
    else branch and was labeled 'VERY GOOD'.  The bands are now contiguous.
    """
    if accuracyValue < 0.5:
        Quality.append('BAD')
    elif accuracyValue <= 0.7:
        Quality.append('So-So')
    elif accuracyValue <= 0.88:
        Quality.append('GOOD')
    else:
        Quality.append('VERY GOOD')
# ------------------------------------------------------------------------------------------------------------------------------
# PT_build_Model for Problem Transformation methods
def PT_build_Model(model,mlb_estimater,classifierName,algorithmName,Xtrain,ytrain,Xtest,ytest):
    """Fit a problem-transformation multi-label wrapper and record metrics.

    ``mlb_estimater`` is a skmultilearn wrapper class (BinaryRelevance,
    ClassifierChain or LabelPowerset) applied around the base ``model``.
    Appends the method name, classifier/algorithm names, accuracy, hamming
    loss and a quality label to the module-level result lists (Methods,
    ClassifierName, MethodsAlgorithms, accuracyArray, hammingArray,
    Quality) and returns the fitted wrapper.
    """
    Methods.append('Problem Transformation')
    clf = mlb_estimater(model,require_dense=[False,True])
    clf.fit(Xtrain,ytrain)
    # Prediction
    clf_predictions = clf.predict(Xtest)
    # Check the accuracy score and hamming loss
    acc = accuracy_score(ytest,clf_predictions)
    hmm = hamming_loss(ytest,clf_predictions)
    ClassifierName.append(classifierName)
    MethodsAlgorithms.append(algorithmName)
    accuracyArray.append(acc)
    hammingArray.append(hmm)
    determineQuality(acc)
    return clf
# ------------------------------------------------------------------------------------------------------------------------------
# AA_build_Model for Alogrithm Adaptation methods
def AA_build_Model(algorithm,kN,classifierName,algorithmName,X_train,y_train,X_test,y_test):
    """Fit an algorithm-adaptation multi-label classifier and record metrics.

    ``algorithm`` is a class (e.g. MLkNN or BRkNNaClassifier); MLkNN gets an
    extra smoothing parameter s=0.5.  Appends to the same module-level
    result lists as PT_build_Model and returns the fitted classifier.
    """
    Methods.append('Algorithm Adaptation')
    if algorithm==MLkNN:
        classifier = algorithm(k=kN,s=0.5)
    else:
        classifier = algorithm(k=kN)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    acc = accuracy_score(y_test,y_pred)
    hmm = hamming_loss(y_test,y_pred)
    ClassifierName.append(classifierName)
    MethodsAlgorithms.append(algorithmName)
    accuracyArray.append(acc)
    hammingArray.append(hmm)
    determineQuality(acc)
    return classifier
# ------------------------------------------------------------------------------------------------------------------------------
# Reading training dataset -> Specific function
def readTrainingDataset():
    """Load the bundled multi-label training set from its fixed JSON path."""
    trainingFrame = pd.read_json('./dataset/finalDataset1.json',orient='columns')
    return trainingFrame
# ------------------------------------------------------------------------------------------------------------------------------
# Mapping classifiers
def MapClassifierToModelsWithFinalResult(choosenClassifier,numberOfKNN,X_train, X_test, y_train, y_test):
    """Train every classifier the user selected and collect the scores.

    Parameters
    ----------
    choosenClassifier : list of str
        Display names of the classifiers selected in the UI.
    numberOfKNN : int
        `k` used by the kNN-based adaptation algorithms.
    X_train, X_test, y_train, y_test : array-like
        Train/test split of features and multi-label targets.

    Returns
    -------
    (list, pandas.DataFrame)
        The fitted classifiers (in selection order) and a summary DataFrame
        with method, classifier, algorithm, accuracy, hamming loss, quality.
    """
    listOfRunningClassifiers = []
    # Reset the module-level result lists that PT/AA_build_Model append to,
    # so repeated runs start from a clean slate.
    global Methods, ClassifierName, MethodsAlgorithms, accuracyArray, hammingArray, Quality
    Methods = []
    ClassifierName = []
    MethodsAlgorithms = []
    accuracyArray = []
    hammingArray = []
    Quality = []
    # UI name -> (base-estimator class, transformation wrapper,
    #             classifier label, algorithm label).
    # Classes (not instances) are stored so each run gets a fresh estimator.
    ptClassifiers = {
        'Binary Relevance with NomialNB': (MultinomialNB, BinaryRelevance, 'BinaryRelevance', 'MultinominalNB'),
        'Binary Relevance with RandomForestClassifier': (RandomForestClassifier, BinaryRelevance, 'BinaryRelevance', 'RandomForestClassifier'),
        'ClassifierChain with NomialNB': (MultinomialNB, ClassifierChain, 'ClassifierChain', 'MultinominalNB'),
        'Classifier Chain with RandomForestClassifier': (RandomForestClassifier, ClassifierChain, 'ClassifierChain', 'RandomForestClassifier'),
        'LabelPowerset with NomialNB': (MultinomialNB, LabelPowerset, 'LabelPowerset', 'MultinominalNB'),
        'LabelPowerset with RandomForestClassifier': (RandomForestClassifier, LabelPowerset, 'LabelPowerset', 'RandomForestClassifier'),
    }
    # UI name -> (adaptation algorithm class, label).
    aaClassifiers = {
        'MLkNN': (MLkNN, 'MLkNN'),
        'BRMLkNN': (BRkNNaClassifier, 'BRkNNaClassifier'),
    }
    for classifier in choosenClassifier:
        if classifier in ptClassifiers:
            baseModel, wrapper, clfName, algName = ptClassifiers[classifier]
            fitted = PT_build_Model(baseModel(), wrapper, clfName, algName, X_train, y_train, X_test, y_test)
            listOfRunningClassifiers.append(fitted)
        elif classifier in aaClassifiers:
            algorithm, algName = aaClassifiers[classifier]
            fitted = AA_build_Model(algorithm, numberOfKNN, algName, algName, X_train, y_train, X_test, y_test)
            listOfRunningClassifiers.append(fitted)
    # Making Final DataFrame
    dataDict = {'Method': Methods,
                'Classifier': ClassifierName,
                'Algorithm': MethodsAlgorithms,
                'Accuracy': accuracyArray,
                'Hamming Loss': hammingArray,
                'Quality': Quality}
    resultDf = pd.DataFrame(dataDict)
    # Returning final result and list of running classifiers
    return listOfRunningClassifiers, resultDf
# ------------------------------------------------------------------------------------------------------------------------------
def appendTextToConsole(textAreaPane, Text):
    """Append *Text* to the text-area widget's value on a new console line."""
    # "\r" is what the Textarea widget renders as a line break here.
    textAreaPane.value = "\r".join((textAreaPane.value, Text))
# ------------------------------------------------------------------------------------------------------------------------------
# Train model
def runModelClickEvent(event):
    """'Run Model' button handler.

    Loads the training dataset, splits it 80/20, trains every classifier the
    user selected, and fills the accordion with a console log, the result
    table, and color-coded advice about which classifier to use.
    """
    # Spinner icon signals training in progress; restored to "gear" at the end.
    runModelButton.icon = "spinner"
    output3.clear_output()
    # Refresh the prediction dropdown with the classifiers being trained.
    fillDropListClassifier(dropListClassifier,choosenClassifier)
    # For console
    out13 = widgets.Output()
    trainingResult = Accordion(children=[out13], selected_index=0)
    trainingConsoleText = Textarea(value="Training Console:", rows=10, disabled=True, layout=Layout(width="auto"))
    trainingResult.set_title(0,"Training Console:")
    with output3:
        display(trainingResult)
    with out13:
        display(trainingConsoleText)
    # Reading training dataset
    trainingDataset = readTrainingDataset()
    datasetRows = trainingDataset.shape[0]
    datasetColumns = trainingDataset.shape[1]
    appendTextToConsole(trainingConsoleText,"▶ Reading training dataset with size: "+str(datasetRows)+" rows * "+str(datasetColumns)+" columns")
    # Spliting training dataset: 8 behavioural feature columns (X) and the
    # 5 competency labels (y) — column names must match the dataset schema.
    X = trainingDataset[["NumberOfAccomplishedTasks", "OrganizationManagementProportion", "TrainingLearningProportion", "CoorporationProportion", "VarietyWorkProportion", "StartWorkingMorning", "WorkingLongTime", "taskLocation"]].values
    y = trainingDataset[["SelfManagement","WillingnessToLearn","Energy","Persistence","Mobility"]].values
    appendTextToConsole(trainingConsoleText,"▶ Droping personID column..")
    appendTextToConsole(trainingConsoleText,"▶ Spliting dataset into training set (80%) and test set (20%):")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    trainingsetRows = X_train.shape[0]
    trainingsetColumns = X_train.shape[1]
    testsetRows = X_test.shape[0]
    testsetColumns = X_test.shape[1]
    appendTextToConsole(trainingConsoleText," ▶▶ Training set size: "+str(trainingsetRows)+" rows * "+str(trainingsetColumns)+" columns")
    appendTextToConsole(trainingConsoleText," ▶▶ Test set size: "+str(testsetRows)+" rows * "+str(testsetColumns)+" columns")
    # Keep the trained classifiers at module level so predictionClickEvent
    # can use them later.
    global listOfTrainedRunningClassifiers
    numberOfKNN = k.value
    listOfTrainedRunningClassifiers, adviceResult = MapClassifierToModelsWithFinalResult(choosenClassifier,numberOfKNN,X_train, X_test, y_train, y_test)
    for item in choosenClassifier:
        appendTextToConsole(trainingConsoleText,"▶ Training model with "+str(item))
    # Partition results by quality verdict so the advice buttons appear
    # best-to-worst with matching icons/colors.
    adviceResultVeryGood = adviceResult[adviceResult.Quality=='VERY GOOD'].reset_index(drop=True)
    adviceResultGood = adviceResult[adviceResult.Quality=='GOOD'].reset_index(drop=True)
    adviceResultSoSo = adviceResult[adviceResult.Quality=='So-So'].reset_index(drop=True)
    adviceResultBAD = adviceResult[adviceResult.Quality=='BAD'].reset_index(drop=True)
    listOfPrettyGoodResult = []
    for vgr in range(len(adviceResultVeryGood)):
        text = str(adviceResultVeryGood.Classifier[vgr]) + ' with ' + str(adviceResultVeryGood.Algorithm[vgr])
        listOfPrettyGoodResult.append(Button(description=text,layout=Layout(width="auto"),disabled=True,icon='star', button_style='success'))
    for gr in range(len(adviceResultGood)):
        text = str(adviceResultGood.Classifier[gr]) + ' with ' + str(adviceResultGood.Algorithm[gr])
        listOfPrettyGoodResult.append(Button(description=text,layout=Layout(width="auto"),disabled=True,icon='star-half-o', button_style='primary'))
    for ssr in range(len(adviceResultSoSo)):
        text = str(adviceResultSoSo.Classifier[ssr]) + ' with ' + str(adviceResultSoSo.Algorithm[ssr])
        listOfPrettyGoodResult.append(Button(description=text,layout=Layout(width="auto"),disabled=True,icon='star-o', button_style='warning'))
    for br in range(len(adviceResultBAD)):
        text = str(adviceResultBAD.Classifier[br]) + ' with ' + str(adviceResultBAD.Algorithm[br])
        listOfPrettyGoodResult.append(Button(description=text,layout=Layout(width="auto"),disabled=True,icon='close', button_style='danger'))
    out14 = widgets.Output()
    out15 = widgets.Output()
    trainingResult.children = [out13,out14,out15]
    trainingResult.set_title(1,"Result:")
    trainingResult.set_title(2,"Advice which classifier you should use:")
    # Enable prediction only when at least one classifier was trained.
    if len(listOfTrainedRunningClassifiers)!=0:
        predictionButton.disabled=False
    else:
        predictionButton.disabled=True
    activatePredictionButton()
    with out14:
        display(adviceResult)
    with out15:
        display(HBox(listOfPrettyGoodResult,layout=Layout(width='100%',display='inline-flex',flex_flow='row wrap')))
    runModelButton.icon = "gear"
# +
output3 = widgets.Output()
# Shared layout for the classifier toggle buttons (two per row).
layout = Layout(display='flex',
                align_items='center',
                width='50%')
# Disabled until at least one classifier is selected (handled elsewhere).
runModelButton = Button(description="Run Model", tooltip="Learn Model on different Algorithms to give you an advice",
                        layout = Layout(width="auto"), icon="gear", disabled=True)
runModelButton.on_click(runModelClickEvent)
myModelAlgorithms = Label(value='▶ Choose Classifiers you want to train model on : ', layout= Layout(width="auto"))
mymodelLabel = Label(value='▶ Now run learning model : ', layout= Layout(width="auto"))
# One toggle button per available classifier; check_event adds/removes the
# button's description from `choosenClassifier`.
BRNBButton = Button(description="Binary Relevance with NomialNB", disabled=False, layout = layout, icon="plus-square")
BRRFButton = Button(description="Binary Relevance with RandomForestClassifier", disabled=False, layout = layout, icon="plus-square")
CCNBButton = Button(description="ClassifierChain with NomialNB", disabled=False, layout = layout, icon="plus-square")
CCRFButton = Button(description="Classifier Chain with RandomForestClassifier", disabled=False, layout = layout, icon="plus-square")
LPNBButton = Button(description="LabelPowerset with NomialNB", disabled=False, layout = layout, icon="plus-square")
LPRFButton = Button(description="LabelPowerset with RandomForestClassifier", disabled=False, layout = layout, icon="plus-square")
MLkNNButton = Button(description="MLkNN", disabled=False, layout = layout, icon="plus-square")
BRMLkNNButton = Button(description="BRMLkNN", disabled=False, layout = layout, icon="plus-square")
# k for the kNN-based adaptation algorithms.
k = IntSlider(description= "k =", min=2, max=10, step=1, readout=True, value=3, disabled = True)
listOfButtons = [BRNBButton, BRRFButton, CCNBButton, CCRFButton, LPNBButton, LPRFButton, MLkNNButton, BRMLkNNButton]
# NOTE(review): `global` at module level is a no-op; kept only because the
# event handlers elsewhere reassign these names.
global choosenClassifier
choosenClassifier = []
global allSelected
BRNBButton.on_click(check_event)
BRRFButton.on_click(check_event)
CCNBButton.on_click(check_event)
CCRFButton.on_click(check_event)
LPNBButton.on_click(check_event)
LPRFButton.on_click(check_event)
MLkNNButton.on_click(check_event)
BRMLkNNButton.on_click(check_event)
# Arrange the toggle buttons in pairs per method family.
BR = HBox([BRNBButton, BRRFButton])
CC = HBox([CCNBButton, CCRFButton])
LP = HBox([LPNBButton, LPRFButton])
KN = HBox([MLkNNButton, BRMLkNNButton])
selectAllButton = Button(description="Select All Unselected", layout=Layout(width="auto"), icon='check-square')
selectAllButton.on_click(selectAllButtonOnClick)
UnselectAllButton = Button(description="UnSelect All Selected", layout=Layout(width="auto"), icon='square-o')
UnselectAllButton.on_click(selectAllButtonOnClick)
display(HBox([myModelAlgorithms,selectAllButton,UnselectAllButton,k]))
display(VBox([BR,CC,LP,KN]))
display(HBox([mymodelLabel,runModelButton]))
display(output3)
# -
# ***
# ## <img style="float: left" src="./icons/prediction.png" width="35px" height="35px"/> Predict volunteer competency:
# +
# Functions for prediction
# Getting the selected value from dropListClassifier
def changeEvent(change):
    """Dropdown observer: remember the currently selected classifier index.

    Stores the dropdown's value in the module-level ``selection`` so that
    ``predictionClickEvent`` can look up the matching trained classifier.
    (The previous version also built unused key/value lists and scanned the
    values with a loop; a membership test is sufficient.)
    """
    global selection
    # Only record values that actually belong to the dropdown's options.
    if dropListClassifier.value in dropListClassifier.options.values():
        selection = dropListClassifier.value
    return selection
# -----------------------------------------------------------------------------------------------------------------------------
# Prediction Button Click
def predictionClickEvent(event):
    """'Predict' button handler.

    Runs the classifier chosen in the dropdown on the pre-processed volunteer
    data and renders one button per competency: green check for predicted 1,
    red cross for predicted 0.
    """
    # clearing output4
    output4.clear_output()
    # Get pre-processed dataset (assumes a single volunteer row — TODO confirm)
    generatedDataset = getFinalPreprocessedDataset()
    # Use exactly the feature columns the models were trained on.
    columnsWithoutOutput = generatedDataset[["NumberOfAccomplishedTasks", "OrganizationManagementProportion", "TrainingLearningProportion", "CoorporationProportion", "VarietyWorkProportion", "StartWorkingMorning", "WorkingLongTime", "taskLocation"]].values
    predictionInput = np.array(columnsWithoutOutput).reshape((1, -1))
    # Prediction process using the chosen classifier.
    predictionClassifier = listOfTrainedRunningClassifiers[selection]
    predictedRows = predictionClassifier.predict(predictionInput).toarray()
    # One sample was fed in, so use its (last) row of 0/1 label predictions.
    finalPredictionResult = predictedRows[-1]
    volumnteerName = generatedDataset.personName[0]
    predictionLabel = Label(value="Prediction "+str(volumnteerName)+"'s competencies using "+str(listOfTrainedRunningClassifiers[selection]),
                            layout = Layout(width='auto'))
    # Label order matches the y columns used at training time; the previous
    # version repeated the same if/else block five times.
    competencyNames = ["Self Management", "Willingness To Learn", "Energy", "Persistence", "Mobility"]
    listOfPredictedResult = []
    for counter, competency in enumerate(competencyNames[:len(finalPredictionResult)]):
        if finalPredictionResult[counter] == 0:
            listOfPredictedResult.append(Button(description=competency, layout=Layout(width="auto"), disabled=True, icon='close', button_style='danger'))
        else:
            listOfPredictedResult.append(Button(description=competency, layout=Layout(width="auto"), disabled=True, icon='check', button_style='success'))
    out16 = widgets.Output()
    out17 = widgets.Output()
    with output4:
        display(VBox([out16, out17]))
    with out16:
        display(predictionLabel)
    with out17:
        display(HBox(listOfPredictedResult, layout=Layout(width='100%', display='inline-flex', flex_flow='row wrap')))
# +
output4 = widgets.Output()
layout = Layout(width="auto")
# Dropdown of trained classifiers; real options are filled in by
# fillDropListClassifier once training has run.
dropListClassifier = Dropdown(
    options={'':0},
    value=0,
    description='Choose Algorithm/Estimator:',
    layout = layout,
    style= {'description_width':'auto', 'description_color':'red'}
)
# Track the user's choice in the module-level `selection` variable.
dropListClassifier.observe(changeEvent, names=['value'])
# Disabled until at least one classifier has been trained.
predictionButton = Button(description="Predict volunteer competencies", layout =Layout(width="auto"), icon='question-circle', disabled=True)
predictionButton.on_click(predictionClickEvent)
items = HBox([dropListClassifier])
display(items)
display(predictionButton)
display(output4)
# -
# !pip freeze > requirements.txt
| Learning Volunteer Competencies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shahid-coder/python-colab/blob/main/12_python_dates.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="aMoTqmrwBAys"
# ## Python Datetime
#
# + [markdown] id="I1mVwwrfBHWJ"
# ## Python Dates
# A date in Python is not a data type of its own, but we can import a module named datetime to work with dates as date objects.
# + colab={"base_uri": "https://localhost:8080/"} id="z4VY2wx1AzWp" outputId="911170e8-88f5-4737-efc9-8a7d2493ae39"
import datetime

# Current local date and time: year, month, day, hour, minute, second, microsecond.
x = datetime.datetime.now()
print(x)
# + [markdown] id="ae-hVcMmBWvb"
# ## Date Output
# When we execute the code from the example above the result will be:
#
# 2021-06-07 11:18:30.935113
# The date contains year, month, day, hour, minute, second, and microsecond.
#
# The datetime module has many methods to return information about the date object.
#
# Here are a few examples, you will learn more about them later in this chapter:
# + colab={"base_uri": "https://localhost:8080/"} id="QGVEDcNJBOMs" outputId="5fbc42f3-02a7-4bdb-d966-ae3230ce89f2"
import datetime

x = datetime.datetime.now()
print(x.year)            # the year as an int
print(x.strftime("%A"))  # full weekday name, e.g. "Monday"
# + [markdown] id="jotahJuLBoWb"
# ## Creating Date Objects
# To create a date, we can use the datetime() class (constructor) of the datetime module.
#
# The datetime() class requires three parameters to create a date: year, month, day.
# + colab={"base_uri": "https://localhost:8080/"} id="P0iUcgkFBkW_" outputId="b13f64a8-0c43-43b1-f3a5-9b590cd3a0cd"
import datetime

# Construct a specific date; the time components default to 0.
x = datetime.datetime(2020, 5, 17)
print(x)
# + [markdown] id="JKOER6HwByZB"
# The datetime() class also takes parameters for time and timezone (hour, minute, second, microsecond, tzone), but they are optional, and has a default value of 0, (None for timezone).
#
#
# + [markdown] id="dlZEhjFpB1ve"
# ## The strftime() Method
# The datetime object has a method for formatting date objects into readable strings.
#
# The method is called strftime(), and takes one parameter, format, to specify the format of the returned string:
# + colab={"base_uri": "https://localhost:8080/"} id="27Pz7HwtBtk7" outputId="cfeb92d7-f59b-48b1-9f67-4f4239921007"
# Display the name of the month:
import datetime

x = datetime.datetime(2018, 6, 1)
print(x.strftime("%B"))  # full month name, e.g. "June"
# + [markdown] id="d5mALVRvCNj2"
# python-datetime reference :[https://www.w3schools.com/python/python_datetime.asp](https://www.w3schools.com/python/python_datetime.asp)
| 12_python_dates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Analyzers Basic Tutorial
#
# This Jupyter notebook will give a basic tutorial on how to use PyDeequ's Analyzers module.
# +
from pyspark.sql import SparkSession, Row, DataFrame
import json
import pandas as pd
import sagemaker_pyspark
import pydeequ
# Put the SageMaker-provided Spark jars on the driver classpath and pull in
# the Deequ jars via Maven coordinates (excluding the conflicting f2j artifacts).
classpath = ":".join(sagemaker_pyspark.classpath_jars())

spark = (SparkSession
    .builder
    .config("spark.driver.extraClassPath", classpath)
    .config("spark.jars.packages", pydeequ.deequ_maven_coord)
    .config("spark.jars.excludes", pydeequ.f2j_maven_coord)
    .getOrCreate())
# -
# ### We will be using the Amazon Product Reviews dataset -- specifically the Electronics subset.
# +
# Electronics subset of the Amazon product reviews dataset (public S3 bucket).
df = spark.read.parquet("s3a://amazon-reviews-pds/parquet/product_category=Electronics/")

df.printSchema()
# +
from pydeequ.analyzers import *
# Run a batch of Deequ analyzers over the reviews in a single pass:
# row count, completeness and approximate distinct count of review_id,
# mean star rating, share of reviews rated >= 4.0, and two correlations.
analysisResult = AnalysisRunner(spark) \
    .onData(df) \
    .addAnalyzer(Size()) \
    .addAnalyzer(Completeness("review_id")) \
    .addAnalyzer(ApproxCountDistinct("review_id")) \
    .addAnalyzer(Mean("star_rating")) \
    .addAnalyzer(Compliance("top star_rating", "star_rating >= 4.0")) \
    .addAnalyzer(Correlation("total_votes", "star_rating")) \
    .addAnalyzer(Correlation("total_votes", "helpful_votes")) \
    .run()

# Collect the computed metrics into a Spark DataFrame and display them.
analysisResult_df = AnalyzerContext.successMetricsAsDataFrame(spark, analysisResult)
analysisResult_df.show()
# -
# Same metrics, materialized as a pandas DataFrame for notebook display.
analysisResult_pandas_df = AnalyzerContext.successMetricsAsDataFrame(spark, analysisResult, pandas=True)
analysisResult_pandas_df
# ### For more info ... look at full list of analyzers in `docs/analyzers.md`
| tutorials/analyzers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chatbot using RNN
# ### Northwestern University - Fall 2017
# ### Student: <NAME>
# ### E-mail: <EMAIL>
# # Introduction
#
# The idea of this project is to create a simple chatbot by training a Recurrent Neural Networks.
#
# ## Chatbots
# There are many ways one can go about creating a chat-bot. For example, many chatbots rely on pre-defined rules to answer questions. Those can work well but require intense human work to create as many rules as possible.
#
# Machine learning greatly simplifies this task by enabling learning from a pre-existing conversation corpus. The two main types of ML chatbots are:
#
# - Retrieval-based: answer questions by choosing from one of the answers available in the data-set.
# - Generative: generates the conversation dialog word by word based on the query. The generated sentence is normally not included in the original data-set.
#
# For this project, I decided to create a chatbot using the generative approach, which normally makes more mistakes, such as grammar mistakes, but can respond to a broader set of questions and contexts.
#
# ## Dataset
# The model was trained using the [Cornell Movie Dialog Corpus](http://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html), that contains a collection of fictional conversations extracted from raw movie scripts. The data was split in 108136 conversation pairs for training, and 30000 conversation pairs for testing.
#
# ## Implementation Architecture
# Here I use a Recurrent Neural Network to train on the data set. More specifically I use a seq2seq model with bucketing and attention mechanism, which is described in more details below:
#
# ### Seq2Seq:
# Sequence to Sequence RNN models are composed of two main components: encoder and decoder. The encoder is responsible for reading the input, word by word, and generating a hidden state that "represents" the input. The decoder outputs words according to the hidden states generated by the encoder. The following image gives a general idea of this architecture:
# <img src="seq2seq.png" alt="seq2seq" style="width: 700px;"/>
#
# ### Padding and Bucketing:
# One of the limitations of the simple Seq2Seq architectures is that they have fixed-size input and output. Therefore we need to use padding and special symbols to deal with the fact that both input and output sentences can have different length (the ones used here are: EOS = "End of sentence", PAD = "Filler", GO = "Start decoding", plus a special symbol for unknown words: UNK).
#
# To efficiently handle sentences with different lengths the bucketing method is used. This model uses buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]. This means that if the input is a sentence with 3 tokens, and the corresponding output is a sentence with 6 tokens, then they will be put in the first bucket and padded to length 5 for encoder inputs, and length 10 for decoder inputs.
#
# ### Attention mechanism:
# The attention mechanism tries to address the following limitations:
# - The decoder is not aware of which parts of the encoding are relevant at each step of the generation.
# - The encoder has limited memory and can't "remember" more than a single fixed size vector.
#
# The attention model comes between the encoder and the decoder and helps the decoder to pick only the encoded inputs that ar important for each step of the decoding process.
#
# <img src="attention.jpg" alt="Attention mechanism" style="width: 400px;"/>
# # Code
#
# The code will be split between:
# - Preprocessing data (tokenizing, creating vobabulary, transforming input from words to word ids)
# - Training
# - Testing
#
# ##### Software requirements
# - Python 3.6.2
# - Numpy
# - TensorFlow
#
# **_Note_**: the following code is largely based on the code from the chatbot tutorial by [<NAME>](http://suriyadeepan.github.io/2016-06-28-easy-seq2seq/) and uses the more general seq2seq model provided by the [Google Tensorflow tutorial on NMT](https://github.com/tensorflow/nmt), which is imported from a separate code file.
# ### Preprocessing
# +
# IMPORTS
import os
import numpy as np
import re
import tensorflow as tf
import math
from seq2seq_model import Seq2SeqModel
# +
# GLOBAL VARIABLES AND PARAMS
# encoding (question) / decoding (answer) data paths; the *.id files hold the
# same text converted to space-separated token ids
TRAIN_END_PATH = os.path.join('data', 'train.enc')
TRAIN_DEC_PATH = os.path.join('data', 'train.dec')
TEST_END_PATH = os.path.join('data', 'test.enc')
TEST_DEC_PATH = os.path.join('data', 'test.dec')
TRAIN_END_ID_PATH = os.path.join('data', 'train.enc.id')
TRAIN_DEC_ID_PATH = os.path.join('data', 'train.dec.id')
TEST_END_ID_PATH = os.path.join('data', 'test.enc.id')
TEST_DEC_ID_PATH = os.path.join('data', 'test.dec.id')
# vocabulary paths: one word per line, line number = token id
VOCAB_ENC_PATH = os.path.join('data', 'vocab.enc')
VOCAB_DEC_PATH = os.path.join('data', 'vocab.dec')
MAX_VOCAB_SIZE = 20000
# data utils: the capturing group makes re.split keep punctuation as tokens
SPLIT_REGEX = re.compile("([.,!?\"':;)(])")
PAD_TOKEN = "_PAD"
START_TOKEN = "_GO"
END_TOKEN = "_EOS"
UNKNOWEN_TOKEN = "_UNK"
# special tokens occupy ids 0..3 of every vocabulary
INIT_VOCAB = [PAD_TOKEN, START_TOKEN, END_TOKEN, UNKNOWEN_TOKEN]
# args: buckets are (max encoder length, max decoder length) pairs
BUCKETS = [(5, 10), (10, 15), (20, 25), (40, 50)]
LSTM_LAYES = 3
LAYER_SIZE = 256
BATCH_SIZE = 64
LEARNING_RATE = 0.5
LEARNING_RATE_DECAY_FACTOR = 0.99
MAX_GRADIENT_NORM = 5.0
STEP_CHECKPOINTS = 5
MAX_ITERATIONS = 1000
# pre training: checkpoint + vocabularies of an already-trained model
TRAINED_MODEL_PATH = 'pre_trained'
TRAINED_VOCAB_ENC = os.path.join('pre_trained', 'vocab.enc')
TRAINED_VOCAB_DEC = os.path.join('pre_trained', 'vocab.dec')
# +
# SIMPLE TOKENIZER
def tokenize(sentense):
    """Split a raw sentence into word and punctuation tokens."""
    words = sentense.strip().split()
    # Split each whitespace-delimited chunk on punctuation; the capturing
    # group in SPLIT_REGEX keeps the punctuation marks as their own tokens,
    # and empty fragments are dropped.
    return [piece for word in words for piece in re.split(SPLIT_REGEX, word) if piece]
# +
# CREATING VOCABULARY
def create_vocab(data_path, vocab_path):
    """Build (or reload) a frequency-ranked vocabulary for *data_path*.

    The vocabulary starts with the special tokens in INIT_VOCAB, is capped at
    MAX_VOCAB_SIZE entries, and is persisted one word per line to
    *vocab_path*. Returns a dict mapping word -> integer id (line rank).
    """
    if os.path.exists(vocab_path):
        # BUG FIX: the previous version returned an empty dict when the
        # vocabulary file already existed; reload the saved word list instead
        # so repeated notebook runs get a usable vocabulary.
        print("file ", vocab_path, " already exists")
        with open(vocab_path, 'r') as vocab_file:
            vocab_list = [line.strip() for line in vocab_file]
    else:
        # Count token frequencies over the whole data file.
        counts = {}
        with open(data_path, 'r') as data_file:
            for line in data_file:
                for token in tokenize(line):
                    counts[token] = counts.get(token, 0) + 1
        # Special tokens first, then words by descending frequency;
        # trim to the maximum vocabulary size.
        vocab_list = INIT_VOCAB + sorted(counts, key=counts.get, reverse=True)
        vocab_list = vocab_list[:MAX_VOCAB_SIZE]
        print("final vocabulary size for ", data_path, " = ", len(vocab_list))
        # save to file
        with open(vocab_path, 'w') as vocab_file:
            for word in vocab_list:
                vocab_file.write(word + "\n")
    # word -> id, in final ranked order
    return dict((word, idx) for (idx, word) in enumerate(vocab_list))
# +
# TRANSFORM WORDS IN DATA TO IDS
def from_text_data_to_id_list(data_path, ouput_path, vocab):
    """Translate a text file into a file of space-separated token ids.

    Words missing from *vocab* map to the _UNK token's id. The output file is
    only written if it does not already exist.
    """
    if os.path.exists(ouput_path):
        print("file ", ouput_path, " already exists")
        return
    unknown_id = vocab.get(UNKNOWEN_TOKEN)
    with open(data_path, 'r') as data_file:
        with open(ouput_path, 'w') as ouput_file:
            for line in data_file:
                ids = [str(vocab.get(word, unknown_id)) for word in tokenize(line)]
                ouput_file.write(" ".join(ids) + "\n")
# +
# DATA PREPROCESSING
def preprocess_data():
    """Build both vocabularies and convert all four data files to id files."""
    # Vocabularies are built from the *training* data only; the test files
    # are converted with the same vocabularies.
    encoding_vocab = create_vocab(TRAIN_END_PATH, VOCAB_ENC_PATH)
    decoding_vocab = create_vocab(TRAIN_DEC_PATH, VOCAB_DEC_PATH)
    from_text_data_to_id_list(TRAIN_END_PATH, TRAIN_END_ID_PATH, encoding_vocab)
    from_text_data_to_id_list(TRAIN_DEC_PATH, TRAIN_DEC_ID_PATH, decoding_vocab)
    from_text_data_to_id_list(TEST_END_PATH, TEST_END_ID_PATH, encoding_vocab)
    from_text_data_to_id_list(TEST_DEC_PATH, TEST_DEC_ID_PATH, decoding_vocab)
    print("Data preprocessing complete.")

preprocess_data()
# -
# ### Training
def read_data(source_path, target_path):
    """Read parallel id files and group each (source, target) pair by bucket.

    A pair goes into the first bucket large enough to hold both sides
    (strictly, since the decoder side gets an EOS appended); pairs longer
    than every bucket are dropped.
    """
    data_set = [[] for _ in BUCKETS]
    eos_id = INIT_VOCAB.index(END_TOKEN)
    with tf.gfile.GFile(source_path, mode="r") as src:
        with tf.gfile.GFile(target_path, mode="r") as tgt:
            src_line, tgt_line = src.readline(), tgt.readline()
            # An empty string means EOF on that side; stop at the shorter file.
            while src_line and tgt_line:
                src_ids = [int(tok) for tok in src_line.split()]
                tgt_ids = [int(tok) for tok in tgt_line.split()] + [eos_id]
                for bucket_id, (src_size, tgt_size) in enumerate(BUCKETS):
                    if len(src_ids) < src_size and len(tgt_ids) < tgt_size:
                        data_set[bucket_id].append([src_ids, tgt_ids])
                        break
                src_line, tgt_line = src.readline(), tgt.readline()
    return data_set
# +
# CREATE MODEL
def create_model(forward_only):
    """Build the bucketed attention seq2seq model.

    forward_only=True builds the inference-only graph (no gradient updates);
    False builds the training graph.
    """
    # TODO: remove
    return Seq2SeqModel(
        MAX_VOCAB_SIZE, MAX_VOCAB_SIZE, BUCKETS, LAYER_SIZE, LSTM_LAYES, MAX_GRADIENT_NORM,
        BATCH_SIZE, LEARNING_RATE, LEARNING_RATE_DECAY_FACTOR, forward_only)
# +
# TRAIN MODEL
def train():
    """Train the seq2seq model for MAX_ITERATIONS steps.

    Buckets are sampled in proportion to their share of the training data;
    per-step perplexity is printed as the training signal.
    """
    # setup config to use BFC allocator
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    with tf.Session(config=config) as sess:
        print("creating model...")
        model = create_model(forward_only = False)
        sess.run(tf.global_variables_initializer())
        # Read data into buckets and compute their sizes.
        dev_set = read_data(TEST_END_ID_PATH, TEST_DEC_ID_PATH)
        train_set = read_data(TRAIN_END_ID_PATH, TRAIN_DEC_ID_PATH)
        train_bucket_sizes = [len(train_set[b]) for b in range(len(BUCKETS))]
        train_total_size = float(sum(train_bucket_sizes))
        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in range(len(train_bucket_sizes))]
        print("Running main loop...")
        for current_step in range(MAX_ITERATIONS):
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number = np.random.random_sample()
            bucket_id = min([i for i in range(len(train_buckets_scale))
                             if train_buckets_scale[i] > random_number])
            # Get a batch and make a step.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, False)
            # Print statistics; exp() overflows for very large losses, hence the cap.
            perplexity = math.exp(step_loss) if step_loss < 300 else float('inf')
            print ("global step %d perplexity %.2f" % (model.global_step.eval(), perplexity))

# train model
train()
# -
# ### Testing
#
#
# +
# LOAD PRE-TRAINED MODEL
def load_vocabulary_list(vocabulary_path):
    """Return the vocabulary file's entries, stripped, in file order."""
    with open(vocabulary_path, mode="r") as vocab_file:
        return [entry.strip() for entry in vocab_file]
def load_pre_trained_model(session):
    """Restore the pre-trained seq2seq model and its vocabularies.

    Returns (model, enc_vocab, rev_dec_vocab) where enc_vocab maps encoder
    words -> ids and rev_dec_vocab maps decoder ids -> words.
    """
    print("Loading vocab...")
    enc_vocab_list = load_vocabulary_list(TRAINED_VOCAB_ENC)
    dec_vocab_list = load_vocabulary_list(TRAINED_VOCAB_DEC)
    # BUG FIX: the encoder vocabulary was previously built from the *decoder*
    # word list (enc_vocab_list was loaded but never used), so input words
    # were mapped to ids from the wrong vocabulary.
    enc_vocab = dict([(x, y) for (y, x) in enumerate(enc_vocab_list)])
    rev_dec_vocab = dict(enumerate(dec_vocab_list))
    print("Creting model...")
    model = create_model(forward_only = True)
    print("Loading saved model...")
    ckpt = tf.train.get_checkpoint_state(TRAINED_MODEL_PATH)
    model.saver.restore(session, ckpt.model_checkpoint_path)
    return (model, enc_vocab, rev_dec_vocab)
# +
# DECODING
def decode(sentence, model, session, enc_vocab):
    """Encode *sentence* and greedily decode a reply.

    Returns the decoder output as a list of token ids (argmax at each step).
    Raises ValueError if the sentence is longer than the largest bucket.
    """
    # Get token-ids for the input sentence; unknown words map to _UNK.
    token_ids = [enc_vocab.get(w, INIT_VOCAB.index(UNKNOWEN_TOKEN)) for w in tokenize(sentence)]
    # Smallest bucket whose encoder side fits the sentence.
    bucket_id = min([b for b in range(len(BUCKETS)) if BUCKETS[b][0] > len(token_ids)])
    # Get a 1-element batch to feed the sentence to the model.
    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
        {bucket_id: [(token_ids, [])]}, bucket_id)
    # Get output logits for the sentence (forward-only step).
    _, _, output_logits = model.step(session, encoder_inputs, decoder_inputs,
                                     target_weights, bucket_id, True)
    # This is a greedy decoder - outputs are just argmaxes of output_logits.
    outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
    return outputs
# +
# CHATBOT MAIN APP
def run_chatbot():
    """Interactive REPL: read a line from stdin, decode a reply, repeat.

    Stops when the user enters an empty line.
    """
    print("Starting chatbot...")
    with tf.Session() as sess:
        model, enc_vocab, rev_dec_vocab = load_pre_trained_model(sess)
        model.batch_size = 1  # We decode one sentence at a time.
        # Decode from standard input.
        sentence = input("Chatbot started, ask anything!\n> ")
        while sentence:
            outputs = decode(sentence, model, sess, enc_vocab)
            # If there is an EOS symbol in outputs, cut them at that point.
            if INIT_VOCAB.index(END_TOKEN) in outputs:
                outputs = outputs[:outputs.index(INIT_VOCAB.index(END_TOKEN))]
            # Map decoder ids back to words and print the reply.
            print(" ".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs]))
            sentence = input("> ")

run_chatbot()
# -
# # Evaluation
# In the last stop of training the model reported global [perplexity](https://www.tensorflow.org/tutorials/recurrent#loss_function) around 8.3
#
# Follows an image with a sample conversation, to help evaluate the qualitative side of the model:
#
# <img src="chat_sample.png" alt="Sample conversation"/>
# # Conclusion
# This project showed how a simple generative chatbot can be created using a Recurrent Neural Net. The final results indicates that the model can perform reasonably well for a open conversation chatbot, even though it still makes grammar mistakes and sometimes gives very vague or unrelated answers.
#
# ## Project Challenges
# Here I present some of the challenges I faced when trying to train the model for this project:
#
# - Initially I tried to use the [ubuntu-dialog corpus](http://dataset.cs.mcgill.ca/ubuntu-corpus-1.0/). The dataset proved to be very large (a few Gb) and it took several hours just to preprocess the data. I decided that this corpus would be too complex to train on and decided to use the The Cornell Movie Dialog Corpus.
# - The Cornell Movie Dialog Corpus is a smaller and more manageable dataset, but training the model still took several hours (almost two days), while consuming a big part of my computer's resources.
| .ipynb_checkpoints/chatbot_using_RNN-checkpoint.ipynb |