code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# Merge the weather count features from Raul's final dataset into the 03/18
# dataset and write the combined result out.
raul = pd.read_csv('./finaldataset.csv')
print(raul.columns)
final0318 = pd.read_csv('./final_0318.csv')
print(final0318.columns)
# keep only the city key plus the three weather count features
weather = raul[['city', 'Coldday_Count', 'Hotday_Count', 'Rainday_Count']]
weather.head()
# merge joins on the columns shared by both frames — presumably 'city';
# verify both files spell/case city names identically before relying on this
final2 = final0318.merge(weather)
final2.columns
final2.head()
# NOTE(review): this also writes the DataFrame index as an unnamed column;
# index=False may be wanted — confirm downstream consumers.
final2.to_csv('final_0427.csv')
| Weather - Anika.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#library imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#read data
data_folder = ('../Data/')
train = pd.read_csv(data_folder+'monthly_train.csv')
test = pd.read_csv(data_folder+'test.csv')
# drop the index column the CSV writer saved
train = train.drop(['Unnamed: 0'],axis=1)
train.head()
# features for item price: per-item min/max of the mean price over all months
grouped_price = train.sort_values('date_block_num').groupby(['item_id'], as_index=False).agg({'mean_item_price':[np.min, np.max]})
# min max over time
grouped_price.columns = ['item_id', 'item_price_min', 'item_price_max']
train = pd.merge(train, grouped_price, on='item_id', how='left')
# price increase/decrease of the current month relative to the item's
# historical min/max
train['price_inc'] = train['mean_item_price'] - train['item_price_min']
train['price_dec'] = train['item_price_max'] - train['mean_item_price']
#
# +
# create lag features: item_cnt shifted back by 1 and 2 months within each
# (shop, category, item) series
for i in range(1,3):
    #new feature name
    feature = ('item_cnt_lag'+str(i))
    train[feature] = train.sort_values('date_block_num').groupby(['shop_id', 'item_category_id', 'item_id'])['item_cnt'].shift(i)
    # fill nan values with 0 (months with no prior history)
    train[feature] = train[feature].fillna(0)
# +
# trend = (current count minus both lagged counts) divided by 4
train['trend'] = train['item_cnt']
for i in range(1,3):
    feature = ('item_cnt_lag'+str(i))
    train['trend'] = train['trend'] - train[feature]
train['trend'] = train['trend'] / 4
# -
# Add seasonality features derived from the month index date_block_num
# (block 0 == January 2013)
train['year'] = train['date_block_num'].apply(lambda x: ((x//12) + 2013))
train['month'] = train['date_block_num'].apply(lambda x: (x % 12 + 1))
# days in the month (non-leap-year February, since only %m is parsed)
train['no-of-days-in-month'] = pd.to_datetime(train['month'],
format='%m').dt.days_in_month
train.head()
# time-based split: blocks 3-26 train, 27-31 validation, 33 test
training = train.query('date_block_num >= 3 and date_block_num < 27').copy()
training = training.dropna()
validation = train.query('date_block_num >= 27 and date_block_num < 32').copy()
validation = validation.dropna()
testing = train.query('date_block_num == 33').copy()
testing = testing.dropna()
# +
# Mean-encode categorical features so the model captures them better;
# one-hot encoding would take too much space.
# Encodings are computed on `training` only and merged into all three splits.
mean_col_list = ['shop_id','item_id','year','month']
for i in mean_col_list:
    groupedmean= training.groupby([i]).agg({'item_cnt_month': ['mean']})
    groupedmean.columns = [i+'_mean']
    groupedmean.reset_index(inplace=True)
    training = pd.merge(training, groupedmean, on=[i], how='left')
    validation = pd.merge(validation, groupedmean, on=[i], how='left')
    testing = pd.merge(testing, groupedmean, on=[i], how='left')
# add mean encoding on the (item_id, shop_id) level
groupedmean = training.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})
groupedmean.columns = ['itemshop_mean']
groupedmean.reset_index(inplace=True)
# BUG FIX: these merges previously used on=[i] — the leftover loop variable
# ('month'), which is not a column of this groupedmean frame and would raise
# a KeyError. Merge on the actual grouping keys instead.
training = pd.merge(training, groupedmean, on=['item_id', 'shop_id'], how='left')
validation = pd.merge(validation, groupedmean, on=['item_id', 'shop_id'], how='left')
testing = pd.merge(testing, groupedmean, on=['item_id', 'shop_id'], how='left')
# -
# make train, validation, test to run models.
# X drops the target and the time index; Y is the target as int.
X_train = training.drop(['item_cnt_month', 'date_block_num'], axis=1)
Y_train = training['item_cnt_month'].astype(int)
X_validation = validation.drop(['item_cnt_month', 'date_block_num'], axis=1)
Y_validation = validation['item_cnt_month'].astype(int)
X_test = testing.drop(['item_cnt_month', 'date_block_num'], axis=1)
Y_test = testing['item_cnt_month'].astype(int)
# save files so the modelling steps can be rerun without redoing the features
X_train.to_csv("X_train.csv",index=False)
Y_train.to_csv("Y_train.csv",index=False)
X_validation.to_csv("X_validation.csv",index=False)
Y_validation.to_csv("Y_validation.csv",index=False)
X_test.to_csv("X_test.csv",index=False)
Y_test.to_csv("Y_test.csv",index=False)
from matplotlib.pylab import rcParams
import xgboost as xgb
from xgboost import XGBRegressor
from xgboost import plot_importance
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
def cal_rmse(actual, pred):
    """Return the root-mean-squared error between actual and predicted values.

    Parameters
    ----------
    actual, pred : array-like of numbers, same length.

    Returns
    -------
    float : sqrt(mean((actual - pred) ** 2)).

    Implemented directly with numpy (equivalent to
    sqrt(sklearn.metrics.mean_squared_error)) so it has no extra dependency.
    """
    actual = np.asarray(actual, dtype=float)
    pred = np.asarray(pred, dtype=float)
    return np.sqrt(np.mean((actual - pred) ** 2))
# apply random forest: fit on the training split, report RMSE on all splits
rf = RandomForestRegressor(n_estimators=100, max_depth=8, n_jobs=-1)
rf.fit(X_train, Y_train)
rf_trainpred = rf.predict(X_train)
rf_validationpred = rf.predict(X_validation)
rf_testpred = rf.predict(X_test)
print('RMSE Train:', cal_rmse(Y_train, rf_trainpred))
print('RMSE Validation :', cal_rmse(Y_validation, rf_validationpred))
# BUG FIX: was `rf_test` (undefined, NameError) — use the test predictions
# computed above.
print('RMSE Test :', cal_rmse(Y_test, rf_testpred))
# XGBoost regressor with manually chosen hyper-parameters.
# NOTE(review): in xgboost >= 2.0, eval_metric and early_stopping_rounds moved
# from fit() to the constructor — confirm the installed version before running.
xgb_model = XGBRegressor(max_depth=8,
n_estimators=500,
min_child_weight=1000,
colsample_bytree=0.7,
subsample=0.7,
eta=0.3,
seed=0,
tree_method = 'exact')
# fit with early stopping monitored on the validation split
xgb_model.fit(X_train,
Y_train,
eval_metric="rmse",
eval_set=[(X_train, Y_train), (X_validation, Y_validation)],
verbose=20,
early_stopping_rounds=10)
# A single hyper-parameter setting for cross validation.
# NOTE(review): the original passed a grid of candidate lists plus raw X/y and
# an eval_set to xgb.cv, which is not the cv API (and referenced an undefined
# num_boost_round, with a missing comma — a SyntaxError). xgb.cv takes one
# param dict and a DMatrix; loop over combinations externally (or use
# sklearn's GridSearchCV) for a full grid search.
params = {
    'min_child_weight': 1000,
    'gamma': 1,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'max_depth': 8,
    'eta': 0.3,
    'objective': 'reg:squarederror',
}
dtrain = xgb.DMatrix(X_train, label=Y_train)
cv_results = xgb.cv(
    params,
    dtrain,
    num_boost_round=500,
    seed=42,
    nfold=5,
    metrics={'rmse'},
    early_stopping_rounds=10
)
cv_results
# NOTE(review): `best_estimator_` exists only on sklearn search wrappers such
# as GridSearchCV, not on a fitted XGBRegressor — that call (an AttributeError)
# was removed.
plt.rcParams["figure.figsize"] = (20, 10)
plot_importance(xgb_model)
plt.show()
# BUG FIX: predictions used the undefined name `xgbmodel` (the model is
# `xgb_model`), the validation print referenced a misspelled
# `xgbmodel_validaionpred` and had unbalanced parentheses plus a stray
# line continuation.
xgbmodel_trainpred = xgb_model.predict(X_train)
xgbmodel_valpred = xgb_model.predict(X_validation)
xgbmodel_testpred = xgb_model.predict(X_test)
print('RMSE Train :', cal_rmse(Y_train, xgbmodel_trainpred))
print('RMSE Validation :', cal_rmse(Y_validation, xgbmodel_valpred))
print('RMSE Test :', cal_rmse(Y_test, xgbmodel_testpred))
| Code/3feature_engg_run_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''venv'': venv)'
# name: python3
# ---
# Similar script to algo_refit.ipynb but for the thermo dataset
import os
# the experiment helpers resolve paths relative to the repository root
os.chdir("..")
from thermo_final_exp import *
context_step_perc()
# +
# Per-run experiment results: each dict holds, per method
# ('our' / 'zero' / 'static'), that run's metrics, plus two counts under
# 'conf cont.' (presumably confirmed/contested conflicts — confirm upstream).
data = [
{'conf cont.': [102.0, 5.0], 'our': [0.12055278664163231, 0.1937425829762061, 0.12397287058250026], 'zero': [0.48506137545806205, 0.46239495604413394], 'static': [0.17279415099796075, 0.44000000000000006, 0.18528040562422426]},
{'conf cont.': [109.0, 11.0], 'our': [0.13515159849498995, 0.207615673296937, 0.14179413868516846], 'zero': [0.49399580327459147, 0.44871285464108723], 'static': [0.16179670528800297, 0.38577097992898274, 0.1823276804634261]},
{'conf cont.': [76.0, 5.0], 'our': [0.11939648546789565, 0.3520085835967822, 0.1337552569573331], 'zero': [0.5035982843572738, 0.47251197050805943], 'static': [0.15508159403964403, 0.5259662816869224, 0.17797571056108097]},
{'conf cont.': [109.0, 11.0], 'our': [0.13515159849498995, 0.207615673296937, 0.14179413868516846], 'zero': [0.49399580327459147, 0.44871285464108723], 'static': [0.16179670528800297, 0.38577097992898274, 0.1823276804634261]},
{'conf cont.': [119.0, 8.0], 'our': [0.12249249834646983, 0.16149809840362817, 0.1249495440193617], 'zero': [0.5193244603330698, 0.48661110850106537], 'static': [0.1597291847580029, 0.3815228446565024, 0.17370043892483752]},
{'conf cont.': [120.0, 7.0], 'our': [0.1245834146405309, 0.2140646615486359, 0.12951545187168628], 'zero': [0.48562163059625635, 0.45885508402795877], 'static': [0.15628117474856226, 0.42469079663159265, 0.1710754058759734]},
{'conf cont.': [75.0, 4.0], 'our': [0.14223961229797316, 0.26622682434370765, 0.14851744581927617], 'zero': [0.47894336819931244, 0.4546930710752966], 'static': [0.16909290125745494, 0.4641692039081263, 0.1840334735435649]},
{'conf cont.': [98.0, 7.0], 'our': [0.1559433868529646, 0.30367819351704345, 0.16579237396390317], 'zero': [0.5302355592330806, 0.49488652195087524], 'static': [0.22990098948421478, 0.41374436062590053, 0.24215721422699382]},
{'conf cont.': [103.0, 1.0], 'our': [0.10687256854514744, 0.20701119923657063, 0.10783543999410343], 'zero': [0.4616039533426714, 0.4571654537912995], 'static': [0.11798848664980015, 0.5114167213875447, 0.12177145044535538]},
{'conf cont.': [93.0, 10.0], 'our': [0.14785480138049348, 0.3249967497224602, 0.1650530487923349], 'zero': [0.5720456395758249, 0.5165072279665215], 'static': [0.17805140317594723, 0.4603148087324855, 0.20545561730764997]},
]
import numpy as np
import math
# Transpose into rr[method][metric_index] -> list of that metric across runs.
rr = {}
for r in data:
    for k, v in r.items():
        if k == "conf cont.":
            # append the total of the two counts so it is aggregated as well
            # (mutates the row in place)
            v.append(v[0] + v[1])
        if k not in rr:
            rr[k] = []
            for x in v:
                rr[k].append([x])
        else:
            for idx, x in enumerate(v):
                rr[k][idx].append(x)
# res[method][metric_index] = [mean, standard deviation] across the runs
res = {}
for k, v in rr.items():
    res[k] = []
    for x in v:
        sd = math.sqrt(np.var(x))
        mean = sum(x) / len(x)
        res[k].append([mean, sd])
print(res)
# +
# FULL TEST: run full_test() 10 times and average each returned metric vector.
from thermo_final_exp import *
import numpy as np
sum_res = {}
for i in range(0,10):
    r = full_test()
    print(r)
    if len(sum_res) == 0:
        # first run: initialise the accumulators
        for k, v in r.items():
            sum_res[k] = np.array(v)
    else:
        for k, v in r.items():
            sum_res[k] += np.array(v)
# divide by the number of runs to get the mean per metric
for k,v in sum_res.items():
    sum_res[k] = v / 10
print (sum_res)
# +
import json
import numpy as np
import math
from typing import Dict, List, Tuple
import copy
from itertools import combinations
from collections import Counter
import logging
from datetime import timedelta
from config import *
from utils import *
from main import *
from GtConflictFinder import GtConflictFinder
from ConflictDetector import ConflictDetector, ConflictPredicator
from GridPatternBuilder import build_habit_groups

root_folder = os.path.join(DATA_ROOT, THERMO_ROOT)
# homes included in this experiment (the rest kept for reference, commented out)
test_projects = [
    "HS3301",
    # "HS3304",
    # "HS3307",
    # "HS3308",
    "HS3309",
    # "HS3310",
    # "HS3311",
    # "HS3313",
    # "HS5302",
    # "HS5303",
    # "HS5304",
    # "HS5305",
    "HS5309",
    # "HS5314",
]
# earlier hand-built setpoint mapping, kept for reference
# all_setpoints = [18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 24.0, 24.5]
# setpoint_mapping = {
#     "thermostat": {"off": 0, 0: "off"}
# }
# for idx, point in enumerate(all_setpoints):
#     state_full_name = "auto" + DEVICE_MULTI_STATE_SUFFIX + "#" + str(point)
#     setpoint_mapping["thermostat"][state_full_name] = idx + 1
#     setpoint_mapping["thermostat"][idx + 1] = state_full_name
setpoint_mapping = {
    "thermostat": thermo_state
}
time_step = 1
# discretization of the context space used for pattern mining:
# time of day in 60-minute bins, outdoor temperature in 5-degree bins
ctx_info = ContextAccessor({
    TIME_CTX: {
        "range" : (0, 24*60),
        "interval" : 60,
    },
    "OutTemp#NUM": {
        "range": (-25, 35),
        "interval": 5,
    },
    THERMO_MODE_CTX: {},
    # WEEKDAY_CTX: {
    #     "range": (0, 6),
    #     "interval": 1,
    # },
})
ccp_alpha = 5e-5
capacity = {
    "thermostat":0
}
BOOL_SIM = False
BOOL_UMASS= False
test_dates = generate_test_date(root_folder, test_projects, test_ratio = 0.4, true_random=False, is_sim=BOOL_SIM, is_umass=BOOL_UMASS)
grid_pattern_cfg = {
    # "time_delta" : timedelta(minutes=10),
    "context_info" : ctx_info,
    "alpha": ccp_alpha,
    "test_dates": test_dates,
    "device_state_map": setpoint_mapping,
    "time_delta": timedelta(minutes=time_step),
}
# +
# Load grid data per home, mine habit groups from it, then predict conflict
# scenarios between the homes.
habit_groups = {}
grid_data = {}
for p in test_projects:
    grid_data[p] = test_umass(
        root_folder = root_folder,
        test_project=p,
        ctx_info=ctx_info,
        grid_cfg = grid_pattern_cfg,
        is_sim=BOOL_SIM,
        is_umass=BOOL_UMASS)
for p in test_projects:
    habit_groups[p] = build_habit_groups(grid_data[p], ccp_alpha)
c_detector = ConflictDetector(ctx_info, capacity)
final_conflicts = c_detector.predict_conflict_scenarios(habit_groups)
print("Final predicted conflicts" + str({x:len(final_conflicts[x]) for x in final_conflicts}))
# +
# Inspect the mined habit groups and print the predicted thermostat conflicts
# between the HS3301/HS3309 pair, ordered by probability.
for home, groups in habit_groups.items():
    print("The habit groups found in {}".format(home))
    # for d, d_groups in groups.items():
    #     print(d + " " + str(len(d_groups)))
    #     for g in d_groups:
    #         print(ctx_info.coor_box_to_range(g["box"][0] + g["box"][1]))
    #         print(g["dis"])
probs_i = [(x["prob"], i) for i, x in enumerate(final_conflicts["thermostat"])]
# print(sorted(probs_i))
for x in sorted(probs_i):
    c = final_conflicts["thermostat"][x[1]]
    ctx_str = ctx_info.coor_box_to_range(c["box"])
    p = c["prob"]
    us = list(c["users"])
    users = us[0][0] + " " + us[1][0]
    if users == "HS3301 HS3309":
        print("{} conflicts {} at {}".format(users, p, ctx_str))
# for home, groups in habit_groups.items():
#     for d, d_groups in groups.items():
#         for i, g in enumerate(d_groups):
#             for j in range(i+1, len(d_groups)):
#                 box_1 = g["box"][0] + g["box"][1]
#                 box_2 = d_groups[j]["box"][0] + d_groups[j]["box"][1]
#                 if does_intersect(box_1, box_2):
#                     print("!!!")
# +
import copy
# Make ground truth: find conflicts directly from the recorded device events
# over the test dates, on a context grid that includes the weekday dimension.
gt_ctx_info = ContextAccessor({
    TIME_CTX: {
        "range" : (0, 24*60),
        "interval" : 60,
    },
    "OutTemp#NUM": {
        "range": (-25, 35),
        "interval": 5,
    },
    # THERMO_MODE_CTX: {},
    WEEKDAY_CTX: {
        "range": (0, 6),
        "interval": 1,
    },
})
device_events = {}
for p in test_projects:
    ctx_evts, device_evts = load_processed(root_folder, p, is_sim=BOOL_SIM, is_umass=BOOL_UMASS)
    device_events[p] = device_evts
gtconflict_cfg = {
    "context_info": gt_ctx_info,
    "capacity": capacity
}
# test_dates = {}
conflict_finder = GtConflictFinder(gtconflict_cfg)
# NOTE(review): ctx_evts here holds only the *last* project's context events
# from the loop above — confirm get_Gt_conflict is meant to receive that.
gt_conflicts, test_state_cnt = conflict_finder.get_Gt_conflict(ctx_evts, device_events, test_dates)
print(len(gt_conflicts))
# +
# Count ground-truth conflicts per device and per (context state, user pair),
# keeping only states observed at least MIN_TEST_OBS times in the test data.
conflict_device = {
    d:[]
    for d in capacity
}
conflict_state_device = {
    d:{}
    for d in capacity
}
MIN_TEST_OBS = 19
cnts = []
for c in gt_conflicts:
    d = c["device"]
    conflict_device[d].append(c)
    s = gt_ctx_info.get_coor_by_ctx(c["ctx"])
    users = frozenset(c["device_states"].keys())
    cnts.append(test_state_cnt[d][users][s])
    if test_state_cnt[d][users][s] < MIN_TEST_OBS:
        # too few observations of this state to trust the conflict frequency
        continue
    if s not in conflict_state_device[d]:
        conflict_state_device[d][s] = {}
    conflict_state_device[d][s][users] = conflict_state_device[d][s].get(users, 0) + 1
    # if conflict_state_device[d][s][users] > test_state_cnt[d][s]:
    #     print(conflict_state_device[d][s][users], test_state_cnt[d][s])
    #     print(users, d, c["cur_time"], s)
print({d:len(c) for d,c in conflict_device.items()})
print({d:len(c) for d,c in conflict_state_device.items()})
print(len(test_dates))
print(Counter(cnts))
# +
# print(conflict_state_device["thermostat"])
# +
# Evaluate predicted conflict probabilities against ground truth for every
# device, user pair, and sufficiently-observed context state. Errors are
# accumulated separately for conflict states (gt_prob > 0) and non-conflict
# states, and compared against a per-pair constant-prediction baseline.
all_users = test_projects
all_devices = capacity.keys()
user_pairs = list(combinations(all_users, 2))
conflict_predicator = ConflictPredicator(ctx_info, final_conflicts)
exp_cnt = 0
exp_gt_c = 0
# per device/pair accumulator:
# [sum abs err (conflict), n conflict, sum err (non-conflict), n non-conflict, sum gt_prob]
exp_result = {
    d: {
        frozenset(u_pair): [0,0,0,0,0]
        for u_pair in user_pairs
    }
    for d in all_devices}
max_p = 0
all_state_cnt = 0
gt_probs = []
# baseline accumulators: [sum err, count] for conflict / non-conflict states
o_static = [[0,0], [0,0]]
for d in all_devices:
    for u_pair in user_pairs:
        u_pair_set = frozenset(u_pair)
        it = np.nditer(test_state_cnt[d][u_pair_set], flags=['multi_index'])
        for count in it:
            all_state_cnt += count
            if count < MIN_TEST_OBS:
                continue
            state = it.multi_index
            gt_prob = 0.
            if (d in conflict_state_device) and \
            (state in conflict_state_device[d]) and \
            (u_pair_set in conflict_state_device[d][state]):
                # fraction of observations of this state that were conflicts
                gt_prob = float(conflict_state_device[d][state][frozenset(u_pair)]) / count
            gt_ctx_snapshot = gt_ctx_info.coor_to_snapshot(state)
            pred_prob = conflict_predicator.get_prob_conflict(ctx_info.get_coor_by_ctx(gt_ctx_snapshot), u_pair, d)
            exp_cnt += 1
            gt_probs.append(gt_prob)
            if gt_prob > 0:
                # if gt_prob > 0.5:
                #     print(gt_ctx_info.coor_to_snapshot(state).values(), gt_prob, pred_prob, u_pair)
                exp_gt_c += 1
                exp_result[d][u_pair_set][0] += abs(pred_prob - gt_prob)
                exp_result[d][u_pair_set][1] += 1
                exp_result[d][u_pair_set][4] += gt_prob
                if gt_prob > 1:
                    # probability above 1 would indicate a counting bug
                    print(gt_prob, pred_prob, d, u_pair)
                # if abs(gt_prob - pred_prob) > 0.2:
                #     print(pred_prob, gt_prob, gt_ctx_snapshot, count, u_pair)
                max_p = max(gt_prob, max_p)
            else:
                exp_result[d][u_pair_set][2] += pred_prob - gt_prob
                exp_result[d][u_pair_set][3] += 1
                # print(pred_prob, gt_prob, gt_ctx_snapshot, count, u_pair)
        # baseline: always predict this pair's average ground-truth probability
        avg_gt = sum(gt_probs) / len(gt_probs)
        errors = [abs(x-avg_gt) for x in gt_probs]
        for i, e in enumerate(errors):
            if gt_probs[i] > 0.:
                o_static[0][0] += e
                o_static[0][1] += 1
            else:
                o_static[1][0] += e
                o_static[1][1] += 1
        avg_error = str(sum(errors) / len(errors))
        gt_probs = []
        print("Optimal single prediction for users: {} is : {}".format(u_pair, str(sum(errors) / len(errors))))
# Aggregate per-pair accumulators into overall conflict / non-conflict totals.
o_acc = [[0., 0.], [0., 0.]]
o_zero = 0.
for d, result in exp_result.items():
    for u_pair in user_pairs:
        u_pair_set = frozenset(u_pair)
        r= result[u_pair_set]
        o_acc[0][0] += r[0] #total conflict
        o_acc[1][0] += r[2] #total non-conflict
        o_acc[0][1] += r[1] #total conflict no.
        o_acc[1][1] += r[3] #total non-conflict no.
        o_zero += r[4] #total zero prediction
        # if r[1] > 0:
        #     r.append((r[0] + r[2]) / (r[1] + r[3]))
        # if r[1] > 0:
        #     r[0] = r[0] / r[1]
        #     r[4] = r[4] / r[1]
        # if r[3] > 0:
        #     r[2] = r[2] / r[3]
        # print(u_pair, r[0], r[2])
print("The no. conf {}, no. non-conf {}".format(o_acc[0][1], o_acc[1][1]))
print("The overall accuracy for conf is {}".format(o_acc[0][0] / o_acc[0][1]))
print("The overall accuracy for non-conf is {}".format(o_acc[1][0] / o_acc[1][1]))
print("The overall acc is {}".format((o_acc[0][0] + o_acc[1][0])/(o_acc[0][1] + o_acc[1][1])))
print("The overall zero for conf is {}".format(o_zero / o_acc[0][1]))
print("The overall zero for all is {}".format(o_zero / (o_acc[0][1] + o_acc[1][1])))
print("The no. conf {}, no. non-conf {}".format(o_static[0][1], o_static[1][1]))
print("Baseline, optimal single prediction for conf is {}".format(o_static[0][0] / o_static[0][1]))
print("Baseline, optimal single prediction for non-conf is {}".format(o_static[1][0] / o_static[1][1]))
print("The overall baseline is {}".format((o_static[0][0] + o_static[1][0])/(o_static[0][1] + o_static[1][1])))
# print("Baseline approach, optimal single prediction: " + str(sum(errors) / len(errors)))
# +
# Context grids for the alpha sweep: prediction grid (ctx_info) and ground
# truth grid (gt_ctx_info) — same dimensions, listed in different orders.
from refit_alpha_test import *
ctx_info = ContextAccessor({
    TIME_CTX: {
        "range" : (0, 24*60),
        "interval" : 60,
    },
    THERMO_MODE_CTX: {},
    "OutTemp#NUM": {
        "range": (-25, 35),
        "interval": 5,
    },
    # WEEKDAY_CTX: {
    #     "range": (0, 6),
    #     "interval": 1,
    # },
})
gt_ctx_info = ContextAccessor({
    TIME_CTX: {
        "range" : (0, 24*60),
        "interval" : 60,
    },
    THERMO_MODE_CTX: {},
    # WEEKDAY_CTX: {
    #     "range": (0, 6),
    #     "interval": 1,
    # },
    "OutTemp#NUM": {
        "range": (-25, 35),
        "interval": 5,
    },
})
def alpha_nonLinear_generator(alpha_min, alpha_max, alpha_step):
    """Yield alpha values from alpha_min up to and including alpha_max.

    The step is multiplied by 10 whenever it falls to one tenth of the
    current alpha or less, producing a roughly logarithmic sweep
    (e.g. 1..10, then 20, 30, ...).

    Parameters
    ----------
    alpha_min : first value yielded.
    alpha_max : inclusive upper bound of the sweep.
    alpha_step : initial step size; grows by factors of 10 during the sweep.
    """
    # (removed an unused `auc_values` accumulator that was never read)
    alpha = alpha_min
    while alpha <= alpha_max:
        yield alpha
        if alpha_step <= alpha / 10:
            alpha_step *= 10
        alpha += alpha_step
print("Run test accuracy for context " + str(ctx_info.get_all_ctx_ordered()))
alpha_generator = alpha_nonLinear_generator(0, 0, 1e-6)
acc_values = test_acc_alpha(
ctx_info = ctx_info,
gt_ctx_info = gt_ctx_info,
root_folder = root_folder,
alpha_generator = alpha_generator,
device_mapping=setpoint_mapping,
time_step = 1,
test_projects = test_projects,
capacity = capacity,
acc_device = "thermostat")
# -
print(acc_values)
| exps/thermo_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import requests
import datetime

# Scrape the hourly day-ahead price table for a single date from
# Power Smart Pricing and convert it into a tidy frame.
# BUG FIX: removed a dangling `df =` assignment with no right-hand side
# (a SyntaxError); df is assigned from the parsed table below.
url = r"https://www.powersmartpricing.org/psp/servlet?type=pricingtabledatesingle&date=20140501"
r = requests.get(url=url)
pd.read_html(r.content)
# NOTE(review): this re-fetches the same URL that was just downloaded above
tables = pd.read_html(url)
df = tables[0]
df
# "Actual Price (Cents per kWh)" contains text like "4.5" with units;
# extract the number and convert cents -> dollars
df["price_dollar_per_kwh"] = df["Actual Price (Cents per kWh)"].str.extract(r"(\d+\.\d+)").astype(float) / 100
df
pd.date_range("20140530", pd.Timestamp.now(), freq="D").size
# one row per hour of the day: start/end timestamps for each price interval
df.insert(0, "price_start_datetime", pd.date_range("20140501", periods=24, freq='H'))
end_dt = pd.date_range("20140530 1:00:00", periods=24, freq='H') - pd.Timedelta(seconds=1)
df.insert(1, "price_end_datetime", end_dt)
pd.Timestamp("12:00 AM")
pd.Timestamp(pd.Timestamp.now().strftime("%Y%m%d") + " " + "12:00 AM")
x = df["Time of Day (CT)"].str.extract(r"(\d{1,2}:\d{2} [A|P]M)")
df["date"] = str(pd.Timestamp.now().date())
df["date"].astype("datetime64[D]")
df["price_end_datetime"]
d = pd.DatetimeIndex(pd.Timestampe.now().date(), )
d
d.combine(x, lambda x1, x2: min(x1, x2))
s1 = pd.Series([1, 2])
s2 = pd.Series([0, 3])
s1.combine(s2, lambda x1, x2: min(x1, x2))
pd.Timestamp(pd.Timestamp.now().date()+pd.Timestamp("12:00 AM").time())
df
df["price_end_datetime"] = pd.date_range("20140530 1:00:00", periods=24, freq='H') - pd.Timedelta(seconds=1)
df.drop(["Time of Day (CT)", "Actual Price (Cents per kWh)"], axis=1, inplace=True)
df
pd.date_range("20140530", periods=24, freq='H') - pd.Timedelta(seconds=1)
df["recorded_on"] = pd.Timestamp.now().date()
df
# Scrape the historical weather tables for blending with the price data.
pd.read_html("https://www.wunderground.com/history/daily/KPIA/date/2018-3-30?req_city=Morton&req_state=IL&req_statename=Illinois&reqdb.zip=61550&reqdb.magic=1&reqdb.wmo=99999")
# Planned pipeline:
# 1. Make datetime index
# 2. Multithread over index to generate histograms
# 3. Apply cleaning steps
# 4. Blend with weather data
| notebooks/1.0-initial-data-exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## First, let's test the ping method (GET /ping)
# This method will be used by Sagemaker to health-check our model. It must return a code **200**.
# +
import json
from urllib import request

# local model server started by the previous notebook
base_url='http://localhost:8080'
# -
resp = request.urlopen("%s/ping" % base_url)
print("Response code: %d" % resp.getcode() )
# ## Then we can test the predictions (POST /invocations)
# This method will be used by Sagemaker for the predictions. Here we're simulating the header parameter related to the CustomAttributes
# +
# %%time
# one Iris-like feature vector; the custom attribute selects which model serves it
payload = json.dumps([[4.6, 3.1, 1.5, 0.2]]).encode('utf-8')
headersA={"Content-type": "application/json", "X-Amzn-SageMaker-Custom-Attributes": "logistic"}
headersB={"Content-type": "application/json", "X-Amzn-SageMaker-Custom-Attributes": "random_forest"}
def predict(payload, headers):
    """POST the JSON payload to /invocations with the given headers and print the response."""
    req = request.Request("%s/invocations" % base_url, data=payload, headers=headers)
    resp = request.urlopen(req)
    print("Response code: %d, Payload: [%s]" % (resp.getcode(), json.loads(resp.read())))
predict(payload, headersA)
predict(payload, headersB)
# -
# ## Now, you can go back to the previous Jupyter, stop it and continue executing it
| lab/02_BuildModelImage/02_Testing our local model server.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %run ../Python_files/util_data_storage_and_load.py
# %run ../Python_files/util.py
# +
# Read the OD demand matrix: one row per OD pair, comma-separated
# (presumably origin, destination, flow — confirm against the source file).
list_of_lists = []
with open('../temp_files/OD_demand_matrix_Jan_weekday_PM.txt', 'r') as the_file:
    idx = 0  # NOTE(review): unused here; shadowed by the writer loop below
    for line in the_file:
        inner_list = [elt.strip() for elt in line.split(',')]
        list_of_lists.append(inner_list)
# -
list_of_lists[0:7]
zero_value = 0.0
# Write the demand in trips-file format: 8 zones, zero intra-zonal flow, and
# the 7 off-diagonal destination entries per origin taken from consecutive
# rows of the matrix (rows n*7 .. n*7+6 belong to origin n+1).
with open("../data_traffic_assignment_uni-class/East_Massachusetts_trips_Jan_PM.txt", "w") as text_file:
    text_file.write("<NUMBER OF ZONES> 8\n")
    text_file.write("<TOTAL OD FLOW> 0.0\n")
    text_file.write("<END OF METADATA>\n\n\n")
    for n in range(8):
        text_file.write("Origin %d \n" %(n+1))
        text_file.write("%d : 0.0; " %(n+1))
        for idx in range(n*7, (n+1)*7):
            text_file.write("%d : %f; " \
                %(int(list_of_lists[idx][1]), float(list_of_lists[idx][2])))
            # wrap the line every few entries for readability
            if idx % 3 == 0:
                text_file.write("\n")
        text_file.write("\n\n")
# sanity check of the index range used for the last origin
n = 7
range(n*7, (n+1)*7)
| 05_1_cross_validation_uni_class_cdc16/create_East_Massachusetts_trips_Jan_PM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Acquire image from local storage and display the image
# importing python computer vision library
import cv2
# reading the image in the given path using imread - converts image to pixel
# array; imread returns None if the file cannot be read
input_image = cv2.imread("./face_img_apj.jpg")
# 3D in case of color image and 2D in case of gray scale image
input_image.shape
# using imshow to display the image using imshow - converts pixel back to image
# (requires a GUI-capable environment)
cv2.imshow("Face of APJ", input_image)
# waits till any key is pressed.
# if time=3000 milliseconds is given, image exists till that time and closes
cv2.waitKey(0)
# closes the image display after waitkey
cv2.destroyAllWindows()
| Faces/Image-Faces-Detection/Image_capture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Content with notebooks
#
# You can also create content with Jupyter Notebooks. The content for the current page is contained
# in a Jupyter Notebook called `notebooks.ipynb` in the `doc/features/` folder of the repository. This means that we can include
# code blocks and their outputs, and export them to MyST markdown.
#
# **You can find the original notebook for this page [at this address](https://github.com/ExecutableBookProject/cli/blob/master/docs/features/notebooks.ipynb)**
#
# ## Markdown + notebooks
#
# As it is markdown, you can embed images, HTML, etc into your posts!
#
# 
#
# You can also $add_{math}$ and
#
# $$
# math^{blocks}
# $$
#
# or
#
# $$
# \begin{align*}
# \mbox{mean} la_{tex} \\ \\
# math blocks
# \end{align*}
# $$
#
# But make sure you \$Escape \$your \$dollar signs \$you want to keep!
#
# ## Code blocks and image outputs
#
# Jupyter Book will also embed your code blocks and output in your book.
# For example, here's some sample Matplotlib code:
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
# +
# Demo cell: noisy logspace curves colored with a coolwarm cycle, plus a
# custom legend built from Line2D proxy artists.
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
data = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
                Line2D([0], [0], color=cmap(.5), lw=4),
                Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot']);
# -
# Note that the image above is captured and displayed by Jekyll.
# + tags=["popout", "remove_input"]
# Same demo with smaller noise (.1 factor); the cell tags pop the figure out
# to the side and hide the input in the published book.
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
data = [np.logspace(0, 1, 100) + .1*np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
                Line2D([0], [0], color=cmap(.5), lw=4),
                Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot'])
ax.set(title="Smoother linez");
# + [markdown] tags=["popout"]
# **You can also pop out content to the side!**. For more information on how to do this,
# check out the [customizing page layout](../old_docs/features/layout) page.
# -
# ## Removing content before publishing
#
# You can also remove some content before publishing your book to the web. For example,
# in [the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/book_template/content/features/notebooks.ipynb) there
# used to be a cell below...
# + tags=["remove_cell"]
# This cell is tagged remove_cell: it is stripped entirely from the published book.
thisvariable = "none of this should show up in the textbook"
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
# -
# You can also **remove only the code** so that images and other output still show up.
#
# Below we'll *only* display an image. It was generated with Python code in a cell,
# which you can [see in the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/book_template/content/features/notebooks.ipynb)
# + tags=["hide_input"]
# This cell is tagged hide_input: only its plot output appears in the book.
thisvariable = "this plot *will* show up in the textbook."
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
# -
# And here we'll *only* display a Pandas DataFrame. Again, this was generated with Python code
# from [this original notebook](https://github.com/jupyter/textbooks-with-jupyter/blob/master/notebooks/introduction/notebooks.ipynb).
# + tags=["hide_input"]
# A DataFrame rendered with its generating code hidden (hide_input tag).
import pandas as pd
pd.DataFrame([['hi', 'there'], ['this', 'is'], ['a', 'DataFrame']], columns=['Word A', 'Word B'])
# -
# ## Interactive outputs
#
# We can even do the same for *interactive* material. Below we'll display a map using [folium](https://python-visualization.github.io/folium/). When the notebook
# is converted to Markdown, the code for creating the interactive map is retained.
#
# **Note that this will only work for some packages.** They need to be able to output standalone HTML/Javascript, and not
# depend on an underlying Python kernel to work.
import folium
# +
# Interactive map demo with three markers around Mt. Hood.
# NOTE(review): the 'Stamen Terrain' tile set was retired by the upstream
# provider; newer folium versions may need a different tiles value — confirm.
m = folium.Map(
    location=[45.372, -121.6972],
    zoom_start=12,
    tiles='Stamen Terrain'
)
folium.Marker(
    location=[45.3288, -121.6625],
    popup='Mt. Hood Meadows',
    icon=folium.Icon(icon='cloud')
).add_to(m)
folium.Marker(
    location=[45.3311, -121.7113],
    popup='Timberline Lodge',
    icon=folium.Icon(color='green')
).add_to(m)
folium.Marker(
    location=[45.3300, -121.6823],
    popup='Some Other Location',
    icon=folium.Icon(color='red', icon='info-sign')
).add_to(m)
m
# -
# ## Rich outputs from notebook cells
# Because notebooks have rich text outputs, you can store these in
# your Jupyter Book as well!
# !jupyter-book create -h
# NOTE: this name is deliberately undefined — the docs page demonstrates how
# error output is rendered in the book. Do not "fix" it.
this_will_error
| docs/features/notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <p><font size="6"><b> CASE - CurieuzeNeuzen citizen science air quality data</b></font></p>
#
#
# > *DS Python for GIS and Geoscience*
# > *October, 2021*
# >
# > *© 2021, <NAME> and <NAME>. Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
#
# ---
# Air pollution remains a key environmental problem in an increasingly urbanized world. While concentrations of traffic-related pollutants like nitrogen dioxide (NO2) are known to vary over short distances, official monitoring networks remain inherently sparse, as reference stations are costly to construct and operate.
#
# The [**CurieuzeNeuzen**](https://curieuzeneuzen.be/curieuzeneuzen-vlaanderen-2018/) citizen science project collected a large, spatially distributed dataset that can complement official monitoring. In a first edition in 2016, in Antwerp, 2000 citizens were involved. This success was followed by a second edition in 2018 engaging 20.000 citizens across Flanders, a highly urbanized, industrialized and densely populated region in Europe. The participants measured the NO2 concentrations in front of their house using a low-cost sampler design (see picture below, where passive sampling tubes are attached using a panel to a window at the facade).
#
# Source: preprint paper at https://eartharxiv.org/repository/view/19/
#
# In this case study, we are going to make use of the data collected across Flanders in 2018: explore the data and investigate relationships with other variables.
#
# <img src="../img/CN_measurement_setup.png" alt="Measurement panel" style="width:800px">
# +
import numpy as np
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
# -
# ## Importing and exploring the data
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Read the csv file from `data/CN_Flanders_open_dataset.csv` into a DataFrame `df` and inspect the data.
# * How many measurements do we have?
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality1.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality2.py
# -
# The dataset contains longitude/latitude columns of the measurement locations, the measured NO2 concentration, and in addition also a "campaign" column indicating the type of measurement location (and an internal "code", which we will ignore here).
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Check the unique values of the "campaign" columns and how many occurrences those have.
#
#
# <details><summary>Hints</summary>
#
# * A pandas Series has a `value_counts()` method that counts the unique values of the column.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality3.py
# + [markdown] clear_cell=false
# Most of the measurements are performed at the facade of the house or building of a participant. In addition, some measurement tubes were also placed next to reference monitoring stations of the VMM and in background locations (e.g. nature reserve or park).
#
# Let's now explore the measured NO2 concentrations.
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Calculate the overall average NO2 concentration of all locations (i.e. the mean of the "no2" column).
# * Calculate a combination of descriptive statistics of the NO2 concentration using the `describe()` method.
# * Make a histogram of the NO2 concentrations to get a visual idea of the distribution of the concentrations.
#
# <details><summary>Hints</summary>
#
# * To calculate the mean of a column, we first need to select it: using the square bracket notation `df[colname]`
# * The average can be calculated with the `mean()` method
# * A histogram of a column can be plotted with the `hist()` method
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality4.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality5.py
# -
# A histogram:
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality6.py
# -
# A more expanded histogram (not asked in the exercise, but uncomment to check the code!)
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality7.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Determine the percentage of locations that exceed the EU and WHO yearly limit value of 40 µg/m³.
#
# __Tip__: first create a boolean mask determining whether the NO2 concentration is above 40 or not. Using this boolean mask, you can determine the percentage of values that follow the condition.
#
# <details><summary>Hints</summary>
#
# * To know the fraction of `True` values in a boolean Series, we can use the `sum()` method, which is equivalent as counting the True values (True=1, False=0, so the sum is a count of the True values) and dividing by the total number of values.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality8.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality9.py
# -
# So overall in Flanders, around 2.3% of the measurement locations exceeded the limit value. This might not seem much, but assuming that the dataset is representative for the population of Flanders (and effort was done to ensure this), around 150,000 inhabitants live in a place where the annual NO2 concentration at the front door exceeds the EU legal threshold value.
# We will also later see that this exceedance has a large spatial variation.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * What is the average measured concentration at the background location? Calculate this by first selecting the appropriate subset of the dataframe.
# * More generally, what is the average measured concentration grouped on the "Campaign" type?
#
# <details><summary>Hints</summary>
#
# * To calculate a grouped statistic, use the `groupby()` method. Pass as argument the name of the column on which you want to group.
# * After the `groupby()` operation, we can (similarly as for a normal DataFrame) select a column and call the aggregation method (`df.groupby("class")["variable"].method()`).
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality10.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality11.py
# -
# The background locations (parks, nature reserves) clearly show a lower concentration than the average location. Note that the number of observations in each class are very skewed, so those averages are not necessarily representative!
# ## Converting to a geospatial dataset
# The provided data was a CSV file, and we explored it above as a pandas DataFrame. To further explore the dataset using the spatial aspects (point data), we will first convert it to a geopandas GeoDataFrame.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Convert `df` into a GeoDataFrame, using the 'lat'/'lon' columns to create a Point geometry column (tip: check the `geopandas.points_from_xy()` function). Also specify the correct Coordinate Reference System with the `crs` keyword. Call the result `gdf`.
# * Do a quick check to see the result is correct: look at the first rows, and make a simple plot of the GeoDataFrame with `.plot()` (you should recognize the shape of Flanders, if not, something went wrong)
#
#
# <details><summary>Hints</summary>
#
# * A GeoDataFrame can be created from an existing pandas.DataFrame by using the `geopandas.GeoDataFrame(...)` constructor. This constructor needs a `geometry=` keyword specifying either the name of the column that holds the geometries or either the geometry values.
# * GeoPandas provides a helper function to create Point geometry values from a array or column of x and y coordinates: `geopandas.points_from_xy(x_values, y_values)`.
# * Remember! The order of coordinates is (x, y), and for geographical coordinates this means the (lon, lat) order.
# * Remember! The Coordinate Reference System typically used for geographical lon/lat coordinates is "EPSG:4326" (WGS84).
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality12.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality13.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality14.py
# -
# Let's make that last plot a bit more informative:
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Make a plot of the point locations of `gdf` and use the "no2" column to color the points.
# * Make the figure a bit larger by specifying the `figsize=` keyword.
#
# <details><summary>Hints</summary>
#
# * The figure size can be specified with the `figsize=` keyword of the `plot()` method.
# * For using one of the columns of the GeoDataFrame to determine the fill color, use the `column=` keyword.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality15.py
# -
# We can already notice some spatial patterns: higher concentrations (and also more measurement locations) in urban areas. But, the visualization above is not really a good way to visualize many points. There are many alternatives (e.g. heatmaps, hexbins, etc), but in this case study we are going to make a [*choropleth* map](https://en.wikipedia.org/wiki/Choropleth_map): choropleths are maps onto which an attribute, a non-spatial variable, is displayed by coloring a certain area.
#
# As the unit of area, we will use the municipalities of Flanders.
# ## Combining with municipalities
#
# We downloaded the publicly available municipality reference from geopunt.be ([Voorlopig referentiebestand gemeentegrenzen, toestand 16/05/2018](https://www.geopunt.be/catalogus/datasetfolder/9ff44cc4-5f16-4507-81a6-6810958b14df)), and added the Shapefile with the borders to the course repo: `data/VRBG/Refgem.shp`.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Read the Shapefile with the municipalities into a variable called `muni`.
# * Inspect the data and do a quick plot.
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality16.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality17.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality18.py
# -
# Now we have a dataset with the municipalities, we want to know for each of the measurement locations in which municipality it is located. This is a "point-in-polygon" spatial join.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# Before performing the spatial join, we need to ensure the two datasets are using the same Coordinate Reference System (CRS).
#
# * Check the CRS of both `gdf` and `muni`. What kind of CRS are they using? Are they the same?
# * Reproject the measurements to the Lambert 72 (EPSG:31370) reference system. Call the result `gdf_lambert`.
#
# <details><summary>Hints</summary>
#
# * The CRS of a GeoDataFrame can be checked by looking at the `crs` attribute.
# * To reproject a GeoDataFrame to another CRS, we can use the `to_crs()` method.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality19.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality20.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality21.py
# -
# The EPSG:31370 or "Belgian Lambert 72" (https://epsg.io/31370) is the local, projected CRS most often used in Belgium.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Add the municipality information to the measurements dataframe. We are mostly interested in the "NAAM" column of the municipalities dataframe (the name of the municipality). Call the result `gdf_combined`.
#
# <details><summary>Hints</summary>
#
# * Joining the measurement locations with the municipality information can be done with the `geopandas.sjoin()` function. The first argument is the dataframe to which we want to add information, the second argument the dataframe with the additional information.
# * You can select a subset of columns when passing a GeoDataFrame to the `sjoin()` function.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality22.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality23.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * What is the average measured concentration in each municipality?
# * Call the result `muni_mean`. Ensure we have a DataFrame with a NO2 column and a column with the municipality name by calling the `reset_index()` method after the groupby operation.
# * Merge those average concentrations with the municipalities GeoDataFrame (note: those have a common column "NAAM"). Call the merged dataframe `muni_no2`, and check the first rows.
#
# <details><summary>Hints</summary>
#
# * Something like `df.groupby("class")["variable"].mean()` returns a Series, with the group variable as the index. Calling `reset_index()` on the result then converts this into a DataFrame with 2 columns: the group variable and the calculated statistic.
# * Merging two dataframe that have a common column can be done with the `pd.merge()` function.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality24.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality25.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality26.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Make a choropleth of the municipalities using the average NO2 concentration as variable to color the municipality polygons.
# * Set the figure size to be (16, 5), and add a legend.
#
# <details><summary>Hints</summary>
#
# * To specify which column to use to color the geometries in the plot, use the `column=` keyword of the `plot()` method.
# * The figure size can be specified with the `figsize=` keyword of the `plot()` method.
# * Pass `legend=True` to add a legend to the plot. The type of legend (continuous color bar, discrete legend) will be inferred from the plotted values.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality27.py
# -
# When specifying a numerical column to color the polygons, by default this results in a continuous color scale, as you can see above.
#
# However, it is very difficult for the human eye to process small differences in color in a continuous scale.
# Therefore, to create effective choropleths, we typically classify the values into a set of discrete groups.
#
# With GeoPandas' `plot()` method, you can control this with the `scheme` keyword (indicating which classification scheme to use, i.e how to divide the continuous range into a set of discrete classes) and the `k` keyword to indicate how many classes to use. This uses the [mapclassify](https://pysal.org/mapclassify/) package under the hood.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Starting from the previous figure, specify a classification scheme and a number of classes to use. Check the help of the `plot()` method to see the different options, and experiment with those.
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality28.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * What is the percentage of exceedance for each municipality? Repeat the same calculation we did earlier on the original dataset `df`, but now using `gdf_combined` and grouped per municipality.
# * Show the 10 municipalities with the highest percentage of exceedances.
#
# <details><summary>Hints</summary>
#
# * For showing the 10 highest values, we can either use `sort_values()`, sorting the highest values on top and showing the first 10 rows, or use the `nlargest()` method as a shortcut for this operation.
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality29.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality30.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality31.py
# -
# ## Combining with Land Use data
#
# The CORINE Land Cover (https://land.copernicus.eu/pan-european/corine-land-cover) is a program by the European Environment Agency (EEA) to provide an inventory of land cover in 44 classes of the European Union. The data is provided in both raster as vector format and with a resolution of 100m.
#
# The data for the whole of Europe can be downloaded from the website (latest version: https://land.copernicus.eu/pan-european/corine-land-cover/clc2018?tab=download). This is however a large dataset, so we downloaded the raster file and cropped it to cover Flanders, and this subset is included in the repo as `data/CLC2018_V2020_20u1_flanders.tif` (the code to do this cropping can be see at [data/preprocess_data.ipynb#CORINE-Land-Cover](data/preprocess_data.ipynb#CORINE-Land-Cover)).
#
# The air quality is indirectly linked to land use, as the presence of pollution sources will depend on the land use. Therefore, in the following section we will determine the land use for each of the measurement locations based on the CORINE dataset and explore the relationship of the NO2 concentration and land use.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Open the land cover raster file (`data/CLC2018_V2020_20u1_flanders.tif`) with xarray, inspect the metadata, and do a quick visualization.
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality32.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality33.py
# -
# The goal is now to query from the raster file the value of the land cover class for each of the measurement locations. This can be done with the `rasterstats` package and with the point locations of our GeoDataFrame. But first, we need to ensure that both our datasets are using the same CRS. In this case, it's easiest to reproject the point locations to the CRS of the raster file.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * What is the EPSG code of the Coordinate Reference System (CRS) of the raster file? You can find this in the metadata inspected with rasterio or xarray above.
# * Reproject the point dataframe (`gdf`) to the CRS of the raster and assign this to a temporary variable `gdf_raster`.
#
# <details><summary>Hints</summary>
#
# * Reprojecting can be done with the `to_crs()` method of a GeoDataFrame, and the CRS can be specified in the form of "EPSG:xxxx".
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality34.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Use the `rasterstats.point_query()` function to determine the value of the raster for each of the points in the dataframe. Remember to use `gdf_raster` for passing the geometries.
# * Because we have a raster file with discrete classes, ensure to pass `interpolate="nearest"` (the default "bilinear" will result in floats with decimals, not preserving the integers representing discrete classes).
# * Assign the result to a new column "land_use" in `gdf`.
# * Perform a `value_counts()` on this new column to get a quick idea of the new values obtained from the raster file.
#
# Note that the query operation can take a while. Don't worry if it runs for around 20 seconds!
#
#
# <details><summary>Hints</summary>
#
# * Don't forget to first import the `rasterstats` package.
# * The `point_query()` function takes as first argument the point geometries (this can be passed as a GeoSeries), and as second argument the path to the raster file (this will be opened by `rasterio` under the hood).
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality35.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality36.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality37.py
# -
# As you can see, we have obtained a large variety of land cover classes. To make this more practical to work with, we
#
# 1. want to convert the numbers into a class name, and
# 2. reduce the number of classes.
#
# The full hierarchy (with 3 levels) of the 44 classes can be seen at https://wiki.openstreetmap.org/wiki/Corine_Land_Cover. For keeping the exercise a bit practical here, we prepared a simplified set of classes and provided a csv file with this information.
#
# This has a column "value" corresponding to the values used in the raster file, and a "group" column with the simplified classes that we will use for this exercise.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Read the `"data/CLC2018_V2018_legend_grouped.csv"` as a dataframe and call it `legend`.
#
# The additional steps, provided for you, use this information to convert the column of integer land use classes to a column with the simplified names. After that, we again use `value_counts()` on this new column, and we can see that we now have less classes.
#
# <details><summary>Hints</summary>
#
# * Reading a csv file can be done with the `pandas.read_csv()` function.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality38.py
# -
# Convert the "land_use" integer values to a column with land use class names:
# Lookup table from raster integer codes to simplified land use group names,
# built from the two legend columns.
value_to_group = {code: group for code, group in zip(legend['value'], legend['group'])}
# replace() keeps any code missing from the legend unchanged (unlike map(),
# which would turn it into NaN).
gdf['land_use_class'] = gdf['land_use'].replace(value_to_group)
# + jupyter={"outputs_hidden": false}
gdf['land_use_class'].value_counts()
# -
# -
# Now we have the land use data, let's explore the air quality in relation to this land use.
#
# We can see in the `value_counts()` result above that we have a few classes with only very few observations, though (<50). Calculating statistics for those small groups is not very reliable, so lets leave them out for this exercises (note, this is not necessarily the best strategy in real life! Amongst others, we could also inspect those points and re-assign to a dominant land use class in the surrounding region).
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Assign the value counts of the "land_use_class" column to a variable `counts`.
# * Using "counts", we can determine which classes occur more then 50 times.
# * Using those frequent classes, filter the `gdf` to only include observations from those classes, and call this `subset`.
# * Based on this subset, calculate the average NO2 concentration per land use class.
#
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality39.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality40.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality41.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality42.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Using the `seaborn` package and the `subset` DataFrame, make a boxplot of the NO2 concentration, splitted per land use class.
#
# Don't forget to first import `seaborn`. We can use the `seaborn.boxplot()` function (check the help of this function to see which keywords to specify).
#
# <details><summary>Hints</summary>
#
# * With the `seaborn.boxplot()`, we can specify the `data=` keyword to pass the DataFrame from which to plot values, and the `x=` and `y=` keywords to specify which columns of the DataFrame to use.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality43.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality44.py
# -
# Tweaking the figure a bit more (not asked in the exercise, but uncomment and run to see):
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality45.py
# -
# The dense urban areas and areas close to large roads clearly have higher concentrations on average. On the countryside (indicated by "agricultural" areas) much lower concentrations are observed.
# ## A focus on Gent
#
# Let's now focus on the measurements in Ghent. We first get the geometry of Gent from the municipalities dataframe, so we can use this to filter the measurements:
# Extract the single Shapely polygon for the municipality of Gent:
# .loc with a boolean mask selects the matching row's geometry,
# and .item() unwraps the one-element result to a plain geometry object.
gent = muni.loc[muni["NAAM"] == "Gent", "geometry"].item()
# + jupyter={"outputs_hidden": false}
gent
# -
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# For the exercise here, we don't want to select just the measurements in the municipality of Gent, but those in the region. For this, we will create a bounding box of Gent:
#
# * Create a new Shapely geometry, `gent_region`, that defines the bounding box of the Gent municipality.
#
# Check this section of the Shapely docs (https://shapely.readthedocs.io/en/latest/manual.html#constructive-methods) or experiment yourself to see which attribute is appropriate to use here (`convex_hull`, `envelope`, and `minimum_rotated_rectangle` all create a new polygon encompassing the original).
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality46.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Using the `gent_region` shape, create a subset the measurements dataframe with those measurements located within Gent. Call the result `gdf_gent`.
# * How many measurements are left in the subset?
#
# <details><summary>Hints</summary>
#
# * Ensure to use `gdf_lambert` and not `gdf`, since the `gent` shape is extracted from the `muni` dataframe with EPSG:31370 CRS.
# * To check for a series of points whether they are located in a given polygon, use the `within()` method.
# * Use the resulting boolean Series to mask the original `gdf` GeoDataFrame using boolean indexing (`[]` notation).
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality47.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality48.py
# -
# Alternatively, we can also use the `geopandas.clip()` function. For points there is not much difference with the method above, but for lines or polygons, it will actually "clip" the geometries, i.e. removing the parts that fall outside of the specified region (in addition, this method also uses a spatial index under the hood and will typically be faster for large datasets):
# + jupyter={"outputs_hidden": false}
# Clip the Lambert-projected measurements to the Gent bounding box; the
# GeoDataFrame.clip() method is equivalent to geopandas.clip(gdf, mask).
gdf_gent = gdf_lambert.clip(gent_region)
# Number of measurements remaining inside the region.
len(gdf_gent)
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Make a visualization of the measurements in Gent. Use contextily to add a background basemap. Color the points based on the NO2 concentration.
#
#
# <details><summary>Hints</summary>
#
# * For using one of the columns of the GeoDataFrame to determine the fill color, use the `column=` keyword.
# * To add a background map, use the `contextily.add_basemap()` function. It takes the matplotlib `ax` to which to add a map as the first argument.
# * Remember that, to use a contextily background, the data needs to be in Web Mercator ("EPSG:3857").
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality49.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality50.py
# -
# The solution above includes a `vmax=50` to indicate to use a max value of 50 for the colorbar range (there are few measurements with much higher values, that would otherwise make most other points look dark blue-ish). A better option can be to use one of the classification schemes from `mapclassify` through the `scheme` keyword.
#
# Further zooming in on the city center, using a discrete color scheme and a different tile provider (not asked in exercise, but uncomment and run to see):
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality51.py
# -
# ## Combining with OpenStreetMap information
#
# We downloaded and filtered OpenStreetMap data for the area of Gent, focusing on the street network information, and provided this as a GeoPackage file (`data/osm_network_gent.gpkg`, see this [notebook](./data/preprocess_data.ipynb#OpenStreetMap) to check the code to download and preprocess the raw OSM data).
#
# The OpenStreetMap street network data includes information about the type of street in the "highway" tag. We can use this as a proxy for traffic intensity of the street, and relate that to the measured NO2 concentration.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Read the `"data/osm_network_gent.gpkg"` file into a `streets` variable, and check the first rows.
# * Convert the data to the appropriate CRS to combine with the `gdf_gent` data, if needed.
# * Make a quick plot to explore the data.
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality52.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality53.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality54.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality55.py
# -
# (Note: for interactively exploring such data, there are better solutions than the GeoPandas `.plot()` method, such as opening the data in QGIS, or using an interactive visualization library, see notebook [visualization-04-interactive](./visualization-04-interactive.ipynb)).
#
# To relate the measured concentration with the road type, we want to determine for each location at what type of street it is located. Since the measurements are not exactly located on one of the lines, we are going to look at the closest street for each location.
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Calculate the distance between the point (defined below) and all streets. And what is the minimum distance?
# * Use the `idxmin()` method to get the label of the row that contains the minimum distance.
# * Using the result of `idxmin()`, we can get the row or the value in the "highway" column that corresponds to the street that is closest to `point`.
#
# <details><summary>Hints</summary>
#
# * The distance method of a Shapely geometry does not accept a GeoSeries of geometries, only a single other geometry (so `point.distance(series)` does not work). However, the distance method is commutative, so you can always switch the order to use the distance method of the GeoSeries (`series.distance(point)` does work).
# * Given a row label, you can get the value of a Series/column with `s[label]`, or the row of a DataFrame with `df.loc[label]`. With both row label and column name, you can get the corresponding value of a DataFrame with `df.loc[label, column_name]`.
# </details>
#
# </div>
# We take the first point geometry in the Gent dataset:
# Take the first measurement location (a Shapely Point) from the Gent subset,
# using the .geometry accessor for the active geometry column.
point = gdf_gent.geometry.iloc[0]
# + jupyter={"outputs_hidden": false}
point
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality56.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality57.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality58.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality59.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality60.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality61.py
# -
# We now want to repeat the above analysis for each measurement location. So let's start with writing a reusable function.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Define a function `closest_road_type` that accepts a single point and the streets dataframe, and returns the class of the closest street.
# * Check that the function works by using it on the `point` defined above.
#
# As help, you can start from this skeleton:
#
# ```python
# def closest_road_type(point, streets):
# # determine "highway" tag of the closest street
# idx_closest = ...
# ...
# return ...
# ```
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality62.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality63.py
# -
# Now we can apply this function to each of the point locations. However, with this brute force method, applying it as is using `gdf_gent` and `streets` takes quite some time. We can speed up the distance calculation by reducing the number of linestrings in the `streets` dataframe to compare with.
#
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Create a `streets_unioned` dataframe with a single line per road type (a union of all lines per road type). Check the `dissolve()` method for this.
# * Repeat the `apply` call, but now on all points and using `streets_unioned` instead of `streets`.
# * Assign the result of the apply to a new columns "road_type".
# * Do a value counts of this new column.
#
# </div>
# When running this, you can see it already takes a bit of time, even for the first 20 rows:
# + jupyter={"outputs_hidden": false}
# %time gdf_gent.geometry.head(20).apply(lambda point: closest_road_type(point, streets))
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality64.py
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality65.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality66.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality67.py
# -
# **Note!** We have been using a brute-force search for the closest street by calculating for each point the distance to all streets. This is a good exercise to learn the syntax, but there are however better methods for such "nearest" queries. See eg https://automating-gis-processes.github.io/site/notebooks/L3/nearest-neighbor-faster.html
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# There are some uncommon categories. For the remainder of this demo, let's group some related categories, and filter out some others.
#
# * Using the defined mapping, replace some values with "pedestrian" in the `"road_type"` column.
# * Using the defined subset of categories, create a subset of `gdf_gent` where the road type "is in" this subset of categories (look at the pandas `isin()` ([doc link](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.isin.html)) method). Call the result `subset`, and do again a value counts to check the result.
#
# <details><summary>Hints</summary>
#
# * The `replace()` method can be called on a column. Pass a dictionary to this method, and the keys of the dictionary present in the column will be replaces with the corresponding values defined in the dictionary.
# * The `Series.isin()` method accepts a list of values, and will return a boolean Series indicating for each element of the original Series whether it is present in the list of values or not.
# * The boolean Series can then be used to filter the original `gdf_gent` dataframe using boolean indexing.
# </details>
#
# </div>
# Replace categories:
# Map the rarer pedestrian-like OSM "highway" tags onto a single
# "pedestrian" category (intended for Series.replace on the "road_type" column).
mapping = {
    "footway": "pedestrian",
    "living_street": "pedestrian",
    "path": "pedestrian",
}
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality68.py
# -
# Filter categories:
categories = ["primary", "secondary", "tertiary", "residential", "pedestrian"]
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality69.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality70.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Calculate the average measured concentration depending on the type of the road next to the measurement location.
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality71.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# * Similarly, make a plot with `seaborn` of those results. Specify the `categories` as the order to use for the plot.
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality72.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/case-curieuzeneuzen-air-quality73.py
# -
# This analysis confirms that the NO2 concentration is clearly related to traffic.
| notebooks/case-curieuzeneuzen-air-quality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# ## Introduction to Numpy
#
# This workbook aims to provide a quick and in-depth introduction to `numpy`.
import numpy as np
# ``from numpy import *``
#
# This is not a good idea as `numpy` namespace is large and contains a number of functions which can conflict with standard built-in Python functions (like min and max). As a best practice, it is good to alias the namespace as np, so all the methods of `numpy` would be accessible via the np.<method_name> syntax.
#
# +
my_arr = np.arange(1000000)
my_list = list(range(1000000))
# -
# %time for _ in range(10): my_arr2 = my_arr * 2
# %time for _ in range(10): my_list2 = [x * 2 for x in my_list]
# *** Generating Random Data ***
data = np.random.randn(2,3) #Sample from Standard Normal Distribution
data
data * 10 # batch mathematical operations can be performed directly. Element-wise multiplication.
data + data # Element-wise addition.
# ## `ndarray` Multi-dimensional Arrays
#
# An `ndarray` is a generic-multidimensional container for homogenous data. In simple terms, all the elements should be of same type. For example, all ints, all floats etc.
#
# Every array has a `shape` property, which is a tuple describing the dimensions.
data.shape #2 rows, 3 columns
# The `dtype` property describes the data type of the array.
data.dtype # in this case it is float64
# ### Creating `ndarray`
#
# To create an array the `array` function can be used. A simple list can be converted into `ndarray`.
# +
data1 = [1,2,3,4,5,6]
arr1 = np.array(data1)
print('Array: {}'.format(arr1))
print('Type : {}'.format(type(arr1)))
# -
# Nested sequences will be converted into multi-dimensional lists.
# +
data2 = [[1,2,3,4,5],[6,7,8,9,10]]
arr2 = np.array(data2)
print('Array: {}'.format(arr2))
print('Type: {}'.format(type(arr2)))
print('Type (alternative): {}'.format(arr2.dtype))
print('Dimension: {}'.format(arr2.ndim))
print('Shape: {}'.format(arr2.shape))
# -
# In previous example, the nested lists were of same length. What will happen if it is not the case?
# +
data3 = [[1,2,3,4,5],[6,7,8,9]]
arr3 = np.array(data3)
print('Array: {}'.format(arr3))
print('Type: {}'.format(type(arr3)))
print('Type (alternative): {}'.format(arr3.dtype))
print('Dimension: {}'.format(arr3.ndim))
print('Shape: {}'.format(arr3.shape))
# -
# *** Notice, since the length of the lists is not the same. The numpy module created a single dimensional array of lists, instead of multi-dimensional array. ***
#
# > *** Important: *** While creating a numpy array from individual list remember this check.
#
#
# Additionally, you can create initialized arrays using helper methods like `zeros`, `ones` and `empty`.
#
# 1. `zeros` : Initializes an array of requested shape with all zeros.
#
# 2. `ones` : Initializes an array of requested shape with all ones.
#
# 3. `empty` : Initializes an array of requested shape with *** random values. ***
#
# To create higher order arrays pass in tuple with shape specification.
#
# Let's look at an illustration.
z1 = np.zeros(10)
print('Array : {}'.format(z1))
print('Dimension: {}'.format(z1.ndim))
print('Shape: {}'.format(z1.shape))
z2 = np.zeros((2,3))
print('Array : {}'.format(z2))
print('Dimension: {}'.format(z2.ndim))
print('Shape: {}'.format(z2.shape))
o1 = np.ones((2,3))
print('Array : {}'.format(o1))
print('Dimension: {}'.format(o1.ndim))
print('Shape: {}'.format(o1.shape))
e1 = np.empty((2,3))
print('Array : {}'.format(e1))
print('Dimension: {}'.format(e1.ndim))
print('Shape: {}'.format(e1.shape))
# *** Note: *** It is not necessary that the values will be either 0, or 1 they can be random in case of `np.empty` (garbage values).
# `arange` is array-valued version of Python's `range` function.
np.arange(10)
# *** Array Creation Functions ***
#
# | Function | Description |
# |----------|-------------|
# | array | Convert input data (list, tuple, array, or other sequence to array) |
# | asarray | Convert input to ndarray, but do not copy if it is already an ndarray |
# | arange | Built-in range function, returns ndarray instead of list. |
# | ones | Array of 1's with given shape and size. |
# | ones_like | Create ndarray of ones matching shape and size of input array |
# | zeros | Array of O's with given shape and size. |
# | zeros_like | Create ndarray of zeros matching shape and size of input array |
# | empty | Array of random (garbage) data with given shape and size. |
# | empty_like | Create ndarray of random (garbage) data matching shape and size of input array |
# | full | Array of given element data indicated by "fill value" of given shape and size. |
# | full_like | Create ndarray of given element data indicated by "fill value" matching shape and size of input array |
# | eye, identity | Create a square NxN identity matrix (1's on the diagonal, 0's elsewhere) |
#
#
# ## Data Types for ndarrays
#
# The *data type* or *dtype* is a special object containing the information (or metadata about the data). Different types of Numpy data types supported are:
#
# int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32, float64, float128, complex64, complex128, complex256, bool, object, string_, unicode_
#
# Explicit, type-casting of an array can be done using the `astype` method.
#
arr = np.array([1,2,3,4,5])
print('Initial DataType : {}'.format(arr.dtype))
float_arr = arr.astype(np.float64)
print('Converted DataType : {}'.format(float_arr.dtype))
# The above example is converting int to floating point. *** Note: *** When casting from floating-point to int, the decimal part is truncated. This is a typical *down-casting* behaviour which is lossy in nature (irreversible). That is you loose the information represented by the decimal part, and it cannot be recovered by converting int to float again. The context of the problem to be solved is important.
# NumPy removed the deprecated scalar aliases `np.int`, `np.float` and
# `np.string_` (aliases of the builtins / `np.bytes_`); on NumPy >= 1.24 the
# original code raised AttributeError. Use the explicit dtypes instead.
arr = np.array([3.7,2.8, -0.5, 0.5, 12.1, 11.4])
print('Initial DataType : {}'.format(arr.dtype))
int_arr = arr.astype(np.int64)  # float -> int truncates toward zero (no rounding)
print('Converted DataType: {}'.format(int_arr)) # Note there is no rounding.
numeric_string = np.array(['1.25', '-9.6', '42'], dtype=np.bytes_)
numeric_string.astype(np.float64)  # byte strings parse directly to floats
# *** Calling `astype` *always* creates a new array (a copy of the data), even if the new dtype is the same as the old dtype. ***
# ## Arithmetic with NumPy Arrays
#
# Batch operations can be done on data without writing any for loops. This is called as *vectorization*. The arrays need to be of equal-size for these operations.
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
arr
arr * arr #Element-wise operations or vectorization
arr - arr
# *** Scalar Operations ***
1/arr
arr ** 0.5
# Comparisons between ndarrays result in an ndarray of the same shape and dimension.
arr2 = np.array([[0., 4., 1.],[7., 2., 12.]])
arr2
arr2 > arr
# Basic Indexing and Slicing
arr = np.arange(10)
arr
arr[5] #6th element in the array
arr[5:8]
arr[5:8] = 12 # This will broadcast the value to the selection, and update the list.
arr # Notice the index positions 5:8 are updated in original array.
# *Note:* Array slices are *views*, the data is not copied and any modifications to the view will be reflected in the source array.
arr_slice = arr[5:8]
arr_slice
arr_slice[1] = 12345
arr
arr_slice[:] = 64 # all indexes in slice will be assigned 64
arr
# If you want a copy of a slice of an ndarray instead of a view, you will need to explicitly copy the array - for example, `arr[5:8].copy()`.
arr2d = np.array([[1,2,3],[4,5,6],[7,8,9]])
arr2d
arr2d[2]
arr2d[0][2]
arr2d[0, 2] # (rows, column) same as above and concise. Axis 0 = "Rows", Axis 1 = "Columns".
# ### Indexing with Slices
#
# In Numpy the indexing with slices works much like standard Python like slices.
arr
arr[1:6]
arr2d
arr2d[:2]
# Multiple slices can be passed just like multiple indexes, and the slices would work on respective axis.
arr2d[:2, ]
arr2d[:2, 1:]
# Assignements also work with slices.
arr2d[:2, 1:] = 0
arr2d
# ## Boolean Indexing
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = np.random.randn(7,4)
names
data
# Suppose each name corresponds to a row in the data array and we wanted to select all the rows with corresponding name 'Bob'. Like arithmetic operations '==' comparison can be vectorized.
names == 'Bob' #yields array of booleans
# This boolean array can be passed when indexing the array. *Note*: The boolean array *must* be of same length as the array axis it's indexing. Otherwise inconsistent results will be obtained which are hard to track.
data[ names== 'Bob']
# Filtering rows and indexing is possible in one shot.
data[names == 'Bob', 2:]
names != 'Bob'
data[~(names == 'Bob')] # Notice the ~ operator which is used for negation.
# Selecting multiple names using & and | operators. Note the standard python `and` and `or` operators do not work with boolean arrays.
mask = (names == 'Bob') | (names == 'Will')
mask
data[mask]
# Setting boolean values.
data[data < 0] = 0
data
data[ names != 'Joe'] = 7
data
| numpy/01_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # HMAC
# **ToDo**:
# - Add illustration for HMAC - Similar to [this](https://upload.wikimedia.org/wikipedia/commons/thumb/7/7f/SHAhmac.svg/1200px-SHAhmac.svg.png).
# - Add relevant resources at the end.
# ---
#
# Checksums provides control for **data integrity** but not for **authentication**, that is why they are vulnerable to [man in the middle attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack). A similar method that provides both features is called [message authentication code (MAC)](https://en.wikipedia.org/wiki/Message_authentication_code), not to be confused with [media access control address (MAC address)](https://en.wikipedia.org/wiki/MAC_address) used in communications and networking. In this material MAC will always refer to the former.
#
# MACs provides both **integrity** and **authentication** using a **key**. There are different ways to implement this mechanism:
#
# - Using cryptographic hashes (HMAC)
# - Using [block ciphers](https://en.wikipedia.org/wiki/Block_cipher) ([OMAC](https://en.wikipedia.org/wiki/One-key_MAC), [CCM](https://en.wikipedia.org/wiki/CCM_mode), [GCM](https://en.wikipedia.org/wiki/Galois/Counter_Mode), and [PMAC](https://en.wikipedia.org/wiki/PMAC_(cryptography)))
# - Using [universal hashes](https://en.wikipedia.org/wiki/Universal_hashing) ([UMAC](https://en.wikipedia.org/wiki/UMAC)-[VMAC](https://en.wikipedia.org/wiki/VMAC?oldformat=true) and [Poly1305-AES](https://en.wikipedia.org/wiki/Poly1305))
#
# This chapter will cover the usage of [HMAC](https://en.wikipedia.org/wiki/HMAC). Block ciphers, in particular AES, will be covered in following chapter.
#
# As with hashes, HMACs are non-reversible and only with the secret key can one produce the same hash for a given message.
#
# Since HMAC is a procedure, it does not rely on any specific hash function. Therefore different hash functions can be used, to specify exactly which, the name of the hash function is usually stated. For instance, if using SHA256 the HMAC will be called HMAC-SHA256 or HSHA256.
# + [markdown] tags=[]
# ## HMAC != Hashes
# + [markdown] tags=[]
# Hashes were shown to be a method to *hide* information such as passwords in a secure way; on the contrary, when using HMACs, the message is **completely visible / public**. The objective of HMACs is not to hide a message but to provide a way for the recipient to verify whether the message has been sent by the intended party.
#
# It should be noted that **anyone** with the secret key can produce messages, so the secret key should be exchanged over a secure channel beforehand. If someone receives the message and does not have the secret key, they will be able to see the message but they will not be able to verify whether it is coming from the intended source.
# -
# ## HMAC != Hashes + Salt (+ Pepper)
# HMAC is a method that combines a **secret key** and the original message in such a way that, if changing the key, the message will also change. At first, it may seem that this key is analogous to a salt or a pepper when hashing but the main difference is that the key is not simply appended/preppended to the message but rather mixed in a cryptographically secure way.
#
# Moreover salts are public whereas HMAC keys are not and pepper could be re-discovered whereas HMAC should not.
# + [markdown] tags=[]
# ## HMAC != Checksums
# + [markdown] tags=[]
# While both checksums and HMACs provide integrity, the main difference is that checksums have no password/key protection and hence a given input will always have the same output, whereas in HMACs the output depends on both the input and the password/key.
# -
# ## Generating HMAC
# Python has the [`hmac` module](https://docs.python.org/3/library/hmac.html) as part of the standard library and can be combined with the hash functions used in previous chapters.
#
# As well as with the `hashlib` it is possible to create HMACs with a single function call or with the builder pattern.
import hmac
import hashlib
# ### Using single Function
# The single function approach will always return a bytes object, if the hex representation is desired, use any of the methods available (see [Bytes Appendix](91_Bytes.ipynb)).
# +
secret_key = b"my secret"
message = b"Hello World!"
message_hmac_bytes = hmac.digest(secret_key, message, digest=hashlib.sha256)
message_hmac = message_hmac_bytes.hex()
message_hmac
# -
# ### Using Builder Pattern
# One change in this implementation is that the parameter is called `digestmod` instead of `digest`. For the bytes output one can use the `digest` method and for the hex output the `hexdigest` method.
# +
secret_key = b"my secret"
message = b"Hello World!"
keyed_hasher = hmac.new(secret_key, digestmod=hashlib.sha256)
keyed_hasher.update(message)
message_hmac = keyed_hasher.hexdigest()
message_hmac
# -
# ## Verifying a Message
# **Note**: the `hmac.compare_digest` is exactly the same as `secrets.compare_digest`. The latter is an alias of the former.
def verify(received_message, received_hmac, key):
    """Recompute the HMAC-SHA256 of *received_message* under *key* and
    compare it against *received_hmac* in constant time, returning a
    human-readable verdict string.
    """
    expected = hmac.digest(key, received_message, digest=hashlib.sha256).hex()
    if hmac.compare_digest(received_hmac, expected):
        return "Match, message and the key are consistent"
    return "Mismatch, either the message has been modified or key is incorrect"
# ### Altered Message
# +
received_message = b"Hello Wrold!"
received_hmac = '3262c371784a36377154bdeb0bfbfc6ebf88591a7e564dbb0c4c7ee16c273440'
guess_key = b"my secret"
print(verify(received_message, received_hmac, guess_key))
# -
# ### Wrong Key
# +
received_message = b"Hello World!"
received_hmac = '3262c371784a36377154bdeb0bfbfc6ebf88591a7e564dbb0c4c7ee16c273440'
guess_key = b"password"
print(verify(received_message, received_hmac, guess_key))
# -
# ### Correct Message and Key
# +
received_message = b"Hello World!"
received_hmac = '3262c371784a36377154bdeb0bfbfc6ebf88591a7e564dbb0c4c7ee16c273440'
guess_key = b"my secret"
print(verify(received_message, received_hmac, guess_key))
# -
# ## Conclusion
# HMACs are a sequence of bytes that can only be produced with a given input and a password/key; the input message is generally public and the HMAC serves as a means of integrity, without requiring a secure channel. The drawback is that the password/key itself should be delivered by a secure channel to begin with.
| chapters/05_HMAC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !open .
# Bug fix: the original bare expression `sklearn.datasets` raised NameError
# because the `sklearn` package was never imported.
import sklearn.datasets
# NOTE(review): `load_boston` was deprecated in scikit-learn 1.0 and removed
# in 1.2 (ethical concerns with the dataset); this cell needs scikit-learn < 1.2.
from sklearn.datasets import load_boston
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = load_boston()
print(dataset["DESCR"])
X = dataset.data       # feature matrix (506 x 13)
y = dataset.target     # median house value target
dataset.feature_names  # bare expression: displays the column names in the notebook
from sklearn import linear_model
# +
#supervisado
lr = linear_model.LinearRegression()
X, y = load_boston(return_X_y=True)
lr.fit(X,y)
yhat = lr.predict(X)
plt.figure(figsize=(12,9))
plt.scatter(y,yhat,edgecolors=(0, 0, 0))
plt.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
plt.xlabel('Measured')
plt.ylabel('Predicted')
# -
df = pd.DataFrame(X,columns=dataset.feature_names)
df["Target"] = y
df.head()
# %matplotlib inline
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
x=np.array([[1,2],[1,4],[1,0],[4,2],[4,4],[4,0]])
kmeans= KMeans(n_clusters=2, random_state=0).fit(x)
kmeans.cluster_centers_
plt.scatter(x[:,0],x[:,1],c=kmeans.labels_)
from sklearn.datasets import make_blobs
X,y = make_blobs(n_samples=1000, centers=3, n_features=2,
random_state=0, cluster_std=.9)
plt.scatter(X[:,0],X[:,1])#,c=y
plt.scatter(X[:,0],X[:,1],c=y)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=0).fit(X)
plt.scatter(X[:,0],X[:,1],c=kmeans.labels_)
nx = np.array([[0, 0], [4, 3], [0, 4]])
predict= kmeans.predict(nx)
predict
plt.scatter(X[:,0],X[:,1],c=y)
plt.scatter(nx[:,0],nx[:,1],c='r')
# +
# Para descargar directamente desde una pagina
# # %load
# -
# # 2 Ejemplo <NAME>
import pandas as pd
NominasList = ["nomina_0_3000.csv","nomina_10001_.csv","nomina_3001_4000.csv","nomina_4001_4500.csv","nomina_4901_4999.csv","nomina_5001_7000.csv","nomina_7000_10000.csv"]
# +
names=["Nombre", "Dependencia", "Puesto","Percepciones", "Deducciones", "Neto"]
mi_df = lambda x: pd.read_csv(x,usecols = [1,2,3,6,7,8], names = names, header=None, skiprows=2)
df_from_each_file = (mi_df(f) for f in NominasList)
#df_from_each_file = (mi_df("Dataset/"+f) for f in NominasList) #está dentro de una carpeta
nomina = pd.concat(df_from_each_file, ignore_index = True)
# -
nomina.dtypes
nomina.plot.scatter("Percepciones","Deducciones",figsize=(12,9))
# +
X = nomina[ ["Percepciones", "Deducciones"]].values.reshape(-1,2)
k_means = KMeans(n_clusters=2).fit(X)
centroides = k_means.cluster_centers_
etiquetas = k_means.labels_
centroides, etiquetas
# +
color = ['r' if x else 'g' for x in etiquetas]
nomina.plot.scatter("Percepciones", "Deducciones", figsize=(12,9),c = color )
# -
# Elbow method: fit k-means for k = 1..9 and plot the inertia ("distortion")
# against k; the bend in the curve suggests a good number of clusters.
distortions = []
K = range(1,10)
for k in K:
    kmeanModel = KMeans(n_clusters=k)
    kmeanModel.fit(X)
    distortions.append(kmeanModel.inertia_)
plt.figure(figsize=(16,8))
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')  # fixed typo ";ethod"
plt.show()  # bug fix: was `plt.show` (attribute access only, never called)
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(10).reshape(-1,1)
y = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
model = LogisticRegression(solver='liblinear',random_state = 0).fit(x,y)
model.fit(x, y)
model = LogisticRegression(solver='liblinear', random_state=0).fit(x,y)
model.classes_
| Untitled7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#https://arxiv.org/pdf/1110.2515.pdf
# -
import subprocess
import os
import pickle
saveFile1 = ''
saveFile2 = ''
# # Compare covers using NMI
def parseAnswer(answer):
    """Parse the stdout lines of the ``onmi`` tool into [label, value, ...] rows.

    Only tab-separated lines with at least two fields are kept; the label
    field is normalised by stripping ':' and spaces.
    """
    rows = [fields for fields in (line.split('\t') for line in answer)
            if len(fields) >= 2]
    for row in rows:
        row[0] = row[0].replace(':', '').replace(' ', '')
    return rows
def compareCovers(cover1, cover2, folderNMI, NMI_type='NMI<Max>'):
    """Run the external ``onmi`` binary on two cover files and return one score.

    *folderNMI* is the directory (with trailing slash) containing ``onmi``.
    *NMI_type* selects which reported score to return; for an unknown type a
    warning is printed and the full parsed output is returned instead.
    """
    command = '{}onmi {} {}'.format(folderNMI, cover1, cover2)
    proc = subprocess.run(command, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    rows = parseAnswer(proc.stdout.decode('ascii').split('\n'))
    # onmi prints the three scores in this fixed order.
    row_index = {'NMI<Max>': 0, 'lfkNMI': 1, 'NMI<Sum>': 2}
    if NMI_type in row_index:
        return rows[row_index[NMI_type]][1]
    print('Wrong NMI_type!\n')
    return rows
folderNMI = '' #nmi file (directory containing the `onmi` executable, trailing slash)
c1 = '' #cover1 file
c2 = '' #cover2 file
folderNMI +'onmi ' + c1 + ' ' + c2  # bare expression: previews the shell command in the notebook
# Bug fix: compareCovers() requires the onmi folder as its third argument;
# the original call `compareCovers(c1, c2)` raised a TypeError.
compareCovers(c1, c2, folderNMI)
# # Compare Clusters
def openCover(file):
    """Load a pickled cover (list of node clusters) from *file*."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def openGraph(file):
    """Load a pickled (networkx) graph object from *file*."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def calcDegreeCentrality(g):
    """Map each vertex of *g* to its degree (number of neighbours).

    Works for any mapping-like graph where ``g[v]`` is the adjacency of
    *v* (plain dict-of-lists or a networkx graph).
    """
    return {vertex: len(g[vertex]) for vertex in g}
def calcNodesCentrality(g1, g2):
    """Degree centrality of every node appearing in either graph.

    Nodes present in both graphs get the average of their two degrees;
    nodes present in only one graph keep that graph's degree.
    """
    centrality = {node: len(g1[node]) for node in g1}
    for node in g2:
        if node in centrality:
            centrality[node] = (len(g1[node]) + len(g2[node])) / 2
        else:
            centrality[node] = len(g2[node])
    return centrality
def clusterCentrality(cluster, g, nodeCentrality):
    """Summed node centrality of *cluster*.

    *g* is unused but kept for interface compatibility with the callers.
    """
    return sum(nodeCentrality[node] for node in cluster)
def calcClustersCentralities(cover, g, nodeCentrality):
    """Map cluster index -> summed node centrality for every cluster in *cover*."""
    return {index: clusterCentrality(cluster, g, nodeCentrality)
            for index, cluster in enumerate(cover)}
def comunitySimilarity(c1, c2, n1, n2, nodeCentrality, clustersCentralities1, clustersCentralities2):
    """Centrality-weighted similarity of two clusters.

    The overlap weight is the summed centrality of nodes shared by *c1*
    and *c2*; it is returned normalised twice, as
    ``[overlap / max(cent1, cent2), overlap / min(cent1, cent2)]``, where
    the cluster centralities are looked up via the indices *n1* / *n2*.
    """
    shared_weight = sum(nodeCentrality[node] for node in c1 if node in c2)
    centrality_a = clustersCentralities1[n1]
    centrality_b = clustersCentralities2[n2]
    return [shared_weight / max(centrality_a, centrality_b),
            shared_weight / min(centrality_a, centrality_b)]
def bestComunitySimilarity(comunity, cover1, nodeCentrality, clustersCentralities1, clustersCentralities2):
    """Find the cluster of *cover1* most similar to *comunity*.

    Similarity is the summed centrality of the shared nodes divided by the
    larger of the two cluster centralities (the same "max" normalisation
    used by comunitySimilarity). Returns ``(best_similarity, cluster_index)``;
    the index is -1 when *cover1* is empty.

    Bug fix: the previous version called comunitySimilarity() with 5
    arguments although it takes 7, and then compared its list return value
    against a number -- both raised TypeError. The similarity is now
    computed inline from nodeCentrality; *clustersCentralities2* is no
    longer consulted (the community's own centrality is derived from its
    nodes) but the parameter is kept for interface compatibility.
    """
    # Total centrality of the candidate community itself.
    comunity_centrality = sum(nodeCentrality[n] for n in comunity)
    comunity_set = set(comunity)  # O(1) membership tests in the loop
    higherSimilarity = -1
    nCluster = -1
    for i, cluster in enumerate(cover1):
        shared = sum(nodeCentrality[n] for n in cluster if n in comunity_set)
        denom = max(clustersCentralities1[i], comunity_centrality)
        similarity = shared / denom if denom else 0.0
        if similarity > higherSimilarity:
            higherSimilarity = similarity
            nCluster = i
    return higherSimilarity, nCluster
def coverSimilarities(cover1, cover2, nodeCentrality, clustersCentralities1, clustersCentralities2, sizeThreshold=10):
    """Pairwise similarity matrix between the clusters of two covers.

    Entry ``[i][j]`` is comunitySimilarity() of ``cover1[i]`` and
    ``cover2[j]``; any pair where either cluster has fewer than
    *sizeThreshold* nodes is reported as ``[0, 0]``.
    """
    matrix = []
    for n1, c1 in enumerate(cover1):
        row = []
        for n2, c2 in enumerate(cover2):
            if len(c1) >= sizeThreshold and len(c2) >= sizeThreshold:
                row.append(comunitySimilarity(c1, c2, n1, n2, nodeCentrality,
                                              clustersCentralities1,
                                              clustersCentralities2))
            else:
                row.append([0, 0])
        matrix.append(row)
    return matrix
def compareCovers(all_similarities, threshold):
    """List ``[i, j, score]`` for every cluster pair whose max-normalised
    similarity (first entry of the matrix cell) reaches *threshold*.
    """
    return [[i, j, cell[0]]
            for i, row in enumerate(all_similarities)
            for j, cell in enumerate(row)
            if cell[0] >= threshold]
cover1 = openCover('') #cover1 pickle file
cover2 = openCover('') #cover2 pickle file
g1 = openGraph('') #networkx graph1 pickle file
g2 = openGraph('') #networkx graph1 pickle file
nodeCentrality = calcNodesCentrality(g1,g2)
c1Centralities = calcClustersCentralities(cover1, g1, nodeCentrality)
c2Centralities = calcClustersCentralities(cover2, g2, nodeCentrality)
all_similarities = coverSimilarities(cover1, cover2, nodeCentrality, c1Centralities, c2Centralities, sizeThreshold=10)
similarity_threshold = 0.4
similar_clusters = compareCovers(all_similarities, similarity_threshold)
len(similar_clusters)
# ### Similar CLusters
similar_clusters
# ### Cluster y from cover 1
# +
lista = []
y = 26
for n in cover1[y]:
lista.append([g1.nodes()[n]['peso'], n])
sorted(lista, key = lambda x: x[1], reverse=True)
# -
# ### Cluster y from cover 2
# +
lista = []
y = 8
for n in cover2[y]:
lista.append([dictionaryCodeMerged2[n], g2.nodes()[n]['peso'], n])
sorted(lista, key = lambda x: x[1], reverse=True)
# -
# ### Nodes that belong to Clusters i of Cover 1 and j from Cover 2
# +
i = 12
j = 19
lista_iguais = []
for n in cover1[i]:
if n in cover2[j]:
lista_iguais.append([dictionaryCodeMerged2[n], g1.nodes()[n]['peso'], g2.nodes()[n]['peso']])
sorted(lista_iguais, key = lambda x: x[1], reverse=True)
# -
# # View in Cytoscape
import networkx as nx
from py2cytoscape import cyrest
from py2cytoscape import util as cy
from py2cytoscape.data.cyrest_client import CyRestClient
from IPython.display import Image
c1 = cover1[14]
c2 = cover1[19]
c3 = cover2[5]
# +
#comparison of two clusters from different covers
new_graph = nx.Graph()
for n in c1:
if n in c2:
new_graph.add_node(n, peso=g1.nodes()[n]['peso']+g2.nodes()[n]['peso'], clusters=3, dicionario=dictionaryCodeMerged1[n])
else:
new_graph.add_node(n, peso=g1.nodes()[n]['peso'], clusters=1, dicionario=dictionaryCodeMerged1[n])
for n in c2:
if n not in c1:
new_graph.add_node(n, peso=g2.nodes()[n]['peso'], clusters=2, dicionario=dictionaryCodeMerged2[n])
for e in g1.edges():
if new_graph.has_node(e[0]) and new_graph.has_node(e[1]):
if e in g2.edges():
new_graph.add_edge(e[0], e[1], weight= g1[e[0]][e[1]]['weight'] + g2[e[0]][e[1]]['weight'])
else:
new_graph.add_edge(e[0], e[1], weight= g1[e[0]][e[1]]['weight'])
for e in g2.edges():
if new_graph.has_node(e[0]) and new_graph.has_node(e[1]):
if e not in g1.edges():
new_graph.add_edge(e[0], e[1], weight= g2[e[0]][e[1]]['weight'])
# +
#evolution of two clusters into one cluster
new_graph = nx.Graph()
count = 0
dicionario1={}
dicionario2={}
dicionario3={}
for n in c1:
new_graph.add_node(count, code=n, peso=g1.nodes()[n]['peso'], clusters=1, dicionario=dictionaryCodeMerged1[n])
dicionario1[n] = count
count += 1
for n in c2:
new_graph.add_node(count, code=n, peso=g1.nodes()[n]['peso'], clusters=2, dicionario=dictionaryCodeMerged1[n])
dicionario2[n] = count
count += 1
for n in c3:
new_graph.add_node(count, code=n, peso=g2.nodes()[n]['peso'], clusters=3, dicionario=dictionaryCodeMerged2[n])
dicionario3[n] = count
count += 1
for n in c1:
if n in c2:
new_graph.add_edge(dicionario1[n], dicionario2[n])
if n in c3:
new_graph.add_edge(dicionario1[n], dicionario3[n])
for n in c2:
if n in c3:
new_graph.add_edge(dicionario2[n], dicionario3[n])
# -
#nx.write_gml(new_graph, saveFile1 + 'cls_12_19.gml')
nx.write_gml(new_graph, saveFile1 +'cls_14,19_5.gml')
cytoscape=cyrest.cyclient()
cyjs = CyRestClient()
plot = cyjs.network.create_from_networkx(new_graph)
#create groups
for i in range(1,4):
group = ''
for n in new_graph.nodes():
if new_graph.nodes()[n]['clusters'] == i:
group += 'name:' + str(n) + ','
group = group[:-1]
cytoscape.group.create(nodeList=group, groupName='group'+str(i))
dictionaryCodeMerged2['bn:00107646a']
for n in new_graph.nodes():
print( new_graph.nodes()[n]['clusters'])
| compareCovers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
import numpy as np
import pandas as pd
import pysubgroup as ps
# Make the project's local packages importable and locate the saved-data
# folder. NOTE(review): the '\\' separators make these paths Windows-only.
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'sd-4sql\\packages'))
saved_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))),'Data\\saved-data\\')
from sd_analysis import *
from subgroup_discovery import *
from sd_postprocessing import *
import matplotlib.pyplot as plt
import matplotlib
import warnings
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings("ignore")
# ### Table = mvtrealise
queries = pd.read_csv(saved_path + 'dataset-d2.csv')
# ## Use cases 1 : Execution TIME
# #### Support
# NOTE(review): `table` is passed below but never defined in this notebook;
# the heading suggests it should refer to the 'mvtrealise' table — define it
# before running these cells.
# Subgroup discovery on the binary 'time_disc' target with four different
# quality measures, all via depth-1 Beam Search.
result_supp = sd_binary_table (queries, table = table, _target = 'time_disc', mesure = 'Support', _depth = 1,
                               threshold = 10000, result_size = 100, algorithm = 'Beam Search', _beam_width = 100)
res_supp = result_supp.to_dataframe()
# #### Lift
result_lift = sd_binary_table (queries, table = table, _target = 'time_disc', mesure = 'Lift', _depth = 1,
                               threshold = 10000, result_size = 100, algorithm = 'Beam Search', _beam_width = 100)
res_lift = result_lift.to_dataframe()
# #### WRAcc
result_wracc = sd_binary_table (queries, table = table, _target = 'time_disc', mesure = 'WRAcc', _depth = 1,
                                threshold = 10000, result_size = 100, algorithm = 'Beam Search', _beam_width = 100)
res_wracc = result_wracc.to_dataframe()
# #### Binomial
result_binomial = sd_binary_table (queries, table = table, _target = 'time_disc', mesure = 'Binomial', _depth = 1,
                                   threshold = 10000, result_size = 100, algorithm = 'Beam Search', _beam_width = 100)
res_binomial = result_binomial.to_dataframe()
# #### Post-processing
# Bar chart and p-n space of the top subgroups found with the Lift measure.
plot_sgbars(res_lift, 10, ylabel="target share", title="Discovered Subgroups", dynamic_widths=False, _suffix="")
plot_npspace(res_lift, 10, queries, annotate=True, fixed_limits=False)
# Greedy Jaccard-based de-duplication of the top-10 subgroup descriptions.
d, d_names, sg_names = greedy_jaccard(result_lift.to_descriptions(),10, queries, 0.8)
for sg in d_names.keys() :
    print(sg)
similarity_sgs(result_lift.to_descriptions(), 10, queries, color=True)
indices = similarity_dendrogram(result_lift.to_descriptions(), 20, queries,truncated = True, p = 5)
# Keep only the representative subgroups selected via the dendrogram.
res_raf = res_lift[res_lift.index.isin(indices)]
plot_sgbars(res_raf, res_raf.shape[0], ylabel="target share", title="Discovered Subgroups",
            dynamic_widths=False, _suffix="")
plot_npspace(res_raf, res_raf.shape[0], queries, annotate=True, fixed_limits=False)
| Code/sd-4sql/notebooks/.ipynb_checkpoints/use-case1-d2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Education level of voters in the 2020 election
# ### Importing the Pandas and Matplotlib libraries
import pandas as pd
import matplotlib.pyplot as plt
# ### Selecting the columns used for the analysis
# NOTE(review): 'HH_GERACAO' does not follow the naming pattern of the other
# TSE voter-profile columns below — confirm it is the intended column name.
colunasSelecionadas = ['HH_GERACAO', 'SG_UF', 'NM_MUNICIPIO', 'CD_MUNICIPIO', 'NR_ZONA',
                       'DS_GENERO', 'DS_FAIXA_ETARIA', 'DS_GRAU_ESCOLARIDADE',
                       'QT_ELEITORES_PERFIL']
# ### Reading the CSV file (';'-separated, latin-1 encoded)
dados_2020 = pd.read_csv('Dados/perfil_eleitorado_2020.csv', sep=';', encoding='latin-1', usecols=colunasSelecionadas)
# #### Table with the loaded, column-filtered data
dados_2020
# ### Aggregating education level for Brazil and for the country's main cities
# +
# Voter counts per education level, nationally and per major city; the last
# series ranks municipalities by their number of illiterate voters.
escolaridade_Brasil_2020 = dados_2020.groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
escolaridade_SP2020 = dados_2020.query("NM_MUNICIPIO == 'SÃO PAULO'").groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
escolaridade_RJ2020 = dados_2020.query("NM_MUNICIPIO == 'RIO DE JANEIRO'").groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
escolaridade_FO2020 = dados_2020.query("NM_MUNICIPIO == 'FORTALEZA'").groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
escolaridade_BH2020 = dados_2020.query("NM_MUNICIPIO == 'BELO HORIZONTE'").groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
escolaridade_MA2020 = dados_2020.query("NM_MUNICIPIO == 'MACEIÓ'").groupby('DS_GRAU_ESCOLARIDADE').QT_ELEITORES_PERFIL.sum()
analfabeto_municipios = dados_2020.query("DS_GRAU_ESCOLARIDADE == 'ANALFABETO'").groupby('NM_MUNICIPIO').QT_ELEITORES_PERFIL.sum().sort_values(ascending=False).head()
# -
# ### Chart: voters' education level in Brazil
# +
def _grafico_eleitores(serie, titulo, cor, xlabel='Escolaridade',
                       ylabel='Total de pessoas', figsize=(30, 10)):
    """Draw one voter-profile bar chart.

    Replaces six copy-pasted plotting cells; only the aggregated series,
    title, bar colour, axis labels and figure size varied between them.
    Titles and labels stay in Portuguese because they are user-facing.
    """
    plt.title(titulo)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Series.keys() is its index (the categories); values are the bar heights.
    plt.bar(serie.keys(), serie.to_list(), label="2020", color=cor, width=0.4)
    # rcParams is set after bar(), exactly as in the original cells.
    plt.rcParams['figure.figsize'] = figsize
    plt.grid(True)
    plt.show()


_grafico_eleitores(escolaridade_Brasil_2020,
                   'Grau de escolaridade dos eleitores brasileiros',
                   '#6959CD', ylabel='Total de pessoas (valor em milhões)')
# -
# ### Chart: municipalities with the highest counts of illiterate voters
# Note that this count is proportional to each municipality's population.
# +
_grafico_eleitores(analfabeto_municipios,
                   'Municipios com o maior número de eleitores analfábetos',
                   '#8FBC8F', xlabel='Municipios')
# -
# ### Chart: voters' education level in São Paulo-SP
# +
_grafico_eleitores(escolaridade_SP2020,
                   'Grau de escolaridade dos eleitores brasileiros do municipio de São Paulo',
                   '#0000CD', ylabel='Total de pessoas (valor em milhões)')
# -
# ### Chart: voters' education level in Rio de Janeiro-RJ
# +
_grafico_eleitores(escolaridade_RJ2020,
                   'Grau de escolaridade dos eleitores brasileiros do municipio do Rio de janeiro',
                   '#2F4F4F', ylabel='Total de pessoas (valor em milhões)',
                   figsize=(32, 10))
# -
# ### Chart: voters' education level in Fortaleza-CE
# +
_grafico_eleitores(escolaridade_FO2020,
                   'Grau de escolaridade dos eleitores brasileiros do municio de Fortaleza',
                   '#8B4513', figsize=(32, 10))
# -
# ### Chart: voters' education level in Belo Horizonte-MG
# +
_grafico_eleitores(escolaridade_BH2020,
                   'Grau de escolaridade dos eleitores brasileiros do municipio de Belo Horizonte',
                   '#FF00FF', figsize=(32, 10))
# -
# ### Chart: voters' education level in Maceió-AL
# +
_grafico_eleitores(escolaridade_MA2020,
                   'Grau de escolaridade dos eleitores brasileiros do municipio de Maceió',
                   '#FFFF00', figsize=(32, 10))
# -
| analise_dados_eleitorais.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/luisdiaz1997/Python-Lessons/blob/master/Python_Workshop_Week1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8ZG3RSMEG_QU" colab_type="text"
# # PACKAGES
#
# There are tools you can download and import into your code. Think of these like your toolbox: they let you create plots, analyze data, and much more.
# + id="RAKk3_pBjh4E" colab_type="code" colab={}
# + [markdown] id="HRi-8zH1HC2M" colab_type="text"
# # IMPORT DATA
#
# Pd. - pandas : used for data manipulation and analysis
#
# pd.read_csv({dataframe})
#
# variables : a way to name your values to refer to them
# + id="-dTA8yZkkPKd" colab_type="code" colab={}
# Raw CSV of the IMDB movie dataset used throughout this workshop.
url_link = "https://raw.githubusercontent.com/luisdiaz1997/Python-Lessons/master/IMDB-Movie-Data.csv"
# + [markdown] id="jseXO1XqZ42H" colab_type="text"
# # Google
#
# how can you view the first five lines of the dataframe?
# + id="rE5D09WCXdos" colab_type="code" colab={}
# + id="LonoOeLIH_it" colab_type="code" colab={}
# + id="OShBVCGpH_ob" colab_type="code" colab={}
# + [markdown] id="5I1ACp7VXgCp" colab_type="text"
# ## How we can access rows
# + id="80tX4SoUH7DD" colab_type="code" colab={}
# + id="5-z1XffTH7Li" colab_type="code" colab={}
# + [markdown] id="3Q7xuTZ5m7N7" colab_type="text"
# ## How we can access multiple rows?
# + id="Twnz7qjuH7lA" colab_type="code" colab={}
# + [markdown] id="1CZUhL_CjT7l" colab_type="text"
# ##Accessing Columns
# + id="jHMPmhinH8K5" colab_type="code" colab={}
# + [markdown] id="pceAut44C9UY" colab_type="text"
# ## Get the movies released 2015
# + id="vZTCXyCGQT3x" colab_type="code" colab={}
# + [markdown] id="b4l7-tCtLOd9" colab_type="text"
# ##Get the movies with a rating higher than 8
# + id="Q1sl32MFjeHY" colab_type="code" colab={}
# + id="L7BgXQgejePQ" colab_type="code" colab={}
# + [markdown] id="fH1akRxwJEzF" colab_type="text"
# ## Get the movies with a rating higher than 7 and released 2014
# + id="clprus-tJEAe" colab_type="code" colab={}
# + [markdown] id="xR7S_rhHkC8D" colab_type="text"
# #Some useful numpy functions
# + id="VXHejzoTky4b" colab_type="code" colab={}
# + id="pGFJBokVky_q" colab_type="code" colab={}
# + [markdown] id="5ZFjmhtokyQv" colab_type="text"
# ##What was the maximum length of movies?
# + id="HYPcnhKRkJNL" colab_type="code" colab={}
# + [markdown] id="Yj09fzuJL1rT" colab_type="text"
# ## What was the maximum duration of the movies in 2014?
# + id="gXGMtrNpj0Wj" colab_type="code" colab={}
# + id="EjFkbkLcj0ha" colab_type="code" colab={}
# + [markdown] id="onQ4cHuYlcZW" colab_type="text"
# ##What was the average duration of movies in 2015?
# + id="1x1lrOjalkoO" colab_type="code" colab={}
# + id="LFNHNqQYlk88" colab_type="code" colab={}
# + [markdown] id="8eJhxWzvjora" colab_type="text"
# #Challenge Question
#
# ##In the year 2016, what was the average rating of movies?
# + id="jRRfmayyH5km" colab_type="code" colab={}
# + id="ms6CBUD8H5tC" colab_type="code" colab={}
# + [markdown] id="eUCEUH-dzoXO" colab_type="text"
# ##unique function
# + id="Ilv-bjkSIB1E" colab_type="code" colab={}
# + id="uiQ-UR4AICVO" colab_type="code" colab={}
# + id="MUI2DEo4mxhV" colab_type="code" colab={}
# + [markdown] id="vHt7cLs1IDcR" colab_type="text"
# ## For loop
# + id="O0FMMBbZLN4f" colab_type="code" colab={}
# + id="A3Bn-733LOHx" colab_type="code" colab={}
# + [markdown] id="fZHGtk8aLCNI" colab_type="text"
# ## What year had best average rating?
# + id="913igq4GK-xc" colab_type="code" colab={}
# + [markdown] id="LBnr18bFLQrf" colab_type="text"
# #Challenge 2
# ##What year had the the biggest average length of movies?
# + id="YSpMGYHsLf5O" colab_type="code" colab={}
| Python_Workshop_Week1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Read data from csv file
# Log order: Score, RMSD, minRMSD, Ads, Energy
import os, csv
# Read every record of the assignment CSV into a list of rows.
csvfile = os.path.join('assignment', 'niz-uptake.csv')
with open(csvfile) as fileobj:
    rows = list(csv.reader(fileobj))
# ### Arrange data
headers = ['mof', 'metal', 'niz', 'err', 'vp', 'vf', 'ro', 'sa', 'dia']
# One column list per header, skipping the CSV's header row.
data = {h: [i[idx] for i in rows[1:]] for idx, h in enumerate(headers)}
# Convert numerical values to float (csv.reader reads strings by default).
# The original line held a corrupted, unterminated string literal ('<KEY>),
# a SyntaxError; the list is reconstructed as every numeric column, i.e. all
# headers except the categorical 'mof' and 'metal'.
keys = ['niz', 'err', 'vp', 'vf', 'ro', 'sa', 'dia']
for k in keys:
    data[k] = [float(i) for i in data[k]]
# ### Plotting the data
import os
from bokeh.plotting import figure, show
from bokeh.plotting import output_notebook
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import output_file
img_dir = '/home/kutay/Documents/git/visualization/sciviscomm/assignment/mofs'
# img_dir = 'assets/img/mofs'
data['images'] = [os.path.join(img_dir, '%s.svg' % i) for i in data['mof']]
hover = HoverTool(
tooltips="""
<div>
<img
src="@images" height="180" alt="@images" width="200"
style="float: left; margin: 0px 0px 0px 0px;"
></img>
<div>
<span style="font-size: 17px; font-weight: bold;">@mof</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Metal: </span>
<span style="font-size: 14px; color: #f44283;">@metal</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">V<sub>f</sub>: </span>
<span style="font-size: 14px; color: #f44283;">@vf</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Surface Area: </span>
<span style="font-size: 14px; color: #f44283;">@sa</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Pore diameter: </span>
<span style="font-size: 14px; color: #f44283;">@dia</span>
</div>
</div>
"""
)
# +
p = figure(plot_width=1000, plot_height=600,
           title='Pyrazinamide Uptake (g NIZ/ g MOF)',
           tools=[hover, "pan", "wheel_zoom", "box_zoom", "reset", "tap"],
           toolbar_location='right')
# NOTE(review): the title says Pyrazinamide but the y label says Isoniazid,
# and the data file is 'niz-uptake.csv' — confirm which drug is plotted.
p.xaxis.axis_label = 'Pore Volume (cm³/g)'
p.yaxis.axis_label = 'Isoniazid uptake (g PZA/ g MOF)'
source = ColumnDataSource(data=data)
# Scatter of uptake vs pore volume; styling covers the normal, non-selected
# and selected states used by the tap tool.
p.circle('vp', 'niz',
         source=source,
         size=18,
         fill_color='salmon',
         fill_alpha=0.5,
         line_color="darkred",
         nonselection_fill_color="salmon",
         nonselection_line_color="darkred",
         nonselection_fill_alpha=0.5,
         nonselection_line_alpha=1.0,
         selection_fill_alpha=0.5,
         selection_fill_color="blue",
         selection_line_color="darkred")
p.ygrid.grid_line_alpha = 0.2
p.xgrid.grid_line_color = None
show(p)
# NOTE(review): output_notebook()/output_file() are called after show(p);
# they normally need to run before show() to take effect.
output_notebook()
output_file('test.html')
# -
# ### Embed html
# +
from bokeh.resources import CDN
from bokeh.embed import file_html

# Serialize the figure into a standalone HTML page and write it to disk.
document = file_html(p, CDN, "PZA uptake")
with open('pza.html', 'w') as out_file:
    out_file.write(document)
# -
| sciviscomm/interactive-plotting-niz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: FinBERT
# language: python
# name: finbert
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hawc2/Text-Analysis-with-Python/blob/master/FinBERT_Training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8m1dOEyQtatw"
# # FinBERT Notebook for <NAME>'s research
#
# This notebooks shows how to train and use the FinBERT pre-trained language model for financial sentiment analysis.
#
# It was not designed to be used with Google Colab, so alterations listed below need to be made. I've finished some preliminary setup for installing packages and downloading data/models, but it isn't finished and there may be a better approach.
#
# # Relevant Resources
#
# Github Repo: https://github.com/ProsusAI/finBERT
#
# # Steps to Set-Up
#
# I've gotten pretty far, but I've encountered a couple confusions.
#
# The Models section in Github is not clear about where to put which models, and which config.json file to use. The main error I keep getting, which worries me may be a development issue, is the following, from the Configuring Training Parameters section:
#
# Can't load config for '/content/models/language_model/finbertTRC2'. Make sure that:
#
# - '/content/models/language_model/finbertTRC2' is a correct model identifier listed on 'https://huggingface.co/models'
#
# - or '/content/models/language_model/finbertTRC2' is the correct path to a directory containing a config.json file
#
#
# # Contacting Developer
#
# It may be worth contacting the developer and ask them for help setting up a Colab environment for this script
#
# + id="JzpSlmyLgFHI"
# + [markdown] id="vHvHaVPxUiMH"
# # Set-Up Colab Notebook
# + id="1Ede4QjEUCzT" colab={"base_uri": "https://localhost:8080/"} outputId="e34807f9-ed28-42f9-e7ba-ce46230f77fb"
# !pip install oblib
# !pip install scikit-learn
# !pip install spacy
# !pip install torch
# !pip install textblob
# !pip install transformers
# + id="XiIAA1D6Ukck" colab={"base_uri": "https://localhost:8080/"} outputId="625a050a-95de-4440-b99d-5dd9dac1ded1"
# !git clone https://github.com/ProsusAI/finBERT
# %cd finBERT
# + id="OaVJYNsAtat6" colab={"base_uri": "https://localhost:8080/"} outputId="c78f7a16-3bfd-42fc-95d2-fde7b783e61d"
from pathlib import Path
import shutil
import os
import logging
import sys
sys.path.append('..')
from textblob import TextBlob
from pprint import pprint
from sklearn.metrics import classification_report
from transformers import AutoModelForSequenceClassification
from finbert.finbert import *
import finbert.utils as tools
# %load_ext autoreload
# %autoreload 2
# Project root: the parent of the finBERT checkout we cd'd into above.
project_dir = Path.cwd().parent
# -1 for max_colwidth was deprecated in pandas 1.0 and removed in 2.0;
# None is the supported way to disable column-width truncation.
pd.set_option('display.max_colwidth', None)
# + [markdown] id="ZrGrvtgLik83"
# # Import Models and Datasets
# + id="YYFJNim8XdKl" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4029848a-61fc-461d-ba94-27ce0d7a7f16"
# !mkdir models
# !mkdir models/sentiment/
# !mkdir models/language_model/
# !mkdir models/classifier_model/
# !mkdir data
# !mkdir data/sentiment_data
# + id="Iq_izOfCVTFj" colab={"base_uri": "https://localhost:8080/"} outputId="67e1573c-cb87-46ec-864d-ac309e409e44"
# !wget https://prosus-public.s3-eu-west-1.amazonaws.com/finbert/language-model/pytorch_model.bin -P /models/language_model/
# !wget https://prosus-public.s3-eu-west-1.amazonaws.com/finbert/finbert-sentiment/pytorch_model.bin -P /models/sentiment/
# !wget https://huggingface.co/ProsusAI/finbert/raw/main/config.json -P /models/language_models/
# !wget https://huggingface.co/ProsusAI/finbert/raw/main/config.json -P /models/sentiment
# + [markdown] id="1-TYixG-hfb_"
# ## Download FinancialPhraseBank Zip
#
# Wget wasn't working, not sure it can be automated
#
# Next couple cells:
# 1) Upload zip to Colab
# 2) Unzip with command
# 3) Run the datasets script on the Sentences_50Agree.txt in the unzipped folder
# + id="0rDN4UGkcV1c" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 89} outputId="5e921866-913f-4c0f-edc9-79eea68ac839"
from google.colab import files
# Prompt the user to upload the FinancialPhraseBank zip into this Colab VM.
uploaded = files.upload()
for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(
        name=fn, length=len(uploaded[fn])))
# + id="63EK1oCmcUKp" colab={"base_uri": "https://localhost:8080/"} outputId="fdfcbc5e-d6c8-4b7b-e3c5-7f20e3cc7b45"
# !unzip FinancialPhraseBank-v1.0.zip
# + id="VA3P09Jjc1LQ" colab={"base_uri": "https://localhost:8080/"} outputId="df6eebd1-220a-4218-cdbd-374124cdc8c6"
# !python scripts/datasets.py --data_path /content/finBERT/FinancialPhraseBank-v1.0/Sentences_50Agree.txt
# + [markdown] id="m46GvF2Qtat5"
# ## Modules
# + id="4xpaPtqxtat8"
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.ERROR)
# + [markdown] id="VFg0Y5GKtat9"
# ## Prepare the model
# + [markdown] id="3jvTYlpmtat-"
# ### Setting path variables:
# 1. `lm_path`: the path for the pre-trained language model (If vanilla Bert is used then no need to set this one).
# 2. `cl_path`: the path where the classification model is saved.
# 3. `cl_data_path`: the path of the directory that contains the data files of `train.csv`, `validation.csv`, `test.csv`.
# ---
#
# In the initialization of `bertmodel`, we can either use the original pre-trained weights from Google by giving `bm = 'bert-base-uncased`, or our further pre-trained language model by `bm = lm_path`
#
#
# ---
# All of the configurations with the model is controlled with the `config` variable.
# + id="syfkYYUctat-"
# Paths to the further pre-trained language model, the classifier output
# directory, and the train/validation/test CSVs.
# NOTE(review): the download cells above save the model files elsewhere; this
# directory must contain both pytorch_model.bin and config.json, otherwise
# from_pretrained() fails as described in the markdown at the top.
lm_path = project_dir/'models'/'language_model'/'finbertTRC2'
cl_path = project_dir/'models'/'classifier_model'/'finbert-sentiment'
cl_data_path = project_dir/'data'/'sentiment_data'
# + [markdown] id="KTFN_fr5tat_"
# ### Configuring training parameters
# + [markdown] id="O_eTbCmqtat_"
# You can find the explanations of the training parameters in the class docsctrings.
# + id="AOe5OjBntauA" colab={"base_uri": "https://localhost:8080/", "height": 650} outputId="62344bc5-7750-4429-c434-0a068d7acd67"
# Clean the cl_path so training starts with an empty classifier directory.
# ignore_errors=True replaces the original bare `except: pass`, which also
# silently hid unrelated failures.
shutil.rmtree(cl_path, ignore_errors=True)

# Load the further pre-trained language model as a 3-class sequence classifier.
bertmodel = AutoModelForSequenceClassification.from_pretrained(lm_path, cache_dir=None, num_labels=3)

# Training hyper-parameters; see the Config class docstrings for details.
config = Config(data_dir=cl_data_path,
                bert_model=bertmodel,
                num_train_epochs=4,
                model_dir=cl_path,
                max_seq_length=48,
                train_batch_size=32,
                learning_rate=2e-5,
                output_mode='classification',
                warm_up_proportion=0.2,
                local_rank=-1,
                discriminate=True,
                gradual_unfreeze=True)
# + [markdown] id="-IagArA0tauC"
# `finbert` is our main class that encapsulates all the functionality. The list of class labels should be given in the prepare_model method call with label_list parameter.
# + id="j0DVhx0FtauD"
# Wrap training in the FinBert driver; enable discriminative learning rates
# and gradual unfreezing of the encoder layers.
finbert = FinBert(config)
finbert.base_model = 'bert-base-uncased'
finbert.config.discriminate=True
finbert.config.gradual_unfreeze=True
# + id="Ja2Ug4autauE"
# Class labels for the 3-way sentiment head.
finbert.prepare_model(label_list=['positive','negative','neutral'])
# + [markdown] id="kkUsGlqXtauE"
# ## Fine-tune the model
# + id="VyZQWr6ltauF"
# Get the training examples
train_data = finbert.get_data('train')
# + id="-bASZW60tauF"
model = finbert.create_the_model()
# + [markdown] id="_wCA8uYCtauF"
# ### [Optional] Fine-tune only a subset of the model
# The variable `freeze` determines the last layer (out of 12) to be freezed. You can skip this part if you want to fine-tune the whole model.
#
# <span style="color:red">Important: </span>
# Execute this step if you want a shorter training time in the expense of accuracy.
# + id="WcVpUgDStauF"
# Fine-tune only a subset of the model: freeze the embeddings and the first
# `freeze` encoder layers so their weights receive no gradient updates.
freeze = 6
for embedding_param in model.bert.embeddings.parameters():
    embedding_param.requires_grad = False
for frozen_layer in model.bert.encoder.layer[:freeze]:
    for layer_param in frozen_layer.parameters():
        layer_param.requires_grad = False
# + [markdown] id="3odoFQXXtauH"
# ### Training
# + id="yU3233n1tauH"
# Run fine-tuning; returns the best model found during training.
trained_model = finbert.train(train_examples = train_data, model = model)
# + [markdown] id="fa683E3ktauI"
# ## Test the model
#
# `bert.evaluate` outputs the DataFrame, where true labels and logit values for each example is given
# + id="GnJwGjwptauI"
test_data = finbert.get_data('test')
# + id="AjAfmFAJtauJ"
results = finbert.evaluate(examples=test_data, model=trained_model)
# + [markdown] id="Ag_yjM0ytauJ"
# ### Prepare the classification report
# + id="Q39wjsUetauJ"
def report(df, cols=('label', 'prediction', 'logits')):
    """Print evaluation metrics for a FinBERT results DataFrame.

    Args:
        df: DataFrame holding true labels, predicted labels and raw logits.
        cols: names of the (label, prediction, logits) columns. A tuple
            default replaces the original mutable list default.

    Prints the class-weighted cross-entropy loss (weights come from
    ``finbert.class_weights`` in the surrounding notebook scope), the
    accuracy, and a per-class classification report.
    """
    #print('Validation loss:{0:.2f}'.format(metrics['best_validation_loss']))
    cs = CrossEntropyLoss(weight=finbert.class_weights)
    loss = cs(torch.tensor(list(df[cols[2]])), torch.tensor(list(df[cols[0]])))
    print("Loss:{0:.2f}".format(loss))
    print("Accuracy:{0:.2f}".format((df[cols[0]] == df[cols[1]]).sum() / df.shape[0]))
    print("\nClassification Report:")
    print(classification_report(df[cols[0]], df[cols[1]]))
# + id="YDzFK1NptauJ"
# Predicted label = arg-max over each row's per-class prediction vector.
results['prediction'] = results.predictions.apply(lambda x: np.argmax(x,axis=0))
# + id="M5zqzpWKtauK"
# Note: evaluate() output uses the 'labels'/'predictions' column names.
report(results,cols=['labels','prediction','predictions'])
# + [markdown] id="DpVTBz_htauK"
# ### Get predictions
# + [markdown] id="Mbc9wkm1tauL"
# With the `predict` function, given a piece of text, we split it into a list of sentences and then predict sentiment for each sentence. The output is written into a dataframe. Predictions are represented in three different columns:
#
# 1) `logit`: probabilities for each class
#
# 2) `prediction`: predicted label
#
# 3) `sentiment_score`: sentiment score calculated as: probability of positive - probability of negative
#
# Below we analyze a paragraph taken out of [this](https://www.economist.com/finance-and-economics/2019/01/03/a-profit-warning-from-apple-jolts-markets) article from The Economist. For comparison purposes, we also put the sentiments predicted with TextBlob.
# > Later that day Apple said it was revising down its earnings expectations in the fourth quarter of 2018, largely because of lower sales and signs of economic weakness in China. The news rapidly infected financial markets. Apple’s share price fell by around 7% in after-hours trading and the decline was extended to more than 10% when the market opened. The dollar fell by 3.7% against the yen in a matter of minutes after the announcement, before rapidly recovering some ground. Asian stockmarkets closed down on January 3rd and European ones opened lower. Yields on government bonds fell as investors fled to the traditional haven in a market storm.
# + id="_6pz52lvtauL"
# Sample paragraph (The Economist) used to demo sentence-level predictions.
text = "Later that day Apple said it was revising down its earnings expectations in \
the fourth quarter of 2018, largely because of lower sales and signs of economic weakness in China. \
The news rapidly infected financial markets. Apple’s share price fell by around 7% in after-hours \
trading and the decline was extended to more than 10% when the market opened. The dollar fell \
by 3.7% against the yen in a matter of minutes after the announcement, before rapidly recovering \
some ground. Asian stockmarkets closed down on January 3rd and European ones opened lower. \
Yields on government bonds fell as investors fled to the traditional haven in a market storm."
# + id="eFX-TveytauM"
# Reload the fine-tuned classifier from disk for inference.
cl_path = project_dir/'models'/'classifier_model'/'finbert-sentiment'
model = AutoModelForSequenceClassification.from_pretrained(cl_path, cache_dir=None, num_labels=3)
# + id="wQzDgiPYtauM"
import nltk
# Sentence tokenizer data used by predict() to split the text.
nltk.download('punkt')
# + id="oLCX5nvVtauM"
result = predict(text,model)
# + id="2Joh8C5-tauN"
# TextBlob polarity per sentence, for comparison with FinBERT's scores.
blob = TextBlob(text)
result['textblob_prediction'] = [sentence.sentiment.polarity for sentence in blob.sentences]
result
# + id="vgMe0WmKtauO"
# The original mixed an inert f-string prefix with %-formatting; plain
# %-formatting is what actually ran, so the f-prefix is dropped.
print('Average sentiment is %.2f.' % (result.sentiment_score.mean()))
# + [markdown] id="pfTMS2hAtauO"
# Here is another example
# + id="n4zMLJl5tauO"
# Second sample paragraph (Reuters, Prosus/Naspers listing).
text2 = "Shares in the spin-off of South African e-commerce group Naspers surged more than 25% \
in the first minutes of their market debut in Amsterdam on Wednesday. <NAME>, CEO of \
Naspers and Prosus Group poses at Amsterdam's stock exchange, as Prosus begins trading on the \
Euronext stock exchange in Amsterdam, Netherlands, September 11, 2019. REUTERS/Piroschka van de Wouw \
Prosus comprises Naspers’ global empire of consumer internet assets, with the jewel in the crown a \
31% stake in Chinese tech titan Tencent. There is 'way more demand than is even available, so that’s \
good,' said the CEO of Euronext Amsterdam, <NAME> Tilburg. 'It’s going to be an interesting \
hour of trade after opening this morning.' Euronext had given an indicative price of 58.70 euros \
per share for Prosus, implying a market value of 95.3 billion euros ($105 billion). The shares \
jumped to 76 euros on opening and were trading at 75 euros at 0719 GMT."
# + id="PGXrzZNrtauP"
# FinBERT and TextBlob sentence-level sentiment, as for the first example.
result2 = predict(text2,model)
blob = TextBlob(text2)
result2['textblob_prediction'] = [sentence.sentiment.polarity for sentence in blob.sentences]
# + id="81AyGPmetauP"
result2
# + id="z7i3hGk8tauP"
# Same fix as above: drop the inert f-prefix that was mixed with %-formatting.
print('Average sentiment is %.2f.' % (result2.sentiment_score.mean()))
# + id="0iKql9cstauP"
| FinBERT_Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tasbirul/Deep-Learning-With-Keras/blob/master/06_CNN_With_CIFAR-10.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run Now in Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tasbirul/Deep-Learning-With-Keras/blob/master/06_CNN_With_CIFAR-10.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# <br>
import keras
keras.__version__
from keras.models import Sequential
from keras.layers import Dense
# Activation and Dropout are used when the model is built below but were
# never imported, which made that cell fail with a NameError.
from keras.layers import Activation, Dropout
from keras.utils import to_categorical
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
# Training hyper-parameters.
batch_size = 32
num_classes = 10
epochs = 100
num_predictions = 20
# +
from keras.datasets import cifar10

# Load the CIFAR-10 image-classification dataset (train/test splits).
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# -
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Scale pixel values to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the integer class labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# +
# Two Conv-Conv-Pool-Dropout stages followed by a dense classifier head.
# Note: Activation and Dropout come from keras.layers and must be imported
# for this cell to run.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# -
model.summary()
# NOTE(review): lowercase `rmsprop` and the `lr=` argument are the legacy
# Keras API; recent versions use keras.optimizers.RMSprop(learning_rate=...).
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test Accuracy:', test_acc * 100 ,"%")
# Class-probability predictions for the whole test set.
pre = model.predict(x_test)
# +
import numpy as np
import matplotlib.pyplot as plt

def test_result(point):
    """Show test image `point`, its predicted class index and its one-hot label.

    Relies on the notebook globals x_test, pre and y_test defined above.
    """
    plt.imshow(x_test[point])
    plt.show()
    print(np.argmax(pre[point]))  # predicted class index
    print(y_test[point])          # one-hot ground-truth vector

test_result(5)
| Keras/06_CNN_With_CIFAR-10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from time import time
from utils import make_batch
from models import WaveNet, Generator
from IPython.display import Audio
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
# %matplotlib inline
# Play the trimmed training clip inline.
Audio('./voice_train_cut.wav')
# Waveforms of the full and trimmed training recordings.
x, sr = librosa.load('./voice_train.wav')
librosa.display.waveplot(x, sr=sr)
x, sr = librosa.load('./voice_train_cut.wav')
librosa.display.waveplot(x, sr=sr)
# Build one (inputs, targets) training batch from the trimmed clip.
inputs, targets = make_batch('./voice_train_cut.wav')
# output_path = './output.wav'
num_time_samples = inputs.shape[1]
num_channels = 1
gpu_fraction = 1
# sample_rate = 44100
# NOTE(review): generation below uses 32000 Hz while the Audio/waveplot
# calls pass 44100 — confirm which rate the data actually uses.
sample_rate = 32000
inputs.shape
librosa.display.waveplot(inputs[0, :, 0], sr=44100)
model = WaveNet(num_time_samples=num_time_samples, num_channels=num_channels, gpu_fraction=gpu_fraction)
# +
# model = WaveNet(num_time_samples=num_time_samples, num_channels=num_channels, gpu_fraction=gpu_fraction)
# print('inputs.shape = ', inputs.shape)
# Audio(inputs.reshape(inputs.shape[1]), rate=44100)
# print('inputs.shape = ', inputs.shape)
# print('targets.shape = ', targets.shape)
# Time the test pass, then generate audio starting from the first sample.
tic = time()
model.test(inputs, targets)
toc = time()
print('Training time = {} seconds'.format(toc-tic))
generator = Generator(model)
input_ = inputs[:, 0:1, 0]
tic = time()
predictions = generator.run(input_, sample_rate)
# -
Audio(predictions, rate=44100)
librosa.display.waveplot(predictions[0, :], sr=44100)
librosa.display.waveplot(predictions[0, :], sr=sample_rate)
# Spectrogram (dB) of the generated audio.
X = librosa.stft(predictions[0, :])
Xdb = librosa.amplitude_to_db(abs(X))
librosa.display.specshow(Xdb, sr=sample_rate, x_axis='time', y_axis='hz')
| demo/demo_test_cut_predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
import time
import numpy as np
import pandas as pd
from nameparser import HumanName
from genderize import Genderize
from data.namsor_api import NamSorApi
# + pycharm={"name": "#%%\n", "is_executing": false}
# Load the author table produced by the DBLP processing step.
df_author = pd.read_csv('../processed/author.csv')
df_author
# + pycharm={"name": "#%%\n", "is_executing": false}
# Previously fetched genderize.io results (name, gender, probability, count).
gender_list = pd.read_csv('gender_list.csv')
gender_list
# + pycharm={"name": "#%%\n", "is_executing": false}
# Keep only confident predictions backed by more than one observation.
gender_list_filtered = gender_list[(gender_list['probability'] > 0.8) & (gender_list['count'] > 1)]
gender_list_filtered
# + pycharm={"name": "#%%\n", "is_executing": false}
# Student-mentor pairs (columns: author, mentor).
student_mentor = pd.read_csv('../processed/author_mentor.csv')
student_mentor
# + pycharm={"name": "#%%\n", "is_executing": false}
# Unique first names across both students and mentors.
unique_student_first_name = student_mentor['author'].map(lambda x: HumanName(x).first).unique()
unique_mentor_first_name = student_mentor['mentor'].map(lambda x: HumanName(x).first).unique()
unique_first_name = np.unique(np.concatenate((unique_student_first_name, unique_mentor_first_name), axis=None))
len(unique_first_name)
# + pycharm={"name": "#%%\n", "is_executing": false}
# init a Genderize instance, get the list of genders
# SECURITY(review): the API key is hard-coded in the notebook - move it to an
# environment variable / secrets file and rotate the exposed key.
gender = Genderize(api_key='8398523e8bc59e36035174fd47c877a9')
gender_list = gender.get(unique_first_name)
df_gender = pd.DataFrame(gender_list)
df_gender.to_csv('gender_list.csv', index=None)
# + pycharm={"name": "#%%\n", "is_executing": false}
# One row per unique person, plus their parsed first name.
author_reduced = pd.concat([student_mentor['author'], student_mentor['mentor']], axis=0)
author_reduced = author_reduced.unique()
author_reduced = pd.Series(author_reduced)
first_name = author_reduced.map(lambda x: HumanName(x).first)
# + pycharm={"name": "#%%\n", "is_executing": false}
name = pd.concat([author_reduced, first_name], axis=1)
name.columns = ['author', 'name']
name
# + pycharm={"name": "#%%\n", "is_executing": false}
# NOTE(review): at this point `gender_list` was reassigned above to the raw
# list returned by Genderize; merging only works if the CSV-loading cell was
# (re-)run last so that `gender_list` is a DataFrame - confirm cell order.
join_name = name.merge(gender_list, on='name')
join_name
# + pycharm={"name": "#%%\n", "is_executing": false}
c = 0
namesor = NamSorApi()
# Re-check low-confidence rows (probability < 0.8) against the NamSor API.
# Resumes at index 550676 and stops after ~3500 lookups per run - presumably
# API-quota management; confirm before changing either constant.
for index, row in join_name.iterrows():
    if row['probability'] < 0.8 and index >= 550676:
        try:
            c += 1
            if c == 3500:
                break
            gender = namesor.get_gender(row['author'])
            join_name.loc[index, 'gender'] = gender['likelyGender']
            join_name.loc[index, 'probability'] = gender['probabilityCalibrated']
        except Exception as exc:
            # Fix: report which row failed and why (the original printed the
            # uninformative 'boom'), then back off in case of rate limiting.
            print('NamSor lookup failed for row %s: %s' % (index, exc))
            time.sleep(5)
join_name
# + pycharm={"name": "#%%\n", "is_executing": false}
# Persist the full author/gender table (including low-confidence rows).
join_name.to_csv('author_gender.csv')
# + pycharm={"name": "#%%\n", "is_executing": false}
# Inspect the near-coin-flip predictions (0.5 < p < 0.6).
temp = join_name[(join_name['probability'] < 0.6) & (join_name['probability'] > 0.5)]
temp
# + pycharm={"name": "#%%\n", "is_executing": false}
# High-confidence subset used for the final filtered export.
temp2 = join_name[join_name['probability'] > 0.8]
temp2
# + pycharm={"name": "#%%\n", "is_executing": false}
# Drop helper columns before export.
temp3 = temp2.drop(columns=['name', 'count'])
temp3
# + pycharm={"name": "#%%\n", "is_executing": false}
temp3.to_csv('author_gender_filtered.csv', index=None)
| data/gender/dblp_gender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Make sure to give Xvfb a moment to activate after launching the container
# -
from paraview import simple as pvs
from ipyparaview.viewer import PVTimeDisplay
import os
####################
# This is the demo directory to show
result_directory = "demo-04/"
####################
# Xdmf index file written by the simulation's VisIt dump.
data_file = "visdump_data.VisIt.xmf"
fname = os.path.join(result_directory, "sim_dump", data_file)
fname
# Fail early (in-notebook) if the demo output is missing.
assert os.path.isfile(fname)
# Read the Xdmf dataset and force the pipeline to execute once.
data = pvs.Xdmf3ReaderS(FileName=fname)
data.UpdatePipeline()
# Embed an interactive time-series viewer widget in the notebook.
viewer = PVTimeDisplay(data, ren_size=[800, 400])
viewer.frame
| dev/cmb/demo/post-vis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 1: Dictionary Basics
#
student = {} # empty dictionary
# +
# set some values (keys are strings, values can be any type)
student['Name'] = 'Michael'
student['GPA'] = 3.4
student['Ischool'] = True
print(student)
# -
# mutable: an existing key's value can be reassigned
student['GPA'] = 4.0
print(student['Name'], student['GPA'])
# +
a = {"a": "b", "x": "y", "2": 2}
# NOTE(review): this raises KeyError - the key is the *string* "2", not the
# int 2. Presumably a deliberate demo that int and str keys differ; confirm.
print(a[2])
# -
| lessons/09-Dictionaries/WMC1-Dict-Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
# Create a GIF Movie {#gif_movie_example}
# ==================
#
# Generate a moving gif from an active plotter
#
# +
import numpy as np
import pyvista as pv

# Sinusoidal height field z = sin(r) on a regular grid.
x = np.arange(-10, 10, 0.25)
y = np.arange(-10, 10, 0.25)
x, y = np.meshgrid(x, y)
r = np.sqrt(x ** 2 + y ** 2)
z = np.sin(r)
# Create and structured surface
grid = pv.StructuredGrid(x, y, z)
# Create a plotter object and set the scalars to the Z height
plotter = pv.Plotter(notebook=False, off_screen=True)
plotter.add_mesh(grid, scalars=z.ravel(), smooth_shading=True)
# Open a gif
plotter.open_gif("wave.gif")
pts = grid.points.copy()
# Update Z and write a frame for each updated position
# (nframe phases spanning one full period, endpoint excluded so it loops).
nframe = 15
for phase in np.linspace(0, 2 * np.pi, nframe + 1)[:nframe]:
    z = np.sin(r + phase)
    pts[:, -1] = z.ravel()
    # NOTE(review): update_coordinates/update_scalars were removed in newer
    # PyVista releases - this cell assumes an older PyVista API; confirm.
    plotter.update_coordinates(pts, render=False)
    plotter.update_scalars(z.ravel(), render=False)
    # must update normals when smooth shading is enabled
    plotter.mesh.compute_normals(cell_normals=False, inplace=True)
    plotter.render()
    plotter.write_frame()
# Closes and finalizes movie
plotter.close()
| gif.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # doc2vec
#
# This is an experimental code developed by <NAME> found and [published on the word2vec Google group](https://groups.google.com/d/msg/word2vec-toolkit/Q49FIrNOQRo/J6KG8mUj45sJ).
#
# The input format for `doc2vec` is still one big text document but every line should be one document prepended with an unique id, for example:
#
# ```
# _*0 This is sentence 1
# _*1 This is sentence 2
# ```
#
# ### Requirements
#
# This notebook requires [`nltk`](http://www.nltk.org/)
#
# Download some data: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz.
#
# You could use `make test-data` from the root of the repo.
#
# ## Preprocess
#
# Merge data into one big document with an id per line and do some basic preprocessing: word tokenizer.
import os
import nltk

# IMDB review folders to merge (labelled train/test plus unsupervised data).
directories = ['train/pos', 'train/neg', 'test/pos', 'test/neg', 'train/unsup']
# Emit one document per line, prefixed with the unique "_*<id>" marker that
# the doc2vec input format described above requires.
id_ = 0
with open('../data/alldata.txt', 'w') as input_file:
    for directory in directories:
        rootdir = os.path.join('../data/aclImdb', directory)
        for subdir, dirs, files in os.walk(rootdir):
            for file_ in files:
                with open(os.path.join(subdir, file_), "r") as f:
                    doc_id = "_*%i" % id_
                    id_ = id_ + 1
                    text = f.read()
                    tokens = nltk.word_tokenize(text)
                    doc = " ".join(tokens).lower()
                    # Bug fix: the original kept `doc` as bytes and wrote it
                    # through "%s", which emitted the b'...' repr into the
                    # corpus. Encode/decode to drop non-ASCII but keep a str.
                    doc = doc.encode("ascii", "ignore").decode("ascii")
                    input_file.write("%s %s\n" % (doc_id, doc))
# ## doc2vec
# %load_ext autoreload
# %autoreload 2
import word2vec
# Train doc2vec (PV-DBOW, cbow=0) on the merged corpus and write the
# binary vector file.
word2vec.doc2vec('../data/alldata.txt', '../data/doc2vec-vectors.bin', cbow=0, size=100, window=10, negative=5,
                 hs=0, sample='1e-4', threads=12, iter_=20, min_count=1, binary=True, verbose=True)
# ## Prediction
#
# Is possible to load the vectors using the same wordvectors class as a regular word2vec binary file.
# %load_ext autoreload
# %autoreload 2
import word2vec
model = word2vec.load('../data/doc2vec-vectors.bin')
model.vectors.shape
# The documents vector are going to be identified by the id we used in the preprocesing section, for example document 1 is going to have vector:
model['_*1']
# We can ask for similarity words or documents on document `1`
indexes, metrics = model.similar('_*1')
model.generate_response(indexes, metrics).tolist()
# Now its we just need to matching the id to the data created on the preprocessing step
| examples/doc2vec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import the libraries used to work with the satellite/land-use rasters.
# GDAL is the core component for reading raster geodata.
from osgeo import gdal
# matplotlib - the main Python data-visualisation library.
import matplotlib.pyplot as plt
import matplotlib as mpl
# numpy - fast matrix maths (rasters are matrices).
import numpy as np
# Utility so images can be shown directly inside the notebook.
from IPython.display import Image
# Configure the figure size used by the notebook.
mpl.rcParams['figure.figsize'] = (10,10)
# -
# Observed (real) and CLUE-modelled (predicted) land-use maps for 1860.
dataset = gdal.Open('Probability/lu1860_R.asc')
dataset0 = gdal.Open('C:/Users/olegz/Desktop/CLUE/Lesunovo PROJECT/Predicted/Predicted1860.asc')
Real1860=dataset.ReadAsArray()
Predicted1860=dataset0.ReadAsArray()
# Allocation accuracy: among valid pixels where either map assigns the class
# of interest (value 0), what share is placed identically in both maps?
# Vectorised with numpy and generalised from the hard-coded 800x736 loop
# bounds to the full raster shape (identical result for 800x736 rasters).
valid = Real1860 != -99999                      # -99999 marks nodata
of_interest = valid & ((Real1860 == 0) | (Predicted1860 == 0))
count = int((of_interest & (Real1860 == Predicted1860)).sum())
All = int(of_interest.sum())
# Each misplaced patch is counted twice in `All` (once where only the real
# map has the class, once where only the prediction does), so collapse pairs.
All_corrected = (All - count) / 2 + count
Precision = count / All_corrected
print(Precision)
# Overall accuracy: share of valid (non-nodata) pixels classified identically
# in both maps. Vectorised with numpy and generalised from the hard-coded
# 800x736 loop bounds to the full raster shape.
valid = Real1860 != -99999
count = int((valid & (Real1860 == Predicted1860)).sum())
All = int(valid.sum())
Precision = count / All
print(Precision)
| For crazy ideas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1.Write a Python program to convert kilometers to miles?
#
# +
# Taking kilometers input from the user
kilometers = float(input("Enter value in kilometers: "))
# conversion factor (1 km = 0.621371 miles)
conv_fac = 0.621371
# calculate miles
miles = kilometers * conv_fac
print('%0.2f kilometers is equal to %0.2f miles' %(kilometers,miles))
# -
# 2. Write a Python program to convert Celsius to Fahrenheit?
celsius = float(input("Enter temperature in celsius: "))
# F = C * 9/5 + 32
fahrenheit = (celsius * 9/5) + 32
print('%.2f Celsius is %0.2f Fahrenheit' %(celsius, fahrenheit))
# 3. Write a Python program to display calendar?
# +
import calendar
# To take month and year input from the user
yy = int(input("Enter year: "))
mm = int(input("Enter month: "))
# display the calendar
print(calendar.month(yy, mm))
# -
# 4. Write a Python program to solve quadratic equation?
# +
# cmath handles the complex roots that appear when the discriminant < 0.
import cmath
a = float(input('Enter a: '))
b = float(input('Enter b: '))
c = float(input('Enter c: '))
# calculate the discriminant
d = (b**2) - (4*a*c)
# find two solutions via the quadratic formula
sol1 = (-b-cmath.sqrt(d))/(2*a)
sol2 = (-b+cmath.sqrt(d))/(2*a)
print('The solution are {0} and {1}'.format(sol1,sol2))
# -
# 5.Write a Python program to swap two variables without temp variable?
#
#
# +
x = input("Enter x : ")
y = input("Enter y : ")
print ("Before swapping: ")
print("Value of x : ", x, " and y : ", y)
# code to swap 'x' and 'y' using tuple unpacking
x, y = y, x
print ("After swapping: ")
print("Value of x : ", x, " and y : ", y)
| Python Basic Programming/Programming_Assingment2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: imgentf2
# language: python
# name: imgentf2
# ---
# ### If run on Google Colab, uncomment cell below to install correct Tensorflow version
'''
!pip3 uninstall tensorflow
!pip3 install tensorflow_gpu==2.3.1
'''
# +
import os

import tensorflow as tf
from tensorflow.keras import layers, Model
from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Reshape, Conv2DTranspose, MaxPooling2D, UpSampling2D, LeakyReLU
from tensorflow.keras.activations import relu
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from packaging.version import parse as parse_version
assert parse_version(tf.__version__) < parse_version("2.4.0"), \
f"Please install TensorFlow version 2.3.1 or older. Your current version is {tf.__version__}."
# -
# ## Load MNIST dataset
# +
# Download/load MNIST as (image, label) pairs plus dataset metadata.
(ds_train, ds_test_), ds_info = tfds.load('mnist',
                                          split=['train', 'test'],
                                          shuffle_files=True,
                                          as_supervised=True,
                                          with_info=True)

batch_size = 256

def preprocess(image, label):
    # Scale pixels to [0, 1]; the label is replaced by the image itself
    # because the autoencoder learns to reconstruct its input.
    image = tf.cast(image, tf.float32)
    image = image/255.
    return image, image

ds_train = ds_train.map(preprocess)
ds_train = ds_train.cache() # put dataset into memory
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(batch_size)
ds_test = ds_test_.map(preprocess).batch(batch_size).cache().prefetch(batch_size)
# +
# return label for testing
def preprocess_with_label(image, label):
    # Binarize pixels to {0., 1.} (round after scaling) and keep the digit
    # label so the latent space can be colored by class later.
    image = tf.cast(image, tf.float32)
    image = tf.math.round(image/255.)
    return image, label

ds_test_label = ds_test_.map(preprocess_with_label).batch(1000)
# -
# # Building Autoencoder
# +
def Encoder(z_dim):
    """Build the convolutional encoder: 28x28x1 image -> z_dim latent vector."""
    image_in = layers.Input(shape=[28, 28, 1])
    h = image_in
    # Two downsampling stages, each a strided conv followed by a unit-stride conv.
    for stride in (2, 1, 2, 1):
        h = Conv2D(filters=8, kernel_size=(3, 3), strides=stride,
                   padding='same', activation='relu')(h)
    latent = Dense(z_dim)(Flatten()(h))
    return Model(inputs=image_in, outputs=latent, name='encoder')
def Decoder(z_dim):
    """Build the decoder: z_dim latent vector -> 28x28x1 image in [0, 1]."""
    latent_in = layers.Input(shape=[z_dim])
    h = Dense(7 * 7 * 64, activation='relu')(latent_in)
    h = Reshape((7, 7, 64))(h)
    # Two upsampling stages: 7x7 -> 14x14 -> 28x28.
    for n_filters in (64, 32):
        h = Conv2D(filters=n_filters, kernel_size=(3, 3), strides=1,
                   padding='same', activation='relu')(h)
        h = UpSampling2D((2, 2))(h)
    # Sigmoid output keeps reconstructed pixels in [0, 1].
    image_out = Conv2D(filters=1, kernel_size=(3, 3), strides=1,
                       padding='same', activation='sigmoid')(h)
    return Model(inputs=latent_in, outputs=image_out, name='decoder')
class Autoencoder:
    """Encoder and decoder glued into one end-to-end trainable model."""

    def __init__(self, z_dim):
        self.encoder = Encoder(z_dim)
        self.decoder = Decoder(z_dim)
        reconstruction = self.decoder(self.encoder.output)
        self.model = Model(self.encoder.input, reconstruction)
# -
autoencoder = Autoencoder(z_dim=10)
# +
# Train with checkpointing (best val_loss) and early stopping.
# Requires `import os` from the import cell for os.makedirs below.
model_path = "./models/autoencoder.h5"
os.makedirs("./models", exist_ok=True)
checkpoint = ModelCheckpoint(model_path,
                             monitor= "val_loss",
                             verbose=1,
                             save_best_only=True,
                             mode= "auto",
                             save_weights_only = False)

early = EarlyStopping(monitor= "val_loss",
                      mode= "auto",
                      patience = 5)

callbacks_list = [checkpoint, early]

# Pixel-wise MSE reconstruction loss.
autoencoder.model.compile(
    loss = "mse",
    optimizer=tf.keras.optimizers.RMSprop(learning_rate=3e-4))
#metrics=[tf.keras.losses.BinaryCrossentropy()])

autoencoder.model.fit(ds_train, validation_data=ds_test,
                      epochs = 100, callbacks = callbacks_list)
# -
# -
# # Sample and Display Images
# +
# Reload the best checkpoint and compare inputs (top row of each pair)
# with their reconstructions (bottom row).
images, labels = next(iter(ds_test))
autoencoder.model = load_model(model_path)
outputs = autoencoder.model.predict(images)

# Display
grid_col = 10
grid_row = 2

f, axarr = plt.subplots(grid_row, grid_col, figsize=(grid_col*1.1, grid_row))
i = 0
for row in range(0, grid_row, 2):
    for col in range(grid_col):
        axarr[row,col].imshow(images[i,:,:,0], cmap='gray')
        axarr[row,col].axis('off')
        axarr[row+1,col].imshow(outputs[i,:,:,0], cmap='gray')
        axarr[row+1,col].axis('off')
        i += 1
f.tight_layout(0.1, h_pad=0.2, w_pad=0.1)
plt.show()
# -
# -
# ## Set z_dim = 2 and to look at the latent variables
# +
# Retrain with a 2-D latent space so the code can be plotted directly.
autoencoder_2 = Autoencoder(z_dim=2)

early = EarlyStopping(monitor= "val_loss",
                      mode= "auto",
                      patience = 5)

callbacks_list = [early]

autoencoder_2.model.compile(
    loss = "mse",
    optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3))

autoencoder_2.model.fit(ds_train, validation_data=ds_test,
                        epochs = 50, callbacks = callbacks_list)
# -
# Scatter the 2-D codes of test digits, colored by their class label.
images, labels = next(iter(ds_test_label))
outputs = autoencoder_2.encoder.predict(images)
plt.figure(figsize=(8,8))
plt.scatter(outputs[:,0], outputs[:,1], c=labels, cmap='RdYlBu', s=3)
plt.colorbar()
# +
# Decode a regular 10x10 grid of latent points to visualise the manifold.
z_samples = np.array([[z1, z2] for z2 in np.arange(-5, 5, 1.) for z1 in np.arange(-5, 5, 1.)])
images = autoencoder_2.decoder.predict(z_samples)

grid_col = 10
grid_row = 10

f, axarr = plt.subplots(grid_row, grid_col, figsize=(grid_col, grid_row))
i = 0
for row in range(grid_row):
    for col in range(grid_col):
        axarr[row,col].imshow(images[i,:,:,0], cmap='gray')
        axarr[row,col].axis('off')
        i += 1
f.tight_layout(0.1, h_pad=0.2, w_pad=0.1)
plt.show()
# -
import ipywidgets as widgets
from ipywidgets import interact, interact_manual

# Interactive sliders: decode a single latent point (z1, z2) on demand.
@interact
def explore_latent_variable(z1 = (-5,5,0.1),
                            z2 = (-5,5,0.1)):
    # Decode one latent sample from the 2-D autoencoder and show it.
    z_samples = [[z1, z2]]
    images = autoencoder_2.decoder.predict(z_samples)
    plt.figure(figsize=(2,2))
    plt.imshow(images[0,:,:,0], cmap='gray')
| Chapter02/ch2_autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:planetpieces]
# language: python
# name: conda-env-planetpieces-py
# ---
# +
import os
import tensorflow as tf
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import data_tools
# -
tf.__version__
# +
# Read all bands of a sample raster (band=None -> full cube).
filename = 'output1.tif'
data = data_tools.read_raster(filename, band=None)
# -
data[0]
data_tools.plot_me(data[0][0])
data_tools.histo_me(data[0][0])
# NOTE(review): `mask` is not defined by any earlier cell in this notebook;
# the next two cells only work after the apply_mask cells below have run.
print(np.shape(mask))
# +
fig, ax = plt.subplots(figsize=(7, 7))
ax.imshow(mask)
plt.show()
# -
# Rough threshold values for snow:
# - R > 1000, channel 2
# - G > 1000, channel 1
# - B > 1000, channel 0
# - NIR < 100, channel 3
#
# Reading in data will result in an array of the shape (4, 8000, 8000).
#
# We are going to start with the green band where snow is the most reflective.
filenames = os.listdir('/home/jovyan/data/planet_flat/')
filename_tifs = [f for f in filenames if f.endswith("Analytic_refl.tif")]
sample_files = filename_tifs[:10]
sample_files
def apply_mask(root_dir, filename, threshold=9000, band=0):
    """Threshold one band of a raster into a binary (0/1) mask.

    Parameters
    ----------
    root_dir : directory containing the raster file.
    filename : raster file name.
    threshold : reflectance value above which a pixel is set to 1.
    band : index of the band to threshold (per the channel notes above:
        0=B, 1=G, 2=R, 3=NIR).
    """
    # Bug fix: `band` was accepted but never used, so the threshold was
    # applied to the whole multi-band cube instead of the selected band.
    data = data_tools.read_raster(os.path.join(root_dir, filename))[0][band]
    mask = np.zeros(np.shape(data))
    mask[np.where(data > threshold)] = 1
    return mask
# Build snow masks from the green band (band=1) of the first ten scenes.
root_dir = '/home/jovyan/data/planet_flat/'
threshold = 2000
band = 1
sample_masks = []
for file in sample_files:
    mask = apply_mask(root_dir, file, threshold=threshold, band=band)
    sample_masks.append(mask)
# +
# Show the ten masks in a 2x5 grid.
fig = plt.figure(figsize=(10, 10))
columns = 5
rows = 2
for i, img in enumerate(sample_masks, 1):
    fig.add_subplot(rows, columns, i)
    plt.axis('off')
    plt.imshow(img)
plt.tight_layout()
plt.show()
# +
# Show the corresponding raw rasters for visual comparison.
sample_arr = []
for file in sample_files:
    data = data_tools.read_raster(os.path.join(root_dir,file))[0]
    sample_arr.append(data)

fig = plt.figure(figsize=(10, 10))
columns = 5
rows = 2
for i, img in enumerate(sample_arr, 1):
    fig.add_subplot(rows, columns, i)
    plt.axis('off')
    plt.imshow(img)
plt.tight_layout()
plt.show()
| contributors/claire/data-exploration/read_in_rasters-claire.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit
# name: python383jvsc74a57bd0babbe98f182cd6f821de8f93c23581f96884d2c1c5b7d529efea2e82e4e3f1c4
# ---
# ## A collection of bode plot functions using trapezoidal SVF.
#
# HTML output built with: jupyter nbconvert --to html svfbode.ipynb
#
# https://cytomic.com/files/dsp/SvfLinearTrapOptimised2.pdf
from math import *
import cmath
import matplotlib.pyplot as plt
# +
def db_from_lin(gain):
    """Convert a linear amplitude ratio to decibels."""
    return 20.0 * log(gain, 10.0)
def lin_from_db(decibels):
    """Convert decibels back to a linear amplitude ratio."""
    return pow(10.0, 0.05 * decibels)
# -
def svf_bode_digital(f_hz, coeffs, fs):
    """Evaluate the trapezoidal SVF transfer function at f_hz; returns the complex response."""
    a, g, k, a1, a2, a3, m0, m1, m2 = coeffs
    # Point on the unit circle corresponding to f_hz at sample rate fs.
    z1 = cmath.exp(-(pi * 2.0) * f_hz * 1.0j / fs)
    z2 = z1 * z1
    g2 = g * g
    den = (g2 + g * k + 1) + 2 * (g2 - 1) * z1 + (g2 - g * k + 1) * z2
    band_tap = m1 * g * (1 - z2)
    low_tap = m2 * g2 * (1 + 2 * z1 + z2)
    return m0 + (band_tap + low_tap) / den
def plot_svf(svf_coeffs):
    """Plot amplitude (dB) and phase (degrees) of an SVF response, 20 Hz - 20 kHz at fs=96 kHz."""
    freqs = list(range(20, 20000, 10))
    responses = [svf_bode_digital(f, svf_coeffs, 96000) for f in freqs]
    gains_db = [db_from_lin(abs(r).real) for r in responses]
    phases = [degrees(cmath.phase(r)) for r in responses]

    fig = plt.figure()
    gain_axis = fig.add_subplot(111)
    gain_axis.semilogx(freqs, gains_db)
    gain_axis.set_ylabel("amplitude")
    gain_axis.set_ylim([-24, 24])

    # Phase shares the x axis on a twinned right-hand scale.
    phase_axis = gain_axis.twinx()
    phase_axis.semilogx(freqs, phases, "r-")
    phase_axis.set_ylabel("phase", color="r")
    phase_axis.set_ylim([-180, 180])

    plt.xlim([20, 20000])
    plt.show()
def lowpass(f0, q_value, fs):
    """Trapezoidal-SVF low-pass coefficients for cutoff f0 (Hz) at sample rate fs."""
    a = 1.0
    g = tan(pi * f0 / fs)   # pre-warped integrator gain
    k = 1.0 / q_value       # damping (inverse Q)
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: pure low-pass tap.
    m0, m1, m2 = 0.0, 0.0, 1.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(lowpass(1000, 2.0, 96000))
def highpass(f0, q_value, fs):
    """Trapezoidal-SVF high-pass coefficients for cutoff f0 (Hz) at sample rate fs."""
    a = 1.0
    g = tan(pi * f0 / fs)   # pre-warped integrator gain
    k = 1.0 / q_value       # damping (inverse Q)
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: input minus damped band tap minus low tap.
    m0, m1, m2 = 1.0, -k, -1.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(highpass(1000, 2.0, 96000))
def bandpass(f0, q_value, fs):
    """Trapezoidal-SVF band-pass coefficients for centre frequency f0 (Hz)."""
    a = 1.0
    g = tan(pi * f0 / fs)   # pre-warped integrator gain
    k = 1.0 / q_value       # damping (inverse Q)
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: pure band-pass tap.
    m0, m1, m2 = 0.0, 1.0, 0.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(bandpass(1000, 2.0, 96000))
def bell(f0, q_value, db_gain, fs):
    """Trapezoidal-SVF bell (peaking EQ) coefficients; db_gain is boost/cut in dB."""
    a = pow(10, db_gain / 40.0)   # square root of the linear gain
    g = tan(pi * f0 / fs)
    k = 1.0 / (q_value * a)       # damping scaled by gain for symmetric boost/cut
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: input plus gain-weighted band tap.
    m0, m1, m2 = 1.0, k * (a * a - 1.0), 0.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(bell(1000, 2.0, -10, 96000))
def notch(f0, q_value, fs):
    """Trapezoidal-SVF notch coefficients for centre frequency f0 (Hz).

    Consistency fix: `a` is the float literal 1.0 like every other
    coefficient builder in this notebook (it was the int 1 here).
    """
    a = 1.0
    g = tan(pi * f0 / fs)   # pre-warped integrator gain
    k = 1.0 / q_value       # damping (inverse Q)
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: input minus damped band tap (nulls the centre frequency).
    m0 = 1.0
    m1 = -k
    m2 = 0.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(notch(1000, 2.0, 96000))
def allpass(f0, q_value, fs):
    """Trapezoidal-SVF all-pass coefficients for centre frequency f0 (Hz).

    Consistency fix: `a` is the float literal 1.0 like every other
    coefficient builder in this notebook (it was the int 1 here).
    """
    a = 1.0
    g = tan(pi * f0 / fs)   # pre-warped integrator gain
    k = 1.0 / q_value       # damping (inverse Q)
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix: unity gain with phase rotation around f0.
    m0 = 1.0
    m1 = -2.0 * k
    m2 = 0.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(allpass(1000, 2.0, 96000))
def lowshelf(f0, q_value, db_gain, fs):
    """Trapezoidal-SVF low-shelf coefficients; db_gain is the shelf gain in dB."""
    a = pow(10, db_gain / 40.0)         # square root of the linear gain
    g = tan(pi * f0 / fs) / sqrt(a)     # corner shifted by sqrt(gain)
    k = 1.0 / q_value
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix blending input, band and low taps into a shelf.
    m0, m1, m2 = 1.0, k * (a - 1.0), a * a - 1.0
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(lowshelf(1000, 2.0, -10, 96000))
def highshelf(f0, q_value, db_gain, fs):
    """Trapezoidal-SVF high-shelf coefficients; db_gain is the shelf gain in dB."""
    a = pow(10, db_gain / 40.0)         # square root of the linear gain
    g = tan(pi * f0 / fs) * sqrt(a)     # corner shifted by sqrt(gain)
    k = 1.0 / q_value
    a1 = 1.0 / (1.0 + g * (g + k))
    a2 = g * a1
    a3 = g * a2
    # Output mix blending input, band and low taps into a shelf.
    m0, m1, m2 = a * a, k * (1.0 - a) * a, 1.0 - a * a
    return a, g, k, a1, a2, a3, m0, m1, m2
svf_coeffs = plot_svf(highshelf(1000, 2.0, -10, 96000))
class Filter(object):
    """Stateful trapezoidal-SVF sample processor for the coefficient tuples above."""

    def __init__(self, coeffs):
        super().__init__()
        self.coeffs = coeffs
        # Integrator states (the two "capacitor charges" of the analog SVF).
        self.ic1eq = 0.0
        self.ic2eq = 0.0

    def process(self, x):
        """Process one input sample x and return the filtered sample.

        Bug fix: the original unpacked the module-level name `coeffs`,
        which is never defined in this notebook (only `svf_coeffs` is),
        so process() raised NameError; it now uses self.coeffs.
        """
        a, g, k, a1, a2, a3, m0, m1, m2 = self.coeffs
        v3 = x - self.ic2eq
        v1 = a1 * self.ic1eq + a2 * v3
        v2 = self.ic2eq + a2 * self.ic1eq + a3 * v3
        self.ic1eq = 2.0 * v1 - self.ic1eq
        self.ic2eq = 2.0 * v2 - self.ic2eq
        return m0 * x + m1 * v1 + m2 * v2
| svfbode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.9 64-bit (''tcc_puc_fake_news'': venv)'
# name: python3
# ---
# +
from bs4 import BeautifulSoup
import requests
import multiprocessing as mp
import pandas as pd
import re
from nltk.stem import RSLPStemmer
from wordcloud import WordCloud
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
pd.set_option('display.max_colwidth', None)
# -
def getPage(url):
    """Scrape one listing page; return its '#Verificamos:' article hrefs, de-duplicated in order."""
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    hrefs = []
    for anchor in soup.findAll('a'):
        title = anchor.get('title')
        href = anchor.get('href')
        if title is not None and href is not None and str(title).startswith('#Verificamos:'):
            hrefs.append(href)
    # dict.fromkeys removes duplicates while keeping first-seen order.
    return list(dict.fromkeys(hrefs))
def get_is_fake_and_text(url):
    """Fetch one fact-check article; return (flag, claim text) or (None, None) on error.

    NOTE(review): the flag semantics look inverted - the function returns
    False for a 'FALSO' (fake) verdict and raises for 'VERDADEIRO', yet the
    downstream cells filter df['0'] == False to keep exactly these rows, so
    the convention is used consistently. Confirm before renaming/fixing.
    """
    try:
        resp = requests.get(url)
        if(resp.status_code != 200):
            raise Exception('Error executing http call')
        html = resp.content
        soup = BeautifulSoup(html, 'html.parser')
        div_post_inner = soup.find('div', {'class': 'post-inner'})
        # Verdict badge text, e.g. 'FALSO' or 'VERDADEIRO'.
        div_b_text = div_post_inner.findChildren('div', {'class': 'etiqueta etiqueta-7'})[0].text
        is_fake = True
        if div_b_text == 'VERDADEIRO':
            raise Exception('Not fake')
        elif div_b_text == 'FALSO':
            is_fake = False
        else:
            raise Exception('Not parseable')
        # The third direct <p> child is taken as the claim text.
        ps = div_post_inner.findAll('p', recursive=False)
        p_tag = ps[2].text
        return (is_fake, p_tag)
    except Exception as e:
        print('##### ERROR - ' + str(e))
        return (None, None)
# +
base_url = "https://piaui.folha.uol.com.br/lupa/page/"
# Fix: pool.apply blocks until each call returns, so the original list
# comprehensions ran every HTTP request serially; pool.map distributes the
# work across the worker pool while preserving input order.
pages = [base_url + str(i) + "/" for i in range(1, 366)]
pool = mp.Pool(mp.cpu_count())
results = pool.map(getPage, pages)
pool.close()
v_links = []
for r in results:
    v_links.extend(r)
# De-duplicate links while keeping first-seen order.
v_links = list(dict.fromkeys(v_links))
# -
df = pd.DataFrame(v_links)
df.to_csv('generated/piaui_verified_links.csv', index=False)
# +
# Fetch every verified article (flag, claim text) in parallel, order preserved.
pool = mp.Pool(mp.cpu_count())
results_get_fake = pool.map(get_is_fake_and_text, v_links)
pool.close()
df = pd.DataFrame(results_get_fake)
df.to_csv('generated/results_get_fake.csv', index=False)
# +
# Reload the scraped (flag, text) pairs; column '0' is the flag returned by
# get_is_fake_and_text and '1' is the claim text.
df = pd.read_csv('generated/results_get_fake.csv')
df['0'] = df['0'].astype(bool)
df['1'] = df['1'].astype(str)
# Keep the rows flagged False, i.e. the 'FALSO' verdicts (see the note on
# get_is_fake_and_text's inverted flag convention).
df = df[df['0'] == False]
df.info()
print(df['0'].value_counts())
df.head(10)
# +
# Text-cleaning helpers (Portuguese): strip punctuation, keep only lines that
# start with a quotation mark, normalise whitespace, extract the quoted span,
# and drop quotes shorter than 10 words.
remove_pontuacao = lambda x: re.sub(r'[^\w\s]', '', x)
remove_nao_citacao = lambda x: x if x.startswith('“') else ''
remove_quebra_linha = lambda x: re.sub(r'\\n', ' ', x)
remove_tabulacao = lambda x: re.sub(r'\\t', ' ', x)
remove_multiplos_espacos = lambda x: re.sub(' +', ' ', x)
get_between_markers = lambda x: '' if x.find('”') == -1 else x[x.find('“')+1: x.find('”')]
remove_little = lambda x: '' if len(x.split(' ')) < 10 else x

stemmer = RSLPStemmer()
# Set for O(1) stopword membership tests in the per-word filter below.
stopws = set(stopwords.words('portuguese'))

df['1'] = df['1'].map(remove_quebra_linha).map(remove_tabulacao).map(remove_multiplos_espacos).map(remove_nao_citacao).map(get_between_markers).map(remove_little).map(remove_pontuacao)
df.replace('FALSO', '', inplace=True)
# Lower-case and strip accents/non-ASCII characters.
df['1'] = df['1'].str.lower()
df['1'] = df['1'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
df['1'] = df['1'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopws)]))
df['1'] = df['1'].str.strip()
# Bug fix: df.replace('', None) triggers pandas' method='pad' forward-fill
# (it copies the previous row's value into empty cells) instead of marking
# them missing; replace with NaN so dropna() below removes them.
df.replace('', float('nan'), inplace=True)
df.drop_duplicates(subset='1', keep='last', inplace=True)
df.dropna(inplace=True)
df['1'] = df['1'].str.lower()
df['1'] = df['1'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')

# Word cloud of the cleaned (pre-stemming) vocabulary.
words = " ".join(df['1'])
word_cloud = WordCloud(width=800, height=400, collocations = False, background_color = 'white').generate(words)
plt.figure( figsize=(20,10) )
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis("off")
plt.show()

# Stem every word and persist the final corpus.
df['1'] = df['1'].apply(lambda x: ' '.join([stemmer.stem(y) for y in x.split(' ')]))
df.to_csv('generated/results_get_fake_built.csv', index=False)
df.info()
print(df['0'].value_counts())
df.head(10)
| src/pre_processing_fake_news.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd

# Read bitcoin.xlsx into a DataFrame with pandas.read_excel and bind it to df.
# (The original used no-op bare triple-quoted strings as comments; they were
# expression statements, not comments, and are replaced with real comments.)
df = pd.read_excel('../datasets/bitcoin/bitcoin.xlsx')
# Inspect df.
df
# +
# Take the first 10 rows and the first 5 columns of df as df_part.
# .iloc replaces the original chained indexing df[:10][df.columns[0:5]],
# which selected the same data via two separate lookups.
df_part = df.iloc[:10, :5]
# Inspect df_part.
df_part
# +
# Write df_part to bitcoin_part.xlsx in Excel format (no index column).
df_part.to_excel('../datasets/bitcoin/bitcoin_part.xlsx', index=False)

# Read bitcoin_part.xlsx back with pandas.read_excel into df_new.
df_new = pd.read_excel('../datasets/bitcoin/bitcoin_part.xlsx')
# Check that df_new matches df_part.
df_new
# -
| Chapter_4/Section_4.3.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multiple infillers
#
# Here we investigate three infillers that perform many cruncher operations. The first one is designed to infill all required data for running openSCM, with the minimum of inputs. One is designed to break down aggregate values, like the Kyoto gases or HFCs. The second does the opposite, recalculating aggregates from its components. These functions are purposefully not as object-oriented as the crunchers so that modellers unfamiliar with this coding structure can use them.
#
# You will need to install silicone in order to run this code.
# ## Imports
# +
import os.path
import traceback
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyam
import silicone.multiple_infillers as mi
from silicone.utils import (
convert_units_to_MtCO2_equiv,
get_sr15_scenarios,
return_cases_which_consistently_split,
)
# -
# Local cache file for the downloaded SR1.5 scenario data.
SR15_SCENARIOS = "./sr15_scenarios.csv"
# ## Example data
#
# Here we pull some example data by downloading a selection of the SR1.5 scenarios.
# Wildcard patterns selecting which models' scenarios to download.
valid_model_ids = [
    "MESSAGE*",
    "AIM*",
    "C-ROADS*",
    "GCAM*",
    "IEA*",
    "IMAGE*",
    "MERGE*",
    "POLES*",
    "REMIND*",
    "WITCH*",
]
# Download once and cache locally; subsequent runs reuse the csv.
if not os.path.isfile(SR15_SCENARIOS):
    get_sr15_scenarios(SR15_SCENARIOS, valid_model_ids)
# ### Starting point
#
# Our starting point is the test data, loaded as a `pyam.IamDataFrame`.
sr15_data = pyam.IamDataFrame(SR15_SCENARIOS)
# Total CO2 is the aggregate we work with; the wildcard matches its sub-sectors.
target = "Emissions|CO2"
constituents = ["Emissions|CO2|*"]
# WITCH scenarios are the ones we will infill; every other model forms the
# infiller database.
to_infill = sr15_data.filter(model="WITCH*", variable=[target] + constituents)
database = sr15_data.filter(model="WITCH*", keep=False)
to_infill.head()
# ## Investigating where the data is consistent
#
# A utility function called return_cases_which_consistently_split indicates which variables consist of only other variables, which is useful to know in order to work out where data can be consistently split using decompose_collection_with_time_dep_ratio. Note that this is not a requirement for using that method (a consistent aggregate value is constructed in any case) but indicates that this approach is rigorous.
# In the first instance, it does not find any results because there are several layers of constituents. However with only one layer, this works as expected. Note that if only one layer is used, there is also a pyam built-in function called 'check_consistency' that performs the same effect.
# With the full wildcard list of constituents no consistent cases are found,
# because the constituents contain several nested layers of sub-sectors.
no_cases = return_cases_which_consistently_split(
    to_infill,
    target,
    constituents,
)
len(no_cases)
# However in the case below, we select only the next level of info, and find it matches in all cases (the number of cases does not depend on the accuracy, as shown in the second box).
all_cases = return_cases_which_consistently_split(
    to_infill,
    target,
    ["Emissions|CO2|AFOLU", "Emissions|CO2|Energy and Industrial Processes"],
)
len(all_cases)
# Same check with a very loose tolerance: the number of matching cases is unchanged.
all_cases = return_cases_which_consistently_split(
    to_infill,
    target,
    ["Emissions|CO2|AFOLU", "Emissions|CO2|Energy and Industrial Processes"],
    how_close={
        "equal_nan": True,
        "rtol": 100,  # This means that we accept a factor of 100 inaccuracy.
    },
)
len(all_cases)
# ## Using the infiller functions
# Here we show the use of the DecomposeCollectionTimeDepRatio and InfillAllRequiredVariables functions.
# ### InfillAllRequiredVariables
# This is a function to more conveniently infill all of the required variables that are not already found in the system.
database.tail()
to_infill.tail()
# Infill every required variable for the WITCH scenarios from the rest of
# the database, reporting on decadal timesteps from 2020 to 2100.
infilled = mi.infill_all_required_variables(
    to_infill, database, [target], output_timesteps=list(range(2020, 2101, 10))
)
infilled.head()
# We now have a complete scenario, with all required variables.
to_infill.filter().variable
# ### DecomposeCollectionTimeDepRatio
# This function is designed to split up an aggregate value into its known components, using the relationship between these found in models that have all components (but not necessarily the aggregate variable). The aggregate value is calculated in the first step.
# Inspect the units present in the infiller database.
database["unit"].unique()
# Define some calculation parameters: the constituent gases and the
# aggregate Kyoto basket they should sum to.
components = [
    "Emissions|CO2",
    "Emissions|CH4",
    "Emissions|N2O",
    "Emissions|F-Gases",
]
aggregate = "Emissions|Kyoto Gases (AR4-GWP100)"
# We infill the WITCH SSP1 scenarios, which report the aggregate.
to_infill = sr15_data.filter(model="WITCH*", scenario="SSP1*", variable=aggregate)
# The components must share a common unit before they can be combined.
unit_consistent_db = convert_units_to_MtCO2_equiv(database.filter(variable=components))
unit_consistent_db.variable
to_infill.variable
# Split the aggregate via time-dependent ratios, keeping even the
# model/scenario combinations that do not report all components.
decomposer = mi.DecomposeCollectionTimeDepRatio(unit_consistent_db)
results = decomposer.infill_components(
    aggregate, components, to_infill, only_consistent_cases=False
)
# Repeat with the default only_consistent_cases behaviour for comparison.
# Bug fix: this previously called decomposer.infill_components again,
# leaving the freshly created decomposer_all unused.
decomposer_all = mi.DecomposeCollectionTimeDepRatio(unit_consistent_db)
results_all = decomposer_all.infill_components(aggregate, components, to_infill)
results.head()
# Overlay the two CO2 infillings: dashed = all cases, solid = consistent only.
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(111)
results.filter(variable="*CO2").plot(
    ax=ax, linestyle="--", dashes=(10, 10), linewidth=3
)
results_all.filter(variable="*CO2").plot(ax=ax, linewidth=2)
plt.title(
    "CO2 calculated as a fraction of Kyoto gas, with (solid) and without (dashed) model/scenario data not reported at some times"
)
# We now have variable information for each of the components
results.variable
# ### SplitCollectionWithRemainderEmissions
# This technique is also used for splitting up an aggregated basket of emissions, however does not assume that the emissions are in proportion to the aggregate at any given time. The key advantage of this is that it allows some emissions that are naturally limited to stay within their limits. The key disadvantage is that one of the emissions is only determined by conservation of the aggregate, and therefore may have quite a non-representative value. It is best for situations where one of the emissions is either very large compared to the others, or of unknown sign. This technique can be used with any cruncher, however the default is QRW. In most cases, CO$_2$ will be the emission to use as the 'remainder'. We will apply this technique to the same case as above, although we will not convert the units to be the same this time as the function will do this automatically. Note that this requires unit conversion to make the values comparable. In this case, we specify that the values be converted to CO$_2$-equivalent with AR4 GWP100 values, using the `metric_name` option.
# Constituents crunched directly; CO2 is handled separately as the remainder.
components = [
    "Emissions|CH4",
    "Emissions|N2O",
    "Emissions|F-Gases",
]
remainder = "Emissions|CO2"  # This is the component that will make up the difference
aggregate = "Emissions|Kyoto Gases (AR4-GWP100)"
to_infill = sr15_data.filter(model="WITCH*", scenario="SSP1*", variable=aggregate)
# The infiller database needs the remainder and the aggregate as well as the components.
all_rem_vars = [remainder, aggregate] + components
split_remainder = mi.SplitCollectionWithRemainderEmissions(
    database.filter(variable=all_rem_vars)
)
# Units are converted internally; metric_name selects AR4 GWP100 CO2-equivalents.
remainder_split_res = split_remainder.infill_components(
    aggregate,
    components,
    remainder,
    to_infill,
    metric_name="AR4GWP100",
)
remainder_split_res.head()
# We can compare this infilling method with that of the DecomposeCollectionTimeDepRatio method above. We plot them below, with the DecomposeCollection method dashed.
fig = plt.figure(figsize=(16, 9))
# Left panel: CO2 (the remainder) from both methods.
ax = fig.add_subplot(121)
remainder_split_res.filter(variable="*CO2").plot(ax=ax, label="__")
results.filter(variable="*CO2").plot(ax=ax, linestyle="--", label="__")
plt.title("")
# Right panel: CH4, converted to CO2-equivalent so the two methods are comparable.
ax = fig.add_subplot(122)
convert_units_to_MtCO2_equiv(remainder_split_res.filter(variable="*CH4")).plot(
    ax=ax, label="__"
)
results.filter(variable="*CH4").plot(ax=ax, linestyle="--", label="__")
# We see that the results are fairly similar for CO$_2$, since they make up the bulk of the aggregate in both cases. However the CH$_4$ results are very different, as they can take any positive value in the ratio-based technique, whereas it is constrained to keep to values in the infiller database in the QRW-based SplitCollection technique. This particularly results in regular behaviour when the lead values go negative, as seen in the lowermost curve on the right after 2070 - the methane level increases as the lead value becomes more negative for the DecomposeCollection method, whereas the (more expected) results from the SplitCollection method is a slow decline in emissions.
# ### Calculate aggregate values
# This tool is used to build an aggregate value that ensures that the data is self-consistent. In our case, the database has a value for F-Gases, but these aggregates are not consistent with the AR5 conversion factors. We want the AR5 values, which are the default, so do not add a `metric_name` value this time.
# Re-select all WITCH data; we will rebuild the F-Gases aggregate from its
# HFC/PFC/SF6 components so the data becomes self-consistent.
to_infill = sr15_data.filter(model="WITCH*")
target = "Emissions|F-Gases"
components = ["Emissions|HFC", "Emissions|PFC", "Emissions|SF6"]
# With a 10% tolerance, count how many cases already split consistently.
valid_cases = return_cases_which_consistently_split(
    to_infill,
    target,
    components,
    how_close={
        "equal_nan": True,
        "rtol": 0.1,
    },
)
len(valid_cases)
# We first convert them to a common unit.
to_infill = to_infill.filter(variable=[target] + components)
to_infill = convert_units_to_MtCO2_equiv(to_infill)
to_infill.filter(variable=target, scenario="ADVANCE_2020_Med2C").head()
# The recalculated values are similar but noticeably different:
infilled = mi.infill_composite_values(to_infill, {target: components})
infilled.filter(variable=target, scenario="ADVANCE_2020_Med2C").head()
# We have now fixed the inconsistency problem: drop the reported aggregate
# and append the recalculated, self-consistent one.
to_infill.filter(variable=target, keep=False, inplace=True)
to_infill.append(infilled, inplace=True)
# Re-run the consistency check with a much tighter 1% tolerance.
valid_cases = return_cases_which_consistently_split(
    to_infill,
    target,
    components,
    metric_name="AR4GWP100",
    how_close={
        "equal_nan": True,
        "rtol": 0.01,
    },
)
len(valid_cases)
len(to_infill.scenario)
# The `infill_composite_values` function can also take a dictionary of dictionaries as its second argument if the values should be multiplied by constants before summing them. This allows us to subtract values as well.
# Derive "other" CO2 as total CO2 minus the two reported sub-sectors.
aggregate = "Emissions|CO2|Other"
other_CO2 = mi.infill_composite_values(
    sr15_data,
    {
        aggregate: {
            "Emissions|CO2": 1,
            "Emissions|CO2|Energy and Industrial Processes": -1,
            "Emissions|CO2|AFOLU": -1,
        }
    },
)
other_CO2.head(10)
# These values are relatively small (cf. 34000 total emissions), so presumably represent rounding errors.
| notebooks/05_Multiple_infillers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8 (XPython)
# language: python
# name: xpython
# ---
# +
# %matplotlib inline
# %load_ext Cython
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import h10
from tqdm import tqdm
from nicks_plot_utils import *
import boost_histogram as bh
# -
# Build the h10 Monte-Carlo event reader and register the aao_norad
# generator output file (ROOT format) as its input.
root_reader = h10.h10_mc()
root_reader.add("aao_norad.root")
# + language="cython"
# import numpy as np
# cimport numpy as np
# from libc.math cimport sin, cos, sqrt
# from scipy import stats
# cimport cython
#
# cdef float MP = 0.93827208816
# cdef float E0 = 4.81726
# cdef float ME = 0.00051099895
#
# cdef float p_targ_px = 0.0
# cdef float p_targ_py = 0.0
# cdef float p_targ_pz = 0.0
# cdef float p_targ_E = MP
#
# cdef float e_beam_px = 0.0
# cdef float e_beam_py = 0.0
# cdef float e_beam_pz = sqrt(E0**2-ME**2)
# cdef float e_beam_E = E0
#
# def calc_W(float e_prime_px, float e_prime_py, float e_prime_pz, float e_prime_E):
# # cdef float e_prime_px = e_p*sin(e_theta)*cos(e_phi)
# # cdef float e_prime_py = e_p*sin(e_theta)*sin(e_phi)
# # cdef float e_prime_pz = e_p*cos(e_theta)
# # cdef float e_prime_E = sqrt(e_prime_px**2 + e_prime_py**2 + e_prime_pz**2 - ME**2)
#
# cdef float temp_px = e_beam_px - e_prime_px + p_targ_px
# cdef float temp_py = e_beam_py - e_prime_py + p_targ_py
# cdef float temp_pz = e_beam_pz - e_prime_pz + p_targ_pz
# cdef float temp_E = e_beam_E - e_prime_E + p_targ_E
#
#
# cdef float temp2 = temp_px**2+temp_py**2+temp_pz**2-temp_E**2
# cdef float temp3 = sqrt(-temp2)
#
#
# return temp3
#
#
# def calc_q2(float e_prime_px, float e_prime_py, float e_prime_pz, float e_prime_E):
# # cdef float e_prime_px = e_p*sin(e_theta)*cos(e_phi)
# # cdef float e_prime_py = e_p*sin(e_theta)*sin(e_phi)
# # cdef float e_prime_pz = e_p*cos(e_theta)
# # cdef float e_prime_E = sqrt(e_prime_px**2 + e_prime_py**2 + e_prime_pz**2 - ME**2)
#
# cdef float temp_px = e_beam_px - e_prime_px
# cdef float temp_py = e_beam_py - e_prime_py
# cdef float temp_pz = e_beam_pz - e_prime_pz
# cdef float temp_E = e_beam_E - e_prime_E
#
# cdef float temp2 = temp_px**2+temp_py**2+temp_pz**2-temp_E**2
#
# return temp2
# +
# Book histograms: W vs Q^2 (2D) and W alone (1D), 150 bins each axis.
wvsq2 = bh.Histogram(bh.axis.Regular(150, 1.1, 2.0),
                     bh.axis.Regular(150, 1.0, 3.5))
w = bh.Histogram(bh.axis.Regular(150, 1.1, 2.0))
with tqdm(total=root_reader.num_entries) as pbar:
    for i, event in enumerate(root_reader):
        # Advance the progress bar exactly once per event. (Bug fix: the
        # original additionally called pbar.update(1) every 100th event,
        # over-counting progress relative to total=num_entries.)
        pbar.update(1)
        # if i > 500000: break
        # Kinematics from the scattered electron (particle index 0).
        W = calc_W(event.pxpart[0],event.pypart[0],event.pzpart[0],event.epart[0])
        Q2 = calc_q2(event.pxpart[0],event.pypart[0],event.pzpart[0],event.epart[0])
        wvsq2.fill(W,Q2)
        w.fill(W)
# +
# Wrap the boost histograms in plotting helpers and draw the 2D distribution.
hist = Hist2D(boost_hist=wvsq2, xname="$W~[GeV]$", yname="$Q^2~[GeV^2]$")
hist_W = Hist1D(boost_hist=w, name="$W~[GeV]$")
#hist_W.histogram(label="$W$ Projection", color='r', density=True, factor=3.0)
hist.plot(colorbar=True, density=False, zeros=False)
# -
| radEffectsCorr/RadCorr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this lecture we're going to address how you can bring multiple dataframe objects together, either by
# merging them horizontally, or by concatenating them vertically. Before we jump into the code, we need to
# address a little relational theory and to get some language conventions down. I'm going to bring in an image
# to help explain some concepts.
#
# 
#
# Ok, this is a Venn Diagram. A Venn Diagram is traditionally used to show set membership. For example, the
# circle on the left is the population of students at a university. The circle on the right is the population
# of staff at a university. And the overlapping region in the middle are all of those students who are also
# staff. Maybe these students run tutorials for a course, or grade assignments, or engage in running research
# experiments.
#
# So, this diagram shows two populations whom we might have data about, but there is overlap between those
# populations.
# When it comes to translating this to pandas, we can think of the case where we might have these two
# populations as indices in separate DataFrames, maybe with the label of Person Name. When we want to join the
# DataFrames together, we have some choices to make. First what if we want a list of all the people regardless
# of whether they're staff or student, and all of the information we can get on them? In database terminology,
# this is called a full outer join. And in set theory, it's called a union. In the Venn diagram, it represents
# everyone in any circle.
#
# Here's an image of what that would look like in the Venn diagram.
#
# 
# It's quite possible though that we only want those people who we have maximum information for, those people
# who are both staff and students. Maybe being a staff member and a student involves getting a tuition waiver,
# and we want to calculate the cost of this. In database terminology, this is called an inner join. Or in set
# theory, the intersection. It is represented in the Venn diagram as the overlapping parts of each circle.
#
# Here's what that looks like: 
#
# +
# With that background, let's see how this is done in pandas with the
# merge function.
import pandas as pd
# Build the staff DataFrame from column-wise data and index it by Name.
staff_df = pd.DataFrame(
    {'Name': ['Kelly', 'Sally', 'James'],
     'Role': ['Director of HR', 'Course liasion', 'Grader']}
).set_index('Name')
# Build the student DataFrame the same way, also indexed by Name.
student_df = pd.DataFrame(
    {'Name': ['James', 'Mike', 'Sally'],
     'School': ['Business', 'Law', 'Engineering']}
).set_index('Name')
# Show both frames.
print(staff_df.head())
print(student_df.head())
# +
# There's some overlap in these DataFrames: James and Sally are both
# students and staff, while Mike and Kelly are not. Both frames are
# indexed on the value we want to merge on -- Name.
# +
# Union of the two populations: a full outer join on the left and right
# indices lists everyone, whether staff, student, or both.
staff_df.merge(student_df, how='outer', left_index=True, right_index=True)
# +
# Everyone is listed; Mike has no Role and Kelly has no School, so those
# cells come back as missing values. The intersection -- people who are
# both staff AND students -- is an inner join on the same indices.
staff_df.merge(student_df, how='inner', left_index=True, right_index=True)
# +
# Set addition, first flavour: all staff, with student details where they
# exist. Order matters: the first frame is the left one, so this is a
# left join.
staff_df.merge(student_df, how='left', left_index=True, right_index=True)
# -
# Second flavour: all students, plus their staff role if they have one --
# a right join.
staff_df.merge(student_df, how='right', left_index=True, right_index=True)
# +
# We can also do it another way. The merge method has a couple of other interesting parameters. First, you
# don't need to use indices to join on, you can use columns as well. Here's an example. Here we have a
# parameter called "on", and we can assign a column that both dataframe has as the joining column
# First, lets remove our index from both of our dataframes, turning Name
# back into an ordinary column.
staff_df = staff_df.reset_index()
student_df = student_df.reset_index()
# Now lets merge using the on parameter: 'Name' must exist as a column in
# both dataframes.
pd.merge(staff_df, student_df, how='right', on='Name')
# +
# Using the "on" parameter instead of a the index is how I find myself using merge() the most.
# +
# So what happens when the two DataFrames share a non-key column? Rebuild
# both frames with a Location column whose meaning differs per frame:
# an office address for staff, a home address for students.
staff_df = pd.DataFrame(
    {'Name': ['Kelly', 'Sally', 'James'],
     'Role': ['Director of HR', 'Course liasion', 'Grader'],
     'Location': ['State Street', 'Washington Avenue', 'Washington Avenue']}
)
student_df = pd.DataFrame(
    {'Name': ['James', 'Mike', 'Sally'],
     'School': ['Business', 'Law', 'Engineering'],
     'Location': ['1024 Billiard Avenue', 'Fraternity House #22',
                  '512 Wilson Crescent']}
)
# merge() keeps both conflicting columns and disambiguates them with
# suffixes: _x is always the left frame's column, _y the right frame's.
# Left join on Name: all staff rows, with student details where available.
staff_df.merge(student_df, how='left', on='Name')
# +
# From the output, we can see there are columns Location_x and Location_y. Location_x refers to the Location
# column in the left dataframe, which is staff dataframe and Location_y refers to the Location column in the
# right dataframe, which is student dataframe.
# Before we leave merging, a note on joining by multiple columns. First
# names might collide between staff and students while last names do not,
# so we can hand the on parameter a LIST of key columns; every column
# named there must exist in both dataframes. Fresh staff and student data:
staff_df = pd.DataFrame(
    {'First Name': ['Kelly', 'Sally', 'James'],
     'Last Name': ['Desjardins', 'Brooks', 'Wilde'],
     'Role': ['Director of HR', 'Course liasion', 'Grader']}
)
student_df = pd.DataFrame(
    {'First Name': ['James', 'Mike', 'Sally'],
     'Last Name': ['Hammond', 'Smith', 'Brooks'],
     'School': ['Business', 'Law', 'Engineering']}
)
# James Wilde (staff) and James Hammond (student) agree on first name only,
# so an inner join on BOTH keys drops them; only Sally Brooks, who appears
# in both frames, is retained.
staff_df.merge(student_df, how='inner', on=['First Name', 'Last Name'])
# +
# Joining dataframes through merging is incredibly common, and you'll need to know how to pull data from
# different sources, clean it, and join it for analysis. This is a staple not only of pandas, but of database
# technologies as well.
# +
# If we think of merging as joining "horizontally", meaning we join on similar values in a column found in two
# dataframes then concatenating is joining "vertically", meaning we put dataframes on top or at the bottom of
# each other
# Let's understand this from an example. You have a dataset that tracks some information over the years. And
# each year's record is a separate CSV and every CSV for every year's record has exactly the same columns.
# What happens if you want to put all the data, from all years' record, together? You can concatenate them.
# +
# Let's take a look at the US Department of Education College Scorecard data It has each US university's data
# on student completion, student debt, after-graduation income, etc. The data is stored in separate CSV's with
# each CSV containing a year's record Let's say we want the records from 2011 to 2013 we first create three
# dataframe, each containing one year's record. And, because the csv files we're working with are messy, I
# want to supress some of the jupyter warning messages and just tell read_csv to ignore bad lines, so I'm
# going to start the cell with a cell magic called %%capture
# -
# %%capture
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0; on_bad_lines="skip" is the modern spelling -- confirm the pandas
# version this course targets before changing it.
df_2011 = pd.read_csv("datasets/college_scorecard/MERGED2011_12_PP.csv", error_bad_lines=False)
df_2012 = pd.read_csv("datasets/college_scorecard/MERGED2012_13_PP.csv", error_bad_lines=False)
df_2013 = pd.read_csv("datasets/college_scorecard/MERGED2013_14_PP.csv", error_bad_lines=False)
# Let's get a view of one of the dataframes
df_2011.head(3)
# We see that there is a whopping number of columns - more than 1900! We can calculate the length of each
# dataframe as well
print(len(df_2011))
print(len(df_2012))
print(len(df_2013))
# +
# That's a bit surprising that the number of schools in the scorecard for 2011 is almost double that of the
# next two years. But let's not worry about that. Instead, let's just put all three dataframes in a list and
# call that list frames and pass the list into the concat() function Let's see what it looks like
frames = [df_2011, df_2012, df_2013]
# Stack the three frames vertically: rows from all years, same columns.
pd.concat(frames)
# -
# As you can see, we have more observations in one dataframe and columns remain the same. If we scroll down to
# the bottom of the output, we see that there are a total of 30,832 rows after concatenating three dataframes.
# Let's add the number of rows of the three dataframes and see if the two numbers match
len(df_2011)+len(df_2012)+len(df_2013)
# +
# The two numbers match! Which means our concatenation is successful. But wait, now that all the data is
# concatenated together, we don't know what observations are from what year anymore! Actually the concat
# function has a parameter that solves such problem with the keys parameter, we can set an extra level of
# indices, we pass in a list of keys that we want to correspond to the dataframes into the keys parameter
# Now let's try it out
# keys= adds an outer index level so each row remembers its source year.
pd.concat(frames, keys=['2011','2012','2013'])
# +
# Now we have the indices as the year so we know what observations are from what year. You should know that
# concatenation also has inner and outer method. If you are concatenating two dataframes that do not have
# identical columns, and choose the outer method, some cells will be NaN. If you choose to do inner, then some
# observations will be dropped due to NaN values. You can think of this as analogous to the inner and outer
# joins of the merge() function.
# -
# Now you know how to merge and concatenate datasets together. You will find such functions very useful for
# combining data to get more complex or complicated results and to do analysis with. A solid understanding of
# how to merge data is absolutely essentially when you are procuring, cleaning, and manipulating data. It's
# worth knowing how to join different datasets quickly, and the different options you can use when joining
# datasets, and I would encourage you to check out the pandas docs for joining and concatenating data.
| Course - 1: Introduction to Data Science in Python/resources/week-3/MergingDataFrame_ed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generative models - variational auto-encoders
#
# ### Author: <NAME> (<EMAIL>)
#
# In this course we will cover
# 1. A [quick recap](#recap) on simple probability concepts (and in TensorFlow)
# 2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs)
# 3. An explanation of the [implementation](#implem) of VAEs
# 4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)**
# <a id="recap"> </a>
#
# ## Quick recap on probability
#
# The field of probability aims to model random or uncertain events. Hence, a random variable $X$ denotes a quantity that is uncertain, such as the result of an experiment (flipping a coin) or the measurement of an uncertain property (measuring the temperature). If we observe several occurrences of the variable $\{\mathbf{x}_{i}\}_{i=1}$, it might take different values on each occasion, but some values may occur more often than others. This information is captured by the _probability distribution_ $p(\mathbf{x})$ of the random variable.
#
# To understand these concepts graphically, we will rely on the `Tensorflow Probability` package.
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# ### Probability distributions
#
# #### Discrete distributions
#
# Let $\mathbf{x}$ be a discrete random variable with range $R_{X}=\{x_1,\cdots,x_n\}$ (finite or countably infinite). The function
# \begin{equation}
# p_{X}(x_{i})=p(X=x_{i}), \forall i\in\{1,\cdots,n\}
# \end{equation}
# is called the probability mass function (PMF) of $X$.
#
# Hence, the PMF defines the probabilities of all possible values for a random variable. The above notation allows us to express that the PMF is defined for the random variable $X$, so that $p_{X}(1)$ gives the probability that $X=1$. For discrete random variables, the PMF is also called the _probability distribution_. The PMF is a probability measure, therefore it satisfies all the corresponding properties
# - $0 \leq p_{X}(x_i) < 1, \forall x_i$
# - $\sum_{x_i\in R_{X}} p_{X}(x_i) = 1$
# - $\forall A \subset R_{X}, p(X \in A)=\sum_{x_a \in A}p_{X}(x_a)$
# A very simple example of discrete distribution is the `Bernoulli` distribution. With this distribution, we can model a coin flip. If we throw the coin a very large number of times, we hope to see on average an equal amount of _heads_ and _tails_.
# A fair coin: Bernoulli distribution with p = 0.5.
bernoulli = tfp.distributions.Bernoulli(probs=0.5)
# Draw many samples and plot their empirical distribution.
# NOTE(review): seaborn's distplot is deprecated in seaborn>=0.11
# (histplot/displot replace it) -- confirm the installed version.
samples = bernoulli.sample(10000)
sns.distplot(samples)
plt.title("Samples from a Bernoulli (coin toss)")
plt.show()
# However, we can also _sample_ from the distribution to have individual values of a single throw. In that case, we obtain a series of separate events that _follow_ the distribution
# Draw ten individual tosses and report each outcome by name:
# a sampled 0 maps to 'heads', a 1 to 'tails'.
vals = ['heads', 'tails']
samples = bernoulli.sample(10)
for toss in samples:
    print('Coin is tossed on ' + vals[toss])
# #### Continuous distributions
#
# The same ideas apply to _continuous_ random variables, which can model for instance the height of human beings. If we try to guess the height of someone that we do not know, there is a higher probability that this person will be around 1m70, instead of 20cm or 3m. For the rest of this course, we will use the shorthand notation $p(\mathbf{x})$ for the distribution $p(\mathbf{x}=x_{i})$, which expresses for a real-valued random variable $\mathbf{x}$, evaluated at $x_{i}$, the probability that $\mathbf{x}$ takes the value $x_i$.
#
# One notorious example of such distributions is the Gaussian (or Normal) distribution, which is defined as
# \begin{equation}
# p(x)=\mathcal{N}(\mu,\sigma)=\frac{1}{\sqrt{2\pi\sigma^{2}}}e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}
# \end{equation}
#
# Similarly as before, we can observe the behavior of this distribution with the following code
# A standard Normal distribution: mean 0, standard deviation 1.
normal = tfp.distributions.Normal(loc=0., scale=1.)
# Sample it heavily and plot the familiar bell curve.
# NOTE(review): sns.distplot is deprecated in seaborn>=0.11 -- confirm version.
samples = normal.sample(10000)
sns.distplot(samples)
plt.title("Samples from a standard Normal")
plt.show()
# ### Comparing distributions (KL divergence)
# $
# \newcommand{\R}{\mathbb{R}}
# \newcommand{\bb}[1]{\mathbf{#1}}
# \newcommand{\bx}{\bb{x}}
# \newcommand{\by}{\bb{y}}
# \newcommand{\bz}{\bb{z}}
# \newcommand{\KL}[2]{\mathcal{D}_{\text{KL}}\left[#1 \| #2\right]}$
# Originally defined in the field of information theory, the _Kullback-Leibler (KL) divergence_ (usually noted $\KL{p(\bx)}{q(\bx)}$) is a dissimilarity measure between two probability distributions $p(\bx)$ and $q(\bx)$. In the view of information theory, it can be understood as the cost in number of bits necessary for coding samples from $p(\bx)$ by using a code optimized for $q(\bx)$ rather than the code optimized for $p(\bx)$. In the view of probability theory, it represents the amount of information lost when we use $q(\bx)$ to approximate the true distribution $p(\bx)$.
#
#
# Given two probability distributions $p(\bx)$ and $q(\bx)$, the Kullback-Leibler divergence of $q(\bx)$ _from_ $p(\bx)$ is defined to be
# \begin{equation}
# \KL{p(\bx)}{q(\bx)}=\int_{\R} p(\bx) \log \frac{p(\bx)}{q(\bx)}d\bx
# \end{equation}
#
# Note that this dissimilarity measure is \textit{asymmetric}, therefore, we have
# \begin{equation}
# \KL{p(\bx)}{q(\bx)}\neq \KL{q(\bx)}{p(\bx)}
# \end{equation}
# This asymmetry also describes an interesting behavior of the KL divergence, depending on the order in which it is evaluated. The KL divergence can either be a _mode-seeking_ or a _mode-covering_ measure.
# <a id="vae"></a>
# ## Variational auto-encoders
#
# As we have seen in the previous AE course, VAEs are also a form of generative models. However, they are defined from a more sound probabilistic perspective: the aim is to find the underlying probability distribution of the data $p(\mathbf{x})$ based on a set of examples in $\mathbf{x}\in\mathbb{R}^{d_{x}}$. To do so, we consider *latent variables* defined in a lower-dimensional space $\mathbf{z}\in\mathbb{R}^{d_{z}}$ ($d_{z} \ll d_{x}$) with the joint probability distribution $p(\mathbf{x}, \mathbf{z}) = p(\mathbf{x} \vert \mathbf{z})p(\mathbf{z})$. Unfortunately, for complex distributions the marginalization integral $p(\mathbf{x})=\int p(\mathbf{x} \vert \mathbf{z})p(\mathbf{z})d\mathbf{z}$ is too complex and cannot be found in closed form.
#
#
# ### Variational inference
#
# The idea of *variational inference* (VI) allows to solve this problem through *optimization* by assuming a simpler approximate distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})\in\mathcal{Q}$ from a family $\mathcal{Q}$ of approximate densities. Hence, the goal is to minimize the difference between this approximation and the real distribution. Therefore, this turns into the optimization problem of minimizing the Kullback-Leibler (KL) divergence between the parametric approximation and the original density
#
# $$
# q_{\phi}^{*}(\mathbf{z}\vert \mathbf{x})=\text{argmin}_{q_{\phi}(\mathbf{z} \vert \mathbf{x})\in\mathcal{Q}} \mathcal{D}_{KL} \big[ q_{\phi}\left(\mathbf{z} \vert \mathbf{x}\right) \parallel p\left(\mathbf{z} \vert \mathbf{x}\right) \big]
# \tag{2}
# $$
#
# By developing this KL divergence and re-arranging terms (the detailed development can be found in [3](#reference1)), we obtain
#
# $$
# \log{p(\mathbf{x})} - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z} \vert \mathbf{x}) \big] =
# \mathbb{E}_{\mathbf{z}} \big[ \log{p(\mathbf{x} \vert \mathbf{z})}\big] - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z}) \big]
# \tag{3}
# $$
#
# This formulation describes the quantity we want to maximize $\log p(\mathbf{x})$ minus the error we make by using an approximate $q$ instead of $p$. Therefore, we can optimize this alternative objective, called the *evidence lower bound* (ELBO)
#
# $$
# \begin{equation}
# \mathcal{L}_{\theta, \phi} = \mathbb{E} \big[ \log{ p_\theta (\mathbf{x|z}) } \big] - \beta \cdot D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel p_\theta(\mathbf{z}) \big]
# \end{equation}
# \tag{4}
# $$
#
# We can see that this equation involves $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ which *encodes* the data $\mathbf{x}$ into the latent representation $\mathbf{z}$ and a *decoder* $p(\mathbf{x} \vert \mathbf{z})$, which allows generating a data vector $\mathbf{x}$ given a latent configuration $\mathbf{z}$. Hence, this structure defines the *Variational Auto-Encoder* (VAE).
#
# The VAE objective can be interpreted intuitively. The first term increases the likelihood of the data generated given a configuration of the latent, which amounts to minimize the *reconstruction error*. The second term represents the error made by using a simpler posterior distribution $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ compared to the true prior $p_{\theta}(\mathbf{z})$. Therefore, this *regularizes* the choice of approximation $q$ so that it remains close to the true posterior distribution [3].
# ### Reparametrization trick
#
# Now, while this formulation has some very interesting properties, it involves sampling operations, where we need to draw the latent point $\mathbf{z}$ from the distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})$. The simplest choice for this variational approximate posterior is a multivariate Gaussian with a diagonal covariance structure (which leads to independent Gaussians on every dimension, called the *mean-field* family) so that
# $$
# \text{log}q_\phi(\mathbf{z}\vert\mathbf{x}) = \text{log}\mathcal{N}(\mathbf{z};\mathbf{\mu}^{(i)},\mathbf{\sigma}^{(i)})
# \tag{5}
# $$
# where the mean $\mathbf{\mu}^{(i)}$ and standard deviation $\mathbf{\sigma}^{(i)}$ of the approximate posterior are different for each input point and are produced by our encoder parametrized by its variational parameters $\phi$. Now the KL divergence between this distribution and a simple prior $\mathcal{N}(\mathbf{0}, \mathbf{I})$ can be very simply obtained with
# $$
# D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel \mathcal{N}(\mathbf{0}, \mathbf{I}) \big] = \frac{1}{2}\sum_{j=1}^{D}\left(1+\text{log}((\sigma^{(i)}_j)^2)+(\mu^{(i)}_j)^2+(\sigma^{(i)}_j)^2\right)
# \tag{6}
# $$
#
# While this looks convenient, we will still have to perform gradient descent through a sampling operation, which is non-differentiable. To solve this issue, we can use the *reparametrization trick*, which takes the sampling operation outside of the gradient flow by considering $\mathbf{z}^{(i)}=\mathbf{\mu}^{(i)}+\mathbf{\sigma}^{(i)}\odot\mathbf{\epsilon}^{(l)}$ with $\mathbf{\epsilon}^{(l)}\sim\mathcal{N}(\mathbf{0}, \mathbf{I})$
# <a id="implem"> </a>
#
# ## VAE implementation
#
# As we have seen, VAEs can be simply implemented by decomposing the above series of operations into an `encoder` which represents the distribution $q_\phi(\mathbf{z}\vert\mathbf{x})$, from which we will sample some values $\tilde{\mathbf{z}}$ (using the reparametrization trick) and compute the Kullback-Leibler (KL) divergence. Then, we use these values as input to a `decoder` which represents the distribution $p_\theta(\mathbf{x}\vert\mathbf{z})$ so that we can produce a reconstruction $\tilde{\mathbf{x}}$ and compute the reconstruction error.
#
# Therefore, we can define the VAE based on our previous implementation of the AE that we recall here
# +
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
class AE(Model):
    """Deterministic auto-encoder: a paired encoder/decoder Keras model."""

    def __init__(self, encoder, decoder, encoding_dim):
        super(AE, self).__init__()
        # Keep the bottleneck size around for subclasses (e.g. the VAE).
        self.encoding_dim = encoding_dim
        self.encoder = encoder
        self.decoder = decoder

    def call(self, x):
        """Encode `x` to the bottleneck and decode it back."""
        return self.decoder(self.encoder(x))
# -
# In order to move to a probabilistic version, we need to add the latent space sampling mechanism, and change the behavior of our `call` function. This process is implemented in the following `VAE` class.
#
# Note that we purposedly rely on an implementation of the `encode` function where the `encoder` first produces an intermediate representation of size `encoder_dims`. Then, this representation goes through two separate functions for encoding $\mathbf{\mu}$ and $\mathbf{\sigma}$. This provides a clearer implementation but also the added bonus that we can ensure that $\mathbf{\sigma} > 0$
class VAE(AE):
    """Variational auto-encoder.

    Extends the deterministic `AE` with a stochastic latent layer: the
    encoder output is mapped to a mean and a standard deviation, a latent
    sample is drawn with the reparametrization trick, and the KL
    divergence to a standard Normal prior is returned alongside the
    reconstruction.
    """

    def __init__(self, encoder, decoder, encoding_dims, latent_dims):
        super(VAE, self).__init__(encoder, decoder, encoding_dims)
        self.latent_dims = latent_dims
        # Softplus keeps sigma strictly positive (it is a standard deviation).
        self.mu = layers.Dense(self.latent_dims, activation='relu')
        self.sigma = layers.Dense(self.latent_dims, activation='softplus')

    def encode(self, x):
        """Return the approximate-posterior parameters (mu, sigma) for `x`."""
        x = self.encoder(x)
        mu = self.mu(x)
        sigma = self.sigma(x)
        return mu, sigma

    def decode(self, z):
        """Map latent samples `z` back to data space."""
        return self.decoder(z)

    def call(self, x):
        """Full pass: encode, sample the latent, decode.

        Returns (x_tilde, kl_div): the reconstruction and the KL
        divergence of q(z|x) from the N(0, I) prior, averaged over the batch.
        """
        # Encode the inputs
        z_params = self.encode(x)
        # Obtain latent samples and latent loss
        z_tilde, kl_div = self.latent(x, z_params)
        # Decode the samples
        x_tilde = self.decode(z_tilde)
        return x_tilde, kl_div

    def latent(self, x, z_params):
        """Sample z with the reparametrization trick and compute the KL term.

        Fixes two defects of the original version:
        * sampling went through `tfp.distributions.Normal`, but `tfp` is
          never imported in this notebook (NameError at runtime) — we use
          `tf.random.normal` instead;
        * the closed-form KL used `sigma` as if it were a log-variance
          (`1 + sigma - mu^2 - exp(sigma)`), while `sigma` here is a
          standard deviation produced by a softplus.
        """
        n_batch = x.shape[0]
        # Retrieve mean and std
        mu, sigma = z_params
        # Re-parametrize: z = mu + sigma * eps with eps ~ N(0, I).
        eps = tf.random.normal(tf.shape(mu))
        z = mu + sigma * eps
        # KL[N(mu, sigma^2) || N(0, I)] = -1/2 sum(1 + log sigma^2 - mu^2 - sigma^2)
        # (small epsilon guards the log against a vanishing softplus output)
        kl_div = -0.5 * tf.reduce_sum(
            1. + 2. * tf.math.log(sigma + 1e-8) - tf.square(mu) - tf.square(sigma))
        kl_div = kl_div / n_batch
        return z, kl_div
# Now the interesting aspect of VAEs is that we can define any parametric function as `encoder` and `decoder`, as long as we can optimize them. Here, we will rely on simple feed-forward neural networks, but these can be largely more complex (with limitations that we will discuss later in the tutorial).
def construct_encoder_decoder(nin, n_latent = 16, n_hidden = 512, n_classes = 1):
    """Build a simple fully-connected encoder/decoder pair.

    The encoder flattens the input and applies three ReLU layers; the
    decoder mirrors it with two ReLU layers, a sigmoid output of size
    `nin * n_classes`, and a reshape back to image form.

    NOTE(review): the final Reshape hard-codes 28x28 images and ignores
    n_classes > 1 output channels — confirm before reusing outside
    FashionMNIST.
    """
    def dense_relu():
        return layers.Dense(n_hidden, activation='relu')

    encoder = tf.keras.Sequential(
        [layers.Flatten(), dense_relu(), dense_relu(), dense_relu()])
    decoder = tf.keras.Sequential(
        [dense_relu(), dense_relu(),
         layers.Dense(nin * n_classes, activation='sigmoid'),
         layers.Reshape((28, 28))])
    return encoder, decoder
# ### Evaluating the error
#
# In the definition of the `VAE` class, we directly included the computation of the $D_{KL}$ term to regularize our latent space. However, remember that the complete loss of equation (4) also contains a *reconstruction loss* which compares our reconstructed output to the original data.
#
# While there are several options to compare the error between two elements, there are usually two preferred choices among the generative literature depending on how we consider our problem
# 1. If we consider each dimension (pixel) to be a binary unit (following a Bernoulli distribution), we can rely on the `binary cross entropy` between the two distributions
# 2. If we turn our problem to a set of classifications, where each dimension can belong to a given set of *intensity classes*, then we can compute the `multinomial loss` between the two distributions
#
# In the following, we define both error functions and regroup them in the `reconstruction_loss` call (depending on the `num_classes` considered). However, as the `multinomial loss` requires a large computational overhead, and for the sake of simplicity, we will train all our first models by relying on the `binary cross entropy`
# +
optimizer = tf.keras.optimizers.Adam(1e-4)

def log_normal_pdf(sample, mean, logvar, raxis=1):
    """Log-density of a diagonal Gaussian at `sample`, summed over `raxis`.

    `logvar` is the log-variance; the constant log(2*pi) term is included.
    """
    log2pi = tf.math.log(2. * np.pi)
    squared = (sample - mean) ** 2. * tf.exp(-logvar)
    return tf.reduce_sum(-.5 * (squared + logvar + log2pi), axis=raxis)
def compute_loss(model, x):
    """Return the negative ELBO for one batch.

    The original version called `model.reparameterize` and assumed the
    encoder returned a log-variance, but the `VAE` class defined above
    exposes neither: `encode` returns (mu, sigma) and `call` already
    performs the sampling and returns the (batch-averaged) KL term.
    We therefore evaluate the loss through the model's own interface.
    """
    x_tilde, kl_div = model(x)
    # The decoder ends with a sigmoid, so x_tilde holds Bernoulli
    # probabilities; compare them to the data with binary cross-entropy.
    eps = 1e-7
    x_tilde = tf.clip_by_value(x_tilde, eps, 1. - eps)
    bce = -(x * tf.math.log(x_tilde) + (1. - x) * tf.math.log(1. - x_tilde))
    # Sum over the pixel dimensions, average over the batch.
    rec_loss = tf.reduce_mean(tf.reduce_sum(bce, axis=[1, 2]))
    return rec_loss + kl_div
@tf.function
def train_step(model, x, optimizer):
    """Execute one training step on a single batch.

    Computes the loss under a gradient tape, then applies the resulting
    gradients to the model's trainable variables.
    """
    with tf.GradientTape() as tape:
        batch_loss = compute_loss(model, x)
    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
# -
# ### Optimizing a VAE on a real dataset
#
# For this tutorial, we are going to take a quick shot at a real-life problem by trying to train our VAEs on the `FashionMNIST` dataset. This dataset can be natively loaded in TensorFlow by relying on the `tensorflow.keras.datasets` module as follows
# Load (and eventually download) the Fashion-MNIST dataset; class labels
# are discarded since the VAE is unsupervised.
(x_train, _), (x_test, _) = fashion_mnist.load_data()
# Normalize pixel intensities from [0, 255] into the [0, 1] range
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# The `FashionMNIST` dataset is composed of simple 28x28 black and white images of different items of clothings (such as shoes, bags, pants and shirts). We put a simple function here to display one batch of the test set (note that we keep a fixed batch from the test set in order to evaluate the different variations that we will try in this tutorial).
def plot_batch(batch, nslices=8):
    """Tile a batch of 2-D images into one canvas and draw it.

    Images are laid out row-major on an `nslices x nslices` grid with a
    one-pixel gap between tiles. Returns a 1-tuple holding the AxesImage
    (the trailing comma of the original return expression is preserved).
    """
    h, w = batch.shape[1], batch.shape[2]
    canvas = np.zeros(((h + 1) * nslices, (w + 1) * nslices))
    for idx in range(batch.shape[0]):
        row, col = divmod(idx, nslices)
        top = row * h + row
        left = col * w + col
        canvas[top:top + h, left:left + w] = batch[idx]
    im = plt.imshow(canvas, cmap='Greys', interpolation='nearest'),
    return im
# Keep a fixed batch (the first 64 test images) for visual comparisons
fixed_batch = x_test[:64]
print(x_test.shape)
plt.figure(figsize=(10, 10))
plot_batch(fixed_batch);
# Now based on our proposed implementation, the optimization aspects are defined in a very usual way
# Using Bernoulli or Multinomial loss (1 = Bernoulli / binary cross-entropy)
num_classes = 1
# Number of hidden units and latent dimensions
n_hidden = 512
n_latent = 2
# Compute input dimensionality (flattened 28x28 image)
nin = fixed_batch.shape[1] * fixed_batch.shape[2]
# Construct encoder and decoder
encoder, decoder = construct_encoder_decoder(nin, n_hidden = n_hidden, n_latent = n_latent, n_classes = num_classes)
# Build the VAE model
model = VAE(encoder, decoder, n_hidden, n_latent)
# Compile the model
# NOTE(review): the MSE loss set here is never used — training below goes
# through the custom `train_step`/`compute_loss` loop, not `fit`.
model.compile(optimizer='adam', loss=losses.MeanSquaredError())
# Now all that is left to do is train the model. We define here a `train_vae` function that we will reuse along the future implementations and variations of VAEs and flows. Note that this function is set to run for only a very few number of `epochs` and also most importantly, *only considers a subsample of the full dataset at each epoch*. This option is just here so that you can test the different models very quickly on any CPU or laptop.
# +
def generate_and_save_images(model, epoch, test_sample):
    """Reconstruct `test_sample`, plot a 4x4 grid and save it as a PNG.

    NOTE(review): `model.reparameterize` and `model.sample` are not
    defined by the `VAE` class above (it exposes `encode`/`decode`/
    `latent`) — confirm the intended model API before running this cell.
    """
    mean, logvar = model.encode(test_sample)
    z = model.reparameterize(mean, logvar)
    predictions = model.sample(z)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0], cmap='gray')
        plt.axis('off')
    # tight_layout minimizes the overlap between 2 sub-plots
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
import time

epochs = 50
batch_size = 64
# Fixed batch of test images used to monitor reconstructions across epochs
# (the original referenced `test_sample` without ever defining it).
test_sample = x_test[:16]
for epoch in range(1, epochs + 1):
    start_time = time.time()
    # Train on mini-batches; the original looped over single images and
    # used start_time/end_time without defining them.
    for i in range(0, len(x_train), batch_size):
        train_step(model, x_train[i:i + batch_size], optimizer)
    end_time = time.time()
    # Evaluate the (negative) loss on the test set to report the ELBO.
    loss = tf.keras.metrics.Mean()
    for i in range(0, len(x_test), batch_size):
        loss(compute_loss(model, x_test[i:i + batch_size]))
    elbo = -loss.result()
    # The original called display.clear_output, but `display` is not
    # imported in this notebook; plain printing works everywhere.
    print('Epoch: {}, Test set ELBO: {}, time elapse for current epoch: {}'
          .format(epoch, elbo, end_time - start_time))
    generate_and_save_images(model, epoch, test_sample)
# -
# ### Evaluating generative models
#
# In order to evaluate our upcoming generative models, we will rely on the computation of the Negative Log-Likelihood. This code for the following `evaluate_nll_bpd` is inspired by the [Sylvester flow repository](https://github.com/riannevdberg/sylvester-flows)
# +
from scipy.special import logsumexp
def evaluate_nll_bpd(data_loader, model, batch = 500, R = 5):
    """Estimate the negative log-likelihood (and bits-per-dim) by
    importance sampling.

    For every example, the model is evaluated on `R` repeated batches of
    `batch` copies, and the per-example likelihood is obtained with a
    log-sum-exp over the R * batch evaluations.

    NOTE(review): this helper uses the PyTorch API (`model.eval`,
    `.unsqueeze`, `.cpu()`); it also relies on a global `nin` and a
    `reconstruction_loss` defined elsewhere.

    Bug fix: the original assigned the expanded batch back to `x`,
    clobbering the loop data so that `x[j]` on subsequent iterations
    indexed the wrong tensor; we use a separate `x_rep` variable.
    """
    model.eval()
    # Set of likelihood tests
    likelihood_test = []
    # Go through dataset
    for batch_idx, (x, _) in enumerate(data_loader):
        for j in range(x.shape[0]):
            a = []
            for r in range(0, R):
                cur_x = x[j].unsqueeze(0)
                # Repeat the example as a batch (do NOT overwrite `x`)
                x_rep = cur_x.expand(batch, *cur_x.size()[1:]).contiguous()
                x_rep = x_rep.view(batch, -1)
                x_tilde, kl_div = model(x_rep)
                rec = reconstruction_loss(x_tilde, x_rep, average=False)
                a_tmp = (rec + kl_div)
                a.append(- a_tmp.cpu().data.numpy())
            # log-mean-exp over all importance samples
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))
    likelihood_test = np.array(likelihood_test)
    nll = - np.mean(likelihood_test)
    # Compute the bits per dim (but irrelevant for binary data)
    bpd = nll / (np.prod(nin) * np.log(2.))
    return nll, bpd
# -
# Now we can evaluate our VAE model more formally as follows.
# Plot the KL term recorded during training
# NOTE(review): `losses_kld` and `test_loader` are not defined anywhere in
# this notebook — they presumably come from a `train_vae` helper defined
# elsewhere; confirm before running this cell.
plt.figure()
plt.plot(losses_kld[:, 0].numpy());
# Evaluate log-likelihood and bits per dim
nll, _ = evaluate_nll_bpd(test_loader, model)
print('Negative Log-Likelihood : ' + str(nll))
# ### Limitations of VAEs
#
# Although VAEs are extremely powerful tools, they still have some limitations. Here we list the three most important and known limitations (all of them are still debated and topics of active research).
# 1. **Blurry reconstructions.** As can be witnessed directly in the results of the previous vanilla VAE implementation, the reconstructions appear to be blurry. The precise origin of this phenomenon is still debated, but the proposed explanation are
# 1. The use of the KL regularization
# 2. High variance regions of the latent space
# 3. The reconstruction criterion (expectation)
# 4. The use of simplistic latent distributions
# 2. **Posterior collapse.** The previous *blurry reconstructions* issue can be mitigated by using a more powerful decoder. However, relying on a decoder with a large capacity causes the phenomenon of *posterior collapse* where the latent space becomes useless. A nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/)
# 3. **Simplistic Gaussian approximation**. In the derivation of the VAE objective, recall that the KL divergence term needs to be computed analytically. Therefore, this forces us to rely on quite simplistic families. However, the Gaussian family might be too simplistic to model real world data
# In the present tutorial, we show how normalizing flows can be used to mostly solve the third limitation, while also addressing the first two problems. Indeed, we will see that normalizing flows also lead to sharper reconstructions and help prevent posterior collapse
# <a id="improve"></a>
# ## Improving the quality of VAEs
#
# As we discussed in the previous section, several known issues have been reported when using the vanilla VAE implementation. We listed some of the major issues as being
# 1. **Blurry reconstructions.**
# 2. **Posterior collapse.**
# 3. **Simplistic Gaussian approximation**.
#
# Here, we discuss some recent developments that were proposed in the VAE literature and simple adjustments that can be made to (at least partly) alleviate these issues. However, note that some more advanced proposals such as PixelVAE [5](#reference1) and VQ-VAE [6](#reference1) can lead to wider increases in quality
# ### Reducing the bluriness of reconstructions
#
# In this tutorial, we relied on extremely simple decoder functions, to show how we could easily define VAEs and normalizing flows together. However, the capacity of the decoder obviously directly influences the quality of the final reconstruction. Therefore, we could address this issue naively by using deep networks and of course convolutional layers as we are currently dealing with images.
# First you need to construct a more complex encoder and decoder
def construct_encoder_decoder_complex(nin, n_latent = 16, n_hidden = 512, n_params = 0, n_classes = 1):
    """Exercise placeholder: build a convolutional encoder/decoder pair.

    The bodies are intentionally left as `...` for the reader to fill in;
    calling this as-is returns Ellipsis objects, not networks.
    """
    # Encoder network
    encoder = ...
    # Decoder network
    decoder = ...
    return encoder, decoder
# ### Preventing posterior collapse with Wasserstein-VAE-MMD (InfoVAE)
#
# As we discussed earlier, the reason behind posterior collapse mostly relates to the KL divergence criterion (a nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/). This can be mitigated by relying on a different criterion, such as regularizing the latent distribution by using the *Maximum Mean Discrepancy* (MMD) instead of the KL divergence. This model was independently proposed as the *InfoVAE* and later also as the *Wasserstein-VAE*.
#
# Here we provide a simple implementation of the `InfoVAEMMD` class based on our previous implementations.
# +
def compute_kernel(x, y):
    """Exercise placeholder: positive-definite kernel k(x, y) for the MMD."""
    return ...
def compute_mmd(x, y):
    """Exercise placeholder: Maximum Mean Discrepancy between samples x and y."""
    return ...
class InfoVAEMMD(VAE):
    """VAE regularized with MMD instead of the KL divergence (InfoVAE / WAE).

    NOTE(review): `super().__init__` is called with only (encoder, decoder),
    but `VAE.__init__` above also requires encoding_dims and latent_dims —
    as written this would raise a TypeError; confirm the intended signature.
    """
    def __init__(self, encoder, decoder):
        super(InfoVAEMMD, self).__init__(encoder, decoder)
    def latent(self, x, z_params):
        """Exercise placeholder: return (z, mmd_regularizer)."""
        return ...
# -
# ### Complexifying the posterior with flows
#
# As this was the central topic of this tutorial, we will not go through the explanation again. However, as we will now be relying on convolutional layers instead of linear ones, we just need to make small changes in our encoding and decoding functions in order to control the shape of the different tensors. This is performed in the following class by simply adding two `view` operations in the `encode` and `decode` functions respectively.
class VAENormalizingFlow(VAE):
    """VAE whose approximate posterior is complexified by a normalizing flow.

    NOTE(review): this class is written against a PyTorch-style API
    (`nn.Linear`, `distrib.Normal`, `torch.*`) that is not imported in this
    notebook — presumably copied from a PyTorch implementation; confirm the
    framework before executing.
    """
    def __init__(self, encoder, decoder, flow, encoder_dims, latent_dims):
        super(VAENormalizingFlow, self).__init__(encoder, decoder, encoder_dims, latent_dims)
        # Extra head producing the flow parameters from the encoder features.
        self.flow_enc = nn.Linear(encoder_dims, flow.n_parameters())
        self.flow = flow
        self.apply(self.init_parameters)
    def encode(self, x):
        """Return (mu, sigma, flow_params) for inputs `x` (flattened features)."""
        x = self.encoder(x)
        x = x.view(x.shape[0], -1)
        mu = self.mu(x)
        sigma = self.sigma(x)
        flow_params = self.flow_enc(x)
        return mu, sigma, flow_params
    def decode(self, z):
        """Reshape latent codes to (N, C, 1, 1) and run the conv decoder."""
        z = z.view(z.shape[0], z.shape[1], 1, 1)
        x_tilde = self.decoder(z)
        return x_tilde
    def latent(self, x, z_params):
        """Sample z_0, push it through the flow, return (z_k, regularizer).

        The returned scalar is [ln q(z_0) - ln p(z_k) - sum log|det J|],
        averaged over the batch.
        """
        n_batch = x.size(0)
        # Split the encoded values to retrieve flow parameters
        mu, sigma, flow_params = z_params
        # Re-parametrize a Normal distribution
        q = distrib.Normal(torch.zeros(mu.shape[1]), torch.ones(sigma.shape[1]))
        # Obtain our first set of latent points
        z_0 = (sigma * q.sample((n_batch, ))) + mu
        # Update flows parameters
        self.flow.set_parameters(flow_params)
        # Complexify posterior with flows
        z_k, list_ladj = self.flow(z_0)
        # ln p(z_k)
        log_p_zk = torch.sum(-0.5 * z_k * z_k, dim=1)
        # ln q(z_0) (not averaged)
        # NOTE(review): this expression treats `sigma` as a variance
        # (division by sigma, log(sigma)); if `sigma` is a standard
        # deviation the density is off by a factor — confirm against the
        # accompanying flow training code.
        log_q_z0 = torch.sum(-0.5 * (sigma.log() + (z_0 - mu) * (z_0 - mu) * sigma.reciprocal()), dim=1)
        # ln q(z_0) - ln p(z_k)
        logs = (log_q_z0 - log_p_zk).sum()
        # Add log determinants
        ladj = torch.cat(list_ladj, dim=1)
        # ln q(z_0) - ln p(z_k) - sum[log det]
        logs -= torch.sum(ladj)
        return z_k, (logs / float(n_batch))
# ### Putting it all together
#
# Here we combine all these ideas (except for the MMD, which is not adequate as the flow definition already regularizes the latent space without the KL divergence) to perform a more advanced optimization of the dataset. Hence, we will rely on the complex encoder and decoder with gated convolutions, the multinomial loss and the normalizing flows in order to improve the overall quality of our reconstructions.
# Size of latent space
n_latent = 16
# Number of hidden units
n_hidden = 256
# Flow built from planar transforms only
block_planar = [PlanarFlow]
# Create normalizing flow (16 stacked planar layers over the latent space)
flow = NormalizingFlow(dim=n_latent, blocks=block_planar, flow_length=16, density=distrib.MultivariateNormal(torch.zeros(n_latent), torch.eye(n_latent)))
# 128 intensity classes -> multinomial reconstruction loss
num_classes = 128
# Construct encoder and decoder
encoder, decoder = construct_encoder_decoder_complex(nin, n_hidden = n_hidden, n_latent = n_latent, n_classes = num_classes)
# Create VAE with planar flows
model_flow_p = VAENormalizingFlow(encoder, decoder, flow, n_hidden, n_latent)
# Create optimizer algorithm
optimizer = optim.Adam(model_flow_p.parameters(), lr=1e-3)
# Add learning rate scheduler
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.99995)
# Launch our optimization
# NOTE(review): PlanarFlow, NormalizingFlow, distrib, torch, optim,
# train_loader and train_vae are not defined or imported in this notebook —
# confirm they come from the accompanying flow implementation before running.
losses_flow_param = train_vae(model_flow_p, optimizer, scheduler, train_loader, fixed_batch, model_name='flow_complex', epochs=200, flatten=False)
# *NB*: It seems that the multinomial version have a hard time converging. Although I only let this run for 200 epochs and only for a subsampling of 5000 examples, it might need more time, but this might also come from a mistake somewhere in my code ... If you spot something odd please let me know :)
# ### References
#
# <a id="reference1"></a>
# [1] Rezende, <NAME>, and <NAME>. "Variational inference with normalizing flows." _arXiv preprint arXiv:1505.05770_ (2015). [link](http://arxiv.org/pdf/1505.05770)
#
# [2] Kingma, <NAME>., <NAME>, and <NAME>. "Improving Variational Inference with Inverse Autoregressive Flow." _arXiv preprint arXiv:1606.04934_ (2016). [link](https://arxiv.org/abs/1606.04934)
#
# [3] <NAME>., & <NAME>. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114. (2013). [link](https://arxiv.org/pdf/1312.6114)
#
# [4] <NAME>., <NAME>., & <NAME>. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082. (2014). [link](https://arxiv.org/pdf/1401.4082)
#
# [5] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Pixelvae: A latent variable model for natural images. arXiv preprint arXiv:1611.05013. [link](https://arxiv.org/pdf/1611.05013)
#
# [6] <NAME>., & <NAME>. (2017). Neural discrete representation learning. In NIPS 2017 (pp. 6306-6315). [link](http://papers.nips.cc/paper/7210-neural-discrete-representation-learning.pdf)
#
# ### Inspirations and resources
#
# https://blog.evjang.com/2018/01/nf1.html
# https://github.com/ex4sperans/variational-inference-with-normalizing-flows
# https://akosiorek.github.io/ml/2018/04/03/norm_flows.html
# https://github.com/abdulfatir/normalizing-flows
# https://github.com/riannevdberg/sylvester-flows
| 02_variational_auto_encoders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this notebook, we visualise the data for both raw and normalised cases.
# + tags=[]
# direct to proper path
import os
import sys
module_path = os.path.abspath(os.path.join('../..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from collections import defaultdict
import math
import json
import xarray as xr
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import PairwiseKernel, DotProduct, RBF
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import r2_score, mean_squared_error, make_scorer
from sklearn.model_selection import KFold
from codes.embedding import Embedding
from codes.environment import Rewards_env
from codes.ucb import GPUCB, Random
from codes.evaluations import evaluate, plot_eva
from codes.regression import *
from codes.kernels_for_GPK import *
from ipywidgets import IntProgress
from IPython.display import display
import warnings
# %matplotlib inline
from matplotlib import rcParams
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 10
rcParams['ytick.labelsize'] = 10
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# +
# generate valids names and path for plots
def valid_name(name):
    """Return `name` with every underscore rendered as a space (plot labels)."""
    return ' '.join(name.split('_'))
def valid_path(path):
    """Return `path` with every space rendered as an underscore (file names)."""
    return '_'.join(path.split(' '))
# +
# Path_raw = '../../data/Results_Microplate_normFalse_formatSeq_logTrue.csv'
# Path_normalised = '../../data/Results_Microplate_normTrue_plateRep_formatSeq_logTrue.csv'
# Configuration selecting which normalised dataset variant to load
norm_method = 'mean'
log_flag = 'True'
to_design_round = '_Round4'
Path_raw = '../../data/pipeline_data/Results_Microplate_partialTrue_normFalse_formatSeq.csv'
# normalisation over round
Path_normalised = '../../data/pipeline_data/Results_Microplate_partialTrue_normTrue_' + norm_method+ '_roundRep_formatSeq_log' + log_flag + to_design_round + '.csv'
# normalisation over all data (RNFalse = round-normalisation disabled)
Path_RNF = '../../data/pipeline_data/Results_Microplate_partialTrue_normTrue_' + norm_method+ '_roundRep_formatSeq_log' + log_flag + to_design_round + '_RNFalse.csv'
df_raw = pd.read_csv(Path_raw)
df_normalised = pd.read_csv(Path_normalised)
df_RNF = pd.read_csv(Path_RNF)
# -
df_raw
df_normalised
# ## AVERAGE Label Histogram
# +
f, axes = plt.subplots(1, 2, figsize=(8, 4), sharex=False)
# Histograms of the AVERAGE label before and after normalisation.
# sns.distplot is deprecated (removed in seaborn >= 0.14); use histplot,
# which later cells in this notebook already rely on.
sns.histplot(df_raw['AVERAGE'], kde=False, color="b", ax=axes[0])
axes[0].set_title('Raw Average Label')
sns.histplot(df_normalised['AVERAGE'], kde=False, color="r", ax=axes[1])
axes[1].set_title('Normalised Average Label')
# -
# ## Replicates Distributions
#
# Sorted by the average labels. x-axis represents sequences index with increasing average labels.
# + tags=[]
# import matplotlib
# cmap = matplotlib.cm.get_cmap('Spectral')
# for plate_name in ['First_Plate', 'Second_Plate', 'Third_Plate']:
# df_raw_plate = df_raw[df_raw['Plate'] == plate_name]
# df_normalised_plate = df_normalised[df_normalised['Plate'] == plate_name]
# sorted_by_ave_df_raw = df_raw_plate.sort_values(by = ['AVERAGE'])
# sorted_by_ave_df_normalised = df_normalised_plate.sort_values(by = ['AVERAGE'])
# f, axes = plt.subplots(1, 2, figsize=(20, 6), sharex=False)
# for idx, i in enumerate(['Rep1', 'Rep2', 'Rep3', 'Rep4', 'Rep5', 'Rep6','Rep7', 'Rep8', 'Rep9']):
# # for idx, i in enumerate(['Rep4', 'Rep5']):
# sns.lineplot(range(len(sorted_by_ave_df_raw[i])), sorted_by_ave_df_raw[i], label = str(i), marker = '.', ax = axes[0], color = sns.color_palette("tab10")[idx])
# # print(sorted_by_ave_df_raw[sorted_by_ave_df_raw['Name'] == 'RBS_1by1_0'])
# # print(float(sorted_by_ave_df_raw[sorted_by_ave_df_raw['Name'] == 'RBS_1by1_0'][i]))
# axes[0].axhline(float(sorted_by_ave_df_raw[sorted_by_ave_df_raw['Name'] == 'RBS_1by1_0'][i]), color = sns.color_palette("tab10")[idx])
# sns.lineplot(range(len(sorted_by_ave_df_normalised[i])), sorted_by_ave_df_normalised[i], label = str(i), marker = '.', ax = axes[1], color = sns.color_palette("tab10")[idx])
# axes[1].axhline(float(df_normalised_plate[df_normalised_plate['Name'] == 'RBS_1by1_0'][i]), color = sns.color_palette("tab10")[idx])
# axes[0].set_title(valid_name(plate_name) + ' Raw Replicates')
# axes[1].set_title(valid_name(plate_name) + ' Normalised Replicates')
# plt.show()
# -
# Note the Z-score normalisation is performed per replicate, so if we sort each replicate independently and plot it, the replicates look similar; note, however, that in this case the x-axis does not represent the SAME sequence for each replicate.
# + tags=[]
# Sort each replicate independently; per-replicate Z-score normalisation
# makes the sorted curves nearly coincide in the right-hand panel.
# Typo fix: the last plate was listed as 'Fifth_Platee', which matches no
# row and silently produced empty plots — TODO confirm against the
# 'Plate' column values.
for plate_name in ['First_Plate', 'Second_Plate', 'Third_Plate', 'Fourth_Plate', 'Fifth_Plate']:
    df_raw_plate = df_raw[df_raw['Plate'] == plate_name]
    df_normalised_plate = df_normalised[df_normalised['Plate'] == plate_name]
    f, axes = plt.subplots(1, 2, figsize=(20, 6), sharex=False)
    for rep in ['Rep1', 'Rep2', 'Rep3', 'Rep4', 'Rep5', 'Rep6']:
        # Keyword x=/y= arguments: positional data arguments were removed
        # from sns.lineplot in seaborn 0.12.
        sns.lineplot(x=range(len(df_raw_plate[rep])), y=np.sort(df_raw_plate[rep]), label=str(rep), marker='.', ax=axes[0])
        sns.lineplot(x=range(len(df_normalised_plate[rep])), y=np.sort(df_normalised_plate[rep]), label=str(rep), marker='.', ax=axes[1])
    axes[0].set_title(valid_name(plate_name) + ' Raw Replicates, sorted by each replicate')
    axes[1].set_title(valid_name(plate_name) + ' Normalised Replicates, sorted by each replicate')
# -
# ## Violinplot
# + tags=[]
# Violin plots of the AVERAGE label per design group, raw (top) vs
# normalised (bottom).
f, axes = plt.subplots(2, 1, figsize=(15,10), sharex=False)
sns.violinplot(x = 'Group', y = 'AVERAGE', data = df_raw, ax = axes[0])
sns.violinplot(x = 'Group', y = 'AVERAGE', data = df_normalised, ax = axes[1])
axes[0].set_title('Violin Plot with Raw Labels')
axes[1].set_title('Violin Plot with Normalised Labels')
# +
# Swarm plots per design principle, with a fixed, explicit group ordering.
f, axes = plt.subplots(1, 2, figsize=(20, 6), sharex=False)
sns.swarmplot(x = 'Group', y = 'AVERAGE', data = df_raw, ax = axes[0], order=['Consensus', 'Reference', 'BPS-NC', 'BPS-C', 'UNI', 'PPM', 'Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])
sns.swarmplot(x = 'Group', y = 'AVERAGE', data = df_normalised, ax = axes[1], order=['Consensus', 'Reference', 'BPS-NC', 'BPS-C', 'UNI', 'PPM', 'Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])
axes[0].set(xlabel='Sequence design principles', ylabel='Translation Initiation Rate (TIR),\\ averaged over 6 technical replicates')
axes[1].set(xlabel='Sequence design principles', ylabel='Translation Initiation Rate (TIR),\\ averaged over 6 technical replicates')
axes[0].set_title('Swarmplot with Raw TIR Labels')
axes[1].set_title('Swarmplot with Normalised TIR Labels')
# NOTE(review): a later cell saves another figure under this same
# 'swarmplots.pdf' name and will overwrite this file.
plt.savefig('swarmplots.pdf', bbox_inches='tight')
# -
# Distribution of raw AVERAGE labels for the bandit-designed rounds only
df_raw_bandits = df_raw.loc[df_raw['Group'].isin(['Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])]
sns.histplot(data = df_raw_bandits, x = 'AVERAGE', hue = 'Group', kde=True)
plt.title('Raw')
df_raw_bandits.groupby('Group').mean()
# Same per-round distributions under the two normalisation schemes.
# Title typo fixed in both cells: 'normlaisation' -> 'normalisation'.
df_normalised_bandits = df_normalised.loc[df_normalised['Group'].isin(['Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])]
sns.histplot(data = df_normalised_bandits, x = 'AVERAGE', hue = 'Group', kde=True)
plt.title('Mean normalisation over each group')
df_normalised_bandits.groupby('Group').mean()
df_RNF = df_RNF.loc[df_RNF['Group'].isin(['Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])]
sns.histplot(data = df_RNF, x = 'AVERAGE', hue = 'Group', kde=True)
plt.title('Mean normalisation over all data')
df_RNF.groupby('Group').mean()
df_normalised['Rep1'].mean()
# # How about using ratio as normalisation
#
# i.e. the ratio between the TIR AVERAGE and each round's Reference RBS mean
# +
# Calculate the mean AVERAGE of the Reference sequences in each round
ref_mean_list = list(df_raw[df_raw['Group'] == 'Reference'].groupby(by = 'Round').mean()['AVERAGE'])
# NOTE(review): indexing ref_mean_list by df_raw['Round'] assumes 'Round'
# holds 0-based consecutive integers matching the groupby order — confirm.
df_raw['ratio'] = df_raw['AVERAGE']/np.asarray(ref_mean_list)[df_raw['Round']]
# -
df_raw
# +
# Raw labels (left) vs ratio-to-Reference labels (right), per design group.
f, axes = plt.subplots(1, 2, figsize=(20, 6), sharex=False)
sns.swarmplot(x = 'Group', y = 'AVERAGE', data = df_raw, ax = axes[0], order=['Consensus', 'Reference', 'BPS-NC', 'BPS-C', 'UNI', 'PPM', 'Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])
sns.swarmplot(x = 'Group', y = 'ratio', data = df_raw, ax = axes[1], order=['Consensus', 'Reference', 'BPS-NC', 'BPS-C', 'UNI', 'PPM', 'Bandit-0', 'Bandit-1', 'Bandit-2', 'Bandit-3'])
axes[0].set(xlabel='Sequence design principles', ylabel='Translation Initiation Rate (TIR),\\ averaged over 6 technical replicates')
# NOTE(review): the right panel plots the ratio, so this y-label (copied
# from the left panel) looks wrong for that axis — confirm.
axes[1].set(xlabel='Sequence design principles', ylabel='Translation Initiation Rate (TIR),\\ averaged over 6 technical replicates')
axes[0].set_title('Swarmplot with Raw TIR Labels')
axes[1].set_title('Swarmplot with TIR Labels Ratio to Reference RBS')
# NOTE(review): overwrites the 'swarmplots.pdf' written by the earlier
# raw-vs-normalised swarmplot cell.
plt.savefig('swarmplots.pdf', bbox_inches='tight')
# -
# ## AVERAGE VS. Predictions
# NOTE(review): the variable is named df_bandit2 but it selects 'Bandit-1' —
# confirm which group is intended; the name is kept because later cells use it.
df_bandit2 = df_normalised[df_normalised['Group'] == 'Bandit-1']
df_bandit2['Pred Mean']
# plt.scatter(df_bandit2['Rep1'], df_bandit2['Pred Mean'], label = 'rep1')
# plt.scatter(df_bandit2['Rep2'], df_bandit2['Pred Mean'], label = 'rep2')
# plt.scatter(df_bandit2['Rep3'], df_bandit2['Pred Mean'], label = 'rep3')
# plt.scatter(df_bandit2['Rep4'], df_bandit2['Pred Mean'], label = 'rep4')
# plt.scatter(df_bandit2['Rep5'], df_bandit2['Pred Mean'], label = 'rep5')
plt.scatter(df_bandit2['AVERAGE'], df_bandit2['Pred Mean'], label = 'AVERAGE')
# bug fix: axis-label typo 'Replciates' -> 'Replicates'
plt.xlabel('Replicates')
plt.ylabel('Pred Mean')
plt.title('Round 1 Predictions vs. Labels')
plt.legend()
# Line up measured labels and predictions, ordered by the measured AVERAGE,
# with a ~95% band (mean ± 2 std) around the prediction.
sorted_by_ave_df_bandit2 = df_bandit2.sort_values(by=['AVERAGE'])
positions = range(len(sorted_by_ave_df_bandit2['AVERAGE']))
pred_mean = sorted_by_ave_df_bandit2['Pred Mean']
pred_std = sorted_by_ave_df_bandit2['Pred Std']
plt.plot(positions, sorted_by_ave_df_bandit2['AVERAGE'], label='AVERAGE', marker='.')
plt.plot(positions, pred_mean, label='Pred Mean', marker='.')
plt.fill_between(positions, pred_mean + 2 * pred_std, pred_mean - 2 * pred_std, alpha=0.5)
plt.legend()
plt.title('bandit2: Sorted by AVERAGE')
plt.xlabel('sequences')
plt.ylabel('label')
plt.show()
# Same comparison, but ordered by the predicted mean instead of the label.
sorted_by_ave_df_bandit2 = df_bandit2.sort_values(by=['Pred Mean'])
positions = range(len(sorted_by_ave_df_bandit2['AVERAGE']))
pred_mean = sorted_by_ave_df_bandit2['Pred Mean']
pred_std = sorted_by_ave_df_bandit2['Pred Std']
plt.plot(positions, sorted_by_ave_df_bandit2['AVERAGE'], label='AVERAGE', marker='.')
plt.plot(positions, pred_mean, label='Pred Mean', marker='.')
plt.fill_between(positions, pred_mean + 2 * pred_std, pred_mean - 2 * pred_std, alpha=0.5)
plt.legend()
plt.title('bandit2: Sorted by Pred Mean')
plt.xlabel('sequences')
plt.ylabel('label')
plt.show()
# ## Poor Prediction?
#
# From the above three prediction vs true label plots, the prediction looks pretty bad, in terms of
# - the predicted label looks random in the sorted AVERAGE plot.
# - the 95% confidence interval fails to cover (a lot of) true labels.
#
# Here are some reasons this happens:
# - if we look at the training set (round 1), although the highest (averaged) labels are around 2.5 (except the consensus sequence), our design focuses on the core part, so we should exclude the non-core group; then the best one is only around 1.3. Our highest prediction is around 1.2 (remember we still have a little bit of regression-to-the-mean). So we cannot expect to predict some very high label (around 3, as the true labels show) given the training labels we got.
# - Round 1 and 0 are from different distribution/design area. If it is true that Round 1 from the area we have no information, then the question to ask is, whether do we want such design? Evidence:
# - From [result_analysis/Prediction_compare_with_baseline.ipynb], the prediction with training from both Round 0 and 1 looks good, while the prediction from Round 0 and test on Round 1 looks bad.
# - From [data/Clustering/] plots, the bandit2 group are not in the area where we have known information.
| notebook/result_analysis/Data_visualisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from Edge import *
from math import sqrt
from collections import defaultdict
class Digraph:
    """A minimal directed graph over hashable node labels.

    ``vertices`` holds plain labels in insertion order, ``neighbour`` maps a
    label to the set of its successors, and ``isolated`` holds labels that
    have no incident edge yet (used by ``is_connected`` and the dot export).
    """

    def __init__(self):
        self.vertices = []  # node labels, insertion order
        # bug fix: the original read len(self.edges) before self.edges existed
        self.edges = []     # Edge objects (see the Edge module)
        # Labels with no incident edge yet.  NOTE(review): this attribute
        # replaces the original `isolated` method, whose name collided with it
        # and which crashed unconditionally; see `dijkstra` below.
        self.isolated = []
        self.size = len(self.vertices)
        self.edge_count = len(self.edges)
        self.neighbour = defaultdict(set)

    def get_vertices(self):
        """Return the list of node labels."""
        return self.vertices

    def get_edges(self):
        """Return the edge list; raise if it was nulled out."""
        if self.edges is None:
            raise ValueError("edge can't be zero or null")
        return self.edges

    def add_vertices(self, vertices):
        """Replace the node set with *vertices*.

        Bug fix: the original discarded the argument and cleared the list.
        """
        if vertices:
            self.vertices = list(vertices)
            self.isolated = list(vertices)  # no edges known for them yet
            self.size = len(self.vertices)
        else:
            raise IOError("Node list is empty")

    def add_edges(self, edges):
        """Replace the edge list with *edges* (same bug fix as above)."""
        if edges:
            self.edges = list(edges)
            self.edge_count = len(self.edges)
        else:
            raise IOError("Edge list is empty")

    def add_node(self, label, num=0):
        """Add *label* if absent and return it; existing labels are returned
        unchanged (the original mixed label and node-object lookups)."""
        if label in self.vertices:
            return label
        self.vertices.append(label)
        self.isolated.append(label)  # isolated until an edge touches it
        self.size += 1
        return label

    def add_edge(self, from_node, to_node, num=0):
        """Create, store and return one Edge from *from_node* to *to_node*.

        Bug fixes vs. the original: ``and`` instead of the mis-parsing ``&``,
        a single Edge instead of two identical ones, the edge is actually
        appended, adjacency is recorded, and each endpoint leaves ``isolated``
        independently instead of only when both were isolated.
        """
        if from_node in self.vertices and to_node in self.vertices:
            edge = Edge(from_node, to_node, num=num)
            self.edges.append(edge)
            self.edge_count += 1
            self.neighbour[from_node].add(to_node)
            if from_node in self.isolated:
                self.isolated.remove(from_node)
            if to_node in self.isolated:
                self.isolated.remove(to_node)
            return edge
        raise ValueError("both endpoints must be existing nodes")

    @staticmethod
    def dist(p1, p2):
        """Euclidean distance between two 2-D points (staticmethod so both
        ``Digraph.dist(...)`` and ``self.dist(...)`` behave correctly)."""
        return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

    def dijkstra(self, source, maxD=1e309):
        """Unit-weight Dijkstra from *source*.

        Returns ``(distance, preceding_node)``: shortest hop counts and the
        predecessor map.  Replaces the original ``isolated`` method, which
        referenced several undefined names and shadowed the ``isolated``
        attribute.  Edge length is taken as 1 because nodes are plain labels
        with no coordinates — confirm if weighted edges are intended.
        """
        unvisited = set(self.vertices)
        preceding_node = {}
        distance = defaultdict(lambda: 1e309)
        distance[source] = 0
        while unvisited:
            reachable = unvisited.intersection(distance.keys())
            if not reachable:
                break
            current = min(reachable, key=distance.get)
            unvisited.remove(current)
            for nb in self.neighbour[current]:
                d = distance[current] + 1
                if distance[nb] > d and maxD >= d:
                    distance[nb] = d
                    preceding_node[nb] = current
        return distance, preceding_node

    def Adj_matrix(self):
        """Return the dim x dim 0/1 adjacency matrix in ``vertices`` order.

        Bug fix: ``[[0]*dim]*dim`` aliased every row to the same list, and the
        matrix was neither filled nor returned.
        """
        index = {label: i for i, label in enumerate(self.vertices)}
        dim = len(self.vertices)
        adj = [[0] * dim for _ in range(dim)]  # independent rows
        for src, targets in self.neighbour.items():
            for dst in targets:
                adj[index[src]][index[dst]] = 1
        return adj

    def to_dot_format(self, dotfile):
        """Write the graph to *dotfile* in (approximate) Graphviz dot syntax.

        Bug fix: the original wrote an unquoted ``Digraph`` token (a syntax
        error); the file handle is now managed with a context manager.
        """
        with open(dotfile, "w") as dot_file:
            dot_file.write("Digraph {\n")
            for edge in self.edges:
                dot_file.write(edge.get_from_node_list() + "->" + edge.get_to_node_list() + "\n")
            for label in self.isolated:
                dot_file.write(label + "\n")
            dot_file.write("}")

    def is_connected(self):
        """True when every node has at least one incident edge."""
        return (len(self.isolated) == 0)
# -
| digraph/source/DiGraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## MODIS rolling 16-day max NDVI composite
#
# `ImageCollection.map_window` lets you slide a "window" along an ImageCollection, image by image. Here, we use it to composite the previous 16 days of MODIS imagery, an effective way of correcting for clouds when looking at NDVI.
import descarteslabs.workflows as wf
# Center the interactive map on northern New Mexico.
wf.map.center = [36.50963615733049, -105.89996337890626]
wf.map.zoom = 8
# + jupyter={"outputs_hidden": true}
wf.map
# -
# Seven months of MODIS surface-reflectance imagery.
modis = wf.ImageCollection.from_id('modis:09:v2', start_datetime="2019-01-01", end_datetime="2019-08-01")
# NDVI = (nir - red) / (nir + red), computed per image.
ndvi = wf.normalized_difference(modis.pick_bands('nir'), modis.pick_bands('red'))
# +
ndvi_16 = (
    ndvi
    # Integer-dividing the timestamp by one day buckets images by calendar day.
    .groupby(lambda img: img.properties['date'] // wf.Timedelta(days=1))
    .max(axis="images")
    # ^ first, flatten multiple images from the same day (at scene boundaries) into 1,
    # so we're always getting exactly 16 days
    .map_window(
        # `back` is the trailing 16-day window; composite it with a max.
        lambda back, img, fwd: back.max(axis="images"),
        back=16
    )
    # ^ max-composite the previous 16 days
)
# ndvi_16 is an ImageCollection; each Image is one of our 16-day composites
# -
ndvi_16[0].visualize('jan NDVI', scales=[[0, 1]], colormap="viridis")
# jan 1 - 16 composite
ndvi_16[-1].visualize('july NDVI', scales=[[0, 1]], colormap="viridis")
# july 16 - aug 1 composite
| examples/16-day-max-ndvi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Replication of the RKI model
#
# The RKI published a [model](https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Modellierung_Deutschland.pdf) which features predictions over infections for three scenarios.
#
# - A base scenario with a basic reproduction rate of 2.
# - A scenario in which the reproduction rate fluctuates with the seasons.
# - A scenario in which a third of the population is immune to the virus.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# ## Base scenario
#
# The base scenario assumes that we start with 1,000 infectious people among a population of 83,000,000. Among others, they parametrize their model with a $R_0 = 2$. $R_0$ is defined as
#
# $$
# R_0 = \kappa * D * q
# $$
#
# where $\kappa$ is the number of contacts an infected has, $D$ is the average number of days people are infectious, and $q$ is the probability of infecting another person on contact.
#
# Two of the parameters in this equation are exogenously determined. $D$ is equal to ten days, and $R_0$ is equal to 2 without pre-existing immunity in the population. If a third of the population is immune, $R_0$ is set to 3 to keep the effective reproduction rate the same in the beginning.
#
# To summarize, their whole model depends on this relationship. We can set either the infection rate and get the correspondent number of contacts or vice versa.
#
# Considering sid's current structure and anticipating seasonal fluctuations in $R_0$, we set the infection rate, as we have more ability to vary the number of contacts.
#
# The following function computes the number of contacts dependent on all other parameters.
def infection_rate_to_number_of_contacts(
    infection_rate=0.05,
    r_0=2,
    n_days_infectious=10,
):
    """Invert R0 = contacts * D * q for the daily number of contacts.

    Given the basic reproduction rate *r_0*, the infectious duration
    *n_days_infectious* (D) and the per-contact infection probability
    *infection_rate* (q), return the implied contacts per day.
    """
    infections_per_day = r_0 / n_days_infectious
    return infections_per_day / infection_rate
# If we set the infection rate to 5%, each individual has 4 contacts which we can set in sid's contact model.
infection_rate_to_number_of_contacts()
# ## Seasonality in the RKI model
#
# Seasonality in the [RKI model](https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Modellierung_Deutschland.pdf) means that $R_0$ fluctuates from its peak at the end of the year to its bottom at the start of July. The RKI differentiates between
#
# - weak seasonality with a peak at 2 and a low at 1.4
# - and strong seasonality with a peak at 2 and a low at 0.67.
#
# In sid, we can implement seasonality by keeping all parameters the same and varying the number of contacts in the contact model depending on the day of the year.
#
# First, we need a function which receives the number of a day in the year and returns the correct $R_0$. The sine function is completely determined by its peak and bottom value.
def dayofyear_to_r_0(dayofyear, low, high):
    """Seasonal R0 as a sine over the year.

    The curve peaks at *high* around day 0/365 (turn of the year) and bottoms
    out at *low* mid-year, matching the RKI seasonality scenarios.
    """
    angle = dayofyear / 365 * 2 * np.pi
    midpoint = (high + low) / 2
    amplitude = (high - low) / 2
    phase_shift = 0.5 * np.pi  # shift so the maximum sits at day 0
    return midpoint + amplitude * np.sin(angle + phase_shift)
# One simulated year, 2020-03-08 through 2021-03-07.
s = pd.date_range(start="2020-03-08", end="2021-03-07")
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(s, dayofyear_to_r_0(s.dayofyear, low=1.4, high=2), label="Weak Seasonality")
ax.plot(s, dayofyear_to_r_0(s.dayofyear, low=0.67, high=2), label="Strong Seasonality")
ax.set_ylabel("$R_0$")
ax.legend();
# We already have the function which maps from $R_0$ to the number of contacts while keeping the infection rate at 5% and the duration of being infectious at 10 days.
# +
# Translate each seasonal R0 curve into a daily contact count.
n_contacts_weak_seasonality = infection_rate_to_number_of_contacts(
    r_0=dayofyear_to_r_0(s.dayofyear, low=1.4, high=2)
)
n_contacts_strong_seasonality = infection_rate_to_number_of_contacts(
    r_0=dayofyear_to_r_0(s.dayofyear, low=0.67, high=2)
)
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(s, n_contacts_weak_seasonality, label="Weak Seasonality")
ax.plot(s, n_contacts_strong_seasonality, label="Strong Seasonality")
ax.set_ylabel("Number of Contacts")
ax.legend();
| docs/source/rki-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# +
# US census population (millions), every 10 years from 1790 to 1990.
año = np.arange(1790, 2000, 10)
t = np.arange(0, 210, 10)
real = np.array([3.9, 5.3, 7.2, 9.6, 12, 17, 23, 31, 38, 50, 62, 75, 91, 105, 122, 131, 151, 179, 203, 226, 249])
# Exponential model P(t) = P0 * exp(k*t), with P0 = 3.9 and fitted rate k = 0.03067.
Pt = 3.9 * np.exp(0.03067 * t)
plt.plot(t, real)
plt.plot(t, Pt)
_ = plt.xticks(t, año, rotation='vertical')
plt.margins(0.05)
plt.xlabel("t")
plt.ylabel("P(t)")
# The plot compares the measured (census) data with the fitted model.
# +
from scipy.integrate import odeint

# Simple exponential growth demo: dP/dt = 2*P, P(0) = 10.
def dP_dt(P, t):
    dP = 2 * P
    return dP

t = np.arange(0, 3, 0.1)
x = odeint(dP_dt, 10, t)
plt.plot(t, x)
# +
from scipy.integrate import odeint

# Exponential growth with the fitted census rate: dP/dt = 0.03067*P, P(0) = 10.
def dP_dt(P, t):
    return 0.03067 * P

t = np.arange(0, 3, 0.1)
x = odeint(dP_dt, 10, t)
plt.plot(t, x)
# bug fix: the original also called plt.plot(t, x, real), which crashes —
# matplotlib interprets the third positional argument as a format string, and
# `real` (21 census points) does not match this t grid (30 points) anyway.
# Plot `real` against its own time axis (see the first cell) to compare.
# +
from scipy.integrate import odeint

# Carrying capacity for the logistic model.  bug fix: `N` was undefined in the
# original cell, so odeint raised a NameError on the first call to dP_dt.
# 250 ≈ the largest observed population value above — confirm the intended value.
N = 250

# Logistic growth: dP/dt = k * (1 - P/N) * P with k = 0.03067, P(0) = 10.
def dP_dt(P, t):
    return 0.03067 * (1 - P / N) * P

t = np.arange(0, 3, 0.1)
x = odeint(dP_dt, 10, t)
plt.plot(t, x)
# -
# Hay dos métodos numéricos: odeint o Euler
# Hacer ejercicio del modelo depredador presa
# --
#
| Apuntes de clases/a08_Clase 18 de febrero_Modelado.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pickle
import dill
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.style.use('seaborn-white')
# clans.pkl stores a pickled bytes payload that dill then deserialises into
# the clan objects.  NOTE(review): each clan appears to expose a `history`
# dict ('x', 'y', 'radius', 'm', 'f', 's') and a `color` — confirm the class.
with open('clans.pkl', 'rb') as df:
    clans = dill.loads(pickle.load(df))
# Snapshot indices of interest.  NOTE: unused below — the cells hardcode
# their own time values.
times = [0, 1000, -1]
# +
def _plot_clan_snapshot(time):
    """Draw every clan as a circle at its (x, y) position with its radius at
    history index *time*, on a square, axis-less 10x10 figure."""
    fig, ax = plt.subplots()
    fig.set_size_inches(10, 10)
    ax.get_yaxis().set_visible(False)
    ax.get_xaxis().set_visible(False)
    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlim(-75, 125)
    plt.ylim(-50, 150)
    for c in clans:
        circ = plt.Circle((c.history['x'][time], c.history['y'][time]), c.history['radius'][time], color=c.color)
        ax.add_patch(circ)

# Initial state (the original notebook repeated this cell body four times;
# the shared logic now lives in the helper above).
_plot_clan_snapshot(0)
# +
_plot_clan_snapshot(500)
# +
_plot_clan_snapshot(1000)
# +
# Final state.
_plot_clan_snapshot(-1)
# +
# Total population (m + f) per clan over time.
# NOTE(review): 'm'/'f' presumably count males and females — confirm.
plt.gcf().set_size_inches(7, 5)
sns.despine()
plt.xlabel('Time')
plt.ylabel('Population')
x = np.linspace(0, 700, len(clans[0].history['m']))
for n, c in enumerate(clans):
    plt.plot(x, np.add(c.history['m'], c.history['f']), label=f'Clan {n+1}', c=c.color)
plt.legend()
# +
# Gender ratio ('s' in the history) per clan over time.
plt.gcf().set_size_inches(7, 5)
sns.despine()
plt.xlabel('Time')
plt.ylabel('Gender Ratio')
x = np.linspace(0, 700, len(clans[0].history['m']))
for n, c in enumerate(clans):
    plt.plot(x, c.history['s'], label=f'Clan {n+1}', c=c.color)
plt.legend()
| paper_figs_mult.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/archananair17/Python-Class-1/blob/main/Salary_Tax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="OOFQT3DcSM3R" outputId="b1499ab2-8a94-42be-a25e-7d2ac6e1305e"
x = int(input("Enter a Basic Salary:"))
# Taxable amount after the 250,000 exemption.
sal = x - 250000
# NOTE(review): the slab thresholds compare against the already-reduced `sal`,
# not the gross salary `x` — confirm which amount the slabs should apply to.
if sal > 250000 and sal < 500000:
    print("tax ", sal * 5 / 100)
elif sal > 500000 and sal < 750000:
    # bug fix: this slab (and the next) taxed the gross `x` while the other
    # slabs taxed `sal`; use the taxable amount consistently, matching the
    # HRA / special-allowance / LTA cells below.
    print("tax ", sal * 10 / 100)
elif sal > 750000 and sal < 1000000:
    print("tax ", sal * 20 / 100)
elif sal > 1000000:
    print("tax ", sal * 30 / 100)
# + colab={"base_uri": "https://localhost:8080/"} id="daE7digQTzRZ" outputId="626af420-3b7d-4d1d-c48a-67b0f72db718"
# House-rent allowance: same slab logic as above, on the amount after the
# 250,000 exemption.  Note there is no slab above 1,000,000 here (unlike the
# basic-salary cell) — inputs in that range print nothing.
y = int(input("Enter the house rent"))
hra = y - 250000
if hra > 250000 and hra < 500000 :
    t = hra * 5 / 100
    print("tax ", t)
elif hra > 500000 and hra < 750000 :
    print("tax ", hra * 10 / 100)
elif hra > 750000 and hra < 1000000 :
    print("tax ", hra * 20 / 100)
# + colab={"base_uri": "https://localhost:8080/"} id="peIqBZPuY0EM" outputId="497db332-0d11-4c12-c3be-79891e1d59f9"
# Special allowance: identical slab structure.
z = int(input("Enter special allowance"))
spa = z - 250000
if spa > 250000 and spa < 500000 :
    t = spa * 5 / 100
    print("tax ", t)
elif spa > 500000 and spa < 750000 :
    print("tax ", spa * 10 / 100)
elif spa > 750000 and spa < 1000000 :
    print("tax ", spa * 20 / 100)
# + colab={"base_uri": "https://localhost:8080/"} id="3neBCkGZZKx6" outputId="4e47b29b-045c-452f-8683-452f4fa05d98"
# Leave travel allowance: identical slab structure.
n = int(input("Enter LTA"))
lta = n - 250000
if lta > 250000 and lta < 500000 :
    print("You are taxable")
    t = lta * 5 / 100
    print("tax ", t)
elif lta > 500000 and lta < 750000 :
    print("tax ", lta * 10 / 100)
elif lta > 750000 and lta < 1000000 :
    print("tax ", lta * 20 / 100)
# + id="1q5hLg48ZsH9"
| Salary_Tax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Master Data Science for Business - Data Science Consulting - Session 2
#
# # Notebook 3:
#
# # Web Scraping with Scrapy: Getting reviews from TripAdvisor
#
# Now that you know how to get data from one review, we want to automate the spider so it crawls through all pages of reviews, ending with a full spider able to scrape every review of the selected parc. You will modify the parse function here, since this is where you tell the spider to get the links and to follow them. <br>
# ## 1. Importing packages
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
import sys
from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor
import json
import logging
import pandas as pd
# ## 2. Some class and functions
# +
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
class HotelreviewsItem(scrapy.Item):
    """Container for one scraped TripAdvisor review.

    Each attribute is a scrapy Field filled by the spider's parse method;
    field meanings below are inferred from the names — confirm against the
    spider that populates them.
    """
    # define the fields for your item here like:
    rating = scrapy.Field()           # review score (bubble rating)
    review = scrapy.Field()           # full review text
    title = scrapy.Field()            # review headline
    trip_date = scrapy.Field()        # date of the stay
    trip_type = scrapy.Field()        # e.g. family / couple / business
    published_date = scrapy.Field()   # when the review was posted
    hotel_type = scrapy.Field()
    hotel_name = scrapy.Field()
    price_range = scrapy.Field()
    reviewer_id = scrapy.Field()
    review_language = scrapy.Field()
# -
def user_info_splitter(raw_user_info):
    """Split a raw whitespace-separated user-info string into a dict.

    Each token is passed through get_convertible_elements_as_dic (defined
    elsewhere); tokens it recognises come back as a (key, value) pair and
    are collected, unrecognised tokens are skipped.
    """
    user_info = {}
    for token in raw_user_info.split():
        converted = get_convertible_elements_as_dic(token)
        if converted:
            key, value = converted[0], converted[1]
            user_info[key] = value
    return user_info
# ## 2. Creating the JSon pipeline
#JSon pipeline, you can rename the "trust.jl" to the name of your choice
class JsonWriterPipeline(object):
    """Scrapy pipeline writing each scraped item as one JSON object per line
    (JSON Lines) to tripadvisor2.jl."""

    def open_spider(self, spider):
        # Called once at spider start; the handle stays open for all items.
        self.file = open('tripadvisor2.jl', 'w')

    def close_spider(self, spider):
        # Called once when the spider finishes.
        self.file.close()

    def process_item(self, item, spider):
        serialised = json.dumps(dict(item))
        self.file.write(serialised + "\n")
        return item  # pass the item on to any further pipelines
# ## 3. Spider
#
#
# When you go on a TripAdvisor page, you will have 5 reviews per page. Reviews are not fully displayed on the page, so you have to open them (i.e follow the link of the review to tell Scrapy to scrape this page) to scrape them. <br>
# This means we will use 2 parsing functions: <br>
# -The first one will go on the page of the parc, and get the links of the reviews, as well as the links of all the pages. <br>
# -The second one will go on each page of each reviews and scrape them using the parse_item() method. For this one, you will use the function you wrote within Notebook 2. <br>
# <b>To Do</b>: Complete the following code, to scrape all the reviews of one parc.
class MySpider(CrawlSpider):
    """Tutorial spider template for one Center Parcs TripAdvisor page.

    NOTE: this is an exercise skeleton — the lines marked "#YOU WANT ..."
    are intentionally incomplete (the cell does not run until students fill
    them in), and parse_review_page must be pasted in from Notebook 2.
    """
    name = 'BasicSpider'
    domain_url = "https://www.tripadvisor.com"
    # allowed_domains = ["https://www.tripadvisor.com"]
    start_urls = [
        "https://www.tripadvisor.fr/Hotel_Review-g5555792-d7107948-Reviews-Center_Parcs_Le_Bois_aux_Daims-Les_Trois_Moutiers_Vienne_Nouvelle_Aquitaine.html"]
    #Custom settings to modify settings usually found in the settings.py file
    custom_settings = {
        'LOG_LEVEL': logging.WARNING,
        'ITEM_PIPELINES': {'__main__.JsonWriterPipeline': 1}, # Used for pipeline 1
        'FEED_FORMAT':'json', # Used for pipeline 2
        'FEED_URI': 'tripadvisor2.json' # Used for pipeline 2
    }
    def parse(self, response):
        # Pagination offsets of the review pages for this hotel.
        all_review_pages = response.xpath(
            "//a[contains(@class,'pageNum') and contains(@class,'last')]/@data-offset").extract()
        # Exercise stubs: extract the "next page" link and its page number.
        next_reviews_page_url = #YOU WANT HERE THE LINK OF THE NEXT BUTTON
        next_page_number = eval(#YOU WANT HERE THE NUMBER OF THE NEXT PAGE)
        #Since scraping can take some time, we limit here to 10 the number of pages of reviews we want to scrape
        if next_page_number < 10:
            # Recurse: schedule the next review page through this same parse.
            yield scrapy.Request(next_reviews_page_url, callback=self.parse)
        #The following part gets reviews' links on a page
        review_urls = []
        for partial_review_url in response.xpath("XPATH TO GET REVIEW'S URL").extract():
            # urljoin turns the relative review link into an absolute URL.
            review_url = response.urljoin(partial_review_url)
            if review_url not in review_urls:
                review_urls.append(review_url)
                yield scrapy.Request(review_url, callback=self.parse_review_page)
    def parse_review_page(self, response):
        ###COPY AND PASTE HERE YOUR PARSE() METHOD FROM NOTEBOOK 2
# ## 4. Crawling
# +
# Run the spider in-process (works inside a notebook; note a CrawlerProcess
# can only be started once per Python process).
process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(MySpider)
process.start()
# -
# ## 5. Importing and reading data scraped
#
# If you've succeeded, you should see here a dataframe with around 40 entries corresponding to the reviews of the Center Parc you scraped. Congratulations !
# Load the feed-exported JSON produced by pipeline 2 above.
dfjson = pd.read_json('tripadvisor2.json')
#Previewing DF
dfjson.head()
dfjson.info()
| Day2/.ipynb_checkpoints/St_Notebook 3 - TripAdvisor 2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Hello World for nbextension
# ### Can we create a simple JavaScript extension and have it automatically loaded by the Jupyter notebook?
#
#
# %mkdir nbextensions
# The name of the folder `nbextensions` is important. All custom extensions will be looked up here.
#
#
# Now lets create a simple hello world javascript inside this folder
#
# +
# %%writefile nbextensions/helloworld.js
define(function(){
function load_ipython_extension(){
console.info('HELLO WORLD!!!');
}
return {
load_ipython_extension: load_ipython_extension
};
});
# -
# # Installing the extension
# %%system
jupyter nbextension install nbextensions/helloworld.js --user
# # Enabling the extension
# %%system
jupyter nbextension enable helloworld.js --user
# This enables the nbextension is the user config file
# %system cat ~/.jupyter/nbconfig/notebook.json
# ### Open Dev Console in the browser and verify you see the text
# `HELLO WORLD!!!`
| Hello World nbextensions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
import operator
import warnings
import pandas as pd
import seaborn as sns
from string import digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from wordcloud import WordCloud
warnings.filterwarnings('ignore')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,f1_score,confusion_matrix,precision_score,recall_score
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
#read_dataset (latin-1: the SMS spam corpus is not valid UTF-8)
dataframe = pd.read_csv("data/spam.csv", encoding='latin-1')
len(dataframe)
dataframe.head()
# +
#v1 -> label : ham=0 spam=1
#v2 -> Message : text message
dataframe = dataframe[["v2", "v1"]]
dataframe.columns = ["Message", "Label"]
#binary encode
dataframe["Label"] = dataframe["Label"].apply(lambda x: 1 if x == "spam" else 0)
#dataframe
dataframe.head()
# -
# Character length of each message, used as an EDA feature below.
dataframe["Message Length"] = dataframe["Message"].apply(lambda x: len(x))
# Average length of spam message & ham message:
# NOTE(review): despite the "Average" wording, the aggregate used is the median.
average_length = dataframe[["Message Length", "Label"]].groupby(["Label"], as_index=False).median()
average_length["Label"] = average_length["Label"].map({0: "Non-Spam", 1: "Spam"})
average_length.columns = ["Label", "Median Message Length"]
average_length["Median Message Length"] = average_length["Median Message Length"].round(2)
average_length
# +
# Flag messages containing at least one digit, then report per class what
# share of messages contain a digit.
dataframe["Contain Digit"] = dataframe["Message"].apply(lambda x: any(i.isdigit() for i in x))
contain_digit = dataframe[["Contain Digit", "Label"]].groupby(["Label"], as_index=False).sum()
contain_digit["Label"] = contain_digit["Label"].map({0: "Non-Spam", 1: "Spam"})
# generalization / bug fix: the class totals were hard-coded (747 spam /
# 4825 ham); derive them from the data so the cell stays correct when the
# dataset changes.
label_counts = dataframe["Label"].value_counts()
total = {"Spam": int(label_counts.get(1, 0)), "Non-Spam": int(label_counts.get(0, 0))}
def composition(x):
    # Share (formatted as 'NN.NN%') of this class's messages containing a digit.
    return str(round((x["Contain Digit"] / total[x["Label"]]) * 100, 2)) + "%"
contain_digit["Contain Digit"] = contain_digit[["Label", "Contain Digit"]].apply(composition, axis=1)
contain_digit.columns = ["Label", "Contains Digit Composition (%)"]
contain_digit
average_length.set_index("Label").join(contain_digit.set_index("Label"), on="Label", how="inner").reset_index()
# -
average_length
def process_message_text(dataframe):
    """Clean `Message` into a `Clean_Message_Text` column and return it with `Label`.

    Pipeline (order matters): lowercase -> replace punctuation with spaces ->
    strip digits -> tokenize -> drop English stopwords -> drop single-letter
    tokens -> rejoin into one space-separated string.
    Note: mutates *dataframe* in place by adding the Clean_Message_Text column.
    Requires the NLTK 'punkt' and 'stopwords' data to be downloaded.
    """
    #lower text message
    dataframe["Clean_Message_Text"] = dataframe["Message"].apply(lambda x: x.lower())
    # Punctuation removal (replaced with spaces so words stay separated)
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: re.sub(r'[^\w\s]', ' ', x))
    #remove digits
    remove_digits = str.maketrans('', '', digits)
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: x.translate(remove_digits))
    #tokenize words
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: word_tokenize(x))
    #remove stop words
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: [y for y in x if y not in stopwords.words('english')])
    #remove single letter tokens
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: [y for y in x if len(y) != 1])
    #restructure into sentence
    dataframe["Clean_Message_Text"] = dataframe["Clean_Message_Text"].apply(lambda x: (' ').join(x))
    return dataframe[["Clean_Message_Text", "Label"]]
# +
clean_dataframe = process_message_text(dataframe)
#spam data
spam_dataframe = clean_dataframe[clean_dataframe["Label"] == 1] # 747 data points
#non spam data
nonspam_dataframe = clean_dataframe[clean_dataframe["Label"] == 0] # 4825 data points
# -
# Word-frequency table for spam: split every cleaned message into words,
# stack into one long Series, and count occurrences.
spam_words_distribution = pd.DataFrame(spam_dataframe["Clean_Message_Text"].str.split(expand=True).stack().value_counts()).reset_index()
spam_words_distribution.columns = ["Words", "Frequency"]
sns.barplot(x="Words", y="Frequency", data=spam_words_distribution[:11], palette="Greens")
plt.title("High Frequency Spam Words")
plt.show()
# Same word-frequency view for the non-spam messages.
nonspam_words_distribution = pd.DataFrame(nonspam_dataframe["Clean_Message_Text"].str.split(expand=True).stack().value_counts()).reset_index()
nonspam_words_distribution.columns = ["Words", "Frequency"]
sns.barplot(x="Words", y="Frequency", data=nonspam_words_distribution[:11], palette="Greens")
plt.title("High Frequency Non-Spam Words")
plt.show()
# Word cloud over all spam text.
spam_words = ' '.join(list(spam_dataframe['Clean_Message_Text']))
spam_word_cloud = WordCloud(width = 512, height = 512, background_color='white').generate(spam_words)
plt.figure(figsize = (10, 8), facecolor = 'white')
plt.imshow(spam_word_cloud)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.title("Spam Messages WordCloud", fontsize=20)
plt.show()
#non spam word cloud (note: the variable names are reused from the spam cell)
spam_words = ' '.join(list(nonspam_dataframe['Clean_Message_Text']))
spam_word_cloud = WordCloud(width = 512, height = 512, background_color='white').generate(spam_words)
plt.figure(figsize = (10, 8), facecolor = 'white')
plt.imshow(spam_word_cloud)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.title("Non-Spam Messages WordCloud", fontsize=20)
plt.show()
# +
# NOTE(review): spam_dataframe / nonspam_dataframe are reassigned here from the
# ORIGINAL dataframe (which has the Message Length column), shadowing the
# cleaned versions above — the boxplots below rely on this.
#spam data
spam_dataframe = dataframe[dataframe["Label"] == 1] # 747 data points
#non spam data
nonspam_dataframe = dataframe[dataframe["Label"] == 0] # 4825 data points
# -
plt.figure(figsize=(5, 5))
sns.boxplot(x="Label", y="Message Length", hue="Label", data=spam_dataframe) #box plot
plt.title("Message Length Distribution")
plt.xlabel("Spam")
plt.legend().set_visible(False)
plt.ylabel("Message Length")
plt.plot()
plt.figure(figsize=(5, 5))
sns.boxplot(x="Label", y="Message Length", hue="Label", data=nonspam_dataframe) #box plot
plt.title("Message Length Distribution")
plt.xlabel("Non-Spam")
plt.legend().set_visible(False)
plt.ylabel("Message Length")
plt.plot()
def models(X_train, X_test, y_train, y_test, model):
    """Fit the requested classifier and report train/test F1 scores.

    model: "LR" (logistic regression) or "NB" (Gaussian naive Bayes).
    Returns [train_f1, test_f1], each rounded to 3 decimals.
    Raises ValueError for any other model (the original silently returned None).
    """
    if model == "LR":
        print("Logistic Regression Model")
        ml_model = LogisticRegression(random_state=42, solver='liblinear', n_jobs=-1)
    elif model == "NB":
        print("Naive Bayes")
        ml_model = GaussianNB()
    else:
        raise ValueError(f"unknown model {model!r}; expected 'LR' or 'NB'")
    ml_model.fit(X_train, y_train)
    # Compute each score once (the original re-ran predict for every use).
    train_f1 = round(f1_score(ml_model.predict(X_train), y_train), 3)
    test_f1 = round(f1_score(ml_model.predict(X_test), y_test), 3)
    # bug fix: the printed labels said "Accuracy" but the metric is F1.
    print("Training F1: ", train_f1)
    print("Test F1: ", test_f1)
    return [train_f1, test_f1]
def train_test(dataframe_matrix, labels, size):
    """Split features/labels into train and test partitions.

    *size* is the test fraction; the seed is fixed (42) for reproducibility.
    Returns the (X_train, X_test, y_train, y_test) tuple.
    """
    splits = train_test_split(dataframe_matrix, labels, test_size=size, random_state=42)
    return tuple(splits)
# +
def method(dataframe, sel_method="BagOfWords"):
    """Vectorise the cleaned messages into a dense feature matrix.

    sel_method: "BagOfWords" (token counts) or "TF-IDF" (unigram tf-idf).
    Returns (dense feature matrix, the Label column).
    Raises ValueError for any other sel_method (the original silently
    returned None).
    """
    if sel_method == "BagOfWords":
        print("Method: Bag of Words")
        vectorizer = CountVectorizer(token_pattern=r'\b\w+\b')
    elif sel_method == "TF-IDF":
        print("Method: TF-IDF")
        vectorizer = TfidfVectorizer(analyzer='word', ngram_range=(1, 1))
    else:
        raise ValueError(f"unknown sel_method {sel_method!r}")
    dataframe_matrix = vectorizer.fit_transform(dataframe["Clean_Message_Text"])
    return dataframe_matrix.toarray(), dataframe["Label"]
# +
model_performance=[]
dataframe_matrix,labels=method(clean_dataframe)
X_train, X_test, y_train, y_test=train_test(dataframe_matrix,labels,0.30)
model_performance.append(["BOW+LR"]+models(X_train, X_test, y_train,y_test,model="LR"))
model_performance.append(["BOW+NB"]+models(X_train, X_test, y_train,y_test,model="NB"))
dataframe_matrix,labels=method(clean_dataframe,sel_method="TF-IDF")
X_train, X_test, y_train, y_test=train_test(dataframe_matrix,labels,0.30)
model_performance.append(["TF-IDF+LR"]+models(X_train, X_test, y_train,y_test,model="LR"))
model_performance.append(["TF-IDF+NB"]+models(X_train, X_test, y_train,y_test,model="NB"))
# -
model_performance=pd.DataFrame(model_performance,columns=["Approach","Train F1 score","Test F1 score"])
model_performance
# +
plt.figure(figsize=(5,5))
bar_plot=sns.barplot(x="Approach",y="Train F1 score",data=model_performance,palette=["#7DA4A1","#ABCFCF","#C6DCE2","#DBF4F8"])
plt.title("Model Performance: Train Data")
for index, row in model_performance.iterrows():
bar_plot.text(index, row["Train F1 score"],row["Train F1 score"], color='black', ha="center")
plt.show()
# +
plt.figure(figsize=(5,5))
bar_plot=sns.barplot(x="Approach",y="Test F1 score",data=model_performance,palette=["#7DA4A1","#ABCFCF","#C6DCE2","#DBF4F8"])
plt.title("Model Performance: Test Data")
for index, row in model_performance.iterrows():
bar_plot.text(index, row["Test F1 score"],row["Test F1 score"], color='black', ha="center")
plt.show()
# -
# Logistic regression performs well on both train and test data, so refit
# the winning Bag-of-Words + LR combination on a fresh 70/30 split.
vectorizer = CountVectorizer(token_pattern=r'\b\w+\b')
dataframe_matrix = vectorizer.fit_transform(dataframe["Clean_Message_Text"])
dataframe_matrix = dataframe_matrix.toarray()
labels = dataframe["Label"]
X_train, X_test, y_train, y_test = train_test(dataframe_matrix, labels, 0.30)
# NOTE: n_jobs is ignored by the liblinear solver; kept for compatibility.
ml_model = LogisticRegression(random_state=42, solver='liblinear', n_jobs=-1)
ml_model.fit(X_train, y_train)
# BUG FIX: the printed value is an F1 score, not accuracy — label it correctly.
# (f1_score is symmetric under swapping y_true/y_pred, so the value is unchanged.)
print("Test F1 Score: ", round(f1_score(y_test, ml_model.predict(X_test)), 3))
def plot_heatmap(cm, title):
    """Render a 2x2 confusion matrix as an annotated green heatmap."""
    class_labels = ['Non Spam', 'spam']
    frame = pd.DataFrame(cm, index=class_labels, columns=class_labels)
    ax = plt.axes()
    sns.heatmap(frame, annot=True, fmt="d", linewidths=.5, ax=ax, cmap="Greens")
    ax.set_title(title)
    plt.show()
# Evaluate on the held-out split.  BUG FIX: sklearn metrics take
# (y_true, y_pred); the original passed the predictions first, which silently
# swaps precision and recall and transposes the confusion matrix.  The
# repeated predict() call is also hoisted out.
y_pred = ml_model.predict(X_test)
plot_heatmap(confusion_matrix(y_test, y_pred), "Confusion Matrix")
print("Precision Score: ", precision_score(y_test, y_pred))
print("Recall Score: ", recall_score(y_test, y_pred))
# +
print("F1 Score: ", f1_score(y_test, y_pred))
# -
# Model coefficients: one weight per vocabulary token (columns ordered by the
# token's index in the vocabulary), largest / most spam-indicative first.
model_coefficient = pd.DataFrame(
    ml_model.coef_,
    columns=[x[0] for x in sorted(vectorizer.vocabulary_.items(), key=operator.itemgetter(1))],
).transpose().sort_values(0, ascending=False).reset_index()
model_coefficient.columns = ["Word", "Model Coeff"]
model_coefficient.head(30)
| Spam Filter using ML Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Monte Carlo and Jet Tutorial
#
# By <NAME>
import pandas
import matplotlib.pyplot as plt
import numpy as np
import random
import math
# ### 3 Monte Carlo Simulation Basics
#
# generate random numbers between 5 and 15 and create a histogram
# Sample 1000 uniform variates on [0, 1), then rescale them onto [5, 15).
dist = np.array([random.random() for _ in range(1000)])
modified_dist = dist * 10 + 5

# Show the two histograms stacked for comparison.
fig, axs = plt.subplots(2, 1, tight_layout=True)
for ax, values, label in zip(
        axs,
        (dist, modified_dist),
        ("Uniform Dist from 0 to 1", "Uniform Dist from 5 to 15")):
    ax.hist(values, bins=50)
    ax.set_title(label)
plt.show()
# The uniform distribution ranging from 5 to 15 has a mean of 10 and 100 times the variance of the distribution ranging from 0 to 1 (scaling a random variable by 10 multiplies its variance by 10**2)
# #### 3.2 Accept-Reject Monte Carlo
#
# generate random numbers distributed according to a Gaussian distribution with a mean $\mu$ of 5 and a width $\sigma$ of 2 in the range of \[0,10\]
def gaussian_pdf(mu, sigma, n):
    """Generate ``n`` samples from a Gaussian via accept-reject Monte Carlo.

    Samples are distributed according to a Gaussian with mean ``mu`` and
    width ``sigma``, restricted to the range [0, 10] (the tutorial's setup).

    The original body was broken: ``(-1/2)(...)`` attempts to *call* a float,
    and ``^`` is bitwise XOR, not exponentiation — so it never ran.

    Parameters
    ----------
    mu, sigma : float
        Mean and width of the target Gaussian.
    n : int
        Number of accepted samples to return.

    Returns
    -------
    numpy.ndarray
        Array of ``n`` accepted samples, each in [0, 10].
    """
    samples = []
    while len(samples) < n:
        x = random.uniform(0, 10)  # propose uniformly over the target range
        # Unnormalized Gaussian density at x; its maximum is 1 (at x == mu),
        # so it can be used directly as an acceptance probability.
        p = math.exp(-0.5 * ((x - mu) / sigma) ** 2)
        if random.random() < p:
            samples.append(x)
    return np.array(samples)
# Draw samples with the accept-reject sampler and histogram them.
# (The original cell was left unfinished: `pdf =` and an unclosed call.)
samples = gaussian_pdf(5, 2, 1000)
plt.hist(samples, bins=50)
plt.show()
| Monte Carlo and Jet Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import glob
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
from builtins import any
class CrystalBall:
    """Index a directory of CSVs and help explore how their columns relate.

    Instances are normally created through :meth:`run`, which scans a
    directory for CSV files, records each file's column names, and splits
    them into ID-like and non-ID-like names.
    """

    def __init__(self, list_of_csvs: list, csvname_to_colnames_list: dict, csvname_to_IDs: dict, csvname_to_nonIDs: dict, all_IDs: list, all_nonIDs: list, csvname_to_one_ID: list):
        # list of all files in the scanned directory that end in .csv
        self.list_of_csvs = list_of_csvs
        # dictionary where csvname maps to its list of column names
        self.csvname_to_colnames_list = csvname_to_colnames_list
        # dictionary where csvname maps to colnames containing "ID"/"Id"
        self.csvname_to_IDs = csvname_to_IDs
        # dictionary where csvname maps to colnames NOT containing "ID"/"Id"
        self.csvname_to_nonIDs = csvname_to_nonIDs
        # set of unique ID-like column names across all CSVs
        self.all_IDs = all_IDs
        # set of unique non-ID column names across all CSVs
        self.all_nonIDs = all_nonIDs
        # list of all column names (IDs + nonIDs)
        self.all_colnames = list(all_IDs.union(all_nonIDs))
        # list of [csv basename, ID column] pairs, one entry per ID column found
        self.csvname_to_one_ID = csvname_to_one_ID

    @classmethod
    def run(cls, rel_dir):
        """ Initialize the Crystal Ball object for a given directory that contains the CSVs.

        Parameters
        ----------
        rel_dir : str
            - A string that contains the relative directory, which contains the CSVs to analyze.

        Returns
        --------
        CrystalBall
            - CrystalBall that has all class variables initialized by this run script.

        Examples
        --------
        .. code-block:: python

            relative_directory = './folder1/folder2'
            crystalBall = CrystalBall.run(relative_directory)
        """
        list_of_csvs = sorted(glob.glob(rel_dir + '/*.csv'))
        csvname_to_colnames_list = {}
        csvname_to_IDs = {}
        csvname_to_nonIDs = {}
        all_IDs = set()
        all_nonIDs = set()
        csvname_to_one_ID = []
        for csv_name in list_of_csvs:
            with open(csv_name, "rt") as f:
                reader = csv.reader(f)
                try:
                    col_names = next(reader)  # header row only
                except StopIteration:
                    continue  # empty file: nothing to index
                except Exception:
                    # Unreadable file (e.g. a decode error) — skip it,
                    # mirroring the original best-effort behavior.
                    continue
                csvname_to_colnames_list[csv_name] = col_names
                ids = []
                non_ids = []
                for col_name in col_names:
                    if 'ID' in col_name or 'Id' in col_name:
                        csvname_to_one_ID.append([os.path.split(csv_name)[1], col_name])
                        ids.append(col_name)
                    else:
                        non_ids.append(col_name)
                csvname_to_IDs[csv_name] = ids
                csvname_to_nonIDs[csv_name] = non_ids
                all_IDs.update(ids)
                all_nonIDs.update(non_ids)
        return cls(list_of_csvs, csvname_to_colnames_list, csvname_to_IDs, csvname_to_nonIDs, all_IDs, all_nonIDs, csvname_to_one_ID)

    def contains(self, keywords: list, all_colnames: list = None) -> list:
        """ Check if keywords exist in all_colnames.

        - Determine whether a keyword (substring) exists in a given list of column names (strings).
        - Note: This search is case sensitive!

        Parameters
        ----------
        keywords : list[str]
            - List of key words that the user is interested in
        all_colnames : list[str]
            - List of column names of a table, or for many tables.
            - If no argument is provided, this function will use the column names generated when the run method was called.

        Returns
        -------
        list
            - Each index corresponds to a keyword.
            - For each index, True if substring exists in list of strings, otherwise False.

        Examples
        --------
        >>> colnames = ['id', 'name', 'title']
        >>> cb.contains(['name'], colnames)
        [True]
        >>> cb.contains(['Name'], colnames)
        [False]
        >>> cb.contains(['name', 'Name'], colnames)
        [True, False]
        """
        colnames = self.all_colnames if all_colnames is None else all_colnames
        return [any(keyword in colname for colname in colnames) for keyword in keywords]

    def featureSearch(self, keywords: list, all_colnames: list = None, mode: str = 'UNION') -> list:
        """ Find the columns that contain the keywords.

        - Find features (column names) that contain the substrings specified in keywords.
        - Note: This search is case sensitive!

        Parameters
        ----------
        keywords : list[str]
            - List of key words that the user is interested in
        all_colnames : list[str]
            - List of column names of a table, or for many tables.
            - If no argument is provided, this function will use the column names generated when the run method was called.
        mode : str
            - UNION: a column matches if it contains ANY of the keywords.
            - INTERSECTION: a column matches only if it contains EVERY keyword.

        Returns
        --------
        list[str]
            - List will contain all features (column names) that match per the mode.
            - List will be sorted in alphabetical order.

        Examples
        --------
        >>> colnames = ['id', 'name', 'nameType', 'subSpeciesName', 'title']
        >>> cb.featureSearch(['name'], colnames)
        ['name', 'nameType']
        >>> cb.featureSearch(['Name'], colnames)
        ['subSpeciesName']
        >>> cb.featureSearch(['name', 'Name'], colnames)
        ['name', 'nameType', 'subSpeciesName']
        """
        if not isinstance(keywords, list):
            raise Exception('keywords argument expects a list')
        colnames = self.all_colnames if all_colnames is None else all_colnames
        # BUG FIX: the original compared `mode is 'UNION'` — identity, not
        # equality, which is fragile for string literals.
        if mode == 'UNION':
            return sorted({colname for colname in colnames
                           for keyword in keywords if keyword in colname})
        elif mode == 'INTERSECTION':
            # Previously unimplemented ("to implement later").
            return sorted(colname for colname in colnames
                          if all(keyword in colname for keyword in keywords))

    def tableSearch(self, keywords, csvname_to_colnames_list=None, mode='UNION'):
        """ Find the tables that contain the keywords.

        - Find tables that contain column names which have the substrings specified in keywords.
        - Note: This search is case sensitive!

        Parameters
        ----------
        keywords : list[str]
            - List of key words that the user is interested in
        csvname_to_colnames_list : dict[str] = list
            - Dictionary that maps a string (table name) to a list of column names it contains.
            - If no argument is provided, this function will use the dictionary generated when the run method was called.
        mode : str
            - If mode is UNION, then return all tables that contain at least one keyword.
            - If mode is INTERSECTION, then return all tables that contain all the keywords.

        Returns
        --------
        list[str]
            - List will contain all tables that contain a match with keywords.
            - List will be sorted in alphabetical order.

        Examples
        --------
        >>> csvname_to_colnames_list = {'table1': ['colName1', 'colName2'], 'table2':['colName3', 'colName4']}
        >>> cb.tableSearch(['colName1'], csvname_to_colnames_list)
        ['table1']
        >>> cb.tableSearch(['colName3'], csvname_to_colnames_list)
        ['table2']
        >>> cb.tableSearch(['colName1', 'colName3'], csvname_to_colnames_list)
        ['table1', 'table2']
        """
        def columnNamesContainKeyword(keyword, colname_list):
            return any(keyword in colname for colname in colname_list)

        if csvname_to_colnames_list is None:
            csvname_to_colnames_list = self.csvname_to_colnames_list
        if mode == 'UNION':
            # BUG FIX: the original required EVERY keyword to match, which is
            # INTERSECTION semantics; UNION keeps tables matching ANY keyword,
            # as documented above.
            return sorted(
                csvname
                for csvname, colnames in csvname_to_colnames_list.items()
                if any(columnNamesContainKeyword(kw, colnames) for kw in keywords))
        elif mode == 'INTERSECTION':
            # Now also works with an explicitly provided mapping (the original
            # only implemented the default-mapping case).
            return sorted(
                csvname
                for csvname, colnames in csvname_to_colnames_list.items()
                if all(columnNamesContainKeyword(kw, colnames) for kw in keywords))

    def openTable(self, rel_dir, indices=None, encoding='utf-8'):
        """ Open the csv that is referenced by the given relative directory.

        Parameters
        ----------
        rel_dir : str
            - A path to the table that is relative to where the user is running Crystal Ball.
        indices : list[int]
            - Sets the (multi)index by columns represented by their numerical integer-locations.
            - Defaults to [0] (first column becomes the index).
        encoding : str
            - Text encoding to read the file with.

        Returns
        --------
        DataFrame
            - The DataFrame containing the contents of the csv.
        """
        if indices is None:
            indices = [0]  # avoid a shared mutable default argument
        # NOTE: error_bad_lines is deprecated in newer pandas
        # (use on_bad_lines='skip'); kept for compatibility with older versions.
        df = pd.read_csv(rel_dir, engine='python', encoding=encoding, error_bad_lines=False)
        df.set_index(list(df.columns[indices]), inplace=True)
        return df

    def subTable(self, supertable, chosen_index: list, chosen_columns: list):
        """ Create a subtable from a supertable.

        Parameters
        ----------
        supertable : DataFrame
            - Table from which to select chosen_columns from in order to form a subtable
        chosen_index : list[str]
            - The column names that will form the new (multi)index for the subtable.
        chosen_columns : list[str]
            - The column names that will form the new columns for the subtable.

        Returns
        --------
        DataFrame
            - DataFrame (the newly-formed subtable) that will have the (multi)index and columns specified in the arguments.
        """
        combined = chosen_index.copy()
        combined.extend(chosen_columns)
        # BUG FIX: the original indexed with an undefined name `primary_keys`,
        # raising NameError on every call; the new index is chosen_index.
        subtable = supertable[combined].set_index(chosen_index)
        return subtable

    def mergeTables(self, tables: list):
        """ Sequentially merge a list of tables that all share a common index.

        - Merge defaults to using inner joins over the index.

        Parameters
        ----------
        tables : list[DataFrame]
            - Contains a list of DataFrames that will be merged sequentially.

        Returns
        --------
        DataFrame
            - Table that results from sequentially merging the DataFrames given in the argument.
        """
        # TO IMPLEMENT LATER: other types of joins, merging by non-index
        if len(tables) < 2:
            raise Exception("need at least two tables in order to merge")
        num_of_dropped_rows = 0
        max_num_of_rows = max(len(tables[0]), len(tables[1]))
        current_merge = tables[0].merge(tables[1], how='inner', left_index=True, right_index=True)
        diff = max_num_of_rows - len(current_merge)
        max_num_of_rows = len(current_merge)
        num_of_dropped_rows += diff
        # BUG FIX: the original loop referenced `table[i]` (undefined name)
        # instead of `tables[i]`, so merging three or more tables raised.
        for i in range(2, len(tables)):
            current_merge = current_merge.merge(tables[i], how='inner', left_index=True, right_index=True)
            diff = max_num_of_rows - len(current_merge)
            max_num_of_rows = len(current_merge)
            num_of_dropped_rows += diff
        print('Number of Dropped Rows: ', num_of_dropped_rows)
        current_merge.index.name = tables[0].index.name
        # CHECK FOR MULTI INDEX CASE, WHETHER THE ABOVE LINE BREAKS
        return current_merge

    def analyzeRelationships(self, to_analyze: list, visualize=True):
        """ Analyze basic stats of one or more different indexes.

        By comparing boxplots, you should be able to determine which indices are related.

        Parameters
        ----------
        to_analyze : list[list[str, Series]]
            - A list of lists. The latter should be of length two, in which the 0th index stores the table name and the 1st index contains a Series.
            - The Series should contain the values of the column derived from the table associated with the name stored in the 0th index.
        visualize : bool
            - If True, also draw a comparative boxplot of the Series.

        Returns
        --------
        DataFrame
            - Table that contains basic stats about each given Series.
        """
        descriptions = []
        boxplot_data = []
        boxplot_xtick_labels = []
        for pair in to_analyze:
            new_name = pair[1].name + ' from ' + pair[0]
            descriptions.append(pair[1].describe().rename(new_name))
            boxplot_data.append(pair[1])
            boxplot_xtick_labels.append(new_name)
        if visualize:
            g = sns.boxplot(data=boxplot_data)
            g.set(
                title='Relationship Analysis',
                xlabel='Features',
                ylabel='Numerical Values',
                xticklabels=boxplot_xtick_labels
            )
            plt.xticks(rotation=-10)
        description_table = pd.concat(descriptions, axis=1)
        return description_table

    def compareRelationship(self, to_analyze1, to_analyze2, visualize=False):
        """ Compare and contrast the difference between two Series.

        By comparing boxplots, you should be able to determine which indices are related.

        Parameters
        ----------
        to_analyze1 : list[str, Series]
            - A list that contains the name of the first table, and the contents of a specific column from that table as a Series.
        to_analyze2 : list[str, Series]
            - A list that contains the name of the second table, and the contents of a specific column from that table as a Series.
        visualize : bool
            - If True, also draw a comparative boxplot of the two Series.

        Returns
        --------
        DataFrame
            - Table that contains basic stats about each given Series, as well as a third column that contains the difference between the stats.
        """
        descriptions = []
        boxplot_data = []
        boxplot_xtick_labels = []
        new_name = to_analyze1[1].name + ' from ' + to_analyze1[0]
        description1 = to_analyze1[1].describe().rename(new_name)
        descriptions.append(description1)
        boxplot_data.append(to_analyze1[1])
        boxplot_xtick_labels.append(new_name)
        new_name = to_analyze2[1].name + ' from ' + to_analyze2[0]
        description2 = to_analyze2[1].describe().rename(new_name)
        descriptions.append(description2)
        boxplot_data.append(to_analyze2[1])
        boxplot_xtick_labels.append(new_name)
        if visualize:
            g = sns.boxplot(data=boxplot_data)
            g.set(
                title='Relationship Analysis',
                xlabel='Features',
                ylabel='Numerical Values',
                xticklabels=boxplot_xtick_labels
            )
            plt.xticks(rotation=-10)
        diff_description = abs(description1 - description2)
        diff_description.name = "Difference"
        descriptions.append(diff_description)
        description_table = pd.concat(descriptions, axis=1)
        return description_table

    def export(self, df_to_export, write_to, export_type="CSV"):
        """ Exports contents of dataframe to relative location specified by write_to parameter.

        - Default export type is CSV

        Parameters
        ----------
        df_to_export : DataFrame
            - DataFrame whose contents will be exported into a specified location.
        write_to : str
            - Relative location (including file) that you will write into.
        export_type : str
            - Format that contents of df_to_export will be exported as.

        Returns
        --------
        None
        """
        # BUG FIX: `is` replaced with `==` — identity comparison of string
        # literals is fragile and raises a SyntaxWarning on modern Python.
        if export_type == "CSV":
            df_to_export.to_csv(write_to, encoding='utf-8', index=True, index_label=df_to_export.index.name)
        else:
            # TODO: implement SQL export
            print('implemnt sql format')
# to implement later
# featureSearch should return a dictionary, where key is the index and value is the name of the feature
# this makes it easier for people to select the feature they want
# search function should also have an 'is_exact' option, to make search more precise.
# check if a lower case letter surrounds either side of the keyword, implies that it is an interjection
# create a function that let's you index into a python list with another list. useful for selecting many names at once
# from featureSearch result
# +
# Point Crystal Ball at the folder of tennis CSVs and index their column names.
rel_dir = './tennis_demo/tennis-match-charting-project'
ball = CrystalBall.run(rel_dir)
# -
# Search all indexed tables for identifier-like column names.
f0 = ball.featureSearch(['name', 'Player', 'player', 'id', 'ID', 'Id'])
# Pair each match with its position so one can be picked by index below.
f0_helper = [[i, f0[i]] for i in range(len(f0))]
f0_helper
# Keep only the first matched feature.
f0_selected = [f0[i] for i in [0]]
# Find the tables that contain the selected feature.
t0 = ball.tableSearch(f0_selected)
t0_helper = [[i, t0[i]] for i in range(len(t0))]
t0_helper
df0 = ball.openTable(t0[0], encoding='latin-1')
df0
# +
# Collect the unique player names from both player columns.
player_ones = list(df0['Player 1'].dropna())
player_ones.extend(list(df0['Player 2'].dropna()))
all_players = list(set(player_ones))
all_players
# -
f0_helper
# NOTE(review): tableSearch expects a *list* of keywords; f0[12] is a single
# string, so it is iterated character by character here — probably meant [f0[12]].
t1 = ball.tableSearch(f0[12])
t1_helper = [[i, t1[i]] for i in range(len(t1))]
t1_helper
df1 = ball.openTable(t1[5], encoding='latin-1')
df1
| demo/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="3ZupwIgNhjm5"
# # 2.1 テンソル(TENSOR)
# + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" executionInfo={"elapsed": 20983, "status": "ok", "timestamp": 1595326558656, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="Zd_PQDlRhIY2" outputId="3311743f-534d-43b8-bcab-9f47cce2d8fd"
# 必要なパッケージのインストール
# !pip3 install torch==1.4.0
# !pip3 install torchvision==0.5.0
# !pip3 install numpy==1.19.0
# !pip3 install matplotlib==3.2.2
# !pip3 install scikit-learn==0.23.1
# !pip3 install seaborn==0.10.1
# + colab={} colab_type="code" id="RvoCxH00h2Ml"
# 必要なパッケージをインポート
import torch
import numpy as np
# + [markdown] colab_type="text" id="XRM3hDnGkEJi"
# ## 2.1.1 Tensorの生成
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 21906, "status": "ok", "timestamp": 1595326559591, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="Wg0DDFGohyfF" outputId="8b4e8536-768f-4ec0-b2c0-8f716f15659c"
# Create a tensor from a Python list
x = torch.tensor([1, 2, 3])
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 21902, "status": "ok", "timestamp": 1595326559592, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="4gEH00lAkJZW" outputId="3ba0c75a-8209-400e-d91c-d1e60965c029"
# Create a 2-D tensor from nested lists
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 21895, "status": "ok", "timestamp": 1595326559592, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="B4y12g9ekkra" outputId="c9eb68af-e48c-44ce-cf94-0248ebe499ea"
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
x.size() # check the tensor's shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 21891, "status": "ok", "timestamp": 1595326559593, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="1ZMUsfTdkmpW" outputId="240450f3-9ed5-44c9-c988-649fe1f3e9a8"
# Create a tensor without specifying the data type
x1 = torch.tensor([[1, 2, 3], [4, 5, 6]])
# Specify dtype to create a 64-bit floating point tensor
x2 = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float64)
# Create a 64-bit floating point tensor via the torch constructor
x3 = torch.DoubleTensor([[1, 2, 3], [4, 5, 6]])
# Check the dtypes.  NOTE: in a notebook only the last bare expression
# (x3.dtype) is actually displayed.
x1.dtype # dtype not specified
x2.dtype # dtype specified
x3.dtype # dtype specified
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 21886, "status": "ok", "timestamp": 1595326559593, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="tl64ckdTkoZL" outputId="d4d8c996-43d8-4cac-926a-477aadaa1d79"
# Create a 1-D tensor holding 0 through 9
x = torch.arange(0, 10)
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 21881, "status": "ok", "timestamp": 1595326559594, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="B-MCUbUgko6k" outputId="9871ab5e-28be-4f78-8da1-0fef54023b19"
# Create a 1-D tensor of 5 evenly spaced values from 0 to 9.
# NOTE(review): the original comment claimed steps of 2, but
# torch.linspace(0, 9, 5) yields steps of 2.25 (0, 2.25, 4.5, 6.75, 9);
# torch.arange(0, 10, 2) would give an exact step of 2.
x = torch.linspace(0, 9, 5)
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 21876, "status": "ok", "timestamp": 1595326559594, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="T_ZsfJGhkrKO" outputId="dcfdc6f3-90f2-474e-c33c-33a46cb38e2b"
# Generate random values uniformly distributed between 0 and 1
x = torch.rand(2, 3)
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 21871, "status": "ok", "timestamp": 1595326559595, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="-zcjPQQlksUw" outputId="539d4f0b-38b8-40a0-8959-4d91b5651910"
# Create a 2x3 tensor of zeros
x = torch.zeros(2, 3)
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 21866, "status": "ok", "timestamp": 1595326559595, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="oFXn4ezfktRd" outputId="fd94bdc4-fdee-441b-ad90-de72652bd879"
# Create a 2x3 tensor filled with ones
x = torch.ones(2, 3)
print(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 25933, "status": "ok", "timestamp": 1595326563667, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="4UrCSzfZkuM_" outputId="02cf4db1-7890-4ff0-e1d9-ba4b351ad7d9"
# Move the tensor to the GPU (requires a CUDA device) and confirm its location
x = torch.tensor([1, 2, 3]).to('cuda')
x.device
# + [markdown] colab_type="text" id="nc62Vyn8tsiu"
# ## 2.1.2 Tensorの変換
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 25928, "status": "ok", "timestamp": 1595326563667, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="izmRL--dtrky" outputId="5bfe8a39-f3ea-469c-8ea5-7b55ed0fa7f0"
# Create an ndarray
array = np.array([[1,2,3],[4,5,6]])
print(array)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 25924, "status": "ok", "timestamp": 1595326563668, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="tNofgtPpuFpn" outputId="016bb241-57ad-4546-c8e3-59eb711a66d1"
# Convert the ndarray to a Tensor (torch.from_numpy shares the underlying memory)
tensor = torch.from_numpy(array)
print(tensor)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 25919, "status": "ok", "timestamp": 1595326563668, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="xAyX3K8QudbM" outputId="aa6b1225-26a6-49f3-9b86-d49fd843dd39"
# Convert the Tensor back to an ndarray
tensor2array = tensor.numpy()
print(tensor2array)
# + [markdown] colab_type="text" id="tUQ4ZWitujZL"
# ## 2.1.3 Tensorの操作
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 25914, "status": "ok", "timestamp": 1595326563668, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="th5KAMYtuhri" outputId="863820ef-82f4-4d37-c9ca-8260636e8a73"
# Index a single element (row 1, column 2)
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(x[1, 2])
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 25910, "status": "ok", "timestamp": 1595326563669, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="4VezLYOau0D3" outputId="b6d56c98-5ce7-4df8-c4d8-185f02450218"
# Take a slice (the whole second row)
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(x[1, :])
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" executionInfo={"elapsed": 25905, "status": "ok", "timestamp": 1595326563669, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="Ybb82kRpu2Dh" outputId="22d7b40a-d206-4b6b-fc95-af2fd490676c"
# Reshape the 2x3 tensor into a 3x2 tensor
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
x_reshape = x.view(3, 2)
print(x) # the original 2x3 tensor
print(x_reshape) # the reshaped 3x2 tensor
# + [markdown] colab_type="text" id="6f3ey8j6u-sx"
# ## 2.1.4 Tensorの演算
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" executionInfo={"elapsed": 25899, "status": "ok", "timestamp": 1595326563670, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="lkKWF7Gpu3wm" outputId="0e90f595-4eec-44f2-fc1d-8af379f521e4"
# Elementwise arithmetic between a tensor and a scalar
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float64)
print(x + 2) # addition
print(x - 2) # subtraction
print(x * 2) # multiplication
print(x / 2) # division (the original comment mislabeled this as addition)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" executionInfo={"elapsed": 25893, "status": "ok", "timestamp": 1595326563670, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="lo8b2z-qu-JP" outputId="e80f2709-9638-4937-f3cc-0c9f5ce5edec"
# Elementwise arithmetic between two tensors
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float64)
y = torch.tensor([[4, 5, 6], [7, 8, 9]], dtype=torch.float64)
print(x + y) # addition
print(x - y) # subtraction
print(x * y) # multiplication
print(x / y) # division
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 25889, "status": "ok", "timestamp": 1595326563671, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="Ru2B2sw3vMGE" outputId="cda4312d-9c62-48f2-b7ca-a016157cf9b0"
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float64)
print(torch.min(x)) # minimum
print(torch.max(x)) # maximum
print(torch.mean(x)) # mean
print(torch.sum(x)) # sum
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 25885, "status": "ok", "timestamp": 1595326563672, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "06964401837614789891"}, "user_tz": -540} id="b0YGGZsAvOCf" outputId="0fb138fd-2ee1-43ed-e48d-f04c978e5352"
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float64)
print(torch.sum(x).item()) # sum, extracted as a plain Python number
| Ubuntu/Chapter2/.ipynb_checkpoints/Section2-1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocess SMART-seq
import anndata
import scanpy as sc
# ## Load data
#
# - Gene raw counts matrix from SMART-seq
# Gene-level raw count matrix from the SMART-seq experiment.
adata = anndata.read_h5ad('../input/SMART.Neuron.h5ad')
adata
# ## Preprocessing
# +
# Basic QC: drop cells with <200 detected genes and genes seen in <3 cells.
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
adata
# -
# basic steps of the scanpy clustering process
# Keep the raw counts in a separate layer before normalization so they
# remain available downstream.
adata.layers['raw'] = adata.X.copy()
sc.pp.normalize_per_cell(adata)
sc.pp.log1p(adata)
# Select the 10,000 most variable genes and plot the dispersion/mean fit.
sc.pp.highly_variable_genes(adata, n_bins=100, n_top_genes=10000)
sc.pl.highly_variable_genes(adata)
# ## Save AnnData
# this is the total adata without downsample
# Persist the normalized, log-transformed AnnData (full data, no downsampling).
output_path = 'SMART.TotalAdata.norm_log1p.h5ad'
adata.write_h5ad(output_path)
adata
| docs/allcools/cell_level/integration/multi_modalities/Neurons/02-preprocess_SMART.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="pq35Hi-auwNJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4428e0b1-05ed-4933-d145-8bb16362aee8"
import os
# Folder containing the flower images plus their labels CSV.
path = 'flower_images/'
names = sorted(os.listdir(path))
len(names)
# + id="dDDtKDk5NkHU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="24c778a3-6d99-4e87-e5b7-74c2b64f7fd1"
import pandas as pd
# names is sorted, so names[-1] is the alphabetically last entry —
# presumably the labels CSV (file/label pairs); verify against the folder contents.
df = pd.read_csv(os.path.join(path,names[-1]))
df.head()
# + id="GHTe2UygNm0T" colab_type="code" colab={}
# Turn the labels table into a list of plain {column: value} records.
dic = df.to_dict('index')
raw_data = list(dic.values())
# + id="_0FimKJ_NuoK" colab_type="code" colab={}
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# + id="3rUtg3IRNzZd" colab_type="code" colab={}
# Load each image at ResNet50's expected 224x224 size and keep its pixel
# array together with its label.
data = []
for e in raw_data:
    temp = {}
    img_path = os.path.join(path,e['file'])
    img = image.load_img(img_path,target_size=(224,224))
    x = image.img_to_array(img)
    temp['array'] = x
    temp['label'] = e['label']
    data.append(temp)
# + id="s_EcAB_dN2yL" colab_type="code" colab={}
# Split the records into an image array X and a label column y.
X = []
y = []
for i in range(len(data)):
    X.append(data[i]['array'])
    # BUG FIX: the original used a second loop that read data[i]['label']
    # with the stale index left over from the first loop, so EVERY sample
    # received the same (last) label.  A single loop pairs them correctly.
    y.append(data[i]['label'])
X = np.array(X)
X = X.reshape(X.shape[0],224,224,3)
y = np.array(y)
y = y.reshape(y.shape[0],1)
# + id="QMKNr4GrN5_r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="40b3496c-5213-4a24-f294-e1f076946ad3"
# Pair images with labels, shuffle, and split 85/15 into train/test.
# NOTE(review): zipping (image, label) pairs produces an object-dtype array;
# sklearn's train_test_split would be simpler and cheaper — confirm before changing.
final_data = np.array(list(zip(X,y)))
train_size = int(0.85*final_data.shape[0])
np.random.shuffle(final_data)
# Re-stack the image column of the training rows into a dense array.
X_train = []
for i in range(train_size):
    X_train.append(final_data[:train_size,0][i])
X_train = np.array(X_train)
y_train = final_data[:train_size,1]
y_train = y_train.reshape(y_train.shape[0],1)
# Same for the held-out test rows.
X_test = []
for j in range(final_data.shape[0] - train_size):
    X_test.append(final_data[train_size:,0][j])
X_test = np.array(X_test)
y_test = final_data[train_size:,1]
y_test = y_test.reshape(y_test.shape[0],1)
X_test.shape
# + id="5UOWgAHGN_Gr" colab_type="code" colab={}
# Pre-trained ResNet50 backbone without its ImageNet classification head.
base_model = ResNet50(weights='imagenet',include_top=False)
# + id="1DtycSXuPzgr" colab_type="code" colab={}
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.models import Model
# + id="tIwziz2MOK7I" colab_type="code" colab={}
# New classification head: global pooling -> 1024-unit dense -> 10-way softmax.
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
pred = Dense(10,activation='softmax')(x)
# + id="uGrAduG2PwjJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3203} outputId="b51089c8-e7b8-4e7d-d46f-c826845188d3"
# Assemble the full model and list its layers (used to pick the freeze cutoff below).
model = Model(inputs=base_model.input,outputs=pred)
for i, layer in enumerate(model.layers):
    print(i, layer.name)
# + id="uDfz8oFBQOr8" colab_type="code" colab={}
# Freeze the pre-trained backbone (layers before index 175) and leave the
# newly added head trainable.
for layer_index, layer in enumerate(model.layers):
    layer.trainable = layer_index >= 175
# + id="Toi14winQcKK" colab_type="code" colab={}
# categorical_crossentropy matches the one-hot labels produced below.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',metrics=['accuracy'])
# + id="LKlZ4RxgQh7q" colab_type="code" colab={}
from tensorflow.python.keras.utils import to_categorical
y_train = to_categorical(y_train,num_classes=10)
y_test = to_categorical(y_test,num_classes=10)
# + id="4Hyib_LtRUxb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="9f7b0c19-b521-4953-a54c-d45ee6debdcf"
model.evaluate(X_test,y_test)
# + id="YUNUzcztQ2zb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3617} outputId="a347fb9d-6238-4c66-e092-8e13cb9e9285"
hist = model.fit(X_train,y_train,epochs=10,batch_size=4)
# + id="Cl-VcrkGQ_B1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="0012b490-f182-4b12-90b1-9f9f387dcb02"
model.evaluate(X_test,y_test)
# + id="Oi6rCWJhSb0M" colab_type="code" colab={}
model.save('flower_model.h5')
# + id="VOue-vCdSqNe" colab_type="code" colab={}
from tensorflow.python.keras.models import load_model
m = load_model('flower_model.h5')
# + id="uYKZgF0LS89x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="7bd454d4-62c1-4a1b-a83e-26a29a4dd945"
m.evaluate(X_test,y_test)
# + id="leYMFdiDTC4d" colab_type="code" colab={}
base_model.save('ResNet-base.h5')
# + id="RWT_wo2HT1ex" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="445a3a2a-0ac8-4736-d00c-eaebae866fe3"
t = load_model('ResNet-base.h5')
# + id="GWyHMKnCUBZC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 6695} outputId="11a5d7e1-fdc6-4256-a185-62bca948a330"
t.summary()
# + id="7larDwuBUGwE" colab_type="code" colab={}
| TensorFlow/Flowers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import temperature data from the DWD and process it
#
# This notebook pulls historical temperature data from the DWD server and formats it for future use in other projects. The data is reported hourly for each of the available weather stations and packaged in a zip file. To use the data, we need to store everything in a single .csv file, all stations side-by-side. Also, we need the daily average.
#
# To reduce computing time, we also exclude data earlier than 2007.
#
# Files should be executed in the following pipeline:
# * 1-dwd_konverter_download
# * 2-dwd_konverter_extract
# * 3-dwd_konverter_build_df
# * 4-dwd_konverter_final_processing
# ## 2.) Extract all .zip-archives
# In this next step, we extract a single file from all the downloaded .zip files and save them to the 'import' folder. Beware, there is going to be a lot of data (~6 GB of .csv files)
# +
from pathlib import Path
import glob
import re
from zipfile import ZipFile

# Folder definitions
download_folder = Path.cwd() / 'download'
import_folder = Path.cwd() / 'import'

# Collect every downloaded hourly-temperature archive.
unzip_files = glob.glob('download/stundenwerte_TU_*_hist.zip')

# The data file inside each archive is the one whose name starts with 'produkt'.
regex_name = re.compile('produkt.*')

# Open each archive, find the single matching data file, extract it to 'import'.
for archive_path in unzip_files:
    with ZipFile(archive_path, 'r') as archive:
        matching_names = [name for name in archive.namelist() if regex_name.match(name)]
        archive.extract(matching_names[0], import_folder)
display('Done')
# -
| case_study_weather/2-dwd_konverter_extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
import torch
import numpy as np
from tqdm.auto import tqdm
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
from torch import nn
import torch.nn.functional as F
from causal_util import load_env
from causal_util.collect_data import EnvDataCollector
from matplotlib import pyplot as plt
from keychest.keychestenv import keychest_obs3d_to_obs2d, keychest_obs2d_to_image
from sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper import load_config_files
import pickle as pickle
from causal_util.helpers import one_hot_encode
import gin
from keychest.features_xy import obs_features_handcoded, arr_to_dict, dict_to_arr
import io
# -
import ray
# Start a local Ray runtime.
ray.init()
# Path to the training checkpoint produced by a Ray Tune run.
ckpt = '/home/sergei/ray_results/ve5_rec_nonlin_gnn_gumbel_siamese_l2_ve5_dec_rec/main_fcn_2671b_00000_0_2021-01-26_08-36-06/checkpoint_3000/checkpoint'
# https://github.com/pytorch/pytorch/issues/16797
class CPU_Unpickler(pickle.Unpickler):
    """Unpickler that remaps CUDA tensor storages onto the CPU.

    Unpickling a GPU-trained object resolves ``torch.storage._load_from_bytes``,
    which would restore tensors on their original (CUDA) device.  Intercepting
    that single symbol and routing it through ``torch.load(..., map_location='cpu')``
    lets the checkpoint be opened on a CPU-only machine.
    See https://github.com/pytorch/pytorch/issues/16797.
    """

    def find_class(self, module, name):
        # Everything except the torch storage loader resolves exactly as the
        # default unpickler would.
        if (module, name) != ('torch.storage', '_load_from_bytes'):
            return super().find_class(module, name)

        def _load_on_cpu(serialized_bytes):
            return torch.load(io.BytesIO(serialized_bytes), map_location='cpu')

        return _load_on_cpu
# Load the pickled learner, remapping any CUDA storages to CPU.
with open(ckpt, 'rb') as f:
    learner = CPU_Unpickler(f).load()#pickle.load(f)
# Histograms of learned model coefficients on a log10 scale.
plt.hist(np.log10(learner.model.Mf.flatten()))
plt.hist(np.log10(learner.model.Ma.flatten()))
# Gumbel-softmax switch of the first output model.
sw = learner.model.model_fout_0000.switch
sw.shape
# Empirically estimate the switch's selection probabilities by sampling it
# 1000 times on a constant all-ones input of 22 features.
sampled = [sw(torch.ones((1, 22)))[0].detach().numpy() for _ in range(1000)]
np.array(sampled).mean(0)
# One hard Gumbel-softmax draw taken directly from the switch logits.
torch.nn.functional.gumbel_softmax(sw.logits, tau=1, hard=True, dim=0)[1]
# Compare the analytic softmax probabilities against the empirical sample means.
plt.scatter(sw.softmaxed().detach().numpy(), np.array(sampled).mean(0))
| debug/learned_model_analysis_gumbel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DAT210x - Programming with Python for DS
# ## Module5- Lab1
# Start by importing whatever you need to import in order to make this lab work:
# .. your code here ..
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from scipy import stats
# ### How to Get The Dataset
# 1. Open up the City of Chicago's [Open Data | Crimes](https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2) page.
# 1. In the `Primary Type` column, click on the `Menu` button next to the info button, and select `Filter This Column`. It might take a second for the filter option to show up, since it has to load the entire list first.
# 1. Scroll down to `GAMBLING`
# 1. Click the light blue `Export` button next to the `Filter` button, and select `Download As CSV`
# Now that you have the dataset stored as a CSV, load it up being careful to double check headers, as per usual:
# .. your code here ..
#Uploaded the 2018 crime scene dataset file since the 2001 to present dataset file is too large
df = pd.read_csv('Datasets/Crimes_-_2018.csv')
# Get rid of any _rows_ that have nans in them:
# .. your code here ..
# Drop every row containing at least one missing value, in place.
df.dropna(axis = 0, how = 'any', inplace = True)
df.head()
# Display the `dtypes` of your dataset:
# .. your code here ..
df.dtypes
# Coerce the `Date` feature (which is currently a string object) into real date, and confirm by displaying the `dtypes` again. This might be a slow executing process...
# .. your code here ..
# Parse the Date strings into datetime64 so the date comparison below works.
df.Date = pd.to_datetime(df['Date'])
df.dtypes
def doKMeans(df, n_clusters=7):
    """Plot the Longitude/Latitude samples in `df`, fit K-Means, and overlay
    the cluster centers.

    Parameters
    ----------
    df : DataFrame with 'Longitude' and 'Latitude' columns.
    n_clusters : number of clusters to search for (default 7, per the lab).

    Returns
    -------
    ndarray of shape (n_clusters, 2): centroid coordinates (longitude, latitude).
    """
    # Show the raw incident locations first so the centroids can be judged
    # against them.  Longitude = x, Latitude = y.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(df.Longitude, df.Latitude, marker='.', alpha=0.3)

    # K-Means only needs the coordinates; drop every other column.
    df1 = df[['Longitude', 'Latitude']]

    # fit() suffices -- the per-sample labels returned by fit_predict()
    # were never used in the original.
    model = KMeans(n_clusters=n_clusters)
    model.fit(df1)

    # Print and plot the centroids.
    centroids = model.cluster_centers_
    print(centroids)
    ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='red', alpha=0.5, linewidths=3, s=169)
    return centroids
# Print & Plot your data
kmean1 = doKMeans(df)
#Displaying some statistical facts of the centroid location kmean1
stats.describe(kmean1)
# Filter out the data so that it only contains samples that have a `Date > '2011-01-01'`, using indexing. Then, in a new figure, plot the crime incidents, as well as a new K-Means run's centroids.
# .. your code here ..
#Since I have the data in 2018, filter out the data after 2018-07-01
df2 = df[df['Date'] > '2018-07-01']
# Print & Plot your data
kmean2 = doKMeans(df2)
#Displaying some statistical facts of the centroid location kmean2
stats.describe(kmean2)
#Question
#Did your centroid locations change after you limited the date range to +2011?
#Answer: The centroid location did not change much by comparing the means of kmean1 and kmean2
| Module5/Module5 - Lab1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="844X7EVp92MY"
# # How common is introduced unfairness
#
# In the paper
#
# *Why fair labels can yield unfair predictions: graphical conditions on introduced unfairness*
# <NAME>, <NAME>, <NAME>, <NAME>
# AAAI, 2022
#
#
# we explore conditions under which fair labels can yield optimal unfair models. This notebook illustrates how PyCID can be used to investigate this question, by defining methods for graphical criteria and randomly sampling supervised learning CIDs.
#
# To open and run this notebook in your browser with Google Colab, use this link:
# [](https://colab.research.google.com/github/causalincentives/pycid/blob/master/notebooks/Why_fair_labels_can_yield_unfair_predictions_AAAI_22.ipynb)
# + id="kMhVKhv892MY" cellView="form"
# @title Imports
# importing necessary libraries
try:
import pycid
except ModuleNotFoundError:
import sys
# !{sys.executable} -m pip install git+https://github.com/causalincentives/pycid
import pycid
import numpy as np
import networkx as nx
import random
# + [markdown] id="eggYXjhyTINj"
# ## Definitions
#
# Define ITV and methods for checking the graphical conditions
# + id="U_pvnepmUqjT"
def total_variation(cid: pycid.CID, a: str, x: str) -> float:
    """Return the total variation of x when a is flipped from 0 to 1."""
    ev_when_one = cid.expected_value([x], {a: 1})[0]
    ev_when_zero = cid.expected_value([x], {a: 0})[0]
    return ev_when_one - ev_when_zero
def introduced_total_variation(cid: pycid.CID, a: str, d: str, y: str) -> float:
    """Total introduced variation: |TV of a on d| minus |TV of a on y|."""
    tv_on_decision = total_variation(cid, a, d)
    tv_on_label = total_variation(cid, a, y)
    return np.abs(tv_on_decision) - np.abs(tv_on_label)
def theorem9_gc(cid: pycid.CID) -> bool:
    """Graphical condition of Theorem 9: ITV is possible under arbitrary loss."""
    decision = list(cid.decisions)[0]
    parents = cid.get_parents(decision)
    for feature in parents:
        # Condition on the decision plus every *other* parent of the decision.
        conditioning_set = [decision] + parents
        conditioning_set.remove(feature)
        if cid.is_dconnected("A", feature, []) and cid.is_dconnected(feature, "U", conditioning_set):
            return True
    return False
def theorem11_gc(cid: pycid.CID) -> bool:
    """Graphical condition of Theorem 11: ITV is possible under a
    P-admissible loss.

    Requires the Theorem 9 condition, plus that the sensitive attribute A is
    not itself a feature (a parent of the decision), and that A is
    d-connected to the utility U given the decision and its parents.
    """
    decision = list(cid.decisions)[0]
    parents = cid.get_parents(decision)
    return (
        theorem9_gc(cid)
        # idiom fix: `not "A" in ...` -> `"A" not in ...` (same semantics)
        and "A" not in parents
        and cid.is_dconnected("A", "U", [decision] + parents)
    )
# + id="0RCzeQhGlaPz"
def assess_unfairness(cid):
    """Print whether ITV is graphically possible, and the realised ITV under
    0-1 and P-admissible losses, for the given supervised-learning CID."""
    # Work on a copy so imputing policies does not mutate the caller's CID.
    scratch = cid.copy()
    decision = list(cid.decisions)[0]
    print("ITV under 0-1 loss possible: ", theorem9_gc(scratch))
    print("ITV under P-adm loss possible: ", theorem11_gc(scratch))
    # Optimal policy under the specified 0-1 loss.
    scratch.impute_optimal_policy()
    print("actual ITV under 0-1 loss", introduced_total_variation(scratch, "A", decision, "Y"))
    # Under a P-admissible loss the optimal predictor is the conditional
    # expectation of Y; impute that policy instead and recompute ITV.
    scratch.impute_conditional_expectation_decision(decision, "Y")
    print("actual ITV under P-adm loss", introduced_total_variation(scratch, "A", decision, "Y"))
# + [markdown] id="XcJh3q3xlax9"
# ## Examples
# + [markdown] id="SzDAYpHGle1Q"
# ### Hiring example
# + colab={"base_uri": "https://localhost:8080/", "height": 335} id="0g2o4MFqli5q" outputId="2fbe277c-148e-4aa7-d626-6000ebf0ecb7"
# Hiring CID: A -> D -> {Y, Yh}; the utility U scores agreement of Y and Yh.
hiring = pycid.CID([("A", "D"), ("D", "Y"), ("D", "Yh"), ("Y", "U"), ("Yh", "U")], decisions=["Yh"], utilities=["U"])
hiring.add_cpds(
    A=pycid.bernoulli(0.5),
    D=lambda A: pycid.noisy_copy(A, 0.8),  # noisy copy of A (parameter 0.8)
    Y=lambda D: pycid.bernoulli(0.49 + 0.02 * D),
    U=lambda Y, Yh: int(Y == Yh),  # 0-1 utility (prediction accuracy)
    Yh=[0, 1],
)
print(hiring.check_model())
hiring.draw()
assess_unfairness(hiring)
# + [markdown] id="KcTThayj2Tb1"
# ## Music Example
#
# First consider the version where A is not a feature. There is ITV under either loss function.
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="ToPA4S812Wnu" outputId="809cde45-41c5-4fec-d27c-67f02cfbe43b"
# Music example, A not a feature: A and M influence the audition test T,
# which is the only parent of the prediction Yh.
music_cid = pycid.CID(
    [("A", "T"), ("M", "T"), ("M", "Y"), ("T", "Yh"), ("Yh", "U"), ("Y", "U")], decisions=["Yh"], utilities=["U"]
)
music_cid.add_cpds(
    A=pycid.bernoulli(0.5),
    M=pycid.bernoulli(0.5),
    T=lambda A, M: {0: None, 1: (0.05 if M == 0 else 0.9 if M == 1 and A == 1 else 1)},  # this becomes 1-P(T=1)
    Y=lambda M: pycid.noisy_copy(M, 0.95),
    U=lambda Y, Yh: -((Yh - Y) ** 2),  # squared-error (P-admissible) utility
    Yh=[0, 0.25, 0.5, 0.75, 1],
)
music_cid.draw()
assess_unfairness(music_cid)
# + [markdown] id="bvhCYLhzW-oa"
# Next, lets make A a feature by adding an arrow A -> Yh
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="muy3bNXdQ8z7" outputId="08a2632b-bd34-44de-fac7-a93acf592d1e"
# Same music example, but with an added edge A -> Yh so the sensitive
# attribute is available to the predictor as a feature.
music_a_feature = pycid.CID(
    [
        ("A", "T"),
        ("M", "T"),
        ("M", "Y"),
        ("T", "Yh"),
        ("Yh", "U"),
        ("Y", "U"),
        ("A", "Yh"),  # now A is a feature
    ],
    decisions=["Yh"],
    utilities=["U"],
)
music_a_feature.add_cpds(
    A=pycid.bernoulli(0.5),
    M=pycid.bernoulli(0.5),
    T=lambda A, M: {0: None, 1: (0.05 if M == 0 else 0.9 if M == 1 and A == 1 else 1)},  # this becomes 1-P(T=1)
    Y=lambda M: pycid.noisy_copy(M, 0.95),
    U=lambda Y, Yh: int(Y == Yh),  # 0-1 utility this time
    Yh=[0, 1],
)
music_a_feature.draw()
assess_unfairness(music_a_feature)
# + [markdown] id="ZVsWjj3HYBTU"
# A predictor optimizing P-admissible loss now has ITV=0.
#
# However, as we discuss in the paper, one caveat to this result is that women who passed the test are actually slightly disfavoured compared to men who pass the test. Meanwhile, women who fail the test benefit:
# + colab={"base_uri": "https://localhost:8080/"} id="9l0xrtCCWD_J" outputId="de8588f5-05f3-477c-fec5-a5a99e01f1ed"
# Under P-admissible loss the optimal prediction is E[Y | features]; impute
# that policy and inspect acceptance probabilities by test result and group.
music_a_feature.impute_conditional_expectation_decision("Yh", "Y")
print(
    "chance of getting accepted given passed test: ",
    "{:.3f}".format(music_a_feature.expected_value(["Yh"], {"T": 1})[0]),
)
print(
    "chance of getting accepted for men who passed the test",
    "{:.3f}".format(music_a_feature.expected_value(["Yh"], {"T": 1, "A": 0})[0]),
)
print(
    "chance of getting accepted for women who passed the test",
    "{:.3f}".format(music_a_feature.expected_value(["Yh"], {"T": 1, "A": 1})[0]),
)
print()
print(
    "chance of getting accepted for men who failed the test",
    "{:.2f}".format(music_a_feature.expected_value(["Yh"], {"T": 0, "A": 0})[0]),
)
print(
    "chance of getting accepted for women who failed the test",
    "{:.2f}".format(music_a_feature.expected_value(["Yh"], {"T": 0, "A": 1})[0]),
)
# + [markdown] id="BKB5CYjY3AAc"
# ## Generate random supervised learning CIDs
#
# Here we create a method to generate a random supervised learning CID, with 'special' nodes D, Y, U, and A. The only parents of U are D and Y, and D only has a single child U.
# + id="oR8SE_qY-NfV"
def random_supervised_learning_cid(number_of_nodes: int = 6, edge_density: float = 0.4) -> pycid.CID:
    """
    Generates a CID with 'special' nodes D, Y, U, and A to model a supervised
    learning setup.
    The only parents of U are D and Y, and D only has a single child U.
    The graph is always connected. The edge_density parameter specifies how many
    edges are added additionally.
    Parameters:
    -----------
    number_of_nodes: how many nodes in the graph; must be at least 4 so that
        the decision, label, sensitive-attribute, and utility nodes all exist
    edge_density: how densely connected is the graph, between 0 and 1
    """
    # Robustness fix: with fewer than 4 nodes the pop/choice calls below
    # would fail with an opaque IndexError; fail fast with a clear message.
    if number_of_nodes < 4:
        raise ValueError("number_of_nodes must be at least 4")
    dag = pycid.random_dag(number_of_nodes=number_of_nodes - 1, edge_density=edge_density)
    remaining_nodes = list(nx.topological_sort(dag))
    # choose a decision node (the topologically last node)
    decision_node = remaining_nodes.pop()
    # choose a label node
    y_node = random.choice(remaining_nodes)
    remaining_nodes.remove(y_node)
    # choose a sensitive attribute
    a_node = random.choice(remaining_nodes)
    remaining_nodes.remove(a_node)
    # add appropriate names, and add a U node
    dag = nx.relabel_nodes(dag, {decision_node: "Yh", y_node: "Y", a_node: "A"})
    dag.add_edge("Yh", "U")
    dag.add_edge("Y", "U")
    # convert the DAG into a CID with labeled decisions and utilities
    cid = pycid.CID(dag.edges, decisions=["Yh"], utilities=["U"])
    # add random cpds and a 0-1 loss function
    cid.add_cpds(
        Yh=[0, 1],
        U=lambda Yh, Y: int(Yh == Y),  # 0-1 loss function
        A=pycid.random_cpd.RandomCPD(),
        Y=pycid.random_cpd.RandomCPD(),
    )
    # every remaining (ordinary chance) node gets a random CPD as well
    for n in remaining_nodes:
        cid.add_cpds(**{n: pycid.random_cpd.RandomCPD()})
    return cid
# + [markdown] id="LP9H4LHz3det"
# ## Investigating a single CID
#
# Before sampling repeatedly to get statistical estimates, let us investigate a single, randomly sampled CID. Rerun the code to see different samples.
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="tJalAREzPhl7" outputId="bbad7bd0-b6d0-44cf-9d63-1495e42df225"
# Sample one random supervised-learning CID, draw it, and report its ITV.
cid = random_supervised_learning_cid()
cid.draw()
assess_unfairness(cid)
# + [markdown] id="FLBbCfrL3lRx"
# ## Sample CIDs and measure ITV
#
# Now let's sample 20 different CIDs, and count how many have an introduced total effect under 0-1 loss, and under P-admissible loss. Increase the iterations parameter to get more reliable estimates.
# + colab={"base_uri": "https://localhost:8080/"} id="L0JUgURNa5ft" outputId="934f79a0-9bf1-41a8-f965-145e8774d940"
# Rejection-sample CIDs until `samples` of them satisfy the Theorem 9
# graphical condition, and track how often the realised ITV under 0-1 loss
# is positive (using a 0.01 tolerance).
samples = 20
theorem9_total = 0
theorem9_results = []
while theorem9_total < samples:
    cid = random_supervised_learning_cid()
    if theorem9_gc(cid):
        theorem9_total += 1
        cid.impute_optimal_policy()
        theorem9_results.append(introduced_total_variation(cid, "A", "Yh", "Y"))
        # running estimate, reprinted after each accepted sample
        theorem9_rate = sum(i > 0.01 for i in theorem9_results) / theorem9_total
        print("{:.2f}".format(theorem9_rate) + f" positive under 0-1 loss, count {theorem9_total}")
    else:
        continue # if the CID doesn't satisfy the graphical condition, we resample
# + colab={"base_uri": "https://localhost:8080/"} id="Z7G87Iv3a5te" outputId="09883701-ad2f-42f5-8140-667cbae3001a"
# Same experiment under the Theorem 11 condition and a P-admissible loss.
samples = 20
theorem11_total = 0
theorem11_results = []
while theorem11_total < samples:
    cid = random_supervised_learning_cid()
    if theorem11_gc(cid):
        theorem11_total += 1
        # impute the policy that'd be optimal under P-admissible loss
        cid.impute_conditional_expectation_decision("Yh", "Y")
        theorem11_results.append(introduced_total_variation(cid, "A", "Yh", "Y"))
        # running estimate, reprinted after each accepted sample
        theorem11_rate = sum(i > 0.01 for i in theorem11_results) / theorem11_total
        print("{:.2f}".format(theorem11_rate) + f" positive under P-adm loss, count {theorem11_total}")
    else:
        continue # if the CID doesn't satisfy the graphical condition, we resample
# + id="j9m0h1zc6axT"
| notebooks/Why_fair_labels_can_yield_unfair_predictions_AAAI_22.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.2 64-bit (''venv'': venv)'
# name: python3
# ---
# +
import numpy as np # для работы с массивами
# import pandas as pd # для работы DataFrame
# import seaborn as sns # библиотека для визуализации статистических данных
# import matplotlib.pyplot as plt # для построения графиков
# # %matplotlib inline
# -
# Two feature vectors over four observations.
# note: .T on a 1-D numpy array is a no-op; kept from the original.
x1 = np.array([1,2,1,1]).T
x2 = np.array([70,130,65,60]).T
# Centre each feature by subtracting its mean.
x1_c = x1-x1.mean()
x2_c = x2-x2.mean()
print(x1_c)
print(x2_c)
# Euclidean norms of the centred vectors.
x1_c_norm = np.linalg.norm(x1_c)
x2_c_norm = np.linalg.norm(x2_c)
print(x1_c_norm)
print(x2_c_norm)
# Standardise: each vector now has unit norm (and zero mean).
x1_st = x1_c / x1_c_norm
x2_st = x2_c / x2_c_norm
print(x1_st)
print(x2_st)
# Correlation-style matrix of the two standardised features (0.9922 is
# their correlation) and its eigendecomposition.
A = np.array([
    [1, 0.9922],
    [0.9922, 1]
])
B = np.linalg.eig(A)
B
# First eigenvector (column 0 of the eigenvector matrix).
B[1][:,0]
# pca1 = np.column_stack((x1,x2))@B[1][:,0]
# First principal component: project the standardised features onto (1, 1).
pca1 = np.column_stack((x1_st,x2_st))@np.array([1,1])
# Centre and normalise the component as well.
pca1_c = pca1 - pca1.mean()
pca1_c_norm = np.linalg.norm(pca1_c)
pca1_st = pca1_c / pca1_c_norm
pca1_st.round(4)
# Second example: eigendecomposition with a negative correlation of -0.77.
A1 = np.array([
    [1, -0.77],
    [-0.77, 1]
])
B1 = np.linalg.eig(A1)
B1
| unit_3/math_2.16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# # Series
# Build a Series from a plain Python list; pandas assigns a default
# RangeIndex (0..3).
data = [1, 2, 3, 4]
s = pd.Series(data)
s
# Bug fix: the original line was `index = ['Linha' + str(i)]`, which
# references an undefined loop variable `i` and raises NameError.  Build one
# 'Linha<n>' label per element with a comprehension instead.
index = ['Linha' + str(i) for i in range(len(data))]
| Alura/intro_pandas/extras/.ipynb_checkpoints/Criando Estruturas de Dados-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from all_imports import *
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
from cifar10 import *
from mobile_net import *
# Batch size and image size (CIFAR images are 32x32).
bs=64
sz=32
torch.cuda.is_available()
torch.backends.cudnn.enabled
data = get_data(sz, bs)
# tuple list of form
# expansion, out_planes, num_blocks, stride
tpl = [(1, 64, 2, 1),
      (3, 128, 2, 2),
      (3, 256, 2, 1),
      (6, 128, 2, 2),
      (6, 256, 2, 1)]
# Compact MobileNetV2-style model with 10 output classes.
md_mbl = mblnetv2(exp_dw_block, 1, 64,
                 tpl,
                 num_classes=10)
learn = ConvLearner.from_model_data(md_mbl, data)
total_model_params(learn.summary())
# Weight decay and Adam optimiser with custom betas.
wd=1e-4
learn.opt_fn = partial(optim.Adam, betas=(0.95,0.99))
# Save the untrained weights so the LR finder can be rerun from scratch.
learn.save('init')
learn.load('init')
learn.lr_find(wds=wd, use_wd_sched=True)
learn.sched.plot()
lr=1e-4
# Live loss/metric plotting through a visdom server on port 6009.
visl = VisdomLinePlotter(6009)
visc = visdom_callback(visl)
# Three 30-epoch cyclical-LR runs at decreasing peak learning rates,
# checkpointing the best model of each run.
learn.fit(5e-1, 1, wds=1e-4, cycle_len=30, use_clr_beta=(20,20,0.95,0.85), callbacks=[visc],
          best_save_name='best_compact_mbnetv2_clrb_xp_1')
learn.fit(1e-1, 1, wds=1e-4, cycle_len=30, use_clr_beta=(20,20,0.95,0.85), callbacks=[visc],
          best_save_name='best_compact_mbnetv2_clrb_xp_2')
learn.fit(1e-2, 1, wds=1e-4, cycle_len=30, use_clr_beta=(20,20,0.95,0.85), callbacks=[visc],
          best_save_name='best_compact_mbnetv2_clrb_xp_3')
| code/MobileNetv2-cifar-compact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## What is a Variable?
#
# A variable is any characteristics, number, or quantity that can be measured or counted. For example:
#
# - Age (21, 35, 62, ...)
# - Gender (male, female)
# - Income (GBP 20000, GBP 35000, GBP 45000, ...)
# - House price (GBP 350000, GBP 570000, ...)
# - Country of birth (China, Russia, Costa Rica, ...)
# - Eye colour (brown, green, blue, ...)
# - Vehicle make (Ford, VolksWagen, ...)
#
# ...are examples of variables. They are called 'variable' because the value they take may vary (and it usually does) in a population.
#
# Most variables in a data set can be classified into one of two major types:
#
# **Numerical variables** and **categorical variables**
#
# In this notebook, I will discuss Categorical variables
#
# ===================================================================================
#
#
# ## Categorical variables
#
#
# The values of a categorical variable are selected from a group of **categories**, also called **labels**. Examples are gender (male or female) and marital status (never married, married, divorced or widowed).
# Other examples of categorical variables include:
#
# - Intended use of loan (debt-consolidation, car purchase, wedding expenses, ...)
# - Mobile network provider (Vodafone, Orange, ...)
# - Postcode
#
# Categorical variables can be further categorised into ordinal and nominal variables.
#
# ### Ordinal categorical variables
#
# Categorical variable in which categories can be meaningfully ordered are called ordinal. For example:
#
# - Student's grade in an exam (A, B, C or Fail).
# - Days of the week can be ordinal with Monday = 1 and Sunday = 7.
# - Educational level, with the categories Elementary school, High school, College graduate and PhD ranked from 1 to 4.
#
# ### Nominal categorical variable
#
# There isn't an intrinsic order of the labels. For example, country of birth (Argentina, England, Germany) is nominal. Other examples of nominal variables include:
#
# - Postcode
# - Vehicle make (Citroen, Peugeot, ...)
#
# There is nothing that indicates an intrinsic order of the labels, and in principle, they are all equal.
#
#
# **To be considered:**
#
# Sometimes categorical variables are coded as numbers when the data are recorded (e.g. gender may be coded as 0 for males and 1 for females). The variable is still categorical, despite the use of numbers.
#
# In a similar way, individuals in a survey may be coded with a number that uniquely identifies them (for example to avoid storing personal information for confidentiality). This number is really a label, and the variable then categorical. The number has no meaning other than making it possible to uniquely identify the observation (in this case the interviewed subject).
#
# Ideally, when we work with a dataset in a business scenario, the data will come with a dictionary that indicates if the numbers in the variables are to be considered as categories or if they are numerical. And if the numbers are categories, the dictionary would explain what they intend to represent.
#
# =============================================================================
#
# ## Real Life example: Peer to peer lending (Finance)
#
# ### Lending Club
#
# **Lending Club** is a peer-to-peer Lending company based in the US. They match people looking to invest money with people looking to borrow money. When investors invest their money through Lending Club, this money is passed onto borrowers, and when borrowers pay their loans back, the capital plus the interest passes on back to the investors. It is a win for everybody as they can get typically lower loan rates and higher investor returns.
#
# If you want to learn more about Lending Club follow this [link](https://www.lendingclub.com/).
#
# The Lending Club dataset contains complete loan data for all loans issued through 2007-2015, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. Features include credit scores, number of finance inquiries, address including zip codes and state, and collections among others. Collections indicates whether the customer has missed one or more payments and the team is trying to recover their money.
#
# The file is a matrix of about 890 thousand observations and 75 variables. More detail on this dataset can be found in [Kaggle's website](https://www.kaggle.com/wendykan/lending-club-loan-data)
#
# Let's go ahead and have a look at the variables!
# ====================================================================================================
#
# To download the Lending Club loan book from Kaggle go to this [website](https://www.kaggle.com/wendykan/lending-club-loan-data)
#
# Scroll down to the bottom of the page, and click on the link 'loan.csv', and then click the 'download' blue button towards the right of the screen, to download the dataset.
# Unzip it, and save it to a directory of your choice.
#
# **Note that you need to be logged in to Kaggle in order to download the datasets**.
#
# If you save it in the same directory from which you are running this notebook, then you can load it the same way I will load it below.
#
# ====================================================================================================
# +
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# let's load the dataset with just a few columns and a few rows,
# to speed things up
use_cols = ['id', 'purpose', 'loan_status', 'home_ownership']
data = pd.read_csv(
    'loan.csv', usecols=use_cols).sample(
        10000, random_state=44) # set a seed for reproducibility
data.head()
# +
# let's inspect the variable home ownership,
# which indicates whether the borrowers own their home
# or if they are renting for example, among other things.
data.home_ownership.unique()
# +
# let's make a bar plot, with the number of loans
# for each category of home ownership
# note: pandas' value_counts().plot.bar() returns a matplotlib Axes,
# despite the variable being named `fig`.
fig = data['home_ownership'].value_counts().plot.bar()
fig.set_title('Home Ownership')
fig.set_ylabel('Number of customers')
# -
# The majority of the borrowers either own their house on a mortgage or rent their property. A few borrowers own their home completely. The category 'Other' seems to be empty. To be completely sure, we could print the numbers as below.
data['home_ownership'].value_counts()
# There are 2 borrowers that have other arrangements for their property. For example, they could live with their parents, or live in a hotel.
# +
# the "purpose" variable is another categorical variable
# that indicates how the borrowers intend to use the
# money they are borrowing, for example to improve their
# house, or to cancel previous debt.
data.purpose.unique()
# +
# let's make a bar plot with the number of borrowers
# within each category
fig = data['purpose'].value_counts().plot.bar()
fig.set_title('Loan Purpose')
fig.set_ylabel('Number of customers')
# -
# The majority of the borrowers intend to use the loan for 'debt consolidation' or to repay their 'credit cards'. This is quite common among borrowers. What the borrowers intend to do is to consolidate all the debt that they have on different financial items, in one single debt, the new loan that they will take from Lending Club in this case. This loan will usually provide an advantage to the borrower, either in the form of lower interest rates than a credit card, for example, or longer repayment period.
# +
# let's look at one additional categorical variable,
# "loan status", which represents the current status
# of the loan. This is whether the loan is still active
# and being repaid, or if it was defaulted,
# or if it was fully paid among other things.
data.loan_status.unique()
# +
# let's make a bar plot with the number of borrowers
# within each category
fig = data['loan_status'].value_counts().plot.bar()
fig.set_title('Status of the Loan')
fig.set_ylabel('Number of customers')
# -
# We can see that the majority of the loans are active (current) and a big number have been 'Fully paid'. The remaining labels have the following meaning:
# - Late (16-30 days): customer missed a payment
# - Late (31-120 days): customer is behind in payments for more than a month
# - Charged off: the company declared that they will not be able to recover the money for that loan ( money is typically lost)
# - Issued: loan was granted but money not yet sent to borrower
# - In Grace Period: window of time agreed with customer to wait for payment, usually, when customer is behind in their payments
# +
# finally, let's look at a variable that is numerical,
# but its numbers have no real meaning, and therefore
# should be better considered as a categorical one.
data['id'].head()
# -
# In this case, each id represents one customer. This number is assigned in order to identify the customer if needed, while
# maintaining confidentiality.
# +
# The variable has as many different id values as customers,
# in this case 10000, because we loaded randomly
# 10000 rows/customers from the original dataset.
len(data['id'].unique())
# -
# **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| Feature Engineering/Feature-Engineering-master/02.2_Categorical_variables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 4
# Add the specified code for each code cell, running the cells _in order_. <small>(This exercise was adapted from [Erle Robotics S.L.](https://www.gitbook.com/@erlerobotics))</small>
# Create a _list_ **`students`** that contains dictionaries, each representing a student. A student has a `name`, as well as _lists_ of scores for each of `homework`, `quizzes`, and `tests`. Your dictionary should contain the following dictionaries for the following students:
# - Lloyd, who earned the following scores:
# - Homework: 90, 97, 75, 92
# - Quizzes: 88, 40, 94
# - Exams: 75, 90
# - Alice, who earned the following scores:
# - Homework: 100, 92, 98, 100
# - Quizzes: 82, 83, 91
# - Exams: 89, 97
# - Tyler, who earned the following scores:
# - Homework: 0, 87, 75, 22
# - Quizzes: 0, 75, 78
# - Exams: 100, 100
#
# Print out the list after you create it.
# Gradebook: one dictionary per student, each holding the student's name and
# raw score lists for homework, quizzes and exams.
students = [
    {'name': 'Lloyd', 'homework': [90, 97, 75, 92], 'quizzes': [88, 40, 94], 'exams': [75, 90]},
    {'name': 'Alice', 'homework': [100, 92, 98, 100], 'quizzes': [82, 83, 91], 'exams': [89, 97]},
    {'name': 'Tyler', 'homework': [0, 87, 75, 22], 'quizzes': [0, 75, 78], 'exams': [100, 100]},
]
students
# Define a function **`weighted_average()`** that takes in a dictionary representing a student (as above), as well as 3 values between 0 and 1 representing the amount of weight that each of homework (default 40%), quizzes (default 40%), and tests (default 20%) receive in determing the final grade. The function should then calculate and return the overall score (between 0 and 100) based on these weights.
# - Hint: use the `sum()` function to easily sum up the scores for each category.
def weighted_average(grade_dict, homework_weight=0.4, quizzes_weight=0.4, exams_weight=0.2):
    """Return the overall score (0-100) for one student record.

    Each category score is the mean of that category's list; the overall
    score is the weighted sum of the three category means.

    Parameters:
        grade_dict: dict with 'homework', 'quizzes' and 'exams' score lists.
            NOTE(review): the exercise text says "tests", but the student
            dicts above use the key 'exams' — this follows the data.
        homework_weight: fraction of the grade from homework (default 40%).
        quizzes_weight: fraction of the grade from quizzes (default 40%).
        exams_weight: fraction of the grade from exams (default 20%).
    """
    def _mean(scores):
        # An empty category counts as 0 to avoid ZeroDivisionError.
        return sum(scores) / len(scores) if scores else 0
    return (homework_weight * _mean(grade_dict['homework'])
            + quizzes_weight * _mean(grade_dict['quizzes'])
            + exams_weight * _mean(grade_dict['exams']))
# Create a variable **`anonymous_scores`** that is a list of the average scores of all the students in the class. Homework gets 10% weight, quizzes get 30% weight, and exams get 60% weight (it's not a well-designed course). Output the variable once done.
# Define a function **`get_letter_grade()`** that takes in a score and returns a "letter grade" associated with that score based on the following criteria:
# - 90-100 is an A
# - 80-90 is a B
# - 70-80 is a C
# - 60-70 is a D
# - 0-60 is an F
# Assign a new key `grade` to each student in the `students` list that is the final letter grade for their work (using the same weights as you used for the anonymous scores).
# Print the class's average letter grade.
| exercise-4/exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1.2. For loop, while loop and If conditions
# -------
# +
L =['apple','banana','kite','cellphone']
'''Iterate over the items in a list'''
# comment
for item in L:
print(item)
# -
range(10)
L = []
for k in range(10):
print(k)
'''Append values to list L'''
L.append(10*k)
L
# +
D = {}
for i in range(5):
for j in range(5):
if i == j :
print(i,"is equal to",j)
'''update dictionary with (i,j) as key and 2*i as value'''
D.update({(i,j) : 10*i+j})
elif i!= j:
print(i,"is not equal to",j)
'''update dictionary with (i,j) as key and 2*i as value'''
D.update({(i,j) : 101*i+j})
# -
D
for i,j in zip(range(5),range(5)):
print(i,j)
for item,j in zip(['apple','banana','kite','cellphone','pen'],range(5)):
print(item,j)
for i,item in enumerate(['apple','banana','kite','cellphone']):
print(i, item)
A = [0 for k in range(10)]
A
A = [k for k in range(10)]
A
# +
L = [[1,2,3],[3,4,5],[5,7,9]]
for i in range(len(L)):
for j in range(len(L[0])):
print(L[i][j])
# -
LL = [[k+j for k in range(10)]for j in range(10)]
LL
i = 0
while i < 10:
print( i, "th turn")
i = i+2
import json
'''download'''
with open('mylist.json', 'w') as f1:
json.dump(LL,f1)
# -------
import json
'''upload'''
with open('mydata.json', 'r') as f2:
AA = json.load(f2)
for key,value in AA.items():
print("key : ", key,"|", "value: ", value)
# The DNA string is one long literal, continued across lines with backslashes.
DNA = 'ATGCATGCATATCAAGCTAGCTAGCTAGCTAGCTAGAGCTATTTAATGCTA\
GCTATATAGCGCTAGCTATAGCTAGCTAGCTAGCTAGCTAGCTAGCTAGCT\
AGCTAGCTAGCTAGCTAGCTAGCTAGCTAGCTAGCTAGCTAGCGCGCGCTA\
TATATAGAGAGAGAGAGAGACACACATATATCTCTCTCTCTCGAGATCGAT\
CGTACTAGCTAGCTAGCTAGCTAGCTAGCT'
DNA.count('A'),DNA.count('AT')
# Count the 'T' bases by hand (same result as DNA.count('T')).
count = sum(1 for base in DNA if base == 'T')
count
'I am ok'.split()
# #### Q: find sum from 0 to 1000
s = 0
for i in range(1000+1):
s = s+i
s
sum([i for i in range(1000+1)])
# #### Q: find sum from 0 to 1000 (only even)
s = 0
LE = []
for i in range(1001):
if i%2 == 0:
LE.append(i)
s = s+i
s, sum(LE)
# --------
# ### Project: Fern
import numpy as np
import json
import matplotlib.pyplot as plt
import seaborn as sns
import random as random
# %matplotlib inline
sns.set()
# https://en.wikipedia.org/wiki/Barnsley_fern
# #### Method - 1: Blind implementation
# +
import random as random

# Barnsley fern via the chaos game: repeatedly apply one of four affine maps,
# chosen with probabilities 1% / 85% / 7% / 7%
# (https://en.wikipedia.org/wiki/Barnsley_fern), collecting each point.
x = 0
y = 0
X = [x]
Y = [y]
n = 1
isprint = False
while n < 10000:
    r = random.uniform(0, 100)
    # Cumulative `elif`/`else` bounds guarantee every draw selects a map.
    # The original `a < r < b` chains silently dropped draws that landed
    # exactly on 1.0 / 86.0 / 93.0 / 100.0, desynchronizing `n` from len(X).
    if r < 1.0:
        # Stem: collapse onto the y axis.
        x, y = 0.0, 0.16 * y
    elif r < 86.0:
        # Successively smaller leaflets (main spiral).
        x, y = 0.85 * x + 0.04 * y, -0.04 * x + 0.85 * y + 1.6
    elif r < 93.0:
        # Largest left-hand leaflet.
        x, y = 0.2 * x - 0.26 * y, 0.23 * x + 0.22 * y + 1.6
    else:
        # Largest right-hand leaflet.
        x, y = -0.15 * x + 0.28 * y, 0.26 * x + 0.24 * y + 0.44
    X.append(x)
    Y.append(y)
    if isprint:
        print("step: ", n, "random number is: ", r, "coordinate is : ", x, y)
    n = n + 1
# -
# +
#for i in range(len(X)):
# print(X[i],Y[i])
# -
with open ('fern.json', 'w')as f1:
json.dump([X,Y],f1)
plt.figure(figsize = [10,12])
plt.scatter(X,Y,color = 'g',marker = '.')
plt.savefig('fern.png')
plt.savefig('fern.pdf')
plt.show()
# #### Method-2 : Manual Matrix Multiplication
# These correspond to the following transformations:
#
# $ {\displaystyle f_{1}(x,y)={\begin{bmatrix}\ 0.00&\ 0.00\ \\0.00&\ 0.16\end{bmatrix}}{\begin{bmatrix}\ x\\y\end{bmatrix}}}$
#
# ${\displaystyle f_{2}(x,y)={\begin{bmatrix}\ 0.85&\ 0.04\ \\-0.04&\ 0.85\end{bmatrix}}{\begin{bmatrix}\ x\\y\end{bmatrix}}+{\begin{bmatrix}\ 0.00\\1.60\end{bmatrix}}}$
#
# $ {\displaystyle f_{3}(x,y)={\begin{bmatrix}\ 0.20&\ -0.26\ \\0.23&\ 0.22\end{bmatrix}}{\begin{bmatrix}\ x\\y\end{bmatrix}}+{\begin{bmatrix}\ 0.00\\1.60\end{bmatrix}}}$
#
# ${\displaystyle f_{4}(x,y)={\begin{bmatrix}\ -0.15&\ 0.28\ \\0.26&\ 0.24\end{bmatrix}}{\begin{bmatrix}\ x\\y\end{bmatrix}}+{\begin{bmatrix}\ 0.00\\0.44\end{bmatrix}}}$
ITR = 100000
# One row per iteration; row t holds the point produced at step t.
x = np.zeros((ITR, 2))
# Matrix parts of the four Barnsley-fern affine maps.
A = np.array([[0.0,0.0],[0.0,0.16]])
B = np.array([[0.85,0.04],[-0.04,0.85]])
C = np.array([[0.20,-0.26],[0.23,0.22]])
D = np.array([[-0.15,0.28],[0.26,0.24]])
# Translation parts; row i pairs with the i-th matrix above.
AD = np.array([[0.0,0.0],
              [0.0,1.6],
              [0.0,1.6],
              [0.0,0.44]])
# ---------
# +
X = []
Y = []
x[0, 0] = 0.0
x[0, 1] = 0.0
t = 0
while t < ITR:
    ct = random.uniform(0, 100)
    '''pick (matrix, translation) by cumulative bounds'''
    # The `else` also covers boundary draws (exactly 1.0 / 86.0 / 93.0 /
    # 100.0), which the original chained `a < ct < b` tests silently skipped.
    if ct < 1.0:
        M, off = A, AD[0]   # AD[0] is zero, matching the original (no shift)
    elif ct < 86.0:
        M, off = B, AD[1]
    elif ct < 93.0:
        M, off = C, AD[2]
    else:
        M, off = D, AD[3]
    # Manual 2x2 matrix-vector multiply: x[t] = M @ x[t-1] + off.
    # BUG FIX: the original fourth branch zeroed x[1, p] instead of x[t, p],
    # corrupting the second point and leaving stale values in row t.
    for p in range(2):
        x[t, p] = 0.0
        for q in range(2):
            x[t, p] = x[t, p] + M[p, q] * x[t - 1, q]
        x[t, p] = x[t, p] + off[p]
    X.append(x[t, 0])
    Y.append(x[t, 1])
    t = t + 1
# -
plt.figure(figsize = [10,12])
plt.scatter(X,Y,color = 'g',marker = '.')
plt.savefig('plot/fern.pdf')
plt.show()
# ### Method 3-Numpy
'''Matrices'''
# Same four fern transformations as Method 2, this time applied with np.dot.
A = np.array([[0.0,0.0],[0.0,0.16]])
B = np.array([[0.85,0.04],[-0.04,0.85]])
C = np.array([[0.20,-0.26],[0.23,0.22]])
D = np.array([[-0.15,0.28],[0.26,0.24]])
# Translation vectors; row i belongs with the i-th matrix above.
AD = np.array([[0.0,0.0],
              [0.0,1.6],
              [0.0,1.6],
              [0.0,0.44]])
# +
u = np.array([0,0])
U = [u]
n = 1
while n < 10000:
    '''generate a random number'''
    r = random.uniform(0,100)
    # Cumulative bounds plus `else` ensure every draw (including the exact
    # boundary values 1.0 / 86.0 / 93.0 / 100.0, which the original
    # `a < r < b` chains dropped) produces a new point.
    '''1rst condition'''
    if r < 1.0:
        u = np.dot(A, u)
    elif r < 86.0:
        '''second condition'''
        u = np.dot(B, u) + AD[1]
    elif r < 93.0:
        '''third condition'''
        u = np.dot(C, u) + AD[2]
    else:
        '''fourth condition'''
        u = np.dot(D, u) + AD[3]
    U.append(u)
    '''update n'''
    n = n + 1
# -
plt.figure(figsize = [10,12])
for item in U:
plt.scatter(item[0],item[1],color = 'g',marker = '.')
plt.show()
# ### The End
| 1.2.LoopsAndConditions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Training an XOR network using PySyft
#
# In this notebook we'll be training an XOR network using PySyft.
# What's special about this is that the training data will be divided between two domain nodes that are owned by different parties.
import syft as sy
import numpy as np
from syft.core.adp.entity import DataSubject
from syft.core.tensor.smpc.mpc_tensor import MPCTensor
sy.logger.remove()
# + [markdown] tags=[]
# ## Party 1 - OpenMined's team in the UK
# -
# Party 1 logs into their domain node
p1_domain_node = sy.login(email="<EMAIL>",password="<PASSWORD>",port=8081)
# +
# They take their training data for the XOR network
training_data_p1 = np.array(
[[0,0,1],
[0,1,1]], dtype=np.int64
)
# rasswanth = DataSubject(name="Rasswanth")
training_data_p1 = sy.Tensor(training_data_p1).private(
min_val=0,
max_val=1,
entities=["Rasswanth"] * training_data_p1.shape[0],
ndept=True
)
training_targets_p1 = np.array([[0,1]]).T
training_targets_p1 = sy.Tensor(training_targets_p1).private(
min_val=0,
max_val=1,
entities=["Rasswanth"] * training_data_p1.shape[0],
ndept=True
)
p1_domain_node.load_dataset(
assets={
"training_data":training_data_p1,
"training_targets":training_targets_p1
},
name="Our training data for XOR networks!",
description="Collected on Jan 27 2022"
)
# This is just used to check our results afterwards.
# t1 = sy.Tensor(np.array([1,2,3])).send(p1)
# -
# <hr>
# Let's check and make sure our dataset was uploaded!
p1_domain_node.datasets
# Looks good!
#
# Now the last thing this team needs to do is to create an account for the Data Scientist who will be training the network using their data.
p1_domain_node.users.create(
**{
"name": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"budget": 100
}
)
p1_domain_node.users
# <hr>
# + [markdown] tags=[]
# ## Party 2 - Facebook's team in Menlo Park
# +
DOMAIN2_PORT = 8082
p2_domain_node = sy.login(email="<EMAIL>",password="<PASSWORD>",port=DOMAIN2_PORT)
training_data_p2 = np.array([
[1,0,1],
[1,1,1]]
)
training_data_p2 = sy.Tensor(training_data_p2).private(
min_val=0,
max_val=1,
entities=["Rasswanth"] * training_data_p2.shape[0],
ndept=True
)
training_targets_p2 = np.array([[1,0]]).T
training_targets_p2 = sy.Tensor(training_targets_p2).private(
min_val=0,
max_val=1,
entities=["Rasswanth", "Rasswanth"],
ndept=True
)
p2_domain_node.load_dataset(
assets={
"training_data":training_data_p2,
"training_targets":training_targets_p2
},
name="Our training data for XOR networks!",
description="Collected on Jan 27 2022"
)
# Used to check our results afterwards
# t2 = sy.Tensor(np.array([1,2,3])).send(p2)
# -
p2_domain_node.datasets
# Looks good!
#
# Now the last thing this team needs to do is to create an account for the Data Scientist who will be training the network using their data.
p2_domain_node.users.create(
**{
"name": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"budget": 100
}
)
p2_domain_node.users
# <hr>
# <hr>
# ### Data Scientists
ds_domain1 = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8081)
ds_domain1.datasets
ds_domain2 = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8082)
ds_domain2.datasets
ds_domain1.privacy_budget
ds_domain2.privacy_budget
# +
# Combine the two domains' private datasets into single MPC-shared X and y.
# NOTE(review): `p1`, `p2`, `X` and `y` are not defined anywhere above in this
# notebook (the `.send(...)` lines are commented out), so this cell raises
# NameError as written — it appears to be a placeholder for the replaced code
# shown below. Confirm the intended inputs before running.
X, y = sy.concatenate(p1, p2, X, y)
## This replaces the following code:
# d1 = X.send(p1)
# X = MPCTensor(secret=d1,parties=parties,shape=X.shape)
# d2 = y.send(p1)
# y = MPCTensor(secret=d2,parties=parties,shape=y.shape)
# -
def relu(x, deriv=False):
    """Rectified linear unit, elementwise.

    With deriv=False returns max(x, 0); with deriv=True returns the
    derivative mask (True/1 where x > 0, else False/0 — numpy promotes the
    booleans when they are multiplied into gradients).
    """
    if deriv:  # idiomatic truth test instead of `deriv == True`
        return x > 0
    return x * (x > 0)
layer0_weights = 2*np.random.random((3,4)) - 1
layer1_weights = 2*np.random.random((4,1)) - 1
# + active=""
#
# + active=""
#
# -
# One gradient step per iteration (a single epoch here) of a 3-4-1 MLP on the
# MPC-shared XOR data. The trailing `; <tensor>.block` on each line presumably
# waits for the asynchronous SMPC computation to finish before the next
# statement runs — TODO confirm against the MPCTensor API.
for j in range(1):
    # Forward propagation
    layer1_inputs = relu(X @ layer0_weights) ; layer1_inputs.block
    layer2_inputs = relu(layer1_inputs @ layer1_weights) ; layer2_inputs.block
    # Calculate errors
    # delta = (target - output) * relu'(pre-activation), backpropagated through layer 1.
    layer2_inputs_delta = (y - layer2_inputs)* relu(layer2_inputs,deriv=True) ; layer2_inputs_delta.block
    layer1_inputs_delta = (layer2_inputs_delta@(layer1_weights.T)) * relu(layer1_inputs,deriv=True) ; layer1_inputs_delta.block
    # Update weights
    # NOTE(review): weights move *along* the delta with an implicit step size
    # of 1 (no explicit learning rate).
    layer1_weights = layer1_weights + layer1_inputs.T @ layer2_inputs_delta ; layer1_weights.block
    layer0_weights = layer0_weights + X.T @ layer1_inputs_delta ; layer0_weights.block
# Let's see how our privacy budget changed as a result of training for a single epoch:
ds_domain1.privacy_budget
ds_domain2.privacy_budget
# And voila! We've trained a neural network using PySyft's adversarial differential privacy system and its secure multiparty computation system working in tandem.
#
# <hr>
# <hr>
# Demo ends above- the cells below are for temporary Debugging:
X
layer0_weights
layer1_inputs = relu(X @ layer0_weights) ; layer1_inputs.block
layer2_inputs = relu(layer1_inputs @ layer1_weights) ; layer2_inputs.block
layer2_inputs_delta = (y - layer2_inputs)* relu(layer2_inputs,deriv=True) ; layer2_inputs_delta.block
layer1_inputs_delta = (layer2_inputs_delta@(layer1_weights.T)) * relu(layer1_inputs,deriv=True) ; layer1_inputs_delta.block
layer1_weights = layer1_inputs.T @ layer2_inputs_delta + layer1_weights; layer1_weights.block
layer0_weights = X.T @ layer1_inputs_delta + layer0_weights; layer0_weights.block
res = X.T @ layer1_inputs_delta
v1 = X.T
v2 = layer1_inputs_delta
def get_val(val):
    # Reconstruct a plaintext value from an MPC tensor by fetching copies of
    # its shares and summing them.
    # NOTE(review): both lines fetch val.child[0]; for a two-party share
    # reconstruction the second fetch presumably should be val.child[1] —
    # confirm against the MPCTensor share layout before relying on this.
    t1 = val.child[0].get_copy()
    t2 = val.child[0].get_copy()
    return t1+t2
v1 = get_val(v1)
v2 = get_val(v2)
v1.child.min_vals.to_numpy() @ v2.child.min_vals.to_numpy()
v2.child.min_vals
layer0_weights_dp = layer0_weights.publish(sigma=1e4)
layer1_weights_dp = layer1_weights.publish(sigma=1e4)
print(layer0_weights_dp.get_copy())
print(layer1_weights_dp.get_copy())
| notebooks/smpc/ModelTraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.008956, "end_time": "2020-03-28T16:40:20.916491", "exception": false, "start_time": "2020-03-28T16:40:20.907535", "status": "completed"} tags=[]
# # COVID-19 Tracking Europe Cases
# > Tracking coronavirus total cases, deaths and new cases in Europe by country.
#
# - comments: true
# - author: <NAME>
# - categories: [overview, interactive, europe]
# - hide: true
# - permalink: /covid-overview-europe/
# + papermill={"duration": 0.018508, "end_time": "2020-03-28T16:40:20.940634", "exception": false, "start_time": "2020-03-28T16:40:20.922126", "status": "completed"} tags=[]
#hide
print('''
Example of using jupyter notebook, pandas (data transformations), jinja2 (html, visual)
to create visual dashboards with fastpages
You see also the live version on https://gramener.com/enumter/covid19/europe.html
''')
# + papermill={"duration": 0.35433, "end_time": "2020-03-28T16:40:21.300590", "exception": false, "start_time": "2020-03-28T16:40:20.946260", "status": "completed"} tags=[]
#hide
import numpy as np
import pandas as pd
from jinja2 import Template
from IPython.display import HTML
# + papermill={"duration": 0.014005, "end_time": "2020-03-28T16:40:21.320573", "exception": false, "start_time": "2020-03-28T16:40:21.306568", "status": "completed"} tags=[]
#hide
from pathlib import Path
if not Path('covid_overview.py').exists():
# ! wget https://raw.githubusercontent.com/pratapvardhan/notebooks/master/covid19/covid_overview.py
# + papermill={"duration": 0.329265, "end_time": "2020-03-28T16:40:21.655457", "exception": false, "start_time": "2020-03-28T16:40:21.326192", "status": "completed"} tags=[]
#hide
import covid_overview as covid
# + papermill={"duration": 0.157197, "end_time": "2020-03-28T16:40:21.818528", "exception": false, "start_time": "2020-03-28T16:40:21.661331", "status": "completed"} tags=[]
#hide
COL_REGION = 'Country/Region'
europe_countries = covid.mapping['df'].pipe(lambda d: d[d['Continent'].eq('Europe')])['Name'].values
filter_europe = lambda d: d[d['Country/Region'].isin(europe_countries)]
kpis_info = [
{'title': 'Italy', 'prefix': 'IT'},
{'title': 'Spain', 'prefix': 'SP'},
{'title': 'Germany', 'prefix': 'GE'}]
data = covid.gen_data(region=COL_REGION, filter_frame=filter_europe, kpis_info=kpis_info)
# + papermill={"duration": 0.258793, "end_time": "2020-03-28T16:40:22.083743", "exception": false, "start_time": "2020-03-28T16:40:21.824950", "status": "completed"} tags=[]
#hide_input
template = Template(covid.get_template(covid.paths['overview']))
dt_cols, LAST_DATE_I = data['dt_cols'], data['dt_last']
html = template.render(
D=data['summary'], table=data['table'],
newcases=data['newcases'].loc[:, dt_cols[LAST_DATE_I - 40]:dt_cols[LAST_DATE_I]],
COL_REGION=COL_REGION,
KPI_CASE='Europe',
KPIS_INFO=kpis_info,
LEGEND_DOMAIN=[5, 50, 500, np.inf],
np=np, pd=pd, enumerate=enumerate)
HTML(f'<div>{html}</div>')
# + [markdown] papermill={"duration": 0.012879, "end_time": "2020-03-28T16:40:22.108707", "exception": false, "start_time": "2020-03-28T16:40:22.095828", "status": "completed"} tags=[]
# Visualizations by [<NAME>](https://twitter.com/PratapVardhan)[^1]
#
# [^1]: Source: ["COVID-19 Data Repository by Johns Hopkins CSSE"](https://systems.jhu.edu/research/public-health/ncov/) [GitHub repository](https://github.com/CSSEGISandData/COVID-19). Link to [notebook](https://github.com/pratapvardhan/notebooks/blob/master/covid19/covid19-overview.ipynb), [orignal interactive](https://gramener.com/enumter/covid19/europe.html)
| _notebooks/2020-03-21-covid19-overview-europe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Sum sin functions over a range of frequencies
# +
import numpy as np
import matplotlib.pyplot as plt
import obspy
from obspy import Stream
from obspy import Trace
from obspy import UTCDateTime
#time pars
t0 = 0
t1 = 5*np.pi
nt = 4001
#frequency pars
nf = 100
f1 = 0.5
f2 = 40
wf1 = 2
wf2 = 18
#data arrays
t = np.linspace(t0,t1,nt,endpoint=True)
print(t)
d = np.zeros_like(t)
#sum frequency range
freqs = np.linspace(f1,f2,nf,endpoint=True)
for f in freqs:
d += np.sin(2*np.pi*f*t)
d += np.cos(2*np.pi*f*t)
#make obspy Stream(Trace())
stats = {'network': 'TAC', 'station': 'Tyler', 'location': '',
'channel': 'KBPI', 'npts': len(d), 'delta': t[1]-t[0]}
stats['starttime'] = UTCDateTime(2020, 7, 16, 18, 49, 0, 0)
st = Stream([Trace(data=d, header=stats)])
cst = st.copy()
cst.filter('bandpass',freqmin=wf1,freqmax=wf2,corners=4,zerophase=True)
#cst.filter('bandpass',freqmin=wf1,freqmax=wf2)
#plot Stream(Trace())
fig, ax = plt.subplots(1,figsize=(12,4))
#ax.plot(t,d,c='black')
pcolors = ['black','red']
ax.plot(st[0].times(),st[0].data,c=pcolors[0],zorder=0)
ax.plot(cst[0].times(),cst[0].data,c=pcolors[1],zorder=1)
station = st[0].stats.station
ptitle = 'Trace-0: %s (%s)' %(str(station),pcolors[0])
ax.set_title(ptitle)
ax.grid(b=True, which='major', color='#666666', linestyle='-')
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.show()
st[0].spectrogram()
cst[0].spectrogram()
# -
| notebooks/Test Obspy Filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis
# * As expected, the weather becomes significantly warmer as one approaches the equator (0 Deg. Latitude). More interestingly, however, is the fact that the southern hemisphere tends to be warmer this time of year than the northern hemisphere. This may be due to the tilt of the earth.
# * There is no strong relationship between latitude and humidity. However, there is a noticeable cluster of cities with high humidity at latitudes 60-65.
# * There is no strong relationship between latitude and cloudiness. However, it is interesting to see that a strong band of cities sits at 0, 40, and 90% cloudiness.
# * There is no strong relationship between latitude and wind speed. However, in the northern hemisphere at latitude 70, a few cities have the strongest winds, at least 25 mph.
#
#dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
from citipy import citipy
from datetime import datetime
from random import uniform
from api_keys import api_key
# +
#create a df to lat, lon
latlng_df = pd.DataFrame()
# Create a set of random lat and lng combinations
latlng_df['lat'] = np.random.uniform(low=-90.000, high=90.000, size=1500)
latlng_df['lng'] = np.random.uniform(low=-180.000, high=180.000, size=1500)
# -
latlng_df.head()
# # Generate Cities List
# +
# # Identify nearest city for each lat, lng combination and country
# just in case there may be multiple same city name but at different locations.
cities = []
countries = []
for index, row in latlng_df.iterrows():
city = citipy.nearest_city(row['lat'],row['lng'])
cities.append(city.city_name)
countries.append(city.country_code)
latlng_df['City'] = cities
latlng_df['Country'] = countries
# -
len(latlng_df)
latlng_df = latlng_df.drop_duplicates(['City','Country'])
len(latlng_df)
latlng_df.head()
cities_list = latlng_df.drop_duplicates(["City","Country"]).reset_index()
cities_list.count()
# # Perform API Calls
# Perform a weather check on each city using a series of successive API calls.
# Include a print log of each city as it's being processed (with the city number and city name).
# +
# Accumulators for one column each of the final weather DataFrame.
city_data = []  # NOTE(review): never populated below; kept for compatibility
temp = []
max_temp = []
humidity = []
clouds = []
winds = []
countries = []
city_name = []
city_lat = []
city_lng = []
counter = 0

base_url = "https://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
query_url = base_url + "appid=" + api_key + "&units=" + units + "&q="

# Query OpenWeatherMap once per candidate city. A missing "id" field in the
# JSON payload means the city was not found, and the record is skipped.
for index, row in cities_list.iterrows():
    counter += 1
    city = row["City"]
    response = requests.get(query_url + city).json()
    search_city_id = response.get("id")  # fetch once instead of calling .get("id") twice
    if search_city_id:
        temp.append(response['main']['temp'])
        max_temp.append(response['main']['temp_max'])
        city_name.append(response["name"])
        city_lat.append(response['coord']["lat"])
        city_lng.append(response['coord']["lon"])
        countries.append(response["sys"]["country"])
        clouds.append(response["clouds"]["all"])
        humidity.append(response["main"]["humidity"])
        winds.append(response["wind"]["speed"])
        print(f"Processing Record {counter} | {city}")
    else:
        print(f"City not found. Skipping... | {city}")
# +
weather_data = pd.DataFrame({ "City" : city_name,
"Country": countries,
"lat": city_lat,
"lng": city_lng,
"Temp": temp,
"Max Temp": max_temp,
"humidity": humidity,
"Cloudiness": clouds,
"Wind Speed": winds })
# -
#column order
weather_data = weather_data[["City","Country","lat","lng","Temp","Max Temp","humidity","Cloudiness","Wind Speed"]]
weather_data.head()
weather_data.to_csv("weather.csv",encoding="utf-8",index=False)
# # Latitude vs. Temperature Plot
# +
plt.scatter(weather_data["lat"],weather_data["Max Temp"],marker="o",edgecolor="black")
plt.title("City Latitude vs. Max Temperature (04/21/18)")
plt.ylabel("Max Temperature (F)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("Max_Temp_vs_Latitude_plots.png")
plt.show()
# -
# # Latitude vs. Humidity Plot
# +
plt.scatter(weather_data["lat"],weather_data["humidity"],marker="o",edgecolor="black")
plt.title("City Latitude vs. Humidity (04/21/18)")
plt.ylabel("Humidity (%)")
plt.xlabel("Latitude")
plt.grid(True)
plt.ylim(0,100)
plt.savefig("Humidity_vs_Latitude_plots.png")
plt.show()
# -
# # Latitude vs. Cloudiness Plot
# +
plt.scatter(weather_data["lat"],weather_data["Cloudiness"],marker="o",edgecolor="black")
plt.title("City Latitude vs. Cloudiness (04/21/18)")
plt.ylabel("Cloudiness (%)")
plt.xlabel("Latitude")
plt.grid(True)
plt.ylim(0,100)
plt.savefig("Cloudiness_vs_Latitude_plots.png")
plt.show()
# -
# # Latitude vs. Wind Speed Plot
# +
plt.scatter(weather_data["lat"],weather_data["Wind Speed"],marker="o",edgecolor="black")
plt.title("City Latitude vs. Wind Speed (04/21/18)")
plt.ylabel("Wind Speed (mpg)")
plt.xlabel("Latitude")
plt.grid(True)
#plt.ylim(0,30)
plt.savefig("WindSpeed_vs_Latitude_plots.png")
plt.show()
# -
| .ipynb_checkpoints/WeatherPy_final-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DS Automation Assignment
# Using our prepared churn data from week 2:
# - use pycaret to find an ML algorithm that performs best on the data
# - Choose a metric you think is best to use for finding the best model; by default, it is accuracy but it could be AUC, precision, recall, etc. The week 3 FTE has some information on these different metrics.
# - save the model to disk
# - create a Python script/file/module with a function that takes a pandas dataframe as an input and returns the probability of churn for each row in the dataframe
# - your Python file/function should print out the predictions for new data (new_churn_data.csv)
# - the true values for the new data are [1, 0, 0, 1, 0] if you're interested
# - test your Python module and function with the new data, new_churn_data.csv
# - write a short summary of the process and results at the end of this notebook
# - upload this Jupyter Notebook and Python file to a Github repository, and turn in a link to the repository in the week 5 assignment dropbox
#
# *Optional* challenges:
# - return the probability of churn for each new prediction, and the percentile where that prediction is in the distribution of probability predictions from the training dataset (e.g. a high probability of churn like 0.78 might be at the 90th percentile)
# - use other autoML packages, such as TPOT, H2O, MLBox, etc, and compare performance and features with pycaret
# - create a class in your Python module to hold the functions that you created
# - accept user input to specify a file using a tool such as Python's `input()` function, the `click` package for command-line arguments, or a GUI
# - Use the unmodified churn data (new_unmodified_churn_data.csv) in your Python script. This will require adding the same preprocessing steps from week 2 since this data is like the original unmodified dataset from week 1.
import pandas as pd
df = pd.read_csv('updated_churn_data.csv', index_col='customerID')
df
# !conda install -c conda-forge pycaret -y
from pycaret.classification import setup, compare_models, predict_model, save_model, load_model
automl = setup(df, target='Churn')
automl[6]
best_model = compare_models()
best_model
best = compare_models(sort = 'Kappa')
# I selected Kappa as a metric I thought could be the best to use for the model, knowing it produced
# the second-lowest percentage. Interestingly, after I did that, the AutoML reports Ada as
# the best possible model for my dataset. I will try again, sorting by the AUC metric instead.
best = compare_models(sort = 'AUC')
# Using AUC as the metric for my test, logistic regression again returns as the best model to use
# for my data set. If I want to return the top three models for my data, based on the default
# metric of accuracy, I can run the code below.
top3 = compare_models(n_select = 3)
df.iloc[-1].shape
df.iloc[-2:-1].shape
# You can see that the two shapes differ because df.iloc[-1].shape only returns the total number
# of columns, since we specified indexing of a 1D array (a single row as a Series).
predict_model(best_model, df.iloc[-2:-1])
# We can see this line of code creates a score column with the probability of class 1. It also
# creates a 'label' column with the predicted label, where the score is rounded up to 1 if it
# is greater than or equal to 0.5.
save_model(best_model, 'LR')
# We save our trained model, based on our best-model comparison run earlier, so we can use
# it in a Python file later.
# +
import pickle
with open('LR_model.pk', 'wb') as f:
pickle.dump(best_model, f)
# -
with open('LR_model.pk', 'rb') as f:
loaded_model = pickle.load(f)
new_data = df.iloc[-2:-1].copy()
new_data.drop('Churn', axis=1, inplace=True)
loaded_model.predict(new_data)
loaded_lr = load_model('LR')
predict_model(loaded_lr, new_data)
# I saved my pycaret model and tested it by loading it and making predictions to make sure it works, which it does.
# +
# Display the helper script's source inline in the notebook.
from IPython.display import Code
Code('predict_churn.py')
# -
# %run predict_churn.py
# I created a separate Python module to take in new data and make a prediction.
# I then import the code and test it to make sure it reads and pulls up the necessary data.
# I can see that we have binary data returning for churn and no churn, so the model is working OK, but it is
# not perfect. I need to look out for false positives and false negatives to ensure we understand our new data correctly.
# # Summary
# Write a short summary of the process and results here.
| Week_5_assignment_starter_RLee.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''diplom_env'': conda)'
# language: python
# name: python37664bitdiplomenvconda096ff76efae746af98b52aeb23852b0d
# ---
# + [markdown] id="sXR7SX9fXuXM"
# # Problem Set 1 (93 points)
# + [markdown] id="mDf62nPpXuXR"
# ## Important information
# We provide signatures of the functions that you have to implement. Make sure you follow the signatures defined, otherwise your coding solutions will not be graded.
# + [markdown] id="2D9M7n2oXuXT"
# # Problem 1 (Theoretical tasks) (36 pts)
#
# _1._
# - (1 pts) what are the constants $C_1$ and $C_2$ such that $C_1 \|x\|_{2} \leq \|x\|_{\infty} \leq C_2 \| x\|_{2}$ for any vector $x$
# - (5 pts) Prove that $\| U A \|_F = \| A U \|_F = \| A \|_F$ for any unitary matrix $U$.
# - (5 pts) Prove that $\| U A \|_2 = \| A U \|_2 = \| A \|_2$ for any unitary matrix $U$.
#
# _2._
# - (5 pts) Using the results from the previous subproblem, prove that $\| A \|_F \le \sqrt{\mathrm{rank}(A)} \| A \|_2$. _Hint:_ SVD will help you.
# - (5 pts) Show that for any $m, n$ and $k \le \min(m, n)$ there exists $A \in \mathbb{R}^{m \times n}: \mathrm{rank}(A) = k$, such that $\| A \|_F = \sqrt{\mathrm{rank}(A)} \| A \|_2$. In other words, show that the previous inequality is not strict.
# - (5 pts) Prove that if $\mathrm{rank}(A) = 1$, then $\| A \|_F = \| A \|_2$.
# - (5 pts) Prove that $\| A B \|_F \le \| A \|_2 \| B \|_F$.
#
# _3._
#
# - (2 pts) Let $U \in \mathbb{C}^{n \times k}, k < n$ be a matrix so that $U^*U = I_k$.
# Find a pseudoinverse of $UU^*$
# - (3 pts) Compute SVD of the matrix analytically, i.e. provide the analytical expression for every natural $n$
#
# $$
# A = \begin{bmatrix}
# 1\\
# \sqrt{3}\\
# \sqrt{5}\\
# \vdots\\
# \sqrt{2n-1}
# \end{bmatrix}
# \begin{bmatrix}
# -1 & 1 & -1 & \ldots & (-1)^{n}
# \end{bmatrix}
# $$
# + id="rBaO6yWvXuXV"
# Your solution is here
# + [markdown] id="FVrgoNOgXuXk"
# **1.1**
#
# 1.1.1
#
# Let's suppose $x \in \mathbb{R}^n$. Then:
# \begin{gather*}
# \Vert x \Vert_2 = \sqrt{\sum |x_i|^2} \leq \sqrt{n \max\limits_{i \in [n]} |x_i|^2} = \sqrt{n} \max\limits_{i \in [n]} |x_i| = \sqrt{n} \Vert x \Vert_{\infty}
# \Rightarrow \frac{1}{\sqrt{n}} \Vert x \Vert_2 \leq \Vert x \Vert_{\infty} \\
# \Vert x \Vert_{\infty} = \sqrt{\max\limits_{i \in [n]} |x_i|^2} \leq \sqrt{\sum |x_i|^2} = \Vert x \Vert_2 \Rightarrow \Vert x \Vert_{\infty} \leq \Vert x \Vert_2
# \end{gather*}
#
# 1.1.2
# \begin{gather*}
# \Vert U A \Vert_{F} = \sqrt{\text{trace} A^* U^* U A} = \sqrt{ \text{trace} A^* A} = \Vert A \Vert_F = \sqrt{ \text{trace} I A^* A} = \sqrt{\text{trace} U U^* A^* A} = \sqrt{\text{trace} U^* A^* A U} = \Vert AU \Vert_F
# \end{gather*}
#
# 1.1.3
#
# Let's prove the following statement:
#
# \textbf{St 1}
# \par Let's denote: $S(0, 1) = \{x \in \mathbb{R}, \Vert x \Vert_2 = 1\}$. If $U$ is unitary matrix, consider $U S(0, 1) := \{y \in \mathbb{R}\, ; \, y = U x\,,\, x \in S(0, 1) \}$. Then $U S(0, 1) = S(0, 1)$
#
# $\blacktriangleright$
# Let's consider $x \in S(0, 1)$. Consider $y = U* x$. Since $\Vert U* x \Vert_2 = \sqrt{ x* U U* x} = \sqrt{x* x} = \Vert x \Vert_2 = 1$, so, $y \in S(0, 1)$. Moreover, $U y = U U* x = x$.
# So, finally: $$\forall x \in S(0, 1) \, \exists y \in S(0, 1)\, : \, x = U y$$ which proves the statement.
# $\blacktriangleleft$
#
# Move on to our main task:
#
# \begin{gather*}
# \textbf{a) } \Vert UA \Vert_2 = \sup\limits_{\Vert x \Vert_2 = 1} \Vert UAx \Vert_2 = \sup \limits_{\Vert x \Vert_2 = 1} \sqrt{x^* A^* U* U A x} = \sup \limits_{\Vert x \Vert_2 = 1} \sqrt{x^* A^* A x} = \Vert A \Vert_2 \\
# \textbf{b) } \Vert AU \Vert_2 = \sup\limits_{\Vert x \Vert_2 = 1} \Vert A Ux \Vert_2 = \sup\limits_{\begin{subarray}{l}
# y = U x\\
# x \in S(0, 1)
# \end{subarray}} \Vert A y \Vert_2 \overset{\textbf{St 1}}{=} \sup\limits_{y \in S(0, 1)} \Vert Ay \Vert_2 = \Vert A \Vert_2
# \end{gather*}
# \newline
#
# **1.2**
#
# 1.2.1
#
# Let's consider SVD decomposition of $A$: $A = U \Sigma V*$, where $U, V$ are both left unitary. Then:
# \begin{gather*}
# \Vert A \Vert_F^2 = \text{trace} A^* A = \text{trace} V \Sigma U^* U \Sigma V^* = \text{trace} \Sigma^2 = \sum \sigma_i^2(A)
# \end{gather*}
# Here $\sigma_i(A)$ are singular values of $A$.
# From Singular value decomposition theorem (from the lecture) we know, that count of positive singular values equals to $\text{rank} A$
# So, we conclude:
# \begin{gather*}
# \Vert A \Vert_F^2 = \sum\limits_{i = 1}^{\text{rank} A} \sigma_i^2(A) \leq \text{rank} (A) \sigma_1^2(A) = \text{rank} (A) \Vert A \Vert_2^2
# \end{gather*}
# The last inequality gives us the solution for the problem
#
# 1.2.2
#
# Given $n, m, k$, let's consider the following block matrix:
# $$
# A = \left[\begin{array}{@{}c|c@{}}
# I_k & 0_{k \times (n - k)} \\
# \hline
# 0_{(m - k) \times k} & 0_{(m-k) \times (n-k)}
# \end{array}\right]
# $$
#
# Obviously, $\Vert A \Vert_F = \sqrt{k}$, $\text{rank} A = k$, $\Vert A \Vert_2 = 1$, so $\Vert A \Vert_F = \sqrt{\text{rank} A} \Vert A \Vert_2$
#
# 1.2.3
#
# In case $\text{rank} A = 1$ we have the following:
# \begin{gather*}
# \Vert A \Vert_F^2 = \sum\limits_{i = 1}^{\text{rank} A} \sigma_i^2(A) = \sigma_1^2(A) = \Vert A \Vert_2^2
# \end{gather*}
# The equality above gives us the appropriate proof.
#
# 1.2.4
#
# We have to prove, that $\Vert A B \Vert_F \leq \Vert A \Vert_2 \Vert B \Vert_F \Leftrightarrow \Vert A B \Vert_F^2 \leq \Vert A \Vert_2^2 \Vert B \Vert_F^2$
#
# Let's $A \in \mathbb{C}^{n \times m} \, , \, B \in \mathbb{C}^{m \times k}$. Besides, I will use the following fact (which we use in the proof of Singular Value decomposition: $A^*A = V \Sigma^2 V^*$, where $V$ is unitary matrix, and $\Sigma = \text{diag}( \sigma_1(A), \sigma_2(A), \dots ,\sigma_k(A))$. Actually, we can treat this decomposition as special case of Schur theorem with hermitian positive semidefined matrix $A^* A$.
#
# So, we have the following:
# \begin{gather*}
# \Vert A B \Vert_F^2 \leq \Vert A \Vert_2^2 \Vert B \Vert_F^2 \Leftrightarrow \\
# \text{trace} ( B^* A^* A B ) \leq \sigma_1^2(A) \text{trace} (B^* B) \Leftrightarrow \\
# \text{trace} ( A^* A B B^* ) \leq \sigma_1^2(A) \text{trace} (B B^*) \Leftrightarrow \\
# \text{trace} \left( \left[\sigma_1^2(A) I_k - A^* A\right] B B^* \right) \geq 0 \Leftrightarrow \\
# \text{trace} \Big( V \left[ \sigma_1^2(A) I_k - \text{diag} (\sigma_1^2(A), \dots, \sigma_k^2(A))\right] V^* B B^* \Big) \geq 0 \Leftrightarrow \\
# \text{trace} \Big( \text{diag}\left(0, \sigma_1^2(A) - \sigma_2^2(A), \dots, \sigma_1^2(A) - \sigma_k^2(A)\right) V^* B B^* V \Big) \geq 0
# \end{gather*}
#
# Let's note, that $\text{diag}\left(0, \sigma_1^2(A) - \sigma_2^2(A), \dots, \sigma_1^2(A) - \sigma_k^2(A)\right)$ is diagonal matrix with non-negative elements. Let's denote:
# \begin{gather*}
# \text{diag}\left(0, \sqrt{\sigma_1^2(A) - \sigma_2^2(A)}, \dots, \sqrt{\sigma_1^2(A) - \sigma_k^2(A)}\right) =: C \Rightarrow \\
# \Rightarrow \text{diag}\left(0, \sigma_1^2(A) - \sigma_2^2(A), \dots, \sigma_1^2(A) - \sigma_k^2(A)\right) = C C^*
# \end{gather*}
#
# So, we finally obtain:
# \begin{gather*}
# \Vert A B \Vert_F^2 \leq \Vert A \Vert_2^2 \Vert B \Vert_F^2 \Leftrightarrow \text{trace}(CC^*V^* B B^* V) \geq 0 \Leftrightarrow \text{trace}(C^*V^* B B^*VC) \geq 0 \Leftrightarrow \Vert B^* V C \Vert_F^2 \geq 0
# \end{gather*}
#
# The last inequality holds true which proves the desired inequality.
#
# **1.3**
#
#
# 1.3.1
#
# *Not yet solved*
#
# 1.3.2
#
# We want to decompose $A$ in the following way: $A = U \Sigma V^*$, where $U$ and $V$ are left-unitary matrices of size $n \times 1$ and $\Sigma$ is $1 \times 1$ matrix.
#
# We have:
# \begin{gather*}
# \begin{bmatrix}
# 1, \sqrt{3}, \dots , \sqrt{2n-1}
# \end{bmatrix}
# \begin{bmatrix}
# 1\\
# \sqrt{3}\\
# \sqrt{5}\\
# \vdots\\
# \sqrt{2n-1}
# \end{bmatrix} = \sum\limits_{i = 1}^{n} (2i - 1) = n ( n + 1) - n = n^2
# \end{gather*}
#
# Then,
# \begin{gather*}
# \begin{bmatrix}
# -1 & 1 & -1 & \ldots & (-1)^{n}
# \end{bmatrix}
# \begin{bmatrix}
# -1\\1\\-1\\\vdots\\(-1)^{n}
# \end{bmatrix} = \sum\limits_{i = 1}^{n} 1 = n
# \end{gather*}
#
# So, let's introduce $U$, $V$ and $\Sigma$ as follows:
# \begin{gather*}
# U = \frac{1}{n} \begin{bmatrix}
# 1\\\sqrt{3}\\\sqrt{5}\\\vdots\\\sqrt{2n-1}
# \end{bmatrix} \, ; \, V = \frac{1}{\sqrt{n}} \begin{bmatrix}
# -1\\1\\-1\\\vdots\\(-1)^{n}
# \end{bmatrix} \, ; \, \Sigma = n^{3/2} I_1
# \end{gather*}
#
# It gives us the desired decomposition
# + [markdown] id="INzaZGxiXuXl"
# # Problem 2 (Matrix calculus) (12 pts)
#
# _1._ (10 pts) Consider the following function
#
# $$ F(U, V) = \frac{1}{2}\|X - UV\|_F^2, $$
#
# where $X \in \mathbb{R}^{n \times n}$, $U \in \mathbb{R}^{n \times k}$ and $V \in \mathbb{R}^{k \times n}$ and $k < n$.
#
# - (2 pts) Derive analytical expression for the gradient of the function $F$ with respect to $U$
# - (2 pts) Derive analytical expression for the gradient of the function $F$ with respect to $V$
# - (6 pts) Estimate computational complexity of computing these gradients (in big-O notation). Also, compare timing of analytical computations versus timing the automatic differentiation with JAX. Study some range of dimensions to extract asymptotic complexity and make a conclusion, what approach is faster. Plot the dependence of running time on the dimension (row and column separately) to proof your conclusion
#
# _2._ (2 pts) Derive analytical expression for the gradient of the function $f$:
#
# $$ R(x) = \frac{(Ax, x)}{(x, x)}, $$
#
# where $A$ is a symmetric real matrix. Why the gradient of this function is important in NLA you will know in the lectures later.
# + [markdown] id="2uC4zUq-X0bJ"
# ### Solutions for tasks 2.1.1 , 2.1.2
#
# **2.1**
#
# We have the following expression for $F(U, V) = \frac{1}{2} \Vert X - UV \Vert_F^2$ using Einstein sommation notation:
# \begin{gather*}
# F(U, V) = \frac{1}{2} \text{trace} \left(X^TX - V^TU^TX - X^TUV + V^TU^TUV\right) = \\
# = \frac{1}{2}\left(x_{ab}^T x_{ba} - v_{ab}^Tu_{cb} x_{ca} - x_{ab}^Tu_{bc}v_{ca} + v_{ab}^T u_{cb} u_{cd} v_{da}\right)
# \end{gather*}
#
# 2.1.1
#
# Let'c compute $\frac{d}{d u_{ef}} F(U, V)$:
# \begin{gather*}
# \frac{d}{d u_{ef}} F(U, V) = - \frac{1}{2} v_{af}^Tx_{ea} - \frac{1}{2} x_{ae}^T v_{fa} + \frac{1}{2} v_{ab}^T v_{da} \left(u_{cb} \delta_{ce} \delta_{df} + u_{cd} \delta_{ce} \delta_{bf}\right) = \\
# = - x_{ea} v_{af}^T + \frac{1}{2} \big( v_{ab}^T v_{fa} u_{eb} + v_{af}^T v_{da} u_{ed}\big) = - x_{ea} v_{af}^T + \frac{1}{2}\big(u_{eb} v_{ba} v_{af}^T + u_{ed} v_{da} v_{af}^T\big) = \\
# = - x_{ea} v_{af}^T + u_{eb} v_{ba} v_{af}^T
# \end{gather*}
#
# So, we can conclude, that $\frac{d}{d U} F(U, V) = - X V^T + U V V^T = (UV - X) V^T$
#
# 2.1.2
#
# In the same way, let's compute $\frac{d}{d v_{ef}} F(U, V)$:
# \begin{gather*}
# \frac{d}{d v_{ef}} F(U, V) = \frac{1}{2} u_{ec}^T x_{cf} - \frac{1}{2} x_{fb}^T u_{be}^T + \frac{1}{2} u_{bc}^T u_{cd} \big(v_{ab}^T \delta_{de} \delta_{af} + v_{da} \delta_{eb} \delta_{af}\big) = \\ = -\frac{1}{2} u_{ec}^T x_{cf} - \frac{1}{2} u_{eb}^T x_{bf} + \frac{1}{2}u_{bc}^T u_{ce} v_{fb}^T + \frac{1}{2} u_{ec}^T u_{cd} v_{df} = \\
# = - u_{ec}^T x_{cf} + u_{ec}^T u_{cb} v_{bf}
# \end{gather*}
#
# So, we can conclude, that $\frac{d}{d V} F(U, V) = - U^T X + U^T U V = U^T (UV - X)$
# + [markdown] id="AQ1afsJbkri7"
# ### Solution for task 2.1.3
#
# We will suppose the following:
# $$
# A \in \mathbb{R}^{n \times m} \, , \, B \in \mathbb{R}^{m \times k} \Rightarrow \text{ multiplication } A B \text{ takes } \mathcal{O} (n m k) \\
# A \in \mathbb{R}^{n \times m} \, , \, B \in \mathbb{R}^{n \times m} \Rightarrow \text{ sum } A + B \text { takes } \mathcal{O} (n m )
# $$
#
# Of course, there are more efficient algorithms, but as I understand they are not much differ.
#
# So, considerint $X \in \mathbb{R}^{n \times n}$, $U \in \mathbb{R}^{n \times k}$ and $V \in \mathbb{R}^{k \times n}$ we obtain, that time complexity for computation of $\frac{d}{d U} F(U, V) = (UV - X) V^T$ and $\frac{d}{d V} F(U, V) = U^T (UV - X)$ is $\mathcal{O}(k n^2)$
# + id="fdJeYBVwXuXn" outputId="9e70f77e-6e80-4ec3-9205-100cc30aad26" colab={"base_uri": "https://localhost:8080/", "height": 581}
# Your solution is here
from collections import defaultdict
import jax.numpy as jnp
import jax
from jax import grad
import numpy as np
import timeit
from timeit import Timer
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def F(U, V, X):
    """Half the squared Frobenius error of the factorization X ~ U @ V."""
    residual = X - U @ V
    return 0.5 * jnp.sum(residual * residual)
# Gradients of F via JAX autodiff: w.r.t. U (argnums=0) and w.r.t. V (argnums=1).
gradU = grad(F, argnums=0, has_aux=False)
gradV = grad(F, argnums=1, has_aux=False)
def timeit_grad(n, k, key, method='JAX', obj='U', n_regenerate=2, n_times=None):
    """Average wall-clock time of one gradient evaluation of F.

    Parameters
    ----------
    n, k : problem sizes (X is n x n, U is n x k, V is k x n).
    key : jax.random.PRNGKey used to draw fresh random matrices.
    method : 'JAX' (autodiff) or 'analytic' (closed-form expressions).
    obj : which gradient to time, 'U' or 'V'.
    n_regenerate : number of fresh (X, U, V) triples to average over.
    n_times : fixed number of timing loops; if None, Timer.autorange chooses.

    Returns the mean time (seconds) per gradient evaluation.
    """
    assert method in ['JAX', 'analytic']
    assert obj in ['U', 'V']

    # The closures read X, U, V from the enclosing scope; those names are
    # (re)bound inside the loop below before any closure is called.
    def gradU_JAX_once():
        gradU(U, V, X).block_until_ready()

    def gradV_JAX_once():
        gradV(U, V, X).block_until_ready()

    def gradU_analytic_once():
        # dF/dU = (UV - X) V^T; block_until_ready forces JAX's async dispatch.
        ((U @ V - X) @ V.T).block_until_ready()

    def gradV_analytic_once():
        # dF/dV = U^T (UV - X)
        (U.T @ (U @ V - X)).block_until_ready()

    # Renamed from `map` so the builtin is not shadowed.
    dispatch = {
        'JAX': {
            'U': gradU_JAX_once,
            'V': gradV_JAX_once
        },
        'analytic': {
            'U': gradU_analytic_once,
            'V': gradV_analytic_once
        }
    }
    time = 0.
    for _ in range(n_regenerate):
        key, Xkey, Ukey, Vkey = jax.random.split(key, num=4)
        X = jax.random.normal(Xkey, shape=(n, n), dtype=jnp.float32)
        U = jax.random.normal(Ukey, shape=(n, k), dtype=jnp.float32)
        V = jax.random.normal(Vkey, shape=(k, n), dtype=jnp.float32)
        func = dispatch[method][obj]
        if n_times is not None:
            time += timeit.timeit(func, number=n_times) / n_times
        else:
            n_loops, time_elapsed = Timer(func).autorange()
            time += time_elapsed / n_loops
    return time / n_regenerate
def timeit_range(n_range, k_range, n_cnst, k_cnst, key):
    # Benchmark both gradients ('U' and 'V') with both methods (JAX autodiff
    # vs. the analytic formula) over a sweep of k (n fixed at n_cnst, left
    # column) and a sweep of n (k fixed at k_cnst, right column), then plot
    # the resulting 2x2 grid of timing curves.
    methods = ['JAX', 'analytic']
    objs = ['U', 'V']
    fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(12, 8))
    for i, obj in enumerate(objs):
        for j in range(2):
            res = defaultdict(list)
            for method in methods:
                # Restart from the same base key per method so both methods
                # time the same sequence of random problem instances.
                method_key = key
                if j == 0:
                    for k in k_range:
                        method_key, curr_key = jax.random.split(method_key)
                        res[method].append(
                            timeit_grad(n_cnst, k, curr_key, method=method, obj=obj))
                    ax[i, j].plot(k_range, res[method], '-|', label=method)
                    ax[i, j].set_xlabel('k_range, n={}'.format(n_cnst))
                else:
                    for n in n_range:
                        method_key, curr_key = jax.random.split(method_key)
                        res[method].append(
                            timeit_grad(n, k_cnst, curr_key, method=method, obj=obj))
                    ax[i, j].plot(n_range, res[method], '-|', label=method)
                    ax[i, j].set_xlabel('n_range, k={}'.format(k_cnst))
            ax[i, j].set_title('Grad with respect to {}'.format(obj))
            ax[i, j].legend()
    plt.tight_layout()
    plt.show()
# Run the sweep: vary one dimension up to 13000 while holding the other at 500.
timeit_range(
    [10, 100, 500, 1000, 2000, 3000, 5000, 7000, 9000, 11000, 13000],
    [10, 100, 500, 1000, 2000, 3000, 5000, 7000, 9000, 11000, 13000],
    500, 500, jax.random.PRNGKey(713))
# + [markdown] id="Nu_-maqHmzRh"
# **Conclusion**
#
# We can see, that if we fix $n$ and vary $k$ we obtain the dependence of gradient computation time with respect to $k$ resembling linear trend. The computations of gradient using `JAX` and analytic comutations take similar time.
#
# Also, if we fix $k$ and vary $n$ we observe the power law dependence looking similar to $x^2$ (may be the deegre is different but not much). The computations via `JAX` and analytic gradient shows similar power law but the constant for `JAX` is greater.
#
#
# + [markdown] id="gOMLhqbfXuXv"
# ### Solution for task 2.2
#
# **2.2**
#
# So, we have the following $R(x) = \frac{(Ax, x)}{(x, x)} = \frac{x_a A_{ab} x_b}{x_cx_c}$
#
# Let's take the derivative:
# \begin{gather*}
# \frac{d}{d x_d} R(x) = \frac{x_cx_c \left[\frac{d}{d x_d}(x_a A_{ab} x_b)\right]- x_a A_{ab}x_b \left[\frac{d}{d x_d}(x_cx_c)\right]}{(x_cx_c)^2} = \\
# = \frac{A_{db} x_b + x_aA_{ad}}{x_cx_c} - \frac{2 x_a A_{ab} x_b x_d}{(x_cx_c)^2}
# \end{gather*}
#
# Since $A$ is symmetric, so we finally obtain:
# \begin{gather*}
# \frac{d}{d x} R(x) = \frac{2 A x}{(x, x)} - \frac{2 (Ax, x) x}{(x, x)^2}
# \end{gather*}
# + [markdown] id="ZB1BKWnGXuXw"
# # Problem 3. Compression of the fully-connected layers in neural network with simple architecture (30 pts)
#
# In this problem we consider the neural network that performs classification of the dataset of images.
# Any neural network can be considered as composition of simple linear and non-linear functions.
# For example, a neural network with 3 layers can be represented as
#
# $$f_3(f_2(f_1(x, w_1), w_2), w_3),$$
#
# where $x$ is input data (in our case it will be images) and $w_i, \; i =1,\dots,3$ are parameters that are going to be trained.
#
# We will study the compression potential of neural network with simple architecture: alternating some numbers of linear and non-linear functions.
#
# The main task in this problem is to study how the compression of fully-connected layers affects the test accuracy.
# Any fully-connected layer is represented as linear function $AX + B$, where $X$ is input matrix and $A, B$ are trainable matrices. Matrices $A$ in every layer are going to be compressed.
# The main result that you should get is the plot of dependence of test accuracy on the total number of parameters in the neural network.
# + [markdown] id="c5WN6aZNXuXy"
# #### Zero step: install PyTorch
#
# - Follow the steps in [official instructions](https://pytorch.org/get-started/locally/)
# + [markdown] id="1jqYrPCsXuXz"
# #### First step: download CIFAR10 dataset
# + id="Q2SyNjKzXuX0" outputId="7cb2689b-ddc1-40fe-d095-b6cb19eeb90d" colab={"base_uri": "https://localhost:8080/", "height": 105, "referenced_widgets": ["5115826bfb46460c9a575b7fdf3fdbb4", "ce02dcb00a874486afef943a7f6c0f4f", "5cfde69c31c0443988acc4177742b5b3", "6a014ac5952a47db9c896a2e367a1e9b", "1f502d79e6a5404bb7c2225e5f49c8c7", "4cdf8335e8d74515a686d699aa5238d4", "1ea1ab1173b844c6bc403e09ade8f5a1", "4ef31f4c947d4819b4437fd7144b8b89"]}
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np

batch_size = 100

# Map images to tensors and normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# CIFAR10 loaders; the training split is downloaded on first use.
train_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./', train=True, download=True, transform=transform),
                                           batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./', train=False, transform=transform),
                                          batch_size=batch_size, shuffle=True)
# Human-readable names for the ten CIFAR10 class indices.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# + [markdown] id="fxFM4j8NXuX7"
# #### Check what images are we going to classify
# + id="vNVzC-zWXuX9" outputId="d11c1792-29e8-4504-fe83-fd378362267a" colab={"base_uri": "https://localhost:8080/", "height": 614}
def imshow(img):
    """Undo the [-1, 1] normalization and display a (C, H, W) tensor as an image."""
    unnormalized = img * 0.5 + 0.5
    pixels = unnormalized.numpy()
    plt.figure(figsize=(20, 10))
    # matplotlib expects channels last: (H, W, C).
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(train_loader)
# Python 3 iterators have no `.next()` method (and modern PyTorch loader
# iterators dropped it too) -- use the builtin next().
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(8)))
# + [markdown] id="xtieWgXNXuYG"
# ### Second step: neural network architecture
#
# For simplicity and demonstration purposes of the neural network compression idea consider the architecture consisting of the only fully-connected layers and non-linear ReLU functions between them.
# To demonstrate compression effect, consider the dimension of the inner layers equals to 1000.
#
# Below you see implementation of such neural network in PyTorch.
# More details about neural networks you will study in the *Deep learning* course in one of the upcoming term
# + id="Lh0yaaB0XuYI"
class Net(nn.Module):
    """Fully-connected classifier for 3x32x32 CIFAR10 images.

    Five hidden layers of width 1000 with ReLU activations, followed by a
    10-way output layer and log-softmax.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(3 * 32 * 32, 1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.fc3 = nn.Linear(1000, 1000)
        self.fc4 = nn.Linear(1000, 1000)
        self.fc5 = nn.Linear(1000, 1000)
        self.fc6 = nn.Linear(1000, 10)
        self.ReLU = nn.ReLU()

    def forward(self, x):
        # Flatten each image to a 3072-vector, then apply the hidden stack.
        out = x.view(-1, 3 * 32 * 32)
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5):
            out = self.ReLU(hidden(out))
        out = self.fc6(out)
        return F.log_softmax(out, dim=1)
# + [markdown] id="GpkEgA1aXuYO"
# #### Implement functions for training and testing after every sweep over all dataset entries
# + id="8HIFZ94tXuYO"
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch of NLL-loss gradient steps over train_loader.

    Relies on the module-level global `log_interval` for print frequency.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # The model outputs log-probabilities (log_softmax), so NLL loss applies.
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# + id="psc8J_qOXuYU"
def test(model, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
# data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# + [markdown] id="0J54xDjkXuYa"
# ### Set parameters for training and print intermediate loss values
# + id="J4PM22RkXuYc"
# Print training progress every 50 batches; train for 7 full epochs.
log_interval = 50
epochs = 7
# + [markdown] id="ueBhyEfzXuYi"
# ### Third step: run training with the [Adam](https://arxiv.org/pdf/1412.6980.pdf%20%22%20entire%20document) optimization method
#
# If your laptop is not very fast, you will wait some time till training is finished.
# + id="KOE38EV5XuYi" outputId="54d9fd19-049f-4447-a023-5e5c65541e98" colab={"base_uri": "https://localhost:8080/"}
# Train the fully-connected net with Adam, evaluating on the test split
# after each epoch.
model = Net()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(1, epochs + 1):
    train(model, train_loader, optimizer, epoch)
    test(model, test_loader)
# + [markdown] id="T4fO5cxoXuYp"
# Now we have somehow trained neural network and we are ready to perform compression of the weigths in the fully-connected layers.
# + [markdown] id="zJnmwUZmXuYp"
# - (3 pts) Compute SVD of the matrix $1000 \times 1000$, which corresponds to a weight matrix $A$ in any layer of the trained neural network of the appropriate dimension. To find more information about accessing this matrix please refer to [PyTorch manual](https://pytorch.org/docs/stable/index.html).
# Plot decaying of the singular values like it was shown in the lecture. What conclusion can you make?
# - (17 pts) Create a new model, which is an analogue to the class ```Net```, but with some significant distinctions.
# It takes as input parameters the instance of the class ```Net``` and compression rank $r > 0$.
# After that, this model has to compress all matrices $A$ in fully-connected layers with SVD using first $r$ singular vectors and singular values.
# Pay attention to efficient storing of compress representation of the layers.
# Also ```forward``` method of your new model has to be implemented in a way to use compressed representation of the fully-connected layers in the most efficient way. In all other aspects it has to reproduce ```forward``` method in the original non-compressed model (number of layers, activations, loss function etc).
# - (5 pts) Plot dependence of test accuracy on the number of parameters in the compressed model. This number of parameters obviously depends on the compression rank $r$.
# Also plot dependence of time to compute inference on the compression rank $r$.
# Explain obtained results.
# To measure time, use [%timeit](https://docs.python.org/3.6/library/timeit.html) with necessary parameters (examples of using this command see in lectures)
#
# - (5 pts) After such transformations, your model depends on the factors obtained from SVD. Therefore, these factors are also can be trained with the same gradient method during some number of epochs. This procedure is called fine-tuning. We ask you make this fine-tuning of your compressed model during from 1 to 5 epoch and compare the result test accuracy with the test accuracy that you get after compression. Explain the observed results
# + [markdown] id="NlseZaHYCmDp"
# ### Task 3.1
# + id="Qh7K8hIrDnsx"
# Work on a deep copy so compression experiments leave the trained model intact.
from copy import deepcopy
model_copy = deepcopy(model)
import matplotlib.pyplot as plt
# + id="DMdFNYfOCksC" outputId="5db1353a-d27f-40a2-d682-1a1c93cae9c9" colab={"base_uri": "https://localhost:8080/", "height": 311}
# Find the first inner (1000 x 1000) fully-connected layer and plot the
# decay of the singular values of its weight matrix.
for m in model_copy.modules():
    if isinstance(m, nn.Linear):
        if m.in_features == 1000 and m.out_features == 1000:
            A = m.weight.data.cpu()
            break
# torch.linalg.svd supersedes the deprecated torch.svd; only the singular
# values S are used below, so the changed V/Vh convention is irrelevant here.
U, S, Vh = torch.linalg.svd(A)
plt.semilogy(S[:1000] / S[0], 'x')
plt.ylabel(r"$\sigma_i / \sigma_0$", fontsize=16)
plt.xlabel(r"Singular value index, $i$", fontsize=16)
plt.grid(True)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title('Singular value decay', fontsize=16)
plt.show()
# + [markdown] id="4woIwlMpEEfh"
# **Conclusion**
#
# As we can see, the singular values with numbers from `900` to `1000` much smaller, then the first once. However, previous singular values smaller the first one not so much (about $10^{-1}$, $10^{-2}$), so we need to analyze if we can remove them without significant accuracy decay.
# + [markdown] id="jV3DeWdLFYaI"
# ### Task 3.2
# + id="MPSK4n7aXuYq"
# Your solution is here
import warnings
from copy import deepcopy
class SVDLinear(nn.Module):
    """Low-rank replacement for an nn.Linear layer.

    Factors the weight W (out x in) as U diag(S) V^T truncated to rank r,
    storing U^T (r x out), S (r,) and V (in x r) as trainable parameters:
    r * (in + out + 1) numbers instead of in * out.
    """

    def __init__(self, layer, r, device='cpu'):
        super().__init__()
        assert isinstance(layer, nn.Linear)
        self.device = device
        W = layer.weight.data.cpu()
        # torch.linalg.svd supersedes the deprecated torch.svd; with
        # full_matrices=False: U is (out, k), S is (k,), Vh is (k, in).
        U, S, Vh = torch.linalg.svd(W, full_matrices=False)
        V = Vh.T
        if S.size(0) < r:
            warnings.warn(
                "count of singular values {} less "
                "then r = {}".format(S.size(0), r)
            )
            r = S.size(0)
        S = S[:r]
        U = U[:, :r]
        V = V[:, :r]
        # Move to the target device BEFORE wrapping in nn.Parameter:
        # nn.Parameter(x).to(device) returns a plain non-leaf Tensor for a
        # device transfer, so the factor would not be registered (or trained)
        # as a parameter on non-CPU devices.
        self.U = nn.Parameter(U.T.to(self.device))
        self.S = nn.Parameter(S.to(self.device))
        self.V = nn.Parameter(V.to(self.device))
        # The bias is shared with (not copied from) the original layer.
        self.bias = layer.bias
        assert (U.size(0) == layer.out_features)
        assert (V.size(0) == layer.in_features)

    def forward(self, x):
        # x @ V projects into the rank-r subspace, * S scales by singular
        # values, @ U^T maps back: x @ (U diag(S) V^T)^T + b.
        return ((x @ self.V) * self.S) @ self.U + self.bias
class SVDNet(nn.Module):
    # Rebuild `net` as an nn.Sequential in which every inner 1000x1000 linear
    # layer is replaced by its rank-r SVDLinear approximation; layers with
    # other shapes (the first and last) are deep-copied unchanged.
    def __init__(self, net, r, device='cpu'):
        super().__init__()
        self.device = device
        modules = [nn.Flatten(), ]
        # net.modules() yields submodules in registration order, so this
        # reproduces the original fc1 -> ... -> fc6 layer sequence.
        for m in net.modules():
            if isinstance(m, nn.Linear):
                if m.in_features == 1000 and m.out_features == 1000:
                    modules.append(SVDLinear(m, r, device=self.device))
                else:
                    modules.append(deepcopy(m))
                modules.append(nn.ReLU())
        # Drop the ReLU appended after the final (output) layer.
        del modules[-1]
        modules.append(nn.LogSoftmax(dim=1))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
# + id="Elv2NBF4-ikx" outputId="006f2dba-9aeb-495f-94a8-e10e7a884f01" colab={"base_uri": "https://localhost:8080/"}
# Sanity check: accuracy of the rank-10 compressed model without fine-tuning.
svd_model = SVDNet(model_copy, 10)
test(svd_model, test_loader)
# + [markdown] id="EkOpd9XkGqqr"
# ### Task 3.3
# + id="5UUbbpy7Gt63"
from time import perf_counter
import seaborn as sns
sns.set()


def test_acc_time(model, test_loader):
    """Like `test`, but return (accuracy, total inference time in seconds)
    instead of printing; only the model forward passes are timed."""
    model.eval()
    test_loss = 0
    correct = 0
    _time = 0.
    with torch.no_grad():
        for data, target in test_loader:
            # data, target = data.to(device), target.to(device)
            curr_time = perf_counter()
            output = model(data)
            curr_time = perf_counter() - curr_time
            _time += curr_time
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    return correct / len(test_loader.dataset), _time
def measure_time_acc(rs, model, test_loader):
    """For each rank in `rs`, build a compressed SVDNet from `model` and
    record its test accuracy and inference time.

    Returns (accuracies, times), parallel to `rs`."""
    durations = []
    accuracies = []
    for rank in rs:
        compressed = SVDNet(model, rank)
        accuracy, elapsed = test_acc_time(compressed, test_loader)
        durations.append(elapsed)
        accuracies.append(accuracy)
    return accuracies, durations
def draw_accs_times(rs, accs, times):
    """Plot accuracy and inference time side by side as functions of rank r."""
    fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(8, 3))
    left, right = axes[0], axes[1]
    left.plot(rs, accs, "-o")
    left.set_xlabel('r')
    left.set_title('Accuracies')
    right.plot(rs, times, "-o")
    right.set_title('Working times')
    right.set_xlabel('r')
    plt.tight_layout()
    plt.show()
# + id="GFdLnlkFLIwU" outputId="3195698e-158e-44ba-ce94-fa4d9d97efcf" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Sweep small ranks r = 1..10.
rs = list(range(1, 11, 1))
accs, times = measure_time_acc(rs, model_copy, test_loader)
draw_accs_times(rs, accs, times)
# + id="yAYvjr3aLPFI" outputId="013cbd59-1f7a-461d-d36b-c05e02acf9aa" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Sweep medium ranks r = 10..100.
rs = list(range(10, 110, 10))
accs, times = measure_time_acc(rs, model_copy, test_loader)
draw_accs_times(rs, accs, times)
# + id="tRj_2I3xNH0U" outputId="2dd82939-9f3b-4064-e04f-d36601aee479" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Sweep large ranks r = 100..1000 (r = 1000 is effectively uncompressed).
rs = list(range(100, 1100, 100))
accs, times = measure_time_acc(rs, model_copy, test_loader)
draw_accs_times(rs, accs, times)
# + [markdown] id="g8RxWgwHNxGz"
# **Conclusion**
#
# As we can see, using SVD we can significantly reduce the amount of parameters of neural network, and, finally, time complexity. It turns out, that using $r = 0.05 n$, where $n$ is the dimension of matrix in intermediate layer, we acheve almost the same accuracy as with not-compressed matrices.
# + [markdown] id="y0zvguLPO8dn"
# ### Task 3.4
# + id="1bTJScfyPZA6" outputId="64f66f3e-09ad-48e0-f530-b9287687e70e" colab={"base_uri": "https://localhost:8080/"}
from IPython.display import clear_output

# For a few ranks, record accuracy right after compression (v_accs) and after
# 5 epochs of fine-tuning the SVD factors with Adam (ft_accs).
rs = [1, 5, 10, 20]
epochs = 5
ft_accs = []
v_accs = []
for r in rs:
    # Start from a fresh copy so each rank fine-tunes the same trained weights.
    model_copy = deepcopy(model)
    svd_model = SVDNet(model_copy, r)
    v_acc, _ = test_acc_time(svd_model, test_loader)
    v_accs.append(v_acc)
    optimizer = optim.Adam(svd_model.parameters(), lr=1e-3)
    for epoch in range(1, epochs + 1):
        train(svd_model, train_loader, optimizer, epoch)
    ft_acc, _ = test_acc_time(svd_model, test_loader)
    ft_accs.append(ft_acc)
    clear_output(wait=True)
# + id="zYunPcLiW7cH" outputId="f1dd51b4-f255-45fe-c039-cdfb1378d676" colab={"base_uri": "https://localhost:8080/", "height": 301}
# Compare accuracy before vs. after fine-tuning across ranks.
plt.plot(rs, v_accs, "-o", label='original')
plt.plot(rs, ft_accs, "-o", label='fine_tuned')
plt.legend()
plt.xlabel('r')
plt.ylabel('accs')
plt.title('Accuracies comparison')
plt.show()
# + [markdown] id="P5GXq1F6XsYT"
# **Conclusion**
#
# As we can see, fine-tuning of the compressed models improves the accuracy of these models. We can conclude, that number of parameters in the original model is not optimal - reducing significantly this number (by compressing) we can still acheve the same accuracy.
#
# Another conclusion is that `fine-tuning` is efficient technique, which can improve the accuracy significantly.
# + [markdown] id="xqY7wg3gW4pZ"
#
# + [markdown] id="pm2Ym7TGXuYx"
# ## Problem 4. «Reducio!» (15 pts)
# + [markdown] id="NbdXsPXgXuYy"
# <NAME> is well versed in all magical disciplines. In particular, she is a great expert in Numerical Linear Algebra and JAX.
#
# She has invited <NAME> to play the game.
#
# Hermione chooses a number $r \in [1, 95]$ and two matrices $W_1 \in \mathbb{R}^{r \times 100}$ and $W_2 \in \mathbb{R}^{100 \times r}$.
# Harry can tell her any 100-dimensional vector $x$, and Hermione gives Harry the result of
#
# $$
# \sin(W_2 \cos(W_1 x)),
# $$
#
# where trigonometric functions are applied element-wise.
# This result is also a 100-dimensional vector.
#
# Not to lose, Harry should guess what number $r$ Hermione has chosen.
# Harry knows the python language, but he is an absolute layman in algebra. Please, help him to get at least 95% correct answers!
#
# <i> Hint 1: SVD might help you, but use it wisely! </i>
#
# <i> Hint 2: Suppose that a special magic allows you to compute gradients through automatic differentiation in JAX. You can also estimate gradients via finite differences, if you want it.</i>
#
# <i> Hint 3: You can estimate the matrix rank using simple heuristics.
#
# You should write code into the `harry_answers` function. Good luck!
# + id="0KxOrn9wXuYy"
import jax.numpy as jnp
import jax
from jax import jacfwd
import numpy as np
import random
from tqdm import tqdm
class Game:
    """Rank-guessing game: Hermione hides a rank r and two factor matrices,
    Harry must recover r by querying the black-box map x -> sin(W2 cos(W1 x))."""

    def __init__(self, key):
        # key: a jax.random.PRNGKey driving all of Hermione's random draws
        self.key = key

    def hermione_chooses_r_and_W(self):
        """Sample a secret rank r in [1, 95] and factors W1 (r x 100), W2 (100 x r)."""
        self.r = random.randint(1, 95)
        self.key, w1_key = jax.random.split(self.key)
        self.W1 = jax.random.uniform(w1_key, (self.r, 100), maxval=100., minval=0., dtype=jnp.float32)
        self.key, w2_key = jax.random.split(self.key)
        self.W2 = jax.random.uniform(w2_key, (100, self.r), maxval=100., minval=0., dtype=jnp.float32)

    def hermione_computes_function(self, x):
        # The only oracle Harry may query; trig functions apply element-wise.
        return jnp.sin(self.W2 @ jnp.cos(self.W1 @ x))

    def harry_answers(self):
        """Estimate r as the numerical rank of the Jacobian at a probe point.

        The Jacobian factors through the r-dimensional bottleneck, so its rank
        is at most r; counting significant singular values recovers r.
        """
        probe = jnp.ones(100, dtype=jnp.float32)
        jacobian = jacfwd(self.hermione_computes_function)(probe)
        singular_values = jnp.linalg.svd(jacobian)[1]
        # Threshold heuristic: singular values above 1.0 count as significant.
        return (singular_values > 1.0).sum()

    def play(self, n_rounds, verbose=True):
        """Run n_rounds of the game and print Harry's overall success rate.

        n_rounds: number of rounds to play.
        verbose: when True, print the outcome of every individual round.
        """
        n_right_answers = 0
        for _ in range(n_rounds):
            self.hermione_chooses_r_and_W()
            guess = self.harry_answers()
            if guess == self.r:
                n_right_answers += 1
                if verbose:
                    print(f"Good job! The true answer is {self.r}, Harry's answer is {guess}!")
            elif verbose:
                print(f"Harry's answers is {guess}, but the true answer is {self.r} :(")
        if float(n_right_answers) / n_rounds > 0.95:
            print(f'Well done: {n_right_answers}/{n_rounds} right answers!')
        else:
            print(f'Only {n_right_answers}/{n_rounds} right answers :( Work a little more and you will succeed!')
# + id="ymni_2VYXuY3" outputId="e914974a-7a79-488f-d9c0-4bf182df679c" colab={"base_uri": "https://localhost:8080/"}
# Fixed PRNG seed so the game is reproducible across runs.
key = jax.random.PRNGKey(713)
game = Game(key)
# Play 100 rounds silently; only the final success summary is printed.
game.play(n_rounds=100, verbose=False)
# + [markdown] id="govUnRrPXuY9"
# # Problem 5 (Bonus)
#
# 1. The norm is called absolute if $\|x\|=\| \lvert x \lvert \|$ holds for any vector $x$, where $x=(x_1,\dots,x_n)^T$ and $\lvert x \lvert = (\lvert x_1 \lvert,\dots, \lvert x_n \lvert)^T$. Give an example of a norm which is not absolute.
#
# 2. Write a function ```ranks_HOSVD(A, eps)```
# that calculates Tucker ranks of a d-dimensional tensor $A$ using High-Order SVD (HOSVD) algorithm, where ```eps``` is the relative accuracy in the Frobenius norm between the approximated and the initial tensors. Details can be found [here](http://ca.sandia.gov/~tgkolda/pubs/pubfiles/TensorReview.pdf) on Figure 4.3.
# ```python
# def ranks_HOSVD(A, eps):
# return r #r should be a tuple of ranks r = (r1, r2, ..., rd)
# ```
# + id="AjGIi3MAXuY-"
| hw/hw1/hw1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finetuning AlexNet for Oxford-102
# **Author: <NAME>**
#
# This is a demonstration of the finetuning process done on pretrained weights from AlexNet (2012).
#
# * *Note: The `.py` version of the project will be available in the same repository.*
#
# ## The dataset
#
# * The Oxford-102 dataset is a flower dataset with 102 classes of flowers.
# * The dataset can be found [here](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/).
#
# ## The network
#
# * AlexNet was created by <NAME>, <NAME>, and <NAME> in 2012 featuring a deep convolutional network.
# * AlexNet was the winner of **2012 ILSVRC** (ImageNet Large-Scale Visual Recognition Challenge).
# * The network features 6 layers of convolutional and pooling, and 3 layers of fully connected neural networks (the network architecture image will be included in this project).
# * Click [here](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf) to read the research paper.
# ### The ImageNet mean
# * The mean of the ImageNet dataset, which was defined as `[104., 117., 124.]` was used to normalize the images.
# * The mean will help center all the images to around `0` (originally was from `0` to `255`)
# <center>`imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)`</center>
# ## The network architecture in TensorBoard
# <p align="center">
# <img src="./images/the_graph.png">
# </p>
# +
# @hidden_cell
import os
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import cv2
from scipy.io import loadmat
import tensorflow as tf
imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)
os.mkdir('./summary')
os.mkdir('./models')
# -
# ### Load up the train and test indexes
#
# * We are going to use `loadmat` from `scipy.io` to load MatLab file.
# * It is odd that there are more testing images than training images, so I decided to flip them to increase accuracy.
# * Converting them to list for easier iteration later on.
# setid.mat holds the official train/test split indices of Oxford-102.
set_ids = loadmat('setid.mat')
# The official split has far more *test* images than training images, so the
# two sets are deliberately swapped here: train on 'tstid', test on 'trnid'.
test_ids = set_ids['trnid'].tolist()[0]
train_ids = set_ids['tstid'].tolist()[0]
# ### Preprocessing image indexes
# * Obtained that all the provided images were named from `00001` to `0xxxx` so we need a special function to pad the zeros in front of our indexes
def indexes_processing(int_list):
    """Zero-pad integer image ids to the 5-digit form used in the filenames.

    The Oxford-102 images are named ``image_00001.jpg`` .. ``image_0xxxx.jpg``,
    so every id must be left-padded with zeros to a width of 5.

    Args:
        int_list: list of integer image ids.

    Returns:
        List of 5-character zero-padded id strings, in the same order.
    """
    # str.zfill pads to the requested width in one step, replacing the original
    # chain of range checks (identical output for ids < 10000, and correctly
    # 5 characters — rather than over-padded — for 5-digit ids).
    return [str(element).zfill(5) for element in int_list]
# Zero-padded string ids matching the image filenames on disk.
raw_train_ids = indexes_processing(train_ids)
raw_test_ids = indexes_processing(test_ids)
# ### Load the labels for train and test sets
# MatLab labels are 1-based; subtract 1 to get 0-based class indices.
image_labels = (loadmat('imagelabels.mat')['labels'] - 1).tolist()[0]
# Human-readable names for the 102 flower classes (list index == class id).
labels = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'english marigold', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'mexican aster', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barbeton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'oxeye daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'pelargonium', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia?', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'bearded iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'ball moss', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily']
# ## Image Preprocessing
# ### Two Different Approaches, two distinct results
# #### 1. Normalize by dividing by `255`
# * Dividing by `255` to normalize the images between `0s` and `1s` is the way I usually do when I preprocess images to feed to Convolutional Neural Network.
# * The top accuracy for this method falls somewhere between `80%` and `82%`. This is not bad at all for a simple network architecture
# * Below is the snapshot during runtime of this method, the network converged to `80%` at epoch `20000` and did not improve further even with learning rate decay.
#
# ```On Step 32500
# At: 2018-02-21 02:46:02.002311
# Accuracy: 81.96%
# Saving model...
# Model saved at step: 32500```
#
#
# ```On Step 33000
# At: 2018-02-21 02:50:38.211141
# Accuracy: 82.25%
# Saving model...
# Model saved at step: 33000```
#
#
# ```On Step 33500
# At: 2018-02-21 02:55:13.426248
# Accuracy: 82.35%
# Saving model...
# Model saved at step: 33500```
# #### 2. Normalize by subtracting the mean
# * This is by far the best method for AlexNet since the images used to feed this network were normalized this way.
# * Simply call `image -= mean` and `image` is ready to feed to the network.
# * The top accuracy for this method is around `90%`. This is absolutely amazing, I got `8%` accuracy boost just by using a different normalization approach.
# * The network also converged incredibly fast (see the output below).
class ImageProcessor():
    """Loads, resizes, and mean-normalizes the Oxford-102 images and serves
    training mini-batches.

    Attributes:
        i: cursor into the training set used by `next_batch`.
        num_classes: number of flower classes (102 by default).
        training_images / testing_images: preallocated (N, 227, 227, 3) arrays.
        training_labels / testing_labels: one-hot label lists, populated by
            `set_up_images`.
    """
    def __init__(self, num_classes=102):
        self.i = 0
        self.num_classes = num_classes
        # 6149 training / 1020 testing images, pre-sized for AlexNet input.
        self.training_images = np.zeros((6149, 227, 227, 3))
        self.training_labels = None
        self.testing_images = np.zeros((1020, 227, 227, 3))
        self.testing_labels = None
    def one_hot_encode(self, labels):
        '''
        One hot encode the output labels to be numpy arrays of 0s and 1s
        '''
        out = np.zeros((len(labels), self.num_classes))
        for index, element in enumerate(labels):
            out[index, element] = 1
        return out
    def set_up_images(self):
        """Read every image from /input, resize to 227x227, subtract the
        ImageNet mean, and build the one-hot label lists."""
        print('Processing Training Images...')
        i = 0
        for element in raw_train_ids:
            img = cv2.imread('/input/image_{}.jpg'.format(element))
            img = cv2.resize(img, (227, 227)).astype(np.float32)
            # Center pixels the same way the pretrained AlexNet expects.
            img -= imagenet_mean
            self.training_images[i] = img
            i += 1
        print('Done!')
        i = 0
        print('Processing Testing Images...')
        for element in raw_test_ids:
            img = cv2.imread('/input/image_{}.jpg'.format(element))
            img = cv2.resize(img, (227, 227)).astype(np.float32)
            img -= imagenet_mean
            self.testing_images[i] = img
            i += 1
        print('Done!')
        print('Processing Training and Testing Labels...')
        encoded_labels = self.one_hot_encode(image_labels)
        # BUG FIX: these accumulator lists were never initialized in the
        # original code, so `train_labels.append(...)` raised a NameError.
        train_labels = []
        test_labels = []
        for train_id in train_ids:
            train_labels.append(encoded_labels[train_id - 1])
        for test_id in test_ids:
            test_labels.append(encoded_labels[test_id - 1])
        self.training_labels = train_labels
        self.testing_labels = test_labels
        print('Done!')
    def next_batch(self, batch_size):
        """Return the next (images, labels) training batch, wrapping around
        once the cursor passes the end of the training set."""
        x = self.training_images[self.i:self.i + batch_size]
        y = self.training_labels[self.i:self.i + batch_size]
        self.i = (self.i + batch_size) % len(self.training_images)
        return x, y
# Initialize ImageProcessor instance
image_processor = ImageProcessor()
# Call set_up_images
image_processor.set_up_images()
# ## The Architecture
# <p align="center">
# <img src="./images/alex_ar.png">
# </p>
class AlexNet():
    """AlexNet graph builder for finetuning on Oxford-102.

    Builds the classic 8-layer AlexNet (5 conv + 3 fully connected) on top of
    the input tensor `X` and exposes the unscaled class scores (logits) as
    `self.y_pred`. Pretrained weights can be loaded from a `.npy` snapshot via
    `load_weights`; layers named in `skip_layer` are left at their random
    initialization so they can be trained from scratch.
    """
    def __init__(self, X, keep_prob, num_classes, skip_layer, weights_path='DEFAULT'):
        # X: input placeholder of shape (batch, 227, 227, 3)
        # keep_prob: dropout keep-probability placeholder
        # num_classes: size of the final fc8 output layer
        # skip_layer: layer names whose pretrained weights must NOT be loaded
        # weights_path: path to the pretrained-weights .npy file
        self.X = X
        self.KEEP_PROB = keep_prob
        self.NUM_CLASSES = num_classes
        self.SKIP_LAYER = skip_layer
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = '/weights/bvlc_alexnet.npy'
        else:
            self.WEIGHTS_PATH = weights_path
        # Build the graph immediately on construction.
        self.initialize()
    def initialize(self):
        """Build the AlexNet graph and store the fc8 logits in `self.y_pred`."""
        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
        conv_1 = self.conv_layer(self.X, 11, 11, 96, 4, 4, name='conv1', padding='VALID')
        norm_1 = self.lrn(conv_1, 2, 1e-05, 0.75, name='norm1')
        pool_1 = self.max_pool(norm_1, 3, 3, 2, 2, name='pool1', padding='VALID')
        # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool
        # groups=2 mirrors the original two-GPU split of AlexNet (2012).
        conv_2 = self.conv_layer(pool_1, 5, 5, 256, 1, 1, name='conv2', groups=2)
        norm_2 = self.lrn(conv_2, 2, 1e-05, 0.75, name='norm2')
        pool_2 = self.max_pool(norm_2, 3, 3, 2, 2, name='pool2', padding='VALID')
        # 3rd Layer: Conv (w ReLu)
        conv_3 = self.conv_layer(pool_2, 3, 3, 384, 1, 1, name='conv3')
        # 4th Layer: Conv (w ReLu)
        conv_4 = self.conv_layer(conv_3, 3, 3, 384, 1, 1, name='conv4', groups=2)
        # 5th Layer: Conv (w ReLu) -> Pool
        conv_5 = self.conv_layer(conv_4, 3, 3, 256, 1, 1, name='conv5', groups=2)
        pool_5 = self.max_pool(conv_5, 3, 3, 2, 2, name='pool5', padding='VALID')
        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout
        pool_6_flat = tf.reshape(pool_5, [-1, 6*6*256])
        full_6 = self.fully_connected(pool_6_flat, 6*6*256, 4096, name='fc6')
        full_6_dropout = self.drop_out(full_6, self.KEEP_PROB)
        # 7th Layer: FC (w ReLu) -> Dropout
        full_7 = self.fully_connected(full_6_dropout, 4096, 4096, name='fc7')
        full_7_dropout = self.drop_out(full_7, self.KEEP_PROB)
        # 8th Layer: FC and return unscaled activations
        self.y_pred = self.fully_connected(full_7_dropout, 4096, self.NUM_CLASSES, relu=False, name='fc8')
    def load_weights(self, session):
        """Assign pretrained weights into the graph, skipping SKIP_LAYER.

        Relies on the variable scopes created in `initialize` (one scope per
        layer, with 'weights' and 'biases' variables in each).
        """
        # Load the weights into memory
        weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes').item()
        # Loop over all layer names stored in the weights dict
        for op_name in weights_dict:
            # Check if layer should be trained from scratch
            if op_name not in self.SKIP_LAYER:
                with tf.variable_scope(op_name, reuse=True):
                    for data in weights_dict[op_name]:
                        # 1-D arrays are biases; anything else is a weight tensor.
                        if len(data.shape) == 1:
                            var = tf.get_variable('biases')
                            session.run(var.assign(data))
                        else:
                            var = tf.get_variable('weights')
                            session.run(var.assign(data))
    def conv_layer(self, x, filter_height, filter_width, num_filters, stride_y, stride_x, name, padding='SAME', groups=1):
        """Convolution + bias + ReLU; `groups` > 1 splits channels to emulate
        the original multi-GPU grouped convolution."""
        num_channels = int(x.get_shape()[-1])
        convolve = lambda i, k: tf.nn.conv2d(i, k, strides=[1,stride_y,stride_x,1], padding=padding)
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable('weights', shape=[filter_height,
                                                        filter_width,
                                                        num_channels/groups,
                                                        num_filters])
            biases = tf.get_variable('biases', shape=[num_filters])
            if groups == 1:
                conv = convolve(x, weights)
            else:
                # Split input and kernels along the channel axis, convolve each
                # group independently, then concatenate the results.
                input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
                weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
                conv = tf.concat(axis=3, values=output_groups)
            bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))
            return tf.nn.relu(bias, name=scope.name)
    def max_pool(self, x, filter_height, filter_width, stride_y, stride_x, name, padding='SAME'):
        """Max pooling over (filter_height, filter_width) windows."""
        return tf.nn.max_pool(x, ksize=[1,filter_height,filter_width,1],
                              strides=[1,stride_y,stride_x,1], padding=padding,
                              name=name)
    def lrn(self, x, radius, alpha, beta, name, bias=1.0):
        """Local response normalization, as used in the original AlexNet."""
        return tf.nn.local_response_normalization(x, depth_radius=radius,
                                                  alpha=alpha, beta=beta,
                                                  bias=bias, name=name)
    def fully_connected(self, input_layer, num_in, num_out, name, relu=True):
        """Fully connected layer (x @ W + b), optionally followed by ReLU.

        relu=False is used for the final fc8 layer so it returns raw logits.
        """
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable('weights', shape=[num_in, num_out], trainable=True)
            biases = tf.get_variable('biases', shape=[num_out], trainable=True)
            activation = tf.nn.xw_plus_b(input_layer, weights, biases, name=scope.name)
            if relu:
                return tf.nn.relu(activation)
            else:
                return activation
    def drop_out(self, x, keep_prob):
        """Dropout wrapper; keep_prob is fed at run time (0.5 train, 1.0 eval)."""
        return tf.nn.dropout(x, keep_prob=keep_prob)
# ### Placeholders for inputs, outputs, and hold probability
# Input images (227x227x3, mean-subtracted), one-hot labels for the 102
# classes, and the dropout keep probability (0.5 training, 1.0 evaluation).
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y_true = tf.placeholder(tf.float32, [None, 102])
keep_prob = tf.placeholder(tf.float32)
# ### The Hyperparameters
# * Epoch is set to 50000.
# * Drop rate is set to 0.5.
#
# *The parameter choices are adapted from [here](https://github.com/jimgoo/caffe-oxford102).*
#
# #### Learning rate decay
# ### $$calculated = base \times decay rate^{\frac{global step}{decay step}}$$
#
# Where:
# * $calculated$ is the calculated learning rate.
# * $base$ is the base learning rate.
# Non-trainable step counter driving the learning-rate decay schedule.
global_step = tf.Variable(0, trainable=False)
base_lr = 0.001
# Halve the learning rate every 20000 steps (staircase decay).
# NOTE(review): the decay only advances if `global_step` is actually
# incremented in the session (e.g. via tf.assign_add) — confirm the training
# loop does so.
base_lr = tf.train.exponential_decay(base_lr, global_step, 20000, 0.5, staircase=True)
num_epochs = 50000
drop_rate = 0.5
# Layers to (re)train from scratch; their pretrained weights are not loaded.
train_layers = ['fc8']
# ## Picking layers to train from scratch
# ### 1. Choosing last two layers `fc7` and `fc8`
# * The network performs quite well at top accuracy of `77%`.
# * The learning rate are all the same for all variables.
# * All other variables are set to `trainable=False` to prevent learning.
#
# ### 2. Choosing only the last `fc8` layer
# * The network performs well at top accuracy of `90%`.
# * The learning rates are different for each variables with pretrained weights learn slower.
# * All variables are trainable.
# Build the network; `fc8` (listed in train_layers) starts from random init.
model = AlexNet(x, keep_prob, 102, train_layers)
with tf.name_scope('network_output'):
    # Unscaled class scores (logits) from the final fc8 layer.
    y_pred = model.y_pred
# ## Custom learning rate
# ### Pretrained layers
# * The pretrained layers include `conv1`, `conv2`, `conv3`, `conv4`, `conv5`, `fc6`, `fc7`.
# * The pretrained `weights` will have a learning rate of `1*base_lr`.
# * The pretrained `biases` will have a learning rate of `2*base_lr`.
#
# ### Untrained layers
# * The untrained layer includes `fc8`.
# * The untrained `weights` will have a learning rate of `10*base_lr`.
# * The untrained `biases` will have a learning rate of `20*base_lr`.
#
# *`conv` means convolution layer, `fc` means fully connected layer.*
#
# *These learning rate choices are adapted from [here](https://github.com/jimgoo/caffe-oxford102).*
# Splitting variables into groups that share the same learning rate.
all_vars = tf.global_variables()
# Drop the first variable (`global_step`), which is not a network parameter.
all_vars = all_vars[1:]
# Pretrained layer parameters (conv1..conv5, fc6, fc7), interleaved as
# [weights, biases, weights, biases, ...] in graph-creation order.
conv_vars = [all_vars[0], all_vars[2], all_vars[4], all_vars[6], all_vars[8], all_vars[10], all_vars[12]]
bias_vars = [all_vars[1], all_vars[3], all_vars[5], all_vars[7], all_vars[9], all_vars[11], all_vars[13]]
# Freshly initialized fc8 parameters, trained with larger learning rates.
last_weights = [all_vars[14]]
last_bias = [all_vars[15]]
# +
with tf.name_scope('cross_entropy'):
    # Softmax cross-entropy against the one-hot labels; `y_pred` holds
    # unscaled logits, exactly as this op expects.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))
    tf.summary.scalar('cross_entropy', cross_entropy)
# +
with tf.name_scope('train'):
    # One gradient pass over all four variable groups; the flat gradient list
    # is then split back into per-group lists in the same order.
    gradients = tf.gradients(cross_entropy, conv_vars + bias_vars + last_weights + last_bias)
    conv_vars_gradients = gradients[:len(conv_vars)]
    bias_vars_gradients = gradients[len(conv_vars):len(conv_vars) + len(bias_vars)]
    last_weights_gradients = gradients[len(conv_vars) + len(bias_vars):len(conv_vars) + len(bias_vars) + len(last_weights)]
    last_bias_gradients = gradients[len(conv_vars) + len(bias_vars) + len(last_weights):len(conv_vars) + len(bias_vars) + len(last_weights) + len(last_bias)]
    # Per-group learning rates: pretrained weights 1x base, pretrained biases
    # 2x, new fc8 weights 10x, new fc8 biases 20x (caffe finetuning convention).
    trained_weights_optimizer = tf.train.GradientDescentOptimizer(base_lr)
    trained_biases_optimizer = tf.train.GradientDescentOptimizer(2*base_lr)
    weights_optimizer = tf.train.GradientDescentOptimizer(10*base_lr)
    biases_optimizer = tf.train.GradientDescentOptimizer(20*base_lr)
    train_op1 = trained_weights_optimizer.apply_gradients(zip(conv_vars_gradients, conv_vars))
    train_op2 = trained_biases_optimizer.apply_gradients(zip(bias_vars_gradients, bias_vars))
    train_op3 = weights_optimizer.apply_gradients(zip(last_weights_gradients, last_weights))
    train_op4 = biases_optimizer.apply_gradients(zip(last_bias_gradients, last_bias))
    # Single op that runs all four group updates together.
    train = tf.group(train_op1, train_op2, train_op3, train_op4)
# +
with tf.name_scope('accuracy'):
    # Fraction of samples whose top-scoring class matches the true label.
    matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    acc = tf.reduce_mean(tf.cast(matches, tf.float32))
    tf.summary.scalar('accuracy', acc)
# -
# Merge all scalar summaries for TensorBoard and prepare the writer.
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter('./summary')
init = tf.global_variables_initializer()
# Keep only the 3 most recent checkpoints on disk.
saver = tf.train.Saver(max_to_keep=3)
# Graph op that actually advances `global_step`. BUG FIX: the original
# `global_step += 1` only rebound the Python name to a new `add` tensor on
# every iteration (growing the graph) and never updated the tf.Variable, so
# the exponential learning-rate decay defined above could never take effect.
increment_global_step = tf.assign_add(global_step, 1)
with tf.Session() as sess:
    sess.run(init)
    writer.add_graph(sess.graph)
    # Load pretrained AlexNet weights for every layer except `train_layers`.
    model.load_weights(sess)
    print('Training process started at {}'.format(datetime.now()))
    for i in range(num_epochs):
        batches = image_processor.next_batch(128)
        sess.run(train, feed_dict={x:batches[0], y_true:batches[1], keep_prob:0.5})
        # Advance the decay schedule's step counter inside the session.
        sess.run(increment_global_step)
        if (i%500==0):
            print('On Step {}'.format(i))
            print('Current base learning rate: {0:.5f}'.format(sess.run(base_lr)))
            print('At: {}'.format(datetime.now()))
            # Evaluate on the full test set with dropout disabled (keep_prob=1).
            accuracy = sess.run(acc, feed_dict={x:image_processor.testing_images, y_true:image_processor.testing_labels, keep_prob:1.0})
            print('Accuracy: {0:.2f}%'.format(accuracy * 100))
            print('Saving model...')
            saver.save(sess, './models/model_iter.ckpt', global_step=i)
            print('Model saved at step: {}'.format(i))
            print('\n')
    print('Saving final model...')
    saver.save(sess, './models/model_final.ckpt')
    print('Saved')
    print('Training finished at {}'.format(datetime.now()))
# ## Conclusion
# * The model converges incredibly fast and reaches a stable accuracy of 90% at epoch 11000.
# * Training took 6 hours on one Tesla K80 GPU.
# * The whole process would take around 20 hours.
#
# ### Final Accuracy: 89.51%
| Oxford_102_AlexNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Docker
#
# **Learning Objectives**
# * Build and run Docker containers
# * Pull Docker images from Docker Hub and Google Container Registry
# * Push Docker images to Google Container Registry
# ## Overview
#
# Docker is an open platform for developing, shipping, and running applications. With Docker, you can separate your applications from your infrastructure and treat your infrastructure like a managed application. Docker helps you ship code faster, test faster, deploy faster, and shorten the cycle between writing code and running code.
#
# Docker does this by combining kernel containerization features with workflows and tooling that helps you manage and deploy your applications.
#
# Docker containers can be directly used in Kubernetes, which allows them to be run in the Kubernetes Engine with ease. After learning the essentials of Docker, you will have the skillset to start developing Kubernetes and containerized applications.
# ## Basic Docker commands
# See what docker images you have.
# !docker images
# If this is the first time working with docker you won't have any repositories listed.
#
# **Note**. If you are running this in an AI Notebook, then you should see a single image `gcr.io/inverting-proxy/agent`. This is the container that is currently running the AI Notebook.
#
# Let's use `docker run` to pull a docker image called `hello-world` from the public registry. The docker daemon will search for the `hello-world` image, if it doesn't find the image locally, it pulls the image from a public registry called Docker Hub, creates a container from that image, and runs the container for you.
# !docker run hello-world
# Now when we look at our docker images we should see `hello-world` there as well.
# !docker images
# This is the image pulled from the Docker Hub public registry. The Image ID is in `SHA256` hash format—this field specifies the Docker image that's been provisioned. When the docker daemon can't find an image locally, it will by default search the public registry for the image. Let's run the container again:
# Now, if we want to run `docker run hello-world` again, it won't have to download from the container registry.
# To see all docker containers running, use `docker ps`.
# !docker ps
# There are no running containers. **Note. If you are running this in an AI Notebook, you'll see one container running.**
#
# The `hello-world` containers you ran previously already exited. In order to see all containers, including ones that have finished executing, run docker `ps -a`:
# !docker ps -a
# This shows you the Container ID, a UUID generated by Docker to identify the container, and more metadata about the run. The container Names are also randomly generated but can be specified with docker run --name [container-name] hello-world.
# ## Build a Docker container
# Let's build a Docker image that's based on a simple node application.
# **Exercise**
#
# Open the text file called `intro.docker` in the `dockerfiles` folder and complete the TODO there.
# Your dockerfile should have the following steps
#
# 1. use `FROM` to inherit an official Node runtime as the parent image; e.g. node:6
# 2. use `WORKDIR` to set the working directory to /app
# 3. use `ADD` to copy the current directory to the container at /app
# 4. use `EXPOSE` to make the containers port 80 available to the outside world
# 5. use `CMD` to run the command `node ./src/app.js`
# This file instructs the Docker daemon on how to build your image.
#
# The initial line specifies the base parent image, which in this case is the official Docker image for node version 6.
# In the second, we set the working (current) directory of the container.
# In the third, we add the current directory's contents (indicated by the "." ) into the container.
# Then we expose the container's port so it can accept connections on that port and finally run the node command to start the application.
#
# Check out the other [Docker command references](https://docs.docker.com/engine/reference/builder/#known-issues-run) to understand what each line does.
# We're going to use this Docker container to run a simple node.js app. Have a look at `app.js`. This is a simple HTTP server that listens on port 80 and returns "Hello World."
#
# Now let's build the image. Note again the "`.`", which means current directory so you need to run this command from within the directory that has the Dockerfile.
#
# The `-t` is to name and tag an image with the `name:tag` syntax. The name of the image is `node-app` and the tag is `0.1`. The tag is highly recommended when building Docker images. If you don't specify a tag, the tag will default to latest and it becomes more difficult to distinguish newer images from older ones. Also notice how each line in the Dockerfile above results in intermediate container layers as the image is built.
# **Exercise**
#
# Use `docker build` to build the docker image at `dockerfiles/intro.docker`. Tag the image `node-app:0.1`.
# # %%bash
# !docker build -t node-app:0.1 -f dockerfiles/intro.docker .
# Let's check that the image has been created correctly.
# !docker images
# You should see a `node-app` repository that was created only seconds ago.
#
# Notice `node` is the base image and `node-app` is the image you built. You can't remove `node` without removing `node-app` first. The size of the image is relatively small compared to VMs. Other versions of the node image such as `node:slim` and `node:alpine` can give you even smaller images for easier portability. The topic of slimming down container sizes is further explored in Advanced Topics. You can view all versions in the official repository here.
#
# Note, you can remove an image from your docker images using `docker rmi [repository]:[tag]`.
# ## Run a Docker container
#
# Now we'll run the container based on the image you built above using the `docker run` command. The `--name` flag allows you to name the container if you like. And `-p` instructs Docker to map the host's port 4000 to the container's port 80. This allows you to reach the server at http://localhost:4000. Without port mapping, you would not be able to reach the container at localhost.
# !docker ps -a
# **Exercise**
#
# Use `docker run` to run the container you just build called `node-app:0.1`. Assign the host port `4000` to port `80` and assign it the name `my-app`.
# # %%bash
# !docker run -p 4000:80 --name my-app node-app:0.1
# To test out the server, open a terminal window and type the following command:
#
# ```bash
# curl http://localhost:4000
# ```
#
# You should see the server respond with `Hello World`
# The container will run as long as the initial terminal is running. If you want to stop the container, run the following command in the terminal to stop and remove the container:
#
# ```bash
# docker stop my-app && docker rm my-app
# ```
# After a few moments the container will stop. You should notice the cell above will complete execution.
#
# #### Running the container in the background
# If you want to the container to run in the background (not tied to the terminal's session), you need to specify the `-d` flag.
# Now run the following command to start the container in the background
# **Exercise**
#
# Modify your command above with `-d` flag to run `my-app` in the background.
# # %%bash
# !docker run -p 4000:80 --name my-app -d node-app:0.1
# Your container is now running in the background. You can check the status of your running container using `docker ps`
# !docker ps
# Notice the container is running in the output of docker ps. You can look at the logs by executing `docker logs [container_id]`.
# Note, your container id will be different
# !docker logs b9d5fd6b8e33
# You should see
# ```bash
# Server running at http://0.0.0.0:80/
# ```
# If you want to follow the log's output as the container is running, use the `-f` option.
# ## Modify & Publish
#
# Let's modify the application and push it to your Google Cloud Repository (gcr). After that you'll remove all local containers and images to simulate a fresh environment, and then pull and run your containers from gcr. This will demonstrate the portability of Docker containers.
#
# ### Edit `app.js`
# Open the file `./src/app.js` with the text editor and replace "Hello World" with another string. Then build this new image.
# **Exercise**
#
# After modifying the `app.js` file, use `docker build` to build a new container called `node-app:0.2` from the same docker file.
# # %%bash
# !docker build -t node-app:0.2 -f dockerfiles/intro.docker .
# Notice in `Step 2` of the output we are using an existing cache layer. From `Step 3` and on, the layers are modified because we made a change in `app.js`.
#
# Run another container with the new image version. Notice how we map the host's port 8000 instead of 80. We can't use host port 4000 because it's already in use.
# **Exercise**
#
# Run this new container in the background using a different port and with the name `my-app-2`.
# # %%bash
# !docker run -p 8000:80 --name my-app-2 -d node-app:0.2
# You can check that both container are running using `docker ps`.
# !docker ps
# And let's test both containers using `curl` as before:
# !curl http://localhost:8000
# !curl http://localhost:4000
# Recall, to stop a container running, you can execute the following command either in a terminal or (because they are running in the background) in a cell in this notebook.
# ### Publish to gcr
#
# Now you're going to push your image to the Google Container Registry (gcr). To push images to your private registry hosted by gcr, you need to tag the images with a registry name. The format is `[hostname]/[project-id]/[image]:[tag]`.
#
# For gcr:
#
# * `[hostname]`= gcr.io
# * `[project-id]`= your project's ID
# * `[image]`= your image name
# * `[tag]`= any string tag of your choice. If unspecified, it defaults to "latest".
# +
import os

# GCP project id used to tag and push images to gcr.io.
PROJECT_ID = "qwiklabs-gcp-04-248da7eb1719" # REPLACE WITH YOUR PROJECT NAME
# Export for the `%%bash` cells below, which reference ${PROJECT_ID}.
# (The original cell also contained a stray no-op string literal `""`,
# removed here.)
os.environ["PROJECT_ID"] = PROJECT_ID
# -
# Let's tag `node-app:0.2`.
# !docker images
# **Exercise**
#
# Tag the `node-app:0.2` image with a new image name conforming to the naming convention `gcr.io/[project-id]/[image]:[tag]`. Keep the image and tag names the same.
# + language="bash"
# docker tag node-app:0.2 gcr.io/${PROJECT_ID}/node-app:0.2
# -
# Now when we list our docker images we should see this newly tagged repository.
# !docker images
# Next, let's push this image to gcr.
# **Exercise**
#
# Push this new image to the gcr.
# + language="bash"
# docker push gcr.io/${PROJECT_ID}/node-app:0.2
# -
# Check that the image exists in `gcr` by visiting the image registry Cloud Console. You can navigate via the console to `Navigation menu > Container Registry` or visit the url from the cell below:
# + language="bash"
# echo "http://gcr.io/${PROJECT_ID}/node-app"
# -
# ### Test the published gcr image
#
# Let's test this image. You could start a new VM, ssh into that VM, and install gcloud. For simplicity, we'll just remove all containers and images to simulate a fresh environment.
#
# First, stop and remove all containers using `docker stop` and `docker rm`. **Be careful not to stop the container running this AI Notebook!**.
# !docker stop my-app && docker rm my-app
# !docker stop my-app-2 && docker rm my-app-2
# Now remove the docker images you've created above using `docker rmi`.
# !docker images
# + language="bash"
# docker rmi node-app:0.2
# docker rmi gcr.io/${PROJECT_ID}/node-app:0.2
# docker rmi node-app:0.1
# docker rmi node:6
# docker rmi -f hello-world:latest
# -
# Confirm all images are removed with `docker images`.
# !docker images
# At this point you should have a pseudo-fresh environment. Now, pull the image and run it.
# # #### %%bash
# docker pull gcr.io/${PROJECT_ID}/node-app:0.2
# docker run -p 4000:80 -d gcr.io/${PROJECT_ID}/node-app:0.2
# You can check that it's running as expected using `curl` as before:
# !curl http://localhost:4000
# Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| immersion/docker_and_kubernetes/labs/1_intro_docker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Globalized Inexact Newton Method
# +
import numpy as np
from IPython.display import display, Image, HTML
display(HTML("""
<style>
.output {
display: flex;
align-items: center;
text-align: center;
}
</style>
"""))
# -
# ## Algorithm
# Render the algorithm pseudo-code image (PNG must sit next to this notebook).
Image(filename='globalized_inexact_newton_method.png')
# ## Step Size: Armijo Rule
# - We want to combine the search direction $d^k = - \nabla f(x^k)$ with step-size $t_k$
# - The Armijo rule is supposed to ensure a sufficient decrease of the objective function
def step_size(self, beta, sigma, x, d, func):
    """Backtracking line search (Armijo's rule).

    Returns the largest step size t = beta**i (i = 0, 1, 2, ...) satisfying
    the sufficient-decrease condition

        f(x + t*d) <= f(x) + t * sigma * grad_f(x)^T d.

    Parameters
    ----------
    beta : float
        Backtracking factor in (0, 1).
    sigma : float
        Sufficient-decrease parameter in (0, 1).
    x : numpy.ndarray
        Current iterate.
    d : numpy.ndarray
        Search direction (must be a descent direction for termination).
    func : object
        Objective exposing ``eval(x)`` and ``gradient(x)``.

    Returns
    -------
    float
        The accepted step size ``beta**i``.
    """
    # Hoist loop-invariant quantities: the original re-evaluated f(x) and
    # grad_f(x)^T d on every backtracking iteration.
    fx = func.eval(x)
    slope = func.gradient(x).dot(d)
    i = 0
    # NOTE: as in the original, the loop terminates only for descent
    # directions (slope < 0); the dead `inequality_satisfied` flag was
    # replaced with an explicit `while True`.
    while True:
        t = np.power(beta, i)
        if func.eval(x + t * d) <= fx + t * sigma * slope:
            return t
        i += 1
# ## Rosenbrock Function
# - Introduced by Howard H. Rosenbrock in 1960, used as a performance test problem for optimization problems.
# - The Rosenbrock function $r: \mathbb{R}^2 \rightarrow \mathbb{R}$ is given by:
# $$r(x) = 100 (x_2 - x_1^2)^2+ (1 - x_1)^2$$
# +
import numpy as np
from src.function import Function
class Rosenbrock(Function):
    """Rosenbrock "banana" function r(x) = 100*(x2 - x1^2)^2 + (1 - x1)^2."""

    def eval(self, x):
        """Return the scalar function value at the 2-D point x."""
        assert len(x) == 2, '2 dimensional input only.'
        x1, x2 = x[0], x[1]
        return 100 * (x2 - x1 ** 2) ** 2 + (1 - x1) ** 2

    def gradient(self, x):
        """Return the gradient vector at the 2-D point x."""
        assert len(x) == 2, '2 dimensional input only.'
        x1, x2 = x[0], x[1]
        g1 = 2 * (-200 * x1 * x2 + 200 * np.power(x1, 3) - 1 + x1)
        g2 = 200 * (x2 - x1 ** 2)
        return np.array([g1, g2])

    def hessian(self, x):
        """Return the symmetric 2x2 Hessian matrix at the 2-D point x."""
        assert len(x) == 2, '2 dimensional input only.'
        x1, x2 = x[0], x[1]
        d11 = -400 * x2 + 1200 * x1 ** 2 + 2
        off_diag = -400 * x1  # d^2f/dx1dx2 == d^2f/dx2dx1 by symmetry
        return np.array([[d11, off_diag], [off_diag, 200]])
# -
# ## Example
# - The parameters will be the following:
# $$\beta := 0.5, \sigma := 10^{-4}, \varepsilon := 10^{-4}$$
# - Start point will be the following:
# $$x^0 := (-1.2, 1)$$
# +
from src.optimizers.newton_method_inexact_minimization import InexactNewtonMethod

# Minimise the Rosenbrock function from the classic hard start point (-1.2, 1).
objective = Rosenbrock()
starting_point = np.array([-1.2, 1])
rho = 1e-8      # globalisation parameter (see algorithm figure above)
p = 2.1         # globalisation exponent
beta = 0.5      # Armijo backtracking factor
sigma = 1e-4    # Armijo sufficient-decrease parameter
epsilon = 1e-6  # stopping tolerance
# NOTE(review): the markdown above states epsilon = 1e-4, but 1e-6 is used
# here — confirm which value is intended.
n = 2           # problem dimension
optimizer = InexactNewtonMethod()
x = optimizer.optimize(starting_point,
                       objective,
                       beta,
                       sigma,
                       epsilon,
                       n,
                       rho,
                       p)
print(f'Optimal Point: {x}')
print(f'Iterations: {optimizer.iterations}')
| notebooks/globalized_inexact_newton_method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # BikeNow XGBoost Regression
#
# Import libraries.
# +
# %%time

import os
import boto3
import re
from sagemaker import get_execution_role

# SageMaker execution role and AWS region of the running notebook instance.
role = get_execution_role()
region = boto3.Session().region_name

# Source data lives in this data-lake bucket; results go under `prefix`.
bucket='bike-demo-stack-applicationdatal-s3bucketdatalake-dncpxowjgqbf' # put your s3 bucket name here, and create s3 bucket
src_file = 'unload/station_status_history_000'
prefix = 'sagemaker/bikenow-xgboost-regression'

# customize to your bucket where you have stored the data
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region,bucket)
print(bucket_path)
# -
# Helper functions to split data into training, validation, and testing sets.
# +
# %%time
import io
import boto3
import random
def data_split(FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST):
    """Randomly split the lines of FILE_DATA into train/validation/test files.

    Lines are drawn uniformly at random without replacement; each split file
    receives int(percent/100 * total) lines (any rounding remainder is
    discarded, matching the original behaviour).

    Fix: all file handles are now closed via context managers — the source
    file handle used to leak, and the three output handles leaked on error.
    """
    with open(FILE_DATA, 'r') as f:
        data = f.readlines()

    num_of_data = len(data)
    data_fractions = [int((PERCENT_TRAIN / 100.0) * num_of_data),
                      int((PERCENT_VALIDATION / 100.0) * num_of_data),
                      int((PERCENT_TEST / 100.0) * num_of_data)]

    split_data = [[], [], []]
    for split_ind, fraction in enumerate(data_fractions):
        for _ in range(fraction):
            # Pick a remaining line uniformly at random (without replacement).
            rand_data_ind = random.randint(0, len(data) - 1)
            split_data[split_ind].append(data.pop(rand_data_ind))

    for out_name, lines in zip((FILE_TRAIN, FILE_VALIDATION, FILE_TEST), split_data):
        with open(out_name, 'w') as out_file:
            out_file.writelines(lines)
def write_to_s3(fobj, bucket, key):
    """Stream the open file object *fobj* into s3://<bucket>/<key> using the module-level region."""
    s3 = boto3.Session(region_name=region).resource('s3')
    target = s3.Bucket(bucket).Object(key)
    return target.upload_fileobj(fobj)
def upload_to_s3(bucket, channel, filename):
    """Upload the local file *filename* to s3://<bucket>/<prefix>/<channel>.

    Note: the object key is '<prefix>/<channel>' — the channel name itself is
    the object name; the local filename is NOT appended to the key.

    Fixes: the file handle is now closed via a context manager, and the
    printed URL matches the actual destination (the original printed a URL
    with the filename appended even though the upload key did not include it).
    """
    key = prefix + '/' + channel
    url = 's3://{}/{}'.format(bucket, key)
    print('Writing to {}'.format(url))
    with open(filename, 'rb') as fobj:
        write_to_s3(fobj, bucket, key)
# -
# Download data and split files.
# +
# %%time
import urllib.request

# Load the dataset from the data-lake bucket (the public abalone URL below is
# kept only as a reference for the original template).
FILE_DATA = 'bikenow'
boto3.Session(region_name=region).resource('s3').Bucket(bucket).Object(src_file).download_file(FILE_DATA)
#urllib.request.urlretrieve("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone", FILE_DATA)

#split the downloaded data into train/test/validation files
FILE_TRAIN = 'bikenow.train'
FILE_VALIDATION = 'bikenow.validation'
FILE_TEST = 'bikenow.test'
PERCENT_TRAIN = 70
PERCENT_VALIDATION = 15
PERCENT_TEST = 15
data_split(FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST)

#upload the files to the S3 bucket
upload_to_s3(bucket, 'train', FILE_TRAIN)
upload_to_s3(bucket, 'validation', FILE_VALIDATION)
upload_to_s3(bucket, 'test', FILE_TEST)
# -

# Get XGBoost container image.
# %%time
from sagemaker.amazon.amazon_estimator import get_image_uri
# NOTE(review): get_image_uri is deprecated in newer SageMaker SDKs
# (replaced by sagemaker.image_uris.retrieve) — confirm the SDK version in use.
container = get_image_uri(region, 'xgboost', '0.90-1')
# Create training job.
# +
# %%time
import boto3
from time import gmtime, strftime

# Unique, timestamped job name (SageMaker requires uniqueness per account/region).
job_name = 'bikenow-xgboost-regression-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Training job", job_name)

#Ensure that the training and validation data folders generated above are reflected in the "InputDataConfig" parameter below.
create_training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": container,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": bucket_path + "/" + prefix + "/single-xgboost"
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.m4.4xlarge",
        "VolumeSizeInGB": 5
    },
    "TrainingJobName": job_name,
    # XGBoost hyperparameters — the API requires all values as strings.
    "HyperParameters": {
        "max_depth":"5",
        "eta":"0.2",
        "gamma":"4",
        "min_child_weight":"6",
        "subsample":"0.7",
        "silent":"0",
        "objective":"reg:linear",
        "num_round":"50"
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 3600
    },
    # Train/validation channels point at the CSV files uploaded above.
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/" + prefix + '/train',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "csv",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/" + prefix + '/validation',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "csv",
            "CompressionType": "None"
        }
    ]
}

client = boto3.client('sagemaker', region_name=region)
client.create_training_job(**create_training_params)

import time

# Poll the job status once a minute until it completes or fails.
status = client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)
while status !='Completed' and status!='Failed':
    time.sleep(60)
    status = client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
    print(status)
# +
# %matplotlib inline
from sagemaker.analytics import TrainingJobAnalytics

# Plot the validation RMSE reported by the training job over time.
metric_name = 'validation:rmse'

metrics_dataframe = TrainingJobAnalytics(training_job_name=job_name, metric_names=[metric_name]).dataframe()
# NOTE: 'plt' below is the Axes returned by DataFrame.plot — it shadows any
# earlier `matplotlib.pyplot as plt` import in this notebook.
plt = metrics_dataframe.plot(kind='line', figsize=(12,5), x='timestamp', y='value', style='b.', legend=False)
plt.set_ylabel(metric_name);

# +
# %%time
import boto3
from time import gmtime, strftime

# Register the trained model artifacts as a SageMaker Model.
model_name=job_name + '-model'
print(model_name)

info = client.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)

primary_container = {
    'Image': container,
    'ModelDataUrl': model_data
}

create_model_response = client.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)

print(create_model_response['ModelArn'])

# +
from time import gmtime, strftime

# Endpoint configuration: a single ml.m4.xlarge instance serving all traffic.
endpoint_config_name = 'bikenow-XGBoostEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
create_endpoint_config_response = client.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialVariantWeight':1,
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])

print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# +
# %%time
import time

# Create the real-time inference endpoint and wait until it is in service.
endpoint_name = 'bikenow-XGBoostEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = client.create_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])

resp = client.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)

# Endpoint provisioning typically takes several minutes; poll once a minute.
while status=='Creating':
    time.sleep(60)
    resp = client.describe_endpoint(EndpointName=endpoint_name)
    status = resp['EndpointStatus']
    print("Status: " + status)

print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
# -
# Runtime client for invoking the deployed endpoint.
runtime_client = boto3.client('runtime.sagemaker', region_name=region)

# Take the first test row as a single-prediction smoke test.
# !head -1 bikenow.test > bikenow.single.test

# +
# %%time
import json
from itertools import islice
import math
import struct

file_name = 'bikenow.single.test' #customize to your test file
with open(file_name, 'r') as f:
    payload = f.read().strip().split(',', 1)  # [label, features-csv]

# Send only the feature columns; the first column is the ground-truth label.
response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                          ContentType='text/csv',
                                          Body=payload[1])
result = response['Body'].read()
result = result.decode("utf-8")
result = result.split(',')
result = [round(float(i)) for i in result]
label = payload[0]
print ('Label: ',label,'\nPrediction: ', result[0])
# +
import sys
import math
def do_predict(data, endpoint_name, content_type):
    """Send one batch of CSV rows to the endpoint and return rounded int predictions."""
    body = '\n'.join(data)
    response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                              ContentType=content_type,
                                              Body=body)
    raw = response['Body'].read().decode("utf-8")
    # The endpoint returns one comma-separated float per input row.
    return [round(float(token)) for token in raw.split(',')]
def batch_predict(data, batch_size, endpoint_name, content_type):
    """Predict over *data* in chunks of *batch_size* rows and return all predictions."""
    items = len(data)
    predictions = []
    for start in range(0, items, batch_size):
        end = start + batch_size
        if end < items:
            predictions.extend(do_predict(data[start:end], endpoint_name, content_type))
        else:
            # Final (possibly short) chunk; the progress dot fires once here,
            # exactly as in the original implementation.
            predictions.extend(do_predict(data[start:items], endpoint_name, content_type))
            sys.stdout.write('.')
    return predictions
# +
# %%time
import json
import numpy as np

# Score the whole held-out test file in batches of 100 rows and report MSE.
with open(FILE_TEST, 'r') as f:
    payload = f.read().strip()

labels = [int(line.split(',', 1)[0]) for line in payload.split('\n')]
test_data = [line.split(',', 1)[1] for line in payload.split('\n')]
preds = batch_predict(test_data, 100, endpoint_name, 'text/csv')

print('\n Mean Squared Error = ', np.mean((np.array(labels) - np.array(preds))**2))
# -
# -
| lambdas/setup_upload_artifacts/artifacts/bikenow-xgboost-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Write a Python program to convert a binary number to decimal number.
# Pops digits from the least-significant end, so bit i correctly gets weight 2**i.
b_num = list(input("Input a binary number: "))
value = 0
for i in range(len(b_num)):
    digit = b_num.pop()
    if digit == '1':
        value = value + pow(2, i)
print("The decimal value of the number is", value)
# -
binary=input("input a binary number")
dic=len(binary)
result=0
# The character at string index i (0-based from the LEFT) carries weight
# 2**(dic-1-i). Fix: the original used 2**i here, which reversed the digit
# weights and gave wrong answers (e.g. "10" evaluated to 1 instead of 2).
for i in reversed(range(dic)):
    result+=int(binary[i])*(2**(dic-1-i))
print(result)
# Fix: the original printed the generator object itself (e.g.
# "<generator object ...>") rather than the squares; materialise the list.
print([x*x for x in range(10)])
# Fix: the original third line,
#   lambda i:int(binary[i]*(2**i)) for i in reversed(range(dic))
# is a SyntaxError (a bare lambda/generator at statement level); it also
# multiplied the *string* digit by 2**i and then called result(binary) on an
# int. Rewritten as a working functional one-liner with correct digit weights.
binary=input("input a binary number")
dic,result=len(binary),0
result=sum(int(binary[i])*(2**(dic-1-i)) for i in reversed(range(dic)))
print(result)
| math/binary_to_decimal_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import librosa.display
from IPython.display import Audio
import os
import pickle
import json
from tqdm import tqdm_notebook

# +
# Dataset location: FSDKaggle2018 audio resampled to 16 kHz.
#path_dataset = '../input/'
path_dataset = '/home/edoardobucheli/Datasets/FSDKaggle2018'
path_train = os.path.join(path_dataset,'audio_train_16k')
path_test = os.path.join(path_dataset,'audio_test_16k')
# -

# ### Load Label Data

train_data = pd.read_csv(os.path.join(path_dataset,'train_post_competition.csv'))
test_data = pd.read_csv(os.path.join(path_dataset,'test_post_competition_scoring_clips.csv'))

from utilities import get_all_classes_dict, get_classes_to_meta_dict, get_labels

# Label dictionaries: class name <-> numeric id, plus meta-class grouping.
num_to_label, label_to_num, n_classes = get_all_classes_dict(train_data)
label_to_meta, label_num_to_meta = get_classes_to_meta_dict(label_to_num)

# Curated = manually verified labels; noisy = unverified labels.
data_cur = train_data[train_data['manually_verified']==1]
data_noi = train_data[train_data['manually_verified']==0]

meta_labels_all, labels_all = get_labels(train_data,label_to_meta, label_to_num)
meta_labels_cur, labels_cur = get_labels(data_cur,label_to_meta, label_to_num)
meta_labels_noi, labels_noi = get_labels(data_noi,label_to_meta, label_to_num)
meta_labels_test, labels_test = get_labels(test_data,label_to_meta, label_to_num)

n_meta_classes = len(np.unique(meta_labels_all))

is_curated = train_data['manually_verified'].tolist()
indx_curated = [i for i,f in enumerate(is_curated) if f == 1]

# ### Load Data

# Pre-computed mel-spectrograms (80 mels, hop 512, 16 kHz), pickled lists.
pickle_train = './preprocessed_train/MS-80-HL512-WF16k-64k'
pickle_test = './preprocessed_test/MS-80-HL512-WF16k-64k'

with open(pickle_train,'rb') as fp:
    x_train = pickle.load(fp)

with open(pickle_test, 'rb') as fp:
    x_test = pickle.load(fp)
# ### Separate Curated and MC

x_cur = [x_train[f] for f in indx_curated]

# Work on the full training set; meta-class 5 is the target group here.
my_x = x_train
meta_labels = meta_labels_all
my_labels = labels_all

indx_mc = [i for i,f in enumerate(meta_labels) if f == 5]

# +
x_mc = [my_x[f] for f in indx_mc]
labels_mc = [my_labels[f] for f in indx_mc]
labels_name_mc = [num_to_label[f] for f in labels_mc]

# Re-index the meta-class-5 label ids to a compact 0..k-1 range.
mc_new_label_mapping = dict([[f,i] for i,f in enumerate(np.unique(labels_mc))])
new_train_labels_mc = [mc_new_label_mapping[f] for f in labels_mc]
# -

# Random non-MC examples form an extra "Unknown" class (the last label id).
# NOTE(review): np.random.randint samples WITH replacement, so duplicates are
# possible and the unknown set may end up slightly smaller than len(x_mc).
indx_unk = [i for i in np.random.randint(0,len(my_x),len(x_mc))if i not in indx_mc]
x_unk = [my_x[f] for f in indx_unk]
labels_unk = np.ones((len(indx_unk),))*(len(np.unique(new_train_labels_mc)))

x_mc_2 = x_mc + x_unk
new_train_labels_mc.extend(labels_unk)
# +
#with open('./c0_mapping','wb') as fp:
#    pickle.dump(c0_new_label_mapping,fp)
# -

# Same construction for the test split: MC clips plus random "Unknown" clips.
indx_test_mc = [i for i,f in enumerate(meta_labels_test) if f == 5]

x_test_mc = [x_test[f] for f in indx_test_mc]
labels_test_mc = [labels_test[f] for f in indx_test_mc]
new_test_labels_mc = [mc_new_label_mapping[f] for f in labels_test_mc]

indx_unk_test = [i for i in np.random.randint(0,len(x_test),len(x_test_mc)) if i not in indx_test_mc]
x_unk_test = [x_test[f] for f in indx_unk_test]
labels_unk_test = np.ones((len(indx_unk_test),))*(len(np.unique(new_test_labels_mc)))

x_test_mc_2 = x_test_mc+x_unk_test
new_test_labels_mc.extend(labels_unk_test)

# +
# Spectrogram geometry: 4 s clips at 16 kHz with hop 512 -> 125 frames.
sr = 16000
file_length = sr*4
hop_length = 512
n_mels = 80
frames = int(np.ceil(file_length/hop_length))
# -
# # Malley CNN

from CNNetworks2D import malley_cnn_80
from tensorflow.keras.optimizers import Adam
import tensorflow as tf

input_shape = ([n_mels,frames])

lr = 0.001

# Start from the 41-class model pre-trained on the full FSDKaggle label set.
model = malley_cnn_80(input_shape,41)
model.load_weights('./weights_m41.h5')
#model.summary()

# Cut the network at the global pooling layer to reuse it as a feature extractor.
new_output_name = 'global_max_pooling2d'
new_output_layer = model.get_layer(new_output_name).output

model_headless = tf.keras.Model(inputs = model.input, outputs = new_output_layer)

# +
#model_headless.trainable = False
# -

# New head: dense 512 + dropout + softmax over the MC classes plus "Unknown".
X = tf.keras.layers.Dense(512,activation='relu')(model_headless.output)
X = tf.keras.layers.Dropout(0.5)(X)
X = tf.keras.layers.Dense(len(mc_new_label_mapping)+1, activation = 'softmax')(X)

model = tf.keras.Model(inputs = model_headless.input, outputs = X)

# Freeze the pre-trained convolutional base; train only the new head.
for l in model.layers[:13]:
    l.trainable = False

model.summary()

model.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])

# ### Make Generators

from data_generator import DataGenerator

# Shuffle once so training batches mix MC and Unknown examples.
new_order = np.arange(len(x_mc_2))
np.random.shuffle(new_order)

x_mc_2 = [x_mc_2[i] for i in new_order]
new_train_labels_mc = [new_train_labels_mc[i] for i in new_order]

train_generator = DataGenerator(x_mc_2,new_train_labels_mc)
val_generator = DataGenerator(x_test_mc_2,new_test_labels_mc)
# ### Train the Model

model.fit_generator(train_generator,epochs = 25,validation_data=val_generator)

# +
# Crop each test spectrogram to a fixed 125-frame window (random crop when longer).
# NOTE(review): if a clip has fewer than 125 frames the assignment below will
# fail with a shape mismatch — confirm all test clips are at least 4 s long.
x_test_mc = np.zeros((len(x_test_mc_2),80,125))

for i, this_x in enumerate(x_test_mc_2):
    this_frames = this_x.shape[1]
    if this_frames > 125:
        max_start = this_frames - 125
        start = np.random.randint(0,max_start)
        end = start+125
        this_x = this_x[:,start:end]
    x_test_mc[i] = this_x
# -

model.evaluate(x_test_mc,new_test_labels_mc)

y_scores = model.predict(x_test_mc)
y_hat_mc = np.argmax(y_scores, axis = 1)

# ### Plot Confussion Matrix

from utilities import plot_cm

# Human-readable class names for the confusion-matrix axes.
mc_labels = list(mc_new_label_mapping.keys())
labels = [num_to_label[f] for f in mc_labels]
labels.append('Unknown')

plot_cm(new_test_labels_mc,y_hat_mc,figsize = (20,20), labels = labels,xrotation = 0)

model.save_weights('./weights_c5_malley_v2.h5')
| test_notebooks/FSDKaggle2018_MC5_Classification_Malley.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (system-wide)
# language: python
# metadata:
# cocalc:
# description: Python 3 programming language
# priority: 100
# url: https://www.python.org/
# name: python3
# ---
# + jupyter={}
# Figure 8 from DA6 by American Wood Council, 2007
# Revision History
# 0.0 19-Jul-2019 E.Durham created initial notebook
# + jupyter={}
# Given:
# span of beam, L in meters
# magnitude of load, P in kN
# distance from left end of beam to point of load, a in meters
# distance from left end of beam to point of interest, x in meters
# EI of member, use 1 if unknown; in kN*m**2
# Derive:
# distance from right end of beam to point of load, b
# reactions at ends of beams, R_1 and R_2
# shear at points of interest
# bending at points of interest
# deflection at points of interest
# plot graphs of shear, moment and deflection
# + jupyter={}
import numpy as np

# Pint Unit Library
from pint import UnitRegistry
unit = UnitRegistry()
Q_ = unit.Quantity
unit.default_format = '~' # ~ for unit abreviations, P for prettyprint, or both

# Define symbols for common units
m = unit.meter; mm = unit.millimeter; inch = unit.inch;
kN = unit.kilonewton; kPa = unit.kilopascal; MPa = unit.megapascal;
psi = unit.psi; ksi = unit.ksi

SAMPLE_SIZE = 99 # number of discrete points within beam to compute

# + jupyter={}
# Enter case values sans units at present
P = 1.6 * kN        # magnitude of concentrated load
a = 1.3 * m         # distance from left support to the load
L = 7.35 * m        # span of the beam
EI = 1 * kN*m**2    # EI = 1 -> deflections below are effectively EI*delta

# Derive distance b
b = L - a

# Calculate Reactions
R_1 = (P*b)/L # reaction at left support
R_2 = (P*a)/L # reaction at right support

# Derive max moment (occurs under the load)
M_max = (P*a*b)/L
# + jupyter={}
def shear_x(x, P, a, L):
    """
    Shear force in a simply supported beam carrying a single point load.

    x : distance from the left support to the point of interest
    P : magnitude of the concentrated load
    a : distance from the left support to the load
    L : span of the beam

    Units are not converted here: x, a and L must share the same unit.
    For x outside [0, L] an error is printed and NaN is returned.
    Returns the shear at x in the units of P.
    """
    b = L - a  # distance from the load to the right support
    zero = 0 * x.units
    if x < zero or x > L:
        print("Error: x cannot be less than 0 or greater than L which is {} in this case".format(L))
        return float('NaN')
    if x <= a:
        # Constant positive shear to the left of (and at) the load.
        return (P*b)/L
    # Constant negative shear to the right of the load.
    return -(P*a)/L
# + jupyter={}
def moment_x(x, P, a, L):
    """
    Bending moment in a simply supported beam carrying a single point load.

    x : distance from the left support to the point of interest
    P : concentrated load
    a : distance from the left support to the load
    L : span of the beam

    Units are not converted here: x, a and L must share the same unit.
    For x outside [0, L] an error is printed and NaN is returned.
    Returns the moment at x in units_of_P * units_of_x.
    """
    b = L - a  # distance from the load to the right support
    zero = 0 * x.units
    if x < zero or x > L:
        print("Error: x cannot be less than 0 or greater than L which is {} in this case".format(L))
        return float('NaN')
    if x <= a:
        # Linear rise from the left support to the load.
        return (P*x*b)/L
    # Linear fall from the load to the right support.
    return (P*(L-x)*a)/L
# + jupyter={}
def deflection_x(x, P, a, L, EI=1):
    """
    Deflection of a simply supported beam carrying a single point load.

    x : distance from the left support to the point of interest
    P : concentrated load
    a : distance from the left support to the load
    L : span of the beam
    EI : flexural rigidity in units_of_P * units_of_L**2; with the default
         EI = 1 the returned values are overstated by the true EI (divide
         by the actual EI to obtain the real deflection).

    Units are not converted here: x, a and L must share the same unit.
    For x outside [0, L] an error is printed and NaN is returned.
    Returns the (negative, downward) deflection at x in units_of_L.
    """
    b = L - a  # distance from the load to the right support
    zero = 0 * x.units
    if x < zero or x > L:
        print("Error: x cannot be less than 0 or greater than L which is {} in this case".format(L))
        return float('NaN')
    if x == a:
        # Deflection directly under the load.
        return -(P*a**2*b**2)/(3*EI*L)
    if x < a:
        # Segment between the left support and the load.
        return -((P*b*x)/(6*EI*L))*(L**2-b**2-x**2)
    # Segment between the load and the right support.
    return -((P*a*(L-x))/(6*EI*L))*(2*L*x-x**2-a**2)
# + jupyter={}
# create beam array and load with position, shear, moment and deflection values
# Row 0: x positions; row 1: shear; row 2: moment; row 3: deflection (times EI).
beam = np.zeros((4, SAMPLE_SIZE))
beam[0, : ] = np.linspace(0, L.magnitude, SAMPLE_SIZE) # position

for i in range(SAMPLE_SIZE):
    beam[1, i] = shear_x(beam[0, i]*L.units, P, a, L).magnitude # stuff shear values
    beam[2, i] = moment_x(beam[0, i]*L.units, P, a, L).magnitude # stuff moment values
    beam[3, i] = deflection_x(beam[0, i]*L.units, P, a, L, EI).magnitude # stuff deflection values

# beam[0:]

# + jupyter={}
# plot shear diagram with matplotlib
import matplotlib.pyplot as plt
plt.ion()
# # %matplotlib inline

ax = plt.plot(beam[0, : ], beam[1, : ])
# NOTE(review): matplotlib >= 3.5 renamed grid's 'b' argument to 'visible'.
plt.grid(b=True, which='both', axis='both')
plt.xlabel('x label')
plt.ylabel('y label')
plt.title('Shear Diagram')
# plt.legend()
# cursor = Cursor(ax, useblit=True, color='red', linewidth=2)
plt.show()

# + jupyter={}
# from matplotlib.widgets import Cursor
bx = plt.plot(beam[0, : ], beam[2, : ])
plt.grid(b=True, which='both', axis='both')
plt.xlabel('x label')
plt.ylabel('y label')
plt.title('Moment Diagram')
# plt.legend()
# cursor = Cursor(ax, useblit=True, color='red', linewidth=2)
plt.show()

# + jupyter={}
# plot deflection diagram
cx = plt.plot(beam[0, : ], beam[3, : ])
plt.grid(b=True, which='both', axis='both')
plt.xlabel('x label')
plt.ylabel('Units of L')
plt.title('Deflection Diagram')
# plt.legend()
plt.show()
# -
| Beam_Formulas/.ipynb_checkpoints/Beam_1.1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Load the image and convert it to a single-channel grayscale array.
img = cv2.cvtColor(cv2.imread("../imori.jpg"), cv2.COLOR_BGR2GRAY)
H, W = img.shape

# Fix: `img` is 2-D grayscale here, so cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# raises cv2.error (BGR2RGB requires a 3-channel input). Display the
# grayscale array directly with a gray colormap instead.
plt.imshow(img, cmap="gray")
plt.show()
# +
# Emboss filter: 3x3 weighted sum over a zero-padded copy of the image.
pad = 1  # zero-padding width
K = 3    # kernel size
F = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])  # emboss kernel

tmp_img = np.zeros((H + 2 * pad, W + 2 * pad))
tmp_img[pad:pad + H, pad:pad + W] = img.copy()
output_img = tmp_img.copy()

for y in range(H):
    for x in range(W):
        window = tmp_img[y:y + K, x:x + K]
        output_img[y + pad, x + pad] = np.sum(F * window)

output_img = np.clip(output_img, 0, 255).astype("uint8")
# -
# Show the embossed result (grayscale).
plt.imshow(output_img, cmap="gray")
plt.title("emboss")
plt.show()
| Question_11_20/solutions_py/solution_018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
class ListNode:
    """Singly-linked list node."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
        """Swap the VALUES of the k-th node from the beginning and the k-th
        node from the end (1-indexed) and return the list head.

        Implemented (the original template body was `pass`, which returned
        None). Two passes: count the length, then walk to both positions and
        swap their values in place.
        """
        length = 0
        node = head
        while node:
            length += 1
            node = node.next
        first = head
        for _ in range(k - 1):
            first = first.next
        second = head
        for _ in range(length - k):
            second = second.next
        first.val, second.val = second.val, first.val
        return head
# +
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapNodes(self, head: ListNode, k: int) -> ListNode:
pre = nxt = None # 第 k 个节点的前一个节点,后一个节点
k_val = None
n = 0
res = dummy = head
while head:
n += 1
if n < k:
pre = head
nxt = head.next.next if head.next else None
k_val = head.next.val
head = head.next
r_pre = r_nxt = None
r_k_val = None
r_k = n - k + 1
n = 0
while dummy:
n += 1
if n < r_k:
r_pre = dummy
r_nxt = dummy.next.next if dummy.next else None
r_k_val = dummy.next.val
dummy = dummy.next
print(k_val, r_k_val)
print(pre, nxt, r_pre, r_nxt)
pre.next = ListNode(r_k_val, nxt)
r_pre.next = ListNode(k_val, r_nxt)
return res
# -
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
        """Collect all values, swap positions k-1 and -k, then rebuild the list."""
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        # k-th from the front is index k-1; k-th from the end is index -k.
        values[k - 1], values[-k] = values[-k], values[k - 1]
        sentinel = ListNode()
        tail = sentinel
        for value in values:
            tail.next = ListNode(value)
            tail = tail.next
        return sentinel.next
a = [1, 2, 3]
# Print the elements back to front (3, 2, 1), one per line.
for v in reversed(a):
    print(v)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
        """Walk to the k-th node from each end and swap their values in place."""
        # First pass: count the nodes.
        length = 0
        cursor = head
        while cursor:
            length += 1
            cursor = cursor.next
        # k-th node from the front (k-1 hops from head).
        front = head
        for _ in range(k - 1):
            front = front.next
        # k-th node from the end == (length-k+1)-th from the front.
        back = head
        for _ in range(length - k):
            back = back.next
        front.val, back.val = back.val, front.val
        return head
| Linked List/0112/1721. Swapping Nodes in a Linked List.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to integrate Financial Data from Refinitiv Data Platform to Excel with Xlwings - Part 1
#
# ## Overview
#
# With the rise of Data Scientists, Financial coders or Traders (aka Citizen Developers), and the rapid growth of [Jupyter](https://jupyter.org/) application, the main target of every Citizen Developer is replacing [Microsoft Excel](https://www.microsoft.com/en-us/microsoft-365/excel) with the Jupyter application (reference: [Jupyter is the new Excel](https://towardsdatascience.com/jupyter-is-the-new-excel-a7a22f2fc13a)).
#
# However, Excel is not obsolete and is still an important file-format/application for businesses. It is easy to distribute, and non-IT people (especially your boss) can open it easily rather than having to set up the Jupyter/Python environment.
#
# This article is the first part of the series that demonstrate how to export financial data and report from Python/Jupyter application to Excel report file using xlwings CE and xlwings Pro libraries. The demo application uses content from [Refinitiv Data Platform (RDP)](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis) as an example of a dataset.
#
# This first notebook will be focusing on xlwings CE. The [second notebook](./part2_xlwings_pro_notebook.ipynb) is focusing on xlwings PRO.
#
# *Note*: All figures and reports demonstrate Time-Series 90 days data queried on 14th October 2020.
# ## Introduction to xlwings
#
# [xlwings](https://www.xlwings.org) is a Python library that makes it easy to call Python from Excel and vice versa on Windows and macOS. The library lets you automate Excel from Python source code to produce reports or to interact with Jupyter notebook applications. It also allows you to replace VBA macros with Python Code or write UDFs (user defined functions - Windows only).
# * The [xlwings CE](https://docs.xlwings.org/en/stable) is a free and open-source library ([BSD-licensed](https://opensource.org/licenses/BSD-3-Clause)) which provides basic functionalities to lets developers integrate Python with Excel.
# * The [xlwings PRO](https://www.xlwings.org/pro) provides more advance features such as [reports](https://www.xlwings.org/reporting), embedded Python code in Excel, one-click installers for easy deployment, video training, dedicated support and much more.
#
#
# *Note*:
# - This notebook is based on xlwings versions **0.20.7**, **0.21.0** and **0.21.2**.
#
# ## Introduction to Refinitiv Data Platform (RDP) Libraries
#
# Refinitiv provides a wide range of contents and data which require multiple technologies, delivery mechanisms, data formats, and the multiple APIs to access each content. The [RDP Libraries](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries) are a suite of ease-of-use interfaces providing unified access to streaming and non-streaming data services offered within the [Refinitiv Data Platform (RDP)](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis). The Libraries simplified how to access data to various delivery modes such as Request Response, Streaming, Bulk File, and Queues via a single library.
#
# Depending on the level of service and data requirements, developers can easily choose how to access data services using the appropriate access channel defined within the library.
#
# 
#
# Using the library developers can access content from all 3 of the access points - all from within the same application if required. The RDP Libraries are available in the following programming languages:
# - Refinitiv Supported Editions: Python and TypeScript/JavaScript (coming in 2020)
# - Community-based Edition: C#
#
# For more deep detail regarding the RDP Libraries, please refer to the following articles and tutorials:
# - [Developer Article: Discover our Refinitiv Data Platform Library part 1](https://developers.refinitiv.com/article/discover-our-upcoming-refinitiv-data-platform-library-part-1).
# - [Developer Article: Discover our Refinitiv Data Platform Library part 2](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-2).
# - [Refinitiv Data Platform Libraries Document: An Introduction page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/documentation).
#
# ### Disclaimer
#
# As this notebook is based on alpha versions **1.0.0.a5** and **1.0.0.a7** of the Python library, the method signatures, data formats, etc. are subject to change.
#
# ## Code Walkthrough
#
# Let start with xlwings CE first. The application needs to import ```xlwings``` and ```refinitiv.dataplatform``` packages in order to interact with xlwings CE and RDP library.
# import xlwings and RDP libraries
import xlwings as xw
import refinitiv.dataplatform as rdp
# import all required libraries for this notebook
import datetime
import configparser as cp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as tick
import json
# You should save a text file with **filename** `rdp.cfg` having the following contents:
#
# [rdp]
# username = YOUR_RDP_EMAIL_USERNAME
#     password = YOUR_RDP_PASSWORD
# app_key = YOUR_RDP_APP_KEY
#
# This file should be readily available (e.g. in the current working directory) for the next steps.
cfg = cp.ConfigParser()
cfg.read('rdp.cfg')
# The RDP Libraries let application consumes data from the following platforms
# - DesktopSession (Eikon/Refinitiv Workspace)
# - PlatformSession (RDP, Refinitiv Real-Time Optimized)
# - DeployedPlatformSession (deployed Refinitiv Real-Time/ADS)
#
# This Jupyter Notebook focuses on the *PlatformSession* only. However, the main logic for the other session types is the same when interacting with the xlwings library.
# Open RDP Platform Session
# Open the RDP Platform Session using the credentials stored in rdp.cfg.
session = rdp.open_platform_session(
    cfg['rdp']['app_key'],
    rdp.GrantPassword(
        username = cfg['rdp']['username'],
        # Fix: the password lives in the same [rdp] section as the other
        # credentials ('<PASSWORD>' was a credential-scrubbing artifact).
        password = cfg['rdp']['password']
    )
)
# Verify the session state (expected to report an open state on success).
session.get_open_state()
# Firstly, we will use RDP Libraries Function Layer to request time-series data as a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) object.
# Declare parameter(s)
universe = 'VOD.L'
historical_title = '%s Historical Data' % (universe)
df_historical = rdp.get_historical_price_summaries(
universe = universe,
interval = rdp.Intervals.DAILY,
count = 90,
fields = ['BID','ASK','OPEN_PRC','HIGH_1','LOW_1','TRDPRC_1','NUM_MOVES']
)
df_historical
# ### Interact with xlwings CE
#
# With xlwings, we can export this ```df_historical``` DataFrame to excel directly.
#
# The first step is initiate xlwings object and establish a connection to a workbook.
wb = xw.Book() # Creating an new excel file. wb = xw.Book(filename) would open an existing file
# The above step initiates xlwings workbook class in ```wb``` object. The ```wb = xw.Book()``` statement creates a new excel file. If you are using ```wb = xw.Book(filename)```, the xlwings will open an existing file.
#
# 
#
# Next, instantiate the xlwings [sheet object](https://docs.xlwings.org/en/stable/api.html#sheet). The application will interact with Excel file mostly via this sheet object.
#
# +
# Select the first excel sheet, and rename it
historical_sheet = wb.sheets[0]
historical_sheet.name = historical_title
# -
#
# 
# Then you can just pass your dataframe object to ```sheet.range(<cell>).value``` property to set Pandas DataFrame to Excel directly.
# +
# Set historical_sheet dataframe to cell A1
historical_sheet.range("A1").value = df_historical.head(30)
# -
#
# 
# That is, the application is ready to get and export Refinitiv Data to an Excel file.
#
# The above example shows how to export data "as is", which is often hard to read. The application can use the xlwings API to customize the Excel report's look and feel, and then reshape the Pandas DataFrame to make the data easier to understand.
# Clear current sheet
historical_sheet.clear()
# Then we use xlwings ```Sheet``` object and its ```api``` property to change report look and feel.
#
# *Note*:
# The xlwings ```api``` property supports Windows only. For MacOS, please refer to [this page](https://docs.xlwings.org/en/stable/missing_features.html).
# Write the report title into cell A1, then style it through the Excel
# COM API (the .api property is Windows-only, as noted above).
historical_sheet.range("A1").value = historical_title
historical_sheet.range("A1").api.Font.Size = 14 # Change font size
historical_sheet.range("A1").api.Font.ColorIndex = 2 # Change font color
historical_sheet.range('A1:H1').color = (0,0,255) # Change cell background color
# The above statements create the following excel report look and feel.
#
# 
#
# We can restructure the ```df_historical``` DataFrame to make it easier to read by naming the index column to "Date"
df_historical.index.name = 'Date'
df_historical.head(5)
# Next, we set the DataFrame object to *A2* cell, then set column header font and background color to make them distinguish from data.
# +
# Write the DataFrame starting at A2: the column headers land in row 2,
# the data rows below.
historical_sheet.range("A2").value = df_historical.head(30)
# Make Column headers bold.
# Fix: the original used range('2:1'), which Excel interprets as rows 1-2
# and therefore also bolded the title row; the headers are in row 2 only.
historical_sheet.range('2:2').api.Font.Bold = True
# Change cell background color of the header row
historical_sheet.range('A2:H2').color = (144,238,144)
# Set sheet autofit the width of row
historical_sheet.autofit('r')
# -
# The result is the following readable report table:
#
# 
# ## Plotting a Graph
#
# The xlwings CE also supports [Matplotlib](https://matplotlib.org/) figures in Excel as a picture.
#
# Firstly, we change all non-Date columns data type from String to Float.
# Convert every data column from string to float in a single vectorized
# call ('Date' is the index at this point, so all columns are price/count
# values stored as text). Replaces the per-column loop.
df_historical = df_historical.astype(float)
# we change the DataFrame Date index to be a data column. This will let us plot a graph using **Date** as X-Axis.
df_historical.reset_index(level=0, inplace=True)
df_historical.head(5)
# Then sort data as ascending order.
# Sort DataFrame by Date
df_historical.sort_values('Date',ascending=True,inplace=True)
# +
# Plotting a Graph
# Plot the open/high/low/trade price series as lines with Date on the x-axis.
columns = ['OPEN_PRC','HIGH_1','LOW_1','TRDPRC_1']
df_historical.set_index('Date',drop=True,inplace=True)
fig = plt.figure()
plt.ticklabel_format(style = 'plain')
plt.title('VOD.L interday data for last 90 days', color='black',fontsize='x-large')
ax = fig.gca()
# Reuse the axes object captured above instead of calling fig.gca() a
# second time (the original left `ax` unused).
df_historical.plot(kind='line', ax = ax, y=columns, figsize=(14,7) )
plt.show()
# -
# We will put this graph at the end of the report table in the Excel sheet. The application can check the position of the last row of the report table with xlwings [end()](https://docs.xlwings.org/en/0.20.5/api.html#xlwings.Range.end) function.
#
# - The ```sheet.cells.last_cell``` statement returns lower right cell
# - The ```sheet.cells.last_cell.row``` statement returns row of the lower right cell
# +
# historical_sheet.cells.last_cell.row = row of the lower right cell
'''
change to your specified column, then go up until you hit a non-empty cell
'''
historical_last_row = historical_sheet.range((historical_sheet.cells.last_cell.row, 1)).end('up').row
historical_last_row
#historical_last_row = historical_sheet.range('A' + str(historical_sheet.cells.last_cell.row)).end('up').row
# -
# Then, gets the position of the *last row of the table + 3 rows* cell. This makes a space between the data table and graph.
rng = historical_sheet.range('B{row}'.format(row = historical_last_row + 3))
rng
# Next, we add this figures as a picture to the xlwings CE sheet object with [pictures API](https://docs.xlwings.org/en/0.20.5/api.html#pictures).
historical_sheet.pictures.add(fig, name='MyPlot', update=True, top=rng.top, left=rng.left)
# The result is the following readable report table:
#
# 
# ## Adding new Excel Sheet
#
# We can create a new excel sheet and export data from RDP to that newly created sheet dynamically.
esg_sheet_title = '%s ESG Data' % (universe)
# Create new sheet for ESG Data
wb.sheets.add(esg_sheet_title)
# The above xlwings statement creates a new sheet in your excel workbook.
#
# 
#
# Then you can get this newly created sheet object and export data to it. I will demonstrate with Environment, Social, and Governance data (ESG) using RDP Delivery Layer.
# +
# -- Requesting ESG Data
RDP_version = '/v1'
base_URL = 'https://api.refinitiv.com'
category_URL = '/data/environmental-social-governance'
service_endpoint_URL = '/views/scores-full'
query_parameters = {
'universe': universe,
'start': -5,
'end': 0
}
# +
endpoint_url = base_URL + category_URL + RDP_version + service_endpoint_URL #https://api.refinitiv.com/data/environmental-social-governance/v1/views/scores-full
try:
endpoint = rdp.Endpoint(session, endpoint_url)
response = endpoint.send_request( query_parameters = query_parameters )
print('This is a ESG data result from RDP library')
print(response.data.raw)
except Exception as exp:
print('RDP Libraries: Delivery Layer exception: %s' % str(exp))
print('\n')
# -
# The data returned from RDP Delivery layer is in JSON message format, so you need to convert it to Pandas DataFrame first.
# +
titles = [i["title"] for i in response.data.raw['headers']]
esg_df = pd.DataFrame(response.data.raw['data'],columns=titles)
esg_df.head(3)
# +
# initiate the xlwings sheet object]
esg_sheet = wb.sheets[esg_sheet_title]
#Set Sheet Title
esg_sheet.range("A1").value = 'VOD.L Environmental, Social and Governance Scores for last 5 years'
esg_sheet.range("A1").api.Font.Size = 14 # Change font size
esg_sheet.range("A1").api.Font.ColorIndex = 2 # Change font color
esg_sheet.range('A1:U1').color = (0,0,255) # Change cell background color
# -
# The result is following:
#
# 
#
# Then we export DataFrame ```esg_df``` object to ESG sheet ```esg_sheet``` object.
# +
# Write the ESG DataFrame at A2 without its index column; headers land in row 2.
esg_sheet.range("A2").options(index=False).value = esg_df
# Bold the column headers (row 2).
# Fix: the original range('2:1') spans rows 1-2 and also bolded the title row.
esg_sheet.range('2:2').api.Font.Bold = True
esg_sheet.range('A2:U2').color = (144,238,144) # Change cell background color
# -
# 
# Then we save this excel file with xlwings ```Book``` object ```save()``` function.
# +
wb.save('rdp_report.xlsx') # Classic Jupyter Notebook
# wb.save() # Note: JupyterLab does not allow overwriting the existing file the way the classic Notebook does.
# -
# ## Reporting with xlwings PRO
#
# The above example source code shows that you can create an excel report file from Refinitiv Data easily with xlwings CE API. However, the Python application source code is a combination of formatting the report's look & feel and handling the data which makes the source code difficult to maintain in the long run.
#
# The [xlwings PRO ](https://www.xlwings.org/pro) has features to solve all of CE version limitations. The [xlwings Reports](https://www.xlwings.org/reporting) provides a capability to generate excel report file with the following features:
# - **Separation of code and design**: Users without coding skills can change the template on their own without having to touch the Python code.
# - **Template variables**: Python variables (between curly braces) can be directly used in cells, e.g. ```{{ title }}```. They act as placeholders that will be replaced by the actual values.
# - **Frames for dynamic tables**: Frames are vertical containers that dynamically align and style tables that have a variable number of rows.
#
# You can get a free trial for xlwings PRO [here](https://www.xlwings.org/pro), then follow the instruction on [How to activate xlwings PRO](https://docs.xlwings.org/en/stable/installation.html#how-to-activate-xlwings-pro) page.
#
#
#
# Now let import the report package from xlwings PRO.
#
# +
# #uncomment if you do not already install xlwings PRO package.
# #Install xlwings PRO packages in a current Jupyter kernal
#import sys
# #!{sys.executable} -m pip install "xlwings[pro]"
# -
from xlwings.pro.reports import create_report
# +
# Resize figure
# fig.set_size_inches(6.4, 4.8)
# -
# Then create an Excel template as a *rdp_report_template.xlsx* file with the following template format:
#
# 
#
# This template defines all look and feel (font, color, etc.) and also the position of auto generated data with a variable inside ```{{``` and ```}}```.
#
# Then use the [report-api](https://docs.xlwings.org/en/stable/api.html#reports-api) to generate excel file based on a template file and data with ```create_report()``` function.
wb = create_report(
'rdp_report_template.xlsx',
'rdp_report_pro.xlsx',
historical_title=historical_title,
df_historical=df_historical.head(10),
graph= fig
)
# The above ```create_report()``` function will generate *rdp_report_pro.xlsx* file with format defined in rdp_report_template.xlsx and data that we pass to the function.
#
# 
# The application no longer needs to interact with or hard-code cells, sheets, and workbooks. The xlwings PRO report package automatically replaces the ```{{ historical_title }}```, ```{{ df_historical }}``` and ```{{graph}}``` variables with the data that the application passes to the report package's ```create_report()``` function.
#
# The application can pass text, DataFrame or even Graph to the function and xlwings will generate the excel report file based on the look and feel of the template file.
# ### Close RDP Session
# +
# -- Close Session, just calls close_session() function
rdp.close_session()
print(session.get_open_state())
# -
# ## Conclusion and Next Step
#
# The xlwings CE library lets Python developers integrate data with Excel in a simple way. The library is suitable for a wide range of developers from casual coders, data scientists, professional traders to seasoned programmers, and allows them to work on data analysis and generate reports based on their skill.
#
# The next part will cover and show how powerful xlwings PRO is when comparing to CE library. Python developers' life will be easier with xlwings PRO.
#
# At the same time, the [Refinitiv Data Platform (RDP) Libraries](https://developers.refinitiv.com/refinitiv-data-platform/refinitiv-data-platform-libraries) let developers rapidly access Refinitiv Platform content with a few lines of code that easy to understand and maintain. Developers can focus on implement the business logic or analysis data without worry about the connection, authentication detail with the Refinitiv Platforms.
#
# The integration between Refinitiv APIs and xlwings is not limited to only RDP Libraries. Any [Refinitiv API](https://developers.refinitiv.com/en/api-catalog?i=1;q1=page-type%3Aapi;q2=devportal%3Alanguages~2Fpython;sort=title;sp_c=12;sp_cs=UTF-8;sp_k=devportal-prod;view=xml;x1=w-page-type-id;x2=api-language) that supports Python such as [Eikon Data API](https://developers.refinitiv.com/en/api-catalog/eikon/eikon-data-api) ([Eikon Data API-xlwings article](https://developers.refinitiv.com/en/article-catalog/article/financial-reporting-with-eikon-and-excel)), or [RKD API](https://developers.refinitiv.com/en/api-catalog/refinitiv-knowledge-direct/refinitiv-knowledge-direct-api-rkd-api) can work with xlwings using the same concept and code logic as this RDP Library notebook example.
#
# ## References
#
# You can find more details regarding the Refinitiv Data Platform Libraries, xlwings, and related technologies for this notebook from the following resources:
# * [Refinitiv Data Platform (RDP) Libraries](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries) on the [Refinitiv Developer Community](https://developers.refinitiv.com/) web site.
# * [Xlwings web site](https://www.xlwings.org/).
# * [Financial Reporting with Eikon, xlwings and Excel](https://developers.refinitiv.com/en/article-catalog/article/financial-reporting-with-eikon-and-excel).
# * [Xlwings API Reference](https://docs.xlwings.org/en/stable/api.html).
# * [Xlwings Document page](https://docs.xlwings.org/en/stable/).
# * [RDP Libraries Quick Start Guide page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/quick-start).
# * [RDP Libraries Tutorial page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/tutorials).
# * [Discover our Refinitiv Data Platform Library (part 1)](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-1).
# * [Discover our Refinitiv Data Platform Library (part 2)](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-2).
#
# For any questions related to this article or Refinitiv Data Platform Libraries, please use the Developers Community [Q&A Forum](https://community.developers.refinitiv.com/spaces/321/refinitiv-data-platform-libraries.html).
| notebook/rdp_xlwingsce_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TUTORIAL: Sentiment analysis on Tweets using Hugging Face
#
# <img src="attachment:f0f2279d-9fa1-4cc2-8c4b-e5d702ee7b47.png" width="300"/>
#
# The aim of this tutorial is to use NLP to analyse sentiments on Tweets.
#
# ### USE CASE: OVHcloud French Tweets
#
# A pre-trained [Hugging Face](https://huggingface.co/) model based on [BERT](https://huggingface.co/transformers/model_doc/bert.html) was then chosen to allow an optimal analysis of the sentiments of these Tweets: [bert-base-multilingual-uncased-sentiment](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment).
#
# ## Introduction
#
# **What is Hugging Face?**
#
# Hugging Face is a NLP startup, known for creating open-source software such as Transformers and Datasets, which are used for building NLP systems. Hugging Face models can be used for classification, question answering, translation and many other NLP tasks.
#
# ## Code
# - Install and import dependencies
# - Load your dataset with Tweets
# - Display the parameters you need
# - Define the preprocess function
# - Specify Tweets language
# - Download the pretrained model
# - Run an analysis of Tweets using the model
# - Export the results
# - Display the results
#
# ### Step 1 - Install and import dependencies
#
# Before starting, **install the "*requirements.txt*" file**.
#
# ⚠️ Remember to restart the kernel after installations!
#import dependencies
import transformers
import pandas as pd
import re
import torch
import matplotlib.pyplot as plt
# check GPU availability
print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))
# ### Step 2 - Load your dataset with Tweets
#
# First, you can load your data into an object container, named `data` for example, stored in [Object Storage](https://docs.ovh.com/gb/en/ai-training/data-cli/).
#
# The path to your data becomes: `/workspace/data`
# +
dataframe = pd.read_csv("/workspace/data/tweet_2021-10-16.csv",sep = ',')
# display the first row
dataframe.head()
# -
# ### Step 3 - Display the parameters you need
#
# For the purposes of this tutorial, we are only interested in the `Tweets` and `Language `columns.
# +
# choose the 2 columns
tweets = dataframe['text'].values
langs = dataframe['lang'].values
# the 5 first rows
print("tweets:",tweets[:5])
print("langs:",langs[:5])
# -
# ### Step 4 - Define the preprocess function
def data_preprocess(words):
    """Clean one raw Tweet: drop URL tokens and collapse repeated spaces.

    The text is split on single spaces, every token beginning with
    'http' (a link) is discarded, the survivors are re-joined, and any
    run of consecutive spaces is squeezed down to one.
    """
    tokens = words.split(' ')
    kept = [token for token in tokens if not token.startswith('http')]
    rejoined = ' '.join(kept)
    # collapse any sequence of consecutive spaces into a single space
    return re.sub(' +', ' ', rejoined)
tweets = [data_preprocess(tweet) for tweet in tweets]
tweets[:5]
# ### Step 5 - Specify Tweets language
# +
# Keep only the Tweets whose language tag is French ('fr').
# (Replaces the indexed loop that carried a redundant manual `i += 1`
# alongside the `for i in range(...)` counter.)
french_tweets = [tweet for tweet, lang in zip(tweets, langs) if lang == 'fr']
french_tweets_df = pd.DataFrame(french_tweets)

# display the five first Tweets
print("Tweets in french only:",french_tweets_df[:5])
# -
# ### Step 6 - Download the pretrained models
# +
# model for sentiment analysis
sentiment = transformers.pipeline('sentiment-analysis', model="nlptown/bert-base-multilingual-uncased-sentiment")
# model for entities classification
classifier = transformers.pipeline("zero-shot-classification", model="BaptisteDoyen/camembert-base-xnli")
entities = ["bourse", "IPO", "incident", "panne", " "]
summarizer = transformers.pipeline("summarization")
# -
# ### Step 7 - Run an analysis of Tweets using models
# test on the first Tweet of the day
print(french_tweets[0])
print(sentiment(french_tweets[0]))
print(classifier(french_tweets[0], entities))
# +
# transform into a dataframe
french_tweet_entity_df = pd.DataFrame(classifier(french_tweets, entities))
labels = french_tweet_entity_df["labels"]
scores = french_tweet_entity_df["scores"]
# -
# The classifier returns all entities and their probabilities.
#
# We want to save only the **dominant entity** and its **probability**.
# +
# Keep only the first (dominant) entity and its probability for each
# Tweet. Replaces the indexed loop with its redundant manual `i += 1`.
label = [tweet_labels[0] for tweet_labels in labels]
score = [tweet_scores[0] for tweet_scores in scores]
# -
# It is possible to define each column of the result table:
# - Tweets (*text*)
# - Entities (*entity*)
# - Entities score (*score_entity*)
# - Sentiments (*label*)
# - Sentiments score (*score*)
# define columns of the results file
french_tweets_text = pd.DataFrame(french_tweets, columns = ['text'])
french_tweets_entity = pd.DataFrame(label, columns = ['entity'])
french_tweets_score_entity = pd.DataFrame(score, columns = ['score_entity'])
french_tweet_sentiment_df = pd.DataFrame(sentiment(french_tweets))
# concatenate the list of tweets and the results of sentiment analysis
results_16_10_2021_french_tweets_bert = pd.concat([french_tweets_text, french_tweets_entity, french_tweets_score_entity, french_tweet_sentiment_df], axis=1)
results_16_10_2021_french_tweets_bert
# ### Step 8 - Export the results
# export results as a csv file
# %cd /workspace/results
results_16_10_2021_french_tweets_bert.to_csv("results_16_10_2021_french_tweets_bert.csv")
# ### Step 9 - Display the results
labels = results_16_10_2021_french_tweets_bert['label'].values
entity = results_16_10_2021_french_tweets_bert['entity'].values
# +
# list for sentiments concerning "bourse" and "IPO"
five_stars_bourse_IPO = []
four_stars_bourse_IPO = []
three_stars_bourse_IPO = []
two_stars_bourse_IPO = []
one_star_bourse_IPO = []
# list for sentiments concerning "panne" and "incident"
five_stars_inci_panne = []
four_stars_inci_panne = []
three_stars_inci_panne = []
two_stars_inci_panne = []
one_star_inci_panne = []
# list for sentiments concerning other subjects
five_stars_others = []
four_stars_others = []
three_stars_others = []
two_stars_others = []
one_star_others = []

# Map each topic entity to its group, and each (group, star label) to the
# destination list. This replaces the three nested if/elif chains of the
# original loop (which also carried a redundant manual `i += 1`).
_entity_group = {'bourse': 'bourse_IPO', 'IPO': 'bourse_IPO',
                 'incident': 'inci_panne', 'panne': 'inci_panne'}
_star_buckets = {
    'bourse_IPO': {'5 stars': five_stars_bourse_IPO, '4 stars': four_stars_bourse_IPO,
                   '3 stars': three_stars_bourse_IPO, '2 stars': two_stars_bourse_IPO},
    'inci_panne': {'5 stars': five_stars_inci_panne, '4 stars': four_stars_inci_panne,
                   '3 stars': three_stars_inci_panne, '2 stars': two_stars_inci_panne},
    'others': {'5 stars': five_stars_others, '4 stars': four_stars_others,
               '3 stars': three_stars_others, '2 stars': two_stars_others},
}
# Any label other than 5/4/3/2 stars falls through to the 1-star list,
# matching the original `else` branches.
_one_star = {'bourse_IPO': one_star_bourse_IPO,
             'inci_panne': one_star_inci_panne,
             'others': one_star_others}

for i in range(len(labels)):
    group = _entity_group.get(entity[i], 'others')
    _star_buckets[group].get(labels[i], _one_star[group]).append(labels[i])
# +
# Build a summary DataFrame (one row per topic group, one column per
# star rating) and display it as a grouped bar chart.
df = pd.DataFrame(
    [['bourse / IPO', len(one_star_bourse_IPO), len(two_stars_bourse_IPO),
      len(three_stars_bourse_IPO), len(four_stars_bourse_IPO), len(five_stars_bourse_IPO)],
     ['incident / panne', len(one_star_inci_panne), len(two_stars_inci_panne),
      len(three_stars_inci_panne), len(four_stars_inci_panne), len(five_stars_inci_panne)],
     # Bug fix: the '2 stars' cell of the 'others' row previously counted
     # two_stars_inci_panne (copy-paste error) instead of two_stars_others.
     ['others', len(one_star_others), len(two_stars_others),
      len(three_stars_others), len(four_stars_others), len(five_stars_others)]],
    columns=['Entities', '1 star', '2 stars', '3 stars', '4 stars', '5 stars'])

# print the dataframe
print(df)

df.plot(x='Entities',
        kind='bar',
        colormap = 'RdYlGn',
        title='OVHcloud french Tweets sentiment - BERT - 16th October 2021',
        ylabel="Number of Tweets")
# -
# number of positive and negative Tweets
french_tweet_sentiment_df['label'].value_counts()
| notebooks/hugging-face/tuto/sentiment-analysis-Twitter/BERT/hugging_face_bert_sentiment_analysis_tweets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1- Loading Packages
# In this kernel we are using the following packages:
# +
import matplotlib.animation as animation
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import pandas as pd
import numpy as np
# -
# <a id="3"></a> <br>
# ## 2- Data Visualization Libraries
# Before you start learning , I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.
#
# * 1- matplotlib
#
# matplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.
#
# * 2- Seaborn
#
# Seaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.
#
# [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png
# [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/
# ## 3- Matplotlib
#
# You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands.
#
# `%matplotlib notebook` provides an interactive environment.
#import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.4, 3.8, 1.2, 2.5], [15, 25, 9, 26], color='darkgreen', marker='o')
plt.xlim(0.5, 4.5)
plt.show()
# ## 3-1 Scatterplots
# +
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
# +
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
# -
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
# +
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# -
# ## 3-2 Line Plots
# +
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# -
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
# ## 3-3 Bar Charts
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
# +
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
# +
linear_err = [np.random.randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# -
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# +
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
# +
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
# +
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# -
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
# + _kg_hide-input=true _uuid="0b565e5356d99d5875de88a853710ee2dd3c4a53"
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
# + [markdown] _uuid="487fb5e77983c55ae891c3a99537d6cef0450b74"
# ## 3-4 Histograms
# + _kg_hide-input=true _uuid="42551850b478e274b9f78d4f6ef717c636242616"
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# + _kg_hide-input=true _uuid="39b84012b223069dd4cc6f1441d2ad0f585218bf"
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
# + [markdown] _uuid="b4e3de19781686010c6038f0e3076eb678398169"
# It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
#
#
# + [markdown] _uuid="2bc0c6fc0bb19748e9e87603e3207f75ffa9b565"
# ## 3-5 Box and Whisker Plots
# In descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
# + _kg_hide-input=true _uuid="94dad21ec08e2633dacb64a89e5c807145042994"
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
# + _kg_hide-input=true _uuid="9f4b288fe4b8ab78e6ad788e4bcfb5931920fcf2"
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# + _kg_hide-input=true _uuid="9dc56a6415be6584fba51630ced26b0aaa486a09"
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# + _kg_hide-input=true _uuid="0f44453d7022928d2aeed3c3e0126cbd7118cdd9"
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
# + _kg_hide-input=true _uuid="90b1e8ffe23e39ec54414c1fd63b5d5c4e72be6f"
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# + _kg_hide-input=true _uuid="cb3652a440484d391d27122878456a642c58d804"
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
# + [markdown] _uuid="fbf8bf0a67f6c49d78911c5f37be531ebbcd9edb"
# ## 3-6 Heatmaps
# + _kg_hide-input=true _uuid="ebfc0dcb8e85aa540f6568fa96431a4e9707f3c1"
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
# + _kg_hide-input=true _uuid="fdbcf35950f94a4d0f1ce10efee6a4502f6ecfc8"
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
# + [markdown] _uuid="139f44a4deb043128c8c7254eb60c33e0fc26e68"
# ## 3-7 Animations
# + _kg_hide-input=true _uuid="b3676970195153dc6056600f024f55c1b6f0ba12"
n = 100
x = np.random.randn(n)
# + _kg_hide-input=true _uuid="ea3eb5835e4acd53d43da483e17d79c32228cad6"
# create the function that will do the plotting, where curr is the current frame
def update(curr):
    """Draw one frame of the sampling-the-normal-distribution animation.

    `curr` is the current frame number; the first `curr` samples of the
    module-level array `x` are histogrammed each frame.
    """
    # stop once every sample has been drawn; `a` is the FuncAnimation
    # object created in the cell after this definition
    if curr == n:
        a.event_source.stop()
    plt.cla()
    bins = np.arange(-4, 4, 0.5)
    plt.hist(x[:curr], bins=bins)
    # fixed axis limits so the histogram does not rescale between frames
    plt.axis([-4,4,0,30])
    plt.gca().set_title('Sampling the Normal Distribution')
    plt.gca().set_ylabel('Frequency')
    plt.gca().set_xlabel('Value')
    plt.annotate('n = {}'.format(curr), [3,27])
# + _kg_hide-input=true _uuid="fb2314b3b1735c5e191c8427c5abe6429e4ff767"
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
# + [markdown] _uuid="7e702e5e0a876f9fa0b2e4fe497a56b91e00a95d"
# ## 3-8 Interactivity
# + _kg_hide-input=true _uuid="51fefb947daf8ca558cbc153e2ddbf39bcb7d4b2"
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
    """Replot the data and report the click location in the axes title."""
    plt.cla()
    plt.plot(data)
    coords = (event.x, event.y, event.xdata, event.ydata)
    plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(*coords))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
# + _kg_hide-input=true _uuid="1d64a25dc386a30bd895ca8f58ca86d632f05d74"
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
# + _kg_hide-input=true _uuid="e13dd7e002938af1a52d7520004d839a3d7d2011"
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
# + _kg_hide-input=true _uuid="e926ab1d2dc2098a6af48526e9f980bf594c79cd"
def onpick(event):
    """Show the origin of the picked scatter point in the axes title."""
    picked_index = event.ind[0]
    where_from = df.iloc[picked_index]['origin']
    ax = plt.gca()
    ax.set_title('Selected item came from {}'.format(where_from))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# + _kg_hide-input=true _uuid="bb968584c16b9acc6466a3897c9415c57f3a7404"
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
# + [markdown] _uuid="84742816a16280f7bca7c43879d6762e10e0a440"
# ## 3-9 DataFrame.plot
# + _kg_hide-input=true _uuid="97a3554f9640a2b77e07c861b5a5b6c814a3b276"
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
# + _kg_hide-input=true _uuid="4d64fa4bc8b62fe1f4d2c2de0869bb49c8f7fc3d"
df.plot('A','B', kind = 'scatter');
# + [markdown] _uuid="857cecae1e2c9eb59c1a9d136ef1c5422d86d5ba"
# You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.
#
# `kind` :
# - `'line'` : line plot (default)
# - `'bar'` : vertical bar plot
# - `'barh'` : horizontal bar plot
# - `'hist'` : histogram
# - `'box'` : boxplot
# - `'kde'` : Kernel Density Estimation plot
# - `'density'` : same as 'kde'
# - `'area'` : area plot
# - `'pie'` : pie plot
# - `'scatter'` : scatter plot
# - `'hexbin'` : hexbin plot
# ###### [Go to top](#top)
# + _kg_hide-input=true _uuid="74997530957394a96f0aed15c21e65f54911159c"
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
# + _kg_hide-input=true _uuid="6299f8dddb909c7850620499edc49afdfd909f75"
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
# + _kg_hide-input=true _uuid="91a25480397c8759047100da9ebc6c0264d8a918"
df.plot.box();
# + _kg_hide-input=true _uuid="b7bfa0ce17ea260d75eb97a0161af3dbd700f780"
df.plot.hist(alpha=0.7);
# + [markdown] _uuid="21a68cb3d0111753d29df2b402011daff81c5ff4"
# [Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
# + _kg_hide-input=true _uuid="7b4d0f65af26e55acaf9a06da13dc71eb21a408b"
df.plot.kde();
| tutorial/5. Data visualization/with_matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: RAPIDS Stable
# language: python
# name: rapids-stable
# ---
# ## BlazingSQL Logs
#
# [Docs](https://docs.blazingdb.com/docs/blazingsql-logs)
#
# BlazingSQL has an internal log that records events from every node from all queries run. The events include runtime query step execution information, performance timings, errors and warnings.
#
# The logs table is called `bsql_logs`. You can query the logs as if it were any other table, except you use the `.log()` function, instead of the `.sql()` function.
from blazingsql import BlazingContext
bc = BlazingContext()
bc.log('select * from bsql_logs').head()
# How long did each successfully run query take?
bc.log("SELECT log_time, query_id, duration FROM bsql_logs WHERE info = 'Query Execution Done' ORDER BY log_time DESC")
# This query determines the data load time and total time for all queries, showing the latest ones first.
#
# Load time and total time being the maximum load time and total time for any node.
log_query = '''
SELECT
MAX(end_time) as end_time, query_id,
MAX(load_time) AS load_time, MAX(total_time) AS total_time
FROM (
SELECT
query_id,
node_id,
SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN duration ELSE 0 END) AS load_time,
SUM(CASE WHEN info = 'Query Execution Done' THEN duration ELSE 0 END) AS total_time,
MAX(log_time) AS end_time
FROM
bsql_logs
WHERE
info = 'evaluate_split_query load_data'
OR info = 'Query Execution Done'
GROUP BY
node_id, query_id
)
GROUP BY
query_id
ORDER BY
end_time DESC
'''
bc.log(log_query)
# If you run the same queries multiple times, this query against the logs will tell you the average execution time for every query.
query = """
SELECT
MAX(end_time) AS end_time,
SUM(query_duration)/COUNT(query_duration) AS avg_time,
MIN(query_duration) AS min_time,
MAX(query_duration) AS max_time,
COUNT(query_duration) AS num_times,
relational_algebra
FROM (
SELECT
times.end_time as end_time,
times.query_id, times.avg_time,
times.max_time as query_duration,
times.min_time,
ral.relational_algebra as relational_algebra
FROM (
SELECT
query_id,
MAX(log_time) AS end_time,
SUM(duration)/COUNT(duration) AS avg_time,
MIN(duration) AS min_time,
MAX(duration) AS max_time
FROM
bsql_logs
WHERE
info = 'Query Execution Done'
GROUP BY
query_id
)
AS times
INNER JOIN (
SELECT
query_id,
SUBSTRING(info, 13, 2000) AS relational_algebra
FROM
bsql_logs
WHERE
info LIKE 'Query Start%'
GROUP BY
query_id, info
)
AS ral
ON
times.query_id = ral.query_id
ORDER BY
times.end_time DESC)
AS temp
GROUP BY
relational_algebra
"""
bc.log(query)
| intro_notebooks/bsql_logs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beacon
# %load_ext autoreload
# %autoreload 2
# # %autoreload? for help
import cv2
import numpy as np
import math
#import picamera
import io
from IPython.display import Image
import time
import glob
from termcolor import colored
import math
import re
import cv2
import numpy as np
from matplotlib import pyplot as plt
from beacon_fct import *
boundariesyb = [
([0, 0, 215], [255, 153, 255], 'r', (0,0,255), (0,8000)),
([0, 200, 97], [255, 255, 255], 'y', (0,255,255), (0,0)),
([230, 141, 0], [255, 244, 255], 'b', (255,0,0), (8000,0))
]
boundaries = [
([0, 0, 180], [255, 153, 255], 'r', (0,0,255), (0,8000)),
([230, 141, 0], [255, 225, 255], 'b', (255,0,0), (8000,0)),
([0, 200, 97], [255, 255, 255], 'y', (0,255,255), (0,0)),
([77, 235, 0], [220, 244, 255], 'g', (0,255,0), (8000,8000))
]
# # Fonction to find the perfect center of the image
findCenter(500,600,300,400,41.63,131.186,132.81,boundariesyb,center)
# # HSV Filtering
findThreshold()
# # Process the image, find angles and get robot's position and orientation
center_circles = (532, 357)
center_beacon = (547,355)
# +
rawImage = cv2.imread('images/beacon_4000_500_270.png')
height,width,depth = rawImage.shape
imgWithCircle = np.zeros((height,width), np.uint8)
cv2.circle(imgWithCircle,center_circles,207,(255,255,255),thickness=-1)
cv2.circle(imgWithCircle,center_circles,165,(0,0,0),thickness=-1)
cv2.circle(imgWithCircle,center_circles,75,(255,255,255),thickness=-1)
cv2.circle(imgWithCircle,center_circles,45,(0,0,0),thickness=-1)
imask = imgWithCircle>0
img = np.zeros_like(rawImage, np.uint8)
img[imask] = rawImage[imask]
ret,thresh_img = cv2.threshold(img,170,0,cv2.THRESH_TOZERO)
#thresh_img = cv2.cvtColor(thresh_img, cv2.COLOR_BGR2RGB)
cv2.imwrite('threshold_img.png',thresh_img)
rawImage = cv2.cvtColor(rawImage, cv2.COLOR_BGR2RGB)
thresh_img = cv2.cvtColor(thresh_img, cv2.COLOR_BGR2RGB)
show_images([rawImage,thresh_img], cols = 1, titles = ["Raw image", "Threshold Image"])
# -
threshImg = cv2.imread('threshold_img.png')
angles,lights_coordinates = find_angles_with_display(threshImg,center_beacon,boundaries)
print("Robot's position and orientation: ", find_robot_pos(angles,lights_coordinates))
# # Test Process on several images
center_circles = (532, 357)
center_beacon = (547,355)
i = 0
images_ = []
titles_ = []
for filename in glob.glob("./images/*.png"):
    start = time.time()
    # Extract the ground-truth pose (x, y, angle) encoded in the filename,
    # e.g. beacon_4000_500_270.png.  Raw string so that \d is a regex digit
    # class rather than a deprecated string escape.
    elements = re.findall(r'[+-]?\d+', filename)
    try:
        x,y,a = elements[0],elements[1],elements[2]
        x,y,a = float(x),float(y),float(a)
    except:
        # skip files whose name does not encode a ground-truth pose
        continue
    i = i + 1
    print("-- Picture: ", filename[2:-4], '--')
    rawImage = cv2.imread(filename)
    height,width,depth = rawImage.shape
    # Keep only the two annular regions (radii 45-75 and 165-207 around
    # center_circles) that can contain the beacon lights; mask the rest.
    imgWithCircle = np.zeros((height,width), np.uint8)
    cv2.circle(imgWithCircle,center_circles,207,(255,255,255),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,165,(0,0,0),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,75,(255,255,255),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,45,(0,0,0),thickness=-1)
    imask = imgWithCircle>0
    img = np.zeros_like(rawImage, np.uint8)
    img[imask] = rawImage[imask]
    # Drop dim pixels, then detect the colored lights and their angles.
    ret,thresh_img = cv2.threshold(img,170,0,cv2.THRESH_TOZERO)
    thresh_img,angles,lights_coordinates = find_angles_and_get_result(thresh_img,center_beacon,boundaries)
    thresh_img[np.where((thresh_img==[0,0,0]).all(axis=2))] = [160,160,160]
    images_.append(thresh_img)
    titles_.append(filename)
    if len(angles) < 3:
        print("less than 3 lights found")
        continue
    if len(angles) == 4:
        # All four lights visible: keep three of them, dropping whichever
        # of the (0,0) / (8000,8000) corner lights is farther from the robot.
        if math.sqrt((x-0)**2 + (y-0)**2) < math.sqrt((x-8000)**2 + (y-8000)**2):
            a1,a2,a3 = angles[0],angles[1],angles[2]
            angles = a1,a2,a3
            l1,l2,l3 = lights_coordinates[0],lights_coordinates[1],lights_coordinates[2]
            lights_coordinates = l1,l2,l3
        else:
            a1,a2,a3 = angles[0],angles[1],angles[3]
            angles = a1,a2,a3
            l1,l2,l3 = lights_coordinates[0],lights_coordinates[1],lights_coordinates[3]
            lights_coordinates = l1,l2,l3
    # Triangulate the robot pose from the three light angles.
    xb,yb,ab = find_robot_pos(angles,lights_coordinates)
    print("angles: ", angles)
    print("Lights' positions: ", lights_coordinates)
    print("position from beacon: ",xb,yb,ab)
    print("real position: ",x,y,a)
    dist = math.sqrt((x-xb)*(x-xb) + (y-yb)*(y-yb))
    # NOTE(review): (a - ab) % 360 reports e.g. 359 for a -1 degree error;
    # consider min(err, 360 - err) if wrap-around false ERRORs appear.
    angle_error = (a - ab)%360
    if dist > 300 or angle_error > 10:
        print(colored("Error distance: {}, error angle: {}".format(dist,angle_error), 'red'))
        print(colored('ERROR', 'red'))
    else:
        print(colored("Error distance: {}, error angle: {}".format(dist,angle_error), 'green'))
        print(colored('GOOD', 'green'))
show_images(images_, cols = 3, titles = titles_)
# # Verifying images thresholding
# +
center_circles = (532, 357)
center_beacon = (547,355)
i = 1
images = []
titles = []
for filename in glob.glob("./images/*.png"):
    # Raw string so that \d is a regex digit class rather than a
    # deprecated string escape; extracts the pose encoded in the filename.
    elements = re.findall(r'[+-]?\d+', filename)
    try:
        x,y,a = elements[0],elements[1],elements[2]
        x,y,a = float(x),float(y),float(a)
    except:
        # skip files whose name does not encode a ground-truth pose
        continue
    print("-- Picture: ", filename[2:-4], '--')
    i=i+1
    rawImage = cv2.imread(filename)
    height,width,depth = rawImage.shape
    # Mask everything outside the two annuli where the beacon lights appear.
    imgWithCircle = np.zeros((height,width), np.uint8)
    cv2.circle(imgWithCircle,center_circles,207,(255,255,255),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,165,(0,0,0),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,75,(255,255,255),thickness=-1)
    cv2.circle(imgWithCircle,center_circles,45,(0,0,0),thickness=-1)
    imask = imgWithCircle>0
    img = np.zeros_like(rawImage, np.uint8)
    img[imask] = rawImage[imask]
    # Threshold away dim pixels and collect the annotated result for display.
    ret,thresh_img = cv2.threshold(img,170,0,cv2.THRESH_TOZERO)
    thresh_img,angles,lights_coordinates = find_angles_and_get_result(thresh_img,center_beacon,boundaries)
    thresh_img[np.where((thresh_img==[0,0,0]).all(axis=2))] = [160,160,160]
    images.append(thresh_img)
    titles.append(filename[:-4])
show_images(images, cols = 3, titles = titles)
| beacon/beacon_main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="s4ljYpQNp50r"
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/dependency_parsing/NLU_untyped_dependency_parsing_example.ipynb)
#
#
# # Untyped Dependency Parsing with NLU.
# 
#
# Each word in a sentence has a grammatical relation to other words in the sentence.
# These relation pairs can be typed (e.g. subject or pronoun) or they can be untyped, in which case only the edges between the tokens will be predicted, without the label.
#
# With NLU you can get these relations in just 1 line of code!
# # 1. Install Java and NLU
# + id="SF5-Z-U4jukd"
import os
# ! apt-get update -qq > /dev/null
# Install java
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! pip install nlu pyspark==2.4.7 > /dev/null
# + [markdown] id="kHtLKNXDtZf5"
# # 2. Load the Dependency model and predict some sample relationships
# + id="7GJX5d6mjk5j" colab={"base_uri": "https://localhost:8080/", "height": 512} executionInfo={"status": "ok", "timestamp": 1604907666230, "user_tz": -60, "elapsed": 128480, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="7b5f4b95-706e-4c79-cf4b-9abcf40b3a01"
import nlu
dependency_pipe = nlu.load('dep.untyped')
dependency_pipe.predict('Untyped dependencies describe with their relationship a directed graph')
# + [markdown] id="5lrDNzw3tcqT"
# # 3.1 Download sample dataset
# + id="gpeS8DWBlrun" colab={"base_uri": "https://localhost:8080/", "height": 607} executionInfo={"status": "ok", "timestamp": 1604907674240, "user_tz": -60, "elapsed": 136471, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="c8a9f120-9018-4903-c44a-58f8b3b789b8"
import pandas as pd
# Download the dataset
# ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv -P /tmp
# Load dataset to Pandas
df = pd.read_csv('/tmp/train-balanced-sarcasm.csv')
df
# + [markdown] id="uLWu8DG3tfjz"
# ## 3.2 Predict on sample dataset
# NLU expects a text column, thus we must create it from the column that contains our text data
# + id="3V5l-B6nl43U" colab={"base_uri": "https://localhost:8080/", "height": 380} executionInfo={"status": "ok", "timestamp": 1604907690243, "user_tz": -60, "elapsed": 152462, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="36136418-2a70-4184-83ba-59b40dd1d9ef"
dependency_pipe = nlu.load('dep.untyped')
df['text'] = df['comment']
dependency_predictions = dependency_pipe.predict(df['text'].iloc[0:1])
dependency_predictions
| examples/colab/component_examples/dependency_parsing/NLU_untyped_dependency_parsing_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import specdal
from matplotlib import pyplot as plt
# # Processing a single Spectrum
#
# Specdal provides readers which loads [.asd, .sig, .sed] files into a common Spectrum object.
s = specdal.Spectrum(filepath="/home/young/data/specdal/aidan_data/SVC/ACPA_F_B_SU_20160617_003.sig")
print(s)
# The print output shows the four components of the Spectrum object. For example, we can access the measurements as follows.
print(type(s.measurement))
print(s.measurement.head())
# Spectrum object provides several methods for processing the measurements. Let's start by linearly resampling to the nearest integer (nm) wavelengths.
s.interpolate(method='linear')
print(s.measurement.head())
# We can visualize the spectrum using pyplot. spectrum.plot is just a wrapper around spectrum.measurements.plot, so you can pass any arguments for plotting pandas.Series objects.
s.plot()
plt.show()
# There are folds in the spectrum near 1000 and 1900 wavelengths. This happens because the three bands in the spectrometer have overlapping wavelengths. We can fix this using the stitch method of the Spectrum class.
s.stitch(method='mean')
s.plot()
plt.show()
| specdal/examples/process_spectrum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 8.1
# language: ''
# name: sagemath
# ---
#Create a list of functions
var('x y z t')
f(x,y) = 1-3*sqrt(x^2+y^2)
pr(x,y) = x*y
h(x,y)=x^2-y^2
ap(x,y) = x^2+y^2
g(x,y) = x*sin(x^2-y^2)
w(x,y)=(abs(y*x))^(2/3)
# Methods to plot planes and cross-sections
# N.b. Parametrizing level curves is a bit tricky
# Each *equalsc helper renders the plane {axis}=c (clipped to the given box)
# as an implicit 3d surface; each *crossatc helper draws the matching
# cross-section curve of f along that plane.
def yequalsc(c,xRange=[-3,3],yRange=[-3,3],zRange=[-3,3],color='blue',opacity=1):
    # plane y=c
    return implicit_plot3d(y==c,(x,xRange[0],xRange[1]),(y,yRange[0],yRange[1]),(z,zRange[0],zRange[1]),axes=False,opacity=opacity)
def ycrossatc(c,f,xRange=[-3,3],color='red'):
    # curve (t, c, f(t,c)): the slice of the graph of f at y=c
    return parametric_plot([t,c,f(t,c)],(t,xRange[0],xRange[1]),color=color,thickness=5)
def xequalsc(c,xRange=[-3,3],yRange=[-3,3],zRange=[-3,3],color='blue',opacity=1):
    # plane x=c
    return implicit_plot3d(x==c,(x,xRange[0],xRange[1]),(y,yRange[0],yRange[1]),(z,zRange[0],zRange[1]),axes=False,thickness=10,opacity=opacity)
def xcrossatc(c,f,yRange=[-3,3],color='green'):
    # curve (c, t, f(c,t)): the slice of the graph of f at x=c
    return parametric_plot([c,t,f(c,t)],(t,yRange[0],yRange[1]),color=color,thickness=5)
def zequalsc(c,xRange=[-3,3],yRange=[-3,3],zRange=[-3,3],color='blue',opacity=1):
    # plane z=c
    return implicit_plot3d(z==c,(x,xRange[0],xRange[1]),(y,yRange[0],yRange[1]),(z,zRange[0],zRange[1]),axes=False,thickness=1,opacity=opacity)
def zcrossatc(c,f,yRange=[-3,3],zRange=[-3,3],xRange=[-3,3],color='red',thickness=0.05):
    # level curve f(x,y)=c drawn as a thin band of the surface near z=c
    # (a parametric form of the level set is hard to obtain in general)
    return implicit_plot3d(f(x,y)-z==0,(x,xRange[0],xRange[1]),(y,yRange[0],yRange[1]),(z,zRange[0],zRange[1]),axes=False,color=color,region=lambda x,y,z : c-thickness<=z and z<=c+thickness,plot_points=400)
#Test: Plot funtion pr and show level curve
PR = plot3d(pr,(x,-3,3),(y,-3,3),adaptive=True,color=['black','white'])
zc = zequalsc(1)
zcs=zcrossatc(1,pr)
T=PR+zc+zcs
T.show(frame=False,viewer="tachyon")
# +
def graphPlot(f,xRange=[-3,3],yRange=[-3,3],opacity=0.8,color=['black','white']):
    # semi-transparent surface plot of f over the given box
    # (yRange[-1] is the last entry, i.e. yRange[1] for a 2-element range)
    return plot3d(f,(x,xRange[0],xRange[1]),(y,yRange[0],yRange[-1]),adaptive=True,color=color,opacity=opacity)
def showXcrossSection(f,c,xRange=[-3,3],yRange=[-3,3],color=['black','white']):
    # graph of f + the plane x=c + the cross-section curve at x=c
    Pg=graphPlot(f,xRange=xRange,yRange=yRange,color=color)
    # derive the z extent from the surface's own bounding box
    zRange=[Pg.bounding_box()[0][2],Pg.bounding_box()[1][2]]
    Xplane = xequalsc(c,xRange=xRange,yRange=yRange,zRange=zRange)
    Xcross = xcrossatc(c,f,yRange=yRange)
    T = Pg+Xplane+Xcross
    return T
def showYcrossSection(f,c,xRange=[-3,3],yRange=[-3,3],color=['black','white']):
    # graph of f + the plane y=c + the cross-section curve at y=c
    Pg=graphPlot(f,xRange=xRange,yRange=yRange,color=color)
    zRange=[Pg.bounding_box()[0][2],Pg.bounding_box()[1][2]]
    Yplane = yequalsc(c,xRange=xRange,yRange=yRange,zRange=zRange)
    Ycross = ycrossatc(c,f,xRange=xRange)
    T = Pg+Yplane+Ycross
    return T
def showZcrossSection(f,c,xRange=[-3,3],yRange=[-3,3],color=['black','white']):
    # graph of f + the plane z=c + the level curve f(x,y)=c
    Pg=graphPlot(f,xRange=xRange,yRange=yRange,color=color)
    zRange=[Pg.bounding_box()[0][2],Pg.bounding_box()[1][2]]
    Zplane = zequalsc(c,xRange=xRange,yRange=yRange,zRange=zRange)
    Zcross = zcrossatc(c,f,xRange=xRange,yRange=yRange,zRange=zRange)
    T = Pg+Zplane+Zcross
    return T
# -
#Test saving to file
T=showZcrossSection(pr,-1)
T.save("pic.gif",viewer="tachyon",frame=False)
#Animated cross-sections
#These produce sequences of numbered .gif files that can be put together
def animateGraph(f,basename,xRange=[-1,1],yRange=[-1,1],zRange=[-1,1],steps=48,xcross=True,ycross=True,zcross=True,verbose=True,showframe=False,showaxes=False):
    """Produce numbered .gif frames of x, y and z cross-sections of the
    graph of f sweeping across the given ranges.

    Frames are written as basename-{x,y,z}cross-NNN.gif; each family can
    be disabled with the xcross/ycross/zcross flags.  The files are meant
    to be stitched into animations afterwards (see assemble)."""
    print "Starting..."
    # NOTE(review): G is computed but never used below.
    G=graphPlot(f,xRange=xRange,yRange=yRange)
    # step size for each sweep direction
    xincr = (xRange[1]-xRange[0])/steps
    yincr = (yRange[1]-yRange[0])/steps
    zincr = (zRange[1]-zRange[0])/steps
    xpos=xRange[0]
    ypos=yRange[0]
    zpos=zRange[0]
    frame=0
    while (xpos <= xRange[1]) and xcross:
        outfile = basename+"-xcross-"+"%03d"%frame+".gif"
        xcs=showXcrossSection(f,xpos,xRange=xRange,yRange=yRange)
        xcs.save(outfile,viewer='tachyon',frame=showframe,axes=showaxes,aspect_ratio=1)
        if verbose: print "Produced x="+str(xpos)+"cross-section"
        xpos = xpos+xincr
        frame=frame+1
    frame=0
    while (ypos <= yRange[1]) and ycross:
        outfile = basename+"-ycross-"+"%03d"%frame+".gif"
        ycs=showYcrossSection(f,ypos,xRange=xRange,yRange=yRange)
        ycs.save(outfile,viewer='tachyon',frame=showframe,axes=showaxes,aspect_ratio=1)
        if verbose: print "Produced y="+str(ypos)+"cross-section"
        ypos = ypos+yincr
        frame=frame+1
    frame=0
    while (zpos <= zRange[1]) and zcross:
        outfile = basename+"-zcross-"+"%03d"%frame+".gif"
        zcs=showZcrossSection(f,zpos,xRange=xRange,yRange=yRange)
        zcs.save(outfile,viewer='tachyon',frame=showframe,axes=showaxes,aspect_ratio=1)
        if verbose: print "Produced z="+str(zpos)+"cross-section"
        zpos = zpos+zincr
        frame=frame+1
    print "Finished!"
#Calls a shell script that invokes imagemagick to make an animated gif
#default extensions come from animategraph
import subprocess
def assemble(basename,delay=8,extensions=["-xcross","-ycross","-zcross"]):
extensions=extensions
for e in extensions:
base="{0}{1}".format(basename,e)
print base
print "Animating"
subprocess.check_output(["sh","animategifs.sh",str(delay),base])
#print "Cleaning up"
#subprocess.check_output(["sh","cleanup.sh",base])
animateGraph(pr,"xy",zRange=[-1,1],verbose=False)
animateGraph(h,"x2-y2",zRange=[-1,1],verbose=False)
assemble("xy",extensions=[""])
assemble("x2-y2",extensions=[""])
| Cross Sections of graphs in two variables in sage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Quandl - Get data from API
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Quandl/Quandl_Get_data_from_API.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
# + [markdown] papermill={} tags=[]
# **Tags:** #quandl #marketdata #opendata #finance #snippet #matplotlib
# + [markdown] papermill={} tags=["naas"]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Install packages
# + papermill={} tags=[]
# !pip install quandl
# + [markdown] papermill={} tags=[]
# ### Import libraries
# + papermill={} tags=[]
import quandl
import matplotlib.pyplot as plt
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] papermill={} tags=[]
# ### Get the data
# + papermill={} tags=[]
data = quandl.get('EIA/PET_RWTC_D')
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] papermill={} tags=[]
# ### Show dataframe
# + papermill={} tags=[]
data
# + [markdown] papermill={} tags=[]
# ### Show the graph
# + papermill={} tags=[]
# %matplotlib inline
data.plot()
| Quandl/Quandl_Get_data_from_API.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# # RTM Example with dynamic scheduling
# We will perform RTM using the following steps:
# 1. Read the 10m resampled models output from the FWI notebook
# 2. Visualize the model
# 3. Build a small local compute cluster (2 workers)
# 4. Create list of shot locations
# 5. Define the `timemute!`, `migrateshot`, and `stack` functions
# 6. Run the migration and write individual shot images to disk
# 7. Stack the individual shot images
# 8. Perform a little post migration processing
# 9. Visualize Results
#
# #### Note on runtime
# This notebook takes approximately 20 minutes to run for 100 shots with two workers on an Intel 8168.
#
# `lscpu` CPU information: `Intel(R) Xeon(R) Platinum 8168 CPU @ 2.70GHz`
# ## Add required packages
using Distributed, PyPlot, Jets, JetPack, JetPackDSP, Printf
# ## Record time for start of notebook
time_beg = time()
# ## 1. Read the 10m resampled models output from the FWI notebook
# + tags=[]
file1 = "../50_fwi/marmousi_resampled_10m_349x1701_vtrue.bin"
nz,nx = 349,1701
dz,dx = 10.0,10.0
v1_orig = read!(file1, Array{Float32}(undef,nz,nx));
# -
# #### Apply a 7x7 rectangular mix smoother
# We perform the RTM migration in a slightly smoothed model.
# +
ns = 21
P = JopPad(JetSpace(Float32,nz,nx), -ns:nz+ns, -ns:nx+ns, extend=true)
M = JopMix(range(P), (7,7))
R = JopPad(JetSpace(Float32,nz,nx), -ns:nz+ns, -ns:nx+ns, extend=false)
s1 = R' ∘ M ∘ P * (1 ./ v1_orig)
v1 = 1 ./(s1);
# -
# #### Compute "reflectivity" by vertical numerical derivative
D = JopDifference(JetSpace(Float32,nz,nx), 1)
r1 = D * v1;
# ## 2. Visualize the velocity and reflectivity
# +
vmin,vmax = extrema(v1)
rmax = maximum(abs,r1)
@show vmin,vmax,rmax;
figure(figsize=(8,6)); clf()
subplot(2,1,1); imshow(v1,aspect="auto",cmap="jet");
colorbar(orientation="vertical");clim(vmin,vmax);
title("True Velocity");
subplot(2,1,2); imshow(2 .* r1 ./ rmax,aspect="auto",cmap="gray");
colorbar(orientation="vertical");clim(-1,+1);
title("True Reflectivity");
tight_layout()
# -
# ## 3. Build a small local compute cluster (2 workers)
#
# #### Setup OMP environment variables for the cluster
#
# In the distributed compute case the workers that we add would be on different hardware, and we might add tens of workers in 2D and hundreds in 3D. Here we run on a single machine with only 2 workers, and so we need to be careful with details related to high performance computing. If we did not specify thread affinity, the two workers would compete for the same physical cores and the modeling would be *incredibly* slow.
#
# We spin up the small 2-worker cluster by calling `addprocs(2)`, and because we set the environment variable `ENV["OMP_DISPLAY_ENV"] = "true"` we will see the OMP environment printed out on each worker. In that output (below) we can verify that half of the total threads (44/2 = 22) are assigned to each socket on this 2 socket system. You can obtain more details about the hardware with the shell command `lscpu`.
#
# We set four environment variables related to OpenMP:
# * `OMP_DISPLAY_ENV` prints out the OpenMP environment on each worker
# * `OMP_PROC_BIND` specifies that threads should be bound to physical cores
# * `OMP_NUM_THREADS` specifies the number of threads per workers is 1/2 the number of physical cores
# * `GOMP_CPU_AFFINITY` specifies which physical cores the threads run on for each worker
#
# If you run the shell command `top` during execution, you will see 3 julia processes: the main process and two workers. The two workers should generally have about 50% of the system, and `load average` should tend towards the physical number of cores.
# + tags=[]
nthread = Sys.CPU_THREADS
ENV["OMP_DISPLAY_ENV"] = "true"
ENV["OMP_PROC_BIND"] = "close"
ENV["OMP_NUM_THREADS"] = "$(div(nthread,2))"
addprocs(2)
@show workers()
for k in 1:nworkers()
place1 = (k - 1) * div(nthread,nworkers())
place2 = (k + 0) * div(nthread,nworkers()) - 1
@show place1, place2, nthread
@spawnat workers()[k] ENV["GOMP_CPU_AFFINITY"] = "$(place1)-$(place2)";
end
# + tags=[]
@everywhere using DistributedArrays, DistributedJets, DistributedOperations, Jets, JetPack, WaveFD, JetPackWaveFD, Random, LinearAlgebra, Schedulers
# -
# ## 4. Create list of shot locations
# We use 100 shot locations, many times more than in our FWI example, and run at significantly higher frequency.
# + tags=[]
nshots = 100
sx = round.(Int,collect(range(0,stop=(nx-1)*dx,length=nshots)))
@show nshots;
@show sx;
# -
# ## 5. Define the `timemute!`, `migrateshot`, and `stack` functions
# * `timemute!` mutes data to remove the direct arrival and refraction
# * `migrateshot` runs the migrations for each shot and writes image and illumination files to scratch disk
# * `stack` reads in shots from disk and stacks them
#
# **TODO:** convert the IO to use CloudSeis as proxy for cloud storage
# #### Note on scratch space for temporary files
# When dealing with serialized nonlinear wavefields as in this example, we need to specify the location where scratch files will be written.
#
# You may need to change this to point to a temporary directory available on your system.
@everywhere scratch = "/mnt/scratch"
@assert isdir(scratch)
# #### Global variables for number of samples and sample rate
@everywhere begin
ntrec = 3001
dtrec = 0.002
dtmod = 0.001
end
# #### Build the `timemute!` function
# Zero out the early part of every trace in `d` to remove the direct
# arrival and refractions.  The mute on trace i runs from the first sample
# to the straight-ray travel time from source to receiver i at `watervel`,
# plus the pad `tmute` (seconds).
@everywhere function timemute!(F, d, watervel, tmute)
    # hoist the acquisition-geometry lookups out of the per-trace loop
    rx = state(F, :rx)
    rz = state(F, :rz)
    sx = state(F, :sx)
    sz = state(F, :sz)
    dtrec = state(F, :dtrec)
    for i = 1:length(rx)
        dist = sqrt((sx[1] - rx[i])^2 + (sz[1] - rz[i])^2)
        # renamed from `time` so Base.time is not shadowed
        tarrival = dist / watervel
        tend = round(Int, (tarrival + tmute) / dtrec)
        tend = clamp(tend, 1, size(d,1))
        d[1:tend, i] .= 0
    end
    nothing
end
# #### Build the `migrateshot` function
# notice we model to significantly shorter time than for FWI
# Model, mute, and migrate a single shot, writing the shot image and its
# source illumination to scratch as image_<isrc>.bin / illum_<isrc>.bin.
# _vtrue is the (broadcast) modeling velocity, _v the migration velocity.
@everywhere function migrateshot(isrc,nz,nx,dz,dx,_vtrue,_v,sx)
    @info "migrating shot $(isrc) on $(gethostname()) with id $(myid())..."
    # nonlinear 2D acoustic propagator for this shot's geometry;
    # receivers are a full surface line at depth 2*dz
    F = JopNlProp2DAcoIsoDenQ_DEO2_FDTD(;
        b = ones(Float32,nz,nx),
        nthreads = div(Sys.CPU_THREADS,2),
        ntrec = ntrec,
        dtrec = dtrec,
        dtmod = dtmod,
        dz = dz,
        dx = dx,
        wavelet = WaveletCausalRicker(f=10.0),
        sx = sx[isrc],
        sz = dz,
        rx = dx*[0:1:nx-1;],
        rz = 2*dz*ones(length(0:1:nx-1)),
        nbz_cache = nz,
        nbx_cache = 16,
        comptype = UInt32,
        srcfieldfile = joinpath(scratch, "field-$isrc-$(randstring()).bin"),
        reportinterval=0)
    d = F*localpart(_vtrue) # model the data; in production you would read it instead
    timemute!(F,d,1500,2/16) # mute the direct and diving waves (water velocity 1500 m/s)
    J = jacobian!(F, localpart(_v))
    illum = srcillum(J)
    m = J'*d # migrated image = adjoint of the Jacobian applied to the data
    close(F) # delete the serialized-wavefield scratch file we no longer need
    write(joinpath(scratch,"image_$(isrc).bin"),m)
    write(joinpath(scratch,"illum_$(isrc).bin"),illum)
end
# #### Build the `stack` function
# Sum the per-shot image and illumination files written by migrateshot,
# deleting each scratch file once it has been folded into the totals.
function stack(shots,nz,nx)
    image_total = zeros(Float32, nz, nx)
    illum_total = zeros(Float32, nz, nx)
    for shot in shots
        imgfile = joinpath(scratch, "image_$(shot).bin")
        illfile = joinpath(scratch, "illum_$(shot).bin")
        image_total .+= read!(imgfile, Array{Float32}(undef, nz, nx))
        illum_total .+= read!(illfile, Array{Float32}(undef, nz, nx))
        rm(imgfile)
        rm(illfile)
    end
    return image_total, illum_total
end
# ## 6. Run the migration and write individual shot images to disk
#
# We use epmap to schedule the work for the migration.
#broadcast the models to the workers
#broadcast the models to the workers (avoids re-serializing v1 per task)
_v1 = bcast(v1);
# + tags=["outputPrepend"]
# Time the embarrassingly-parallel migration over all shots.
t1 = @elapsed begin
    epmap(i->migrateshot(i, nz, nx, dz, dx, _v1, _v1, sx), 1:nshots)
end
@show t1;
# + tags=[]
@printf("Time for migrating %.2f minutes\n", t1 / 60)
# -
# ## 7. Stack the individual shot images
shots = collect(1:nshots)
m1, illum1 = stack(shots,nz,nx);
# ## 8. Perform a little post migration processing
# #### Laplacian filter to remove backscattered noise
L = JopLaplacian(JetSpace(Float32,nz,nx))
# #### Apply low cut filter, illumination compensation, and gain
# + tags=[]
# depth-squared gain curve replicated across all traces
g = ([0:(nz-1);]*dz).^2 * ones(1,nx);
# Laplacian-filtered, illumination-compensated, gained image; the small
# epsilon keeps the division stable where illumination is weak
img1 = g .* (L * m1) ./ (illum1 .+ 1e-8 * maximum(abs, illum1));
@show extrema(img1)
# -
# #### Apply water bottom mute
img1[v1_orig.==1500.0] .= 0;
# ## 9. Visualize Results
# + tags=[]
# color limits scaled to ~2.5x the image RMS
mrms1 = 2.5 * sqrt(norm(img1)^2 / length(img1))
figure(figsize=(8,6)); clf()
subplot(2,1,1); imshow(img1,aspect="auto",cmap="gray");
colorbar(orientation="vertical");clim(-mrms1,+mrms1);
title("Migration in True Velocity")
subplot(2,1,2); imshow(2 .* r1 ./ rmax,aspect="auto",cmap="gray");
colorbar(orientation="vertical");clim(-1,+1);
title("True Reflectivity");
tight_layout()
# -
# ## Remove workers
rmprocs(workers())
time_end = time()
@sprintf("Time to run notebook; %.2f minutes\n", (time_end - time_beg) / 60)
| 60_rtm/02_rtm_DynamicParallel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd0d7c91db6efb27241dc7f6f380fbdfa50e35bcd6db7293e24f2d5f262dd9ffeb4
# ---
# + [markdown] id="pHgZaeo0-efR"
# # IESTI01
# ### *<NAME>*
# ### 2020009562
# + [markdown] id="ijxKvT8iDNqq"
# ## Notebook 1
# + [markdown] id="u62Ou4gQ8Fik"
# **Exercise 1**
# + id="tFpug3R18DyY"
# Exercise 1: area of a circle computed from its diameter, A = pi * r^2.
pi = 3.14159  # approximate value used by the exercise
diameter = 3
radius = diameter / 2          # r = d / 2
area = pi * radius ** 2
print(f"The area of a circle with radius {radius} is {area}")
# + [markdown] id="5rxvBcDt8ooA"
# **Exercise 2**
# + id="wGIBsBaq8sj1"
# Exercise 2: exchange the contents of two variables via tuple unpacking.
a = [1, 2, 3]
b = [3, 2, 1]
b, a = a, b
print(a, b)
# + [markdown] id="paEc7ecf9nH_"
# **Exercise 3**
# + [markdown] id="A5FnsCks-CF6"
# a)
# + id="IefDRq2Q9r5m"
(5 - 3) // 2  # floor division: 2 // 2 -> 1
# + [markdown] id="5jNPCqf39zau"
# b)
# + id="sG8lGXiB-KPu"
8 - 3 * 2 - (1 + 1)  # precedence: 8 - 6 - 2 -> 0
# + [markdown] id="tABidxwBAF1q"
# **Exercise 4**
# + id="Q5Hoa2D0AKRA"
# Exercise 4: candies left over after an even three-way split.
alice_candies = 121
bob_candies = 77
carol_candies = 109
total_candies = sum([alice_candies, bob_candies, carol_candies])
to_smash = total_candies % 3  # remainder that cannot be shared evenly
print(f"They will smash {to_smash} candies")
# + [markdown] id="iGBSIDPzDRMH"
# ## Notebook 2
# -
# **Exercise 1**
# +
def round_to_two_places(num):
    """Round *num* to two digits after the decimal point."""
    rounded = round(num, 2)
    return rounded


round_to_two_places(3.14159)
# -
# **Exercise 2**
# +
# round() with a negative ndigits rounds to the *left* of the decimal point:
round(2.30, -1)      # -> 0.0  (nearest ten)
round(2.342212, -3)  # -> 0.0  (nearest thousand)
round(3735, -2)      # -> 3700 (nearest hundred)
# i.e. it zeroes the last |ndigits| digits, rounding to the nearest 10**|ndigits|
# -
# **Exercise 3**
# +
def to_smash(total_candies, number_of_friends = 3):
    """Return the candies left over after splitting evenly among friends."""
    return total_candies % number_of_friends


candies = input("How many candies are there?")
friends = input("How many people are they going to be divide in?")
# Bug fix: the original used `and`, so a single non-numeric answer escaped
# the loop and crashed on int() below. Re-prompt while EITHER is invalid.
while not candies.isnumeric() or not friends.isnumeric():
    candies = input("How many candies are there?")
    friends = input("How many people are they going to be split in?")
candies = int(candies)
friends = int(friends)
print(f"The number of smashed candies is = {to_smash(candies, number_of_friends = friends)}")
# -
# **Exercise 4**
round_to_two_places(9.9999) # was raound
x = -10
y = 5
# Which of the two variables above has the smallest absolute value?
smallest_abs = min(abs(x), abs(y)) # abs() only receives one parameter, so take the min of both
print(smallest_abs)
# +
def f(x):
    """Return the absolute value of x."""
    return abs(x)


print(f(5))
# -
# ## Notebook 3
# **Exercise 1**
# +
def sign(number):
    """Return 0 for zero, -1 for negative numbers, and 1 for positive numbers."""
    if number == 0:
        return 0
    # bug fix: the original tested `number < -1`, so values in (-1, 0)
    # such as -0.5 were wrongly reported as positive
    return -1 if number < 0 else 1


print(sign(0))
print(sign(-10))
print(sign(50))
# -
# **Exercise 2**
# +
def to_smash(total_candies):
    """Print how the candies are split between 3 people and return the leftover."""
    if total_candies > 1:
        candy_form = "candies"
    else:
        candy_form = "candy"
    print(f"Spliting {total_candies} {candy_form} between 3 people")
    return total_candies % 3


to_smash(91)
to_smash(1)
# -
# ** Exercise 3 **
# +
def concise_is_negative(number):
    """Return True when *number* is strictly below zero."""
    return number < 0


print(concise_is_negative(10))
print(concise_is_negative(-10))
# -
# ** Exercise 4 **
# +
def wants_all_toppings(ketchup, mustard, onion):
    """Return "the works" only when every topping was requested."""
    if ketchup and mustard and onion:
        return "the works"
    return "not the works"


print(wants_all_toppings(True, False, True))
print(wants_all_toppings(True, True, True))
# +
def wants_plain_hotdog(ketchup, mustard, onion):
    """Report whether the order asks for a hot dog with no toppings at all."""
    # De Morgan: "none of the toppings" == "not any topping"
    if ketchup or mustard or onion:
        return "doesn't want plain hot dog"
    return "wants plain hot dog"


print(wants_plain_hotdog(False, False, False))
print(wants_plain_hotdog(False, False, True))
# +
def exactly_one_sauce(ketchup, mustard, onion):
    """True when the order has ketchup or mustard but not both (onion is ignored)."""
    # exclusive-or of two booleans
    return ketchup != mustard


print(exactly_one_sauce(True, False, True))
print(exactly_one_sauce(True, True, True))
print(exactly_one_sauce(False, True, True))
# -
# ** Exercise 5 **
# +
def exactly_one_topping(ketchup, mustard, onion):
    """Return True when exactly one of the three toppings was requested."""
    # bools count as 0/1, so the sum is the number of requested toppings
    return (ketchup + mustard + onion) == 1


print(exactly_one_topping(True, False, False))
print(exactly_one_topping(False, True, False))
print(exactly_one_topping(False, False, True))
print(exactly_one_topping(False, True, True))
# -
# ## Notebook 4
# ** Exercise 1 **
# +
def select_second(L):
    """Return the second element of L, or None when L has fewer than two items."""
    if len(L) < 2:
        return None
    return L[1]


print(select_second([1,2,3,4,5,6]))
print(select_second([1]))
# -
# ** Exercise 2 **
def losing_team_captain(teams):
    """Return the captain (second player) of the last-placed team."""
    last_team = teams[-1]
    return last_team[1]


print(losing_team_captain([['Spo', 'UD', 'Jimmy', 'Bam'], ['Vogel', 'LeBron', 'AD', 'Caruso'], ['Nash', 'Durant', 'James', 'Irving']]))
# ** Exercise 3 **
# +
def purple_shell(racers):
    """Swap the first- and last-place racers in place (purple shell!)."""
    first, last = racers[0], racers[-1]
    racers[0] = last
    racers[-1] = first


racers = ['Mario', 'Luigi', 'Bowser', 'Yoshi']
purple_shell(racers)
print(racers)
# -
# **Exercise 4**
# +
a = [1, 2, 3]
b = [1, [2, 3]]
c = []
d = [1, 2, 3][1:]
# Put your predictions in the list below. Lengths should contain 4 numbers, the
# first being the length of a, the second being the length of b and so on.
# (b counts its nested list as ONE element; d is the 2-element slice [2, 3].)
lengths = [3, 2, 0, 2]
print(len(a), len(b), len(c), len(d))
# -
# ** Exercise 5 **
# +
def fashionably_late(arrivals, name):
    """Classify *name*'s arrival: "fashionably late" means arriving in the
    second half of the guest list without being the very last guest."""
    if name not in arrivals:
        return "This person was not at the party"
    position = arrivals.index(name)
    in_second_half = position > len(arrivals) / 2
    is_last = name == arrivals[-1]
    if in_second_half and not is_last:
        return "Was fashionably late"
    return "Was not fashionably late"


print(fashionably_late(["Adriano", "<NAME>", "Flora", "Zaza", "Vini A", "Livia E", "Fernanda"], 'Fernanda'))
print(fashionably_late(["Adriano", "<NAME>", "Flora", "Zaza", "Vini A", "Livia E", "Fernanda"], 'Adriano'))
print(fashionably_late(["Adriano", "<NAME>", "Flora", "Zaza", "Vini A", "Livia E", "Fernanda"], 'Livia E'))
print(fashionably_late(["Adriano", "<NAME>", "Flora", "Zaza", "Vini A", "Livia E", "Fernanda"], 'Zaza'))
print(fashionably_late(["Adriano", "<NAME>", "Flora", "Zaza", "Vini A", "Livia E", "Fernanda"], 'Leonardo'))
# -
# ## Notebook 5
# **Exercise 1**
# +
def has_lucky_number(nums):
    """Return True when at least one number in *nums* is divisible by 7.

    The original loop carried a redundant `else: continue`; any() expresses
    the same search directly and short-circuits on the first hit.
    """
    return any(num % 7 == 0 for num in nums)


print(has_lucky_number([1,2,21,19,34]))
print(has_lucky_number([1,2,19,34]))
# -
# ** Exercise 2 **
[1, 2, 3, 4] > 2
# +
def elementwise_greater_than(L, thresh):
    """Return a list of booleans: True where the element exceeds *thresh*."""
    return [element > thresh for element in L]


print(elementwise_greater_than([1,2,10,1,2,6,-10], 3))
# -
# **Exercise 3**
# +
def menu_is_boring(meals):
    """Return True when any meal appears more than once in *meals*.

    The original compared every meal against every later meal — O(n^2), and
    it leaned on list.index, which only ever finds the first occurrence.
    A set detects duplicates in a single O(n) pass.
    """
    return len(set(meals)) != len(meals)


menu_is_boring(['Salad', "Pasta", 'Lasagna', 'Steak', 'Salad', 'Rice'])
# -
# ## Notebook 6
# **Exercise 1**
# +
def is_valid_zip(zip_code):
    """Return True when *zip_code* is exactly 5 characters long and all digits.

    Bug fix: the original only checked the length, so non-numeric strings
    such as 'abcde' were accepted as valid ZIP codes.
    """
    return len(zip_code) == 5 and zip_code.isdigit()


print(is_valid_zip('12345'))
print(is_valid_zip('1234'))
print(is_valid_zip('123499'))
# -
# ** Exercise 2 **
# +
def search_one_word(word, key):
    """Return the indices of documents containing *key* as a whole word.

    Matching is case-insensitive and ignores '.' and ',' punctuation.
    Bug fix: the original shadowed its list parameter with the loop
    variable (`for word in word`), which worked only by accident.

    word: list of str (the documents; the parameter name is kept for callers)
    key: str
    """
    lowercased_docs = [doc.lower() for doc in word]
    answer = []
    for index, item in enumerate(lowercased_docs):
        words = item.replace(".", "").replace(",", "").split()
        print(words)
        if key in words:
            answer.append(index)
    print(answer)
    return answer


list1 = ['Game', 'testjooj', 'A perfect jooj', 'I trust jooj']
search_one_word(list1, 'jooj')
# +
def search_list_of_words(wordList, key):
    """Return indices of documents containing ANY of the given keywords.

    Matching is case-insensitive, ignores '.' and ',' and compares whole
    words. Bug fix: the original `break` only exited the inner keyword
    loop, so a document matching several keywords was reported once per
    match (e.g. [1, 2, 2, 3]); each index is now appended at most once.

    wordList: list of str (the documents)
    key: list of str (the keywords)
    """
    lowercased_docs = [doc.lower() for doc in wordList]
    answer = []
    for index, item in enumerate(lowercased_docs):
        words = item.replace(".", "").replace(",", "").split()
        print(words)
        if any(w in key for w in words):
            answer.append(index)
    print(answer)
    return answer


def lowercase(txt):
    """Return *txt* converted to lower case."""
    return txt.lower()


list1 = ['Game', 'perfect testjooj', 'A perfect jooj', 'I trust jooj']
key = ['perfect', 'jooj']
search_list_of_words(list1, key)
# -
# ## Notebook 7
# ** Exercise 1 **
# +
import pandas as pd
import matplotlib.pyplot as plt
# <NAME> points per game and assists per game stats (numbers next to real stats)
table_of_content = pd.DataFrame({'Points': [30, 25, 28, 23, 18, 21, 28, 45, 32, 27, 25, 16, 24],
                                 'Assists': [10, 8, 6, 12, 7, 8, 9, 9, 10, 7, 11, 12, 7]})
# Season averages for each stat.
points_per_game = table_of_content['Points'].mean()
print(f"Jimmy averaged {round(points_per_game, 1)} ppg")
assists_per_game = table_of_content['Assists'].mean()
print(f"Jimmy averaged {round(assists_per_game, 1)} apg")
# Per-game series plus horizontal reference lines at the season averages.
plt.ylabel('Assists, Points')
plt.xlabel('Games')
plt.plot(table_of_content['Points'], 'r--', table_of_content['Assists'], 'b^', assists_per_game, 'g-')
plt.axhline(y = assists_per_game, color = 'b', linestyle = '-', label = 'Assists')
plt.axhline(y = points_per_game, color = 'r', linestyle = '-', label = 'Points')
plt.legend()
plt.show()
| exercises/List1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Performance programming
# We've spent most of this course looking at how to make code readable and reliable. For research work, it is often also important that code is efficient: that it does what it needs to do *quickly*.
# It is very hard to work out beforehand whether code will be efficient or not: it is essential to *Profile* code, to measure its performance, to determine what aspects of it are slow.
# When we looked at Functional programming, we claimed that code which is conceptualised in terms of actions on whole data-sets rather than individual elements is more efficient. Let's measure the performance of some different ways of implementing some code and see how they perform.
# ## Two Mandelbrots
# You're probably familiar with a famous fractal called the [Mandelbrot Set](https://www.youtube.com/watch?v=AGUlJus5kpY).
# For a complex number $c$, $c$ is in the Mandelbrot set if the series $z_{i+1}=z_{i}^2+c$ (With $z_0=c$) stays close to $0$.
# Traditionally, we plot a color showing how many steps are needed for $\left|z_i\right|>2$, whereupon we are sure the series will diverge.
# Here's a trivial python implementation:
def mandel1(position, limit=50):
    """Count-down Mandelbrot escape time for the complex point *position*.

    Iterates z -> z**2 + c (with z0 = c), decrementing *limit* each step,
    until |z| >= 2. Returns the remaining budget at escape, or 0 when the
    point never escapes within the budget (treated as inside the set).
    """
    value = position
    while abs(value) < 2:
        limit -= 1
        value = value**2 + position
        if limit < 0:
            return 0
    return limit
# Region of the complex plane to sample and the grid resolution.
xmin=-1.5
ymin=-1.0
xmax=0.5
ymax=1.0
resolution=300
xstep=(xmax-xmin)/resolution
ystep=(ymax-ymin)/resolution
# Sample coordinates along each axis.
xs=[(xmin+(xmax-xmin)*i/resolution) for i in range(resolution)]
ys=[(ymin+(ymax-ymin)*i/resolution) for i in range(resolution)]
# %%timeit
data=[[mandel1(complex(x,y)) for x in xs] for y in ys]
data1=[[mandel1(complex(x,y)) for x in xs] for y in ys]
# %matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(data1,interpolation='none')
# We will learn this lesson how to make a version of this code which works Ten Times faster:
import numpy as np
def mandel_numpy(position,limit=50):
    """Vectorised escape-time calculation over a whole array of complex points.

    Returns an array of *position*'s shape holding the remaining iteration
    budget at the step each point first diverged (0 for points that never
    diverged within the budget).
    """
    value=position
    # 0 marks "not yet diverged"; filled with the countdown at first escape
    diverged_at_count=np.zeros(position.shape)
    while limit>0:
        limit-=1
        value=value**2+position
        # |z|^2 > 4  <=>  |z| > 2. NOTE(review): this compares a complex
        # product against a real scalar — relies on numpy handling the
        # zero imaginary part; confirm against the pinned numpy version.
        diverging=value*np.conj(value)>4
        # only record points that diverge *this* step
        first_diverged_this_time=np.logical_and(diverging, diverged_at_count==0)
        diverged_at_count[first_diverged_this_time]=limit
        # clamp diverged values so they don't overflow in later iterations
        value[diverging]=2
    return diverged_at_count
# Build the grid of complex sample points and run the vectorised version.
ymatrix,xmatrix=np.mgrid[ymin:ymax:ystep,xmin:xmax:xstep]
values=xmatrix + 1j*ymatrix
data_numpy=mandel_numpy(values)
# %matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(data_numpy,interpolation='none')
# %%timeit
data_numpy=mandel_numpy(values)
# Note we get the same answer (total absolute difference should be 0):
sum(sum(abs(data_numpy-data1)))
| ch08performance/010intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### class : 클래스
# - 변수와 함수를 묶어 놓은 개념
# - 사용방법
# - 변수와 함수가 들어있는 클래스를 선언
# - 클래스를 객체로 만들어서 클래스 안에 선언된 변수와 함수를 사용
# +
#### 1. 기본 클래스의 사용
# -
# 클래스의 선언
class Calculator:
    """Tiny calculator demo: two class attributes and two arithmetic methods."""

    num1 = 1
    num2 = 2

    def plus(self):
        """Return num1 + num2."""
        return self.num1 + self.num2

    def minus(self):
        """Return num1 - num2."""
        return self.num1 - self.num2
# Using the class: instantiate it, then access its variables and methods.
calc = Calculator()
calc
calc.num1, calc.num2, calc.plus(), calc.minus()
# Meaning of `self`: the instance object itself.
calc2 = Calculator()
calc2.num1 = 10
calc2.plus()
# ### 2. Object orientation
# - Develop by modeling the real world in code
# - Lets several developers write code efficiently to complete a project
# - Write a blueprint (class) -> build the actual object (instance)
# - A user-defined data type
calc2.plus()
obj = "python"
obj.upper()  # str is itself a class; upper() is one of its methods
# ### 3. 생성자
# - 클래스가 객체로 생성될 때 실행되는 함수
# - 변수(재료)를 추가할 때 사용됩니다.
class Calculator:
    """Calculator whose operands are supplied when the object is created."""

    # constructor: runs when the class is instantiated
    def __init__(self, num1, num2 = 10):
        self.num1 = num1
        self.num2 = num2

    def plus(self):
        """Return num1 + num2."""
        return self.num1 + self.num2

    def minus(self):
        """Return num1 - num2."""
        return self.num1 - self.num2


calc1 = Calculator(3)
calc1.plus()
# str.join: concatenate the list items with the separator string between them
ls = ["python", "is", "good"]
sep = " "
sep.join(ls)
# pandas DataFrame — another example of instantiating a class
import pandas as pd
df = pd.DataFrame([
    {"name":"jin", "age":20},
    {"name":"andy", "age":21},
])
df
| python/06_Class.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Client Recommendation
#
# * Frequent Pattern mining doesn't work well for this data set.
# * Therefore I'm using similarity recommendation.
# * The assumption here is, a client will purchase similar products they had purchased before.
import pandas as pd
import numpy as np
import math
# +
# Load the train/test order histories (pickled DataFrames).
all_order_train = pd.read_pickle('../all_order_train.pkl')
all_order_test = pd.read_pickle('../all_order_test.pkl')
print(all_order_train.shape, all_order_test.shape)
all_order_train.head()
# -
# Clients that appear in both the train and test periods.
shared_clients = set(all_order_train['user_id'].values).intersection(all_order_test['user_id'].values)
print(len(shared_clients))
# +
# For each shared client, count the distinct weeks in which they ordered.
train_shared_client_week_df = all_order_train.loc[all_order_train['user_id']\
                            .isin(list(shared_clients))][['user_id', 'week_number']]\
                            .astype('str').drop_duplicates()\
                            .groupby(['user_id'], as_index=False)['week_number']\
                            .agg(['count']).reset_index()\
                            .sort_values(['count'], ascending=False)
train_shared_client_week_df.head()
# -
target_client = 183503  # the client we generate recommendations for
# ## Client Match Score & Discount
# +
# Per-merchant totals: number of distinct client rows and total sales.
merchant_sales_df = all_order_train[['merchant', 'user_id', 'price']].drop_duplicates()\
                    .groupby(['merchant'], as_index=False)\
                    .agg({'user_id': 'count', 'price': 'sum'})
merchant_sales_df.head()
# +
merchant_sales_df['avg_sales_per_client'] = round(merchant_sales_df['price']/merchant_sales_df['user_id'], 4)
merchant_sales_df.head()
# +
# Per-client totals: overall spend and number of orders.
client_purchase_df = all_order_train[['user_id', 'price', 'order_id']].drop_duplicates()\
                    .groupby(['user_id'], as_index=False)\
                    .agg({'price': 'sum', 'order_id': 'count'})
client_purchase_df.head()
# +
client_purchase_df['avg_purchase'] = round(client_purchase_df['price']/client_purchase_df['order_id'], 4)
client_purchase_df.head()
# +
# Match score: distance between the target client's average basket and the
# merchant's average sales per client (0 = perfect match; lower is better).
merchant_sales_df['merchant_match_score'] = abs(1 -
    client_purchase_df.loc[client_purchase_df['user_id']==target_client]['avg_purchase'].values[0]\
    /merchant_sales_df['avg_sales_per_client'])
# NOTE(review): 0.0000007 looks like a tuned magic number for the discount —
# confirm its intended meaning before reusing.
merchant_sales_df['discount'] = merchant_sales_df['price'] * 0.0000007
merchant_sales_df = merchant_sales_df.sort_values(by='merchant_match_score')
merchant_sales_df.head()
# -
# ## Product Similarity Recommendation
all_order_train.head()
# +
# Client most purchased products
client_products_df = all_order_train.loc[all_order_train['user_id']==target_client][['order_id', 'product_id',
'product_name', 'price', 'week_number']]
client_products_df.head()
# +
client_purchase_amount_df = client_products_df[['product_name', 'order_id']].drop_duplicates()\
.groupby(['product_name'], as_index=False)['order_id']\
.agg(['count']).reset_index()\
.sort_values(['count'], ascending=False)
client_purchase_amount_df.head()
# -
favorite_prod_lst = client_purchase_amount_df.loc[client_purchase_amount_df['count'] >= 6]['product_name'].values
favorite_prod_lst
# +
merchant_prod_df = pd.read_pickle('../merchant_product.pkl')
print(merchant_prod_df.shape)
prod_category = merchant_prod_df[['aisle_id', 'department_id', 'aisle', 'department']].drop_duplicates()
prod_category = prod_category.sort_values(by=['aisle_id', 'department_id'])
print(prod_category.shape)
prod_category = prod_category.sort_values(by=['department_id', 'aisle_id'])
print(prod_category.shape)
prod_df = merchant_prod_df[['product_id', 'product_name', 'aisle_id', 'department_id', 'aisle', 'department']].drop_duplicates()
print(prod_df.shape)
prod_df['norm_aisle'] = round((prod_df['aisle_id'] - min(prod_df['aisle_id']))/(max(prod_df['aisle_id']) - min(prod_df['aisle_id'])), 4)
prod_df['norm_dept'] = round((prod_df['department_id'] - min(prod_df['department_id']))/(max(prod_df['department_id']) - min(prod_df['department_id'])), 4)
print(min(prod_df['norm_aisle']), max(prod_df['norm_aisle']), min(prod_df['norm_dept']), max(prod_df['norm_dept']))
prod_df.head()
# -
def get_prod_similarity(target_aisle, target_dept, aisle, dept):
    """Similarity in [0, 1]: 1 minus the normalised Euclidean distance
    between two products in (aisle, department) space, rounded to 4 dp."""
    aisle_gap = target_aisle - aisle
    dept_gap = target_dept - dept
    distance = math.sqrt((aisle_gap ** 2 + dept_gap ** 2) / 2)
    return round(1 - distance, 4)
# +
import warnings
warnings.filterwarnings("ignore")
# For each favorite product, find the 4 most similar OTHER products.
prod_lst = []
for target_prod in favorite_prod_lst:
    other_prod_df = prod_df.loc[prod_df['product_name'] != target_prod]
    target_aisle = prod_df.loc[prod_df['product_name'] == target_prod]['norm_aisle'].values[0]
    target_dept = prod_df.loc[prod_df['product_name'] == target_prod]['norm_dept'].values[0]
    other_prod_df['similarity'] = other_prod_df.apply(lambda r: get_prod_similarity(target_aisle, target_dept,
                                                  r['norm_aisle'], r['norm_dept']), axis=1)
    other_prod_df = other_prod_df.sort_values(by='similarity', ascending=False)
    print(target_prod)
    print('Recommended products: ', list(other_prod_df['product_name'].values[0:4]))
    prod_lst.extend(list(other_prod_df['product_name'].values[0:4]))
    print()
prod_lst
# -
# * I can't believe "glitter Candles" is in "canned jarred vegetables" aisle 😱😱... Feel like this dataset was cleaned by computer vision.
# * But think again, lightening a candle while eating salad is, romantic and cozy 💖💖
# * And overall, this type of product recommendation does look interesting and useful ❣❣
# ## Recommend Merchants
#
# * There is no geo location data here, otherwise will definitely consider the geo part.
# * Rank top matched merchants with total amount of money the client can save.
merchant_sales_df.head()
# The 7 merchants whose clientele best matches the target client's spending.
top_matched_merchant_lst = merchant_sales_df['merchant'].values[0:7]
top_matched_merchant_lst
merchant_prod_df.head()
def get_merchant_discounts(merchant, recommended_set):
    """Return (recommended products this merchant stocks, total saving).

    The saving is the merchant's per-item discount multiplied by the number
    of recommended products the merchant actually carries.
    """
    stocked = set(merchant_prod_df.loc[merchant_prod_df['merchant']==merchant]['product_name'].unique())
    overlap = stocked & recommended_set
    per_item_discount = round(merchant_sales_df.loc[merchant_sales_df['merchant']==merchant]['discount'].values[0], 4)
    return overlap, round(per_item_discount * len(overlap), 4)
# +
recommeded_merchant_dct = {}
for merchant in top_matched_merchant_lst:
recommended_set, total_save = get_merchant_discounts(merchant, set(prod_lst))
recommeded_merchant_dct[merchant] = {'total_save': total_save, 'recommended_set': recommended_set}
order_dct = {k: v for k, v in sorted(recommeded_merchant_dct.items(), key=lambda item: item[1]['total_save'], reverse=True)}
# -
for merchant, recommendation in order_dct.items():
print(merchant)
print('Total Save $' + str(recommendation['total_save']))
print('Recommended Items:', recommendation['recommended_set'])
print()
| Bank_Fantasy/Golden_Bridge/recommendation_experiments/client_recommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="e2875ace-b565-4b60-9e69-79ad6ca3566b" _uuid="d2c165a9adf3c495c9ce377e21445bd654d8de92"
# # TED-Talks transcripts text processing tutorial
# + [markdown] _cell_guid="c4083d03-2b19-4e95-bf16-bb9486f12d4a" _uuid="4581de0b5caee13f78a4a3881155e34481df47f6"
# In this notebook we will study text processing, passing through feature extraction to topic modeling in order to (1) have a first meet with text processing techniques and (2) analyze briefly some TED-Talks patterns.
#
# In the amazing TED-Talks dataset, we have two files, one (ted_main.csv) with meta information about the talks, as # of comment, rating, related TEDs and so on; the other file has the transcripts which we'll care about in this tutorial. Even so, we'll use the ted_main.csv file to evaluate our topic modeling implementation, because it has a columns of talks' tags, useful as our "ground truth topics".
# + _cell_guid="3388da79-2238-47ff-9d86-45438ad3ea8d" _uuid="5d90eeb14490a2f69e98d1a8835e3018cded02dd"
import numpy as np
import pandas as pd
from time import time
# + [markdown] _cell_guid="3654edf4-32c6-456e-a998-60d508433f8b" _uuid="a3c8331a832f874409c31f3704f8e7c8515e22f1"
# ### 0.1. Transcripts loading
# + _cell_guid="e3891acc-b56a-4f07-8666-8c62227c2ae9" _uuid="a842242197826ec99193ca599dd1667a6a4ff1e1"
# Load talk metadata (used later for the ground-truth tags) and transcripts.
ted_main_df = pd.read_csv('data/ted_main.csv', encoding='utf-8')
transcripts_df = pd.read_csv('data/transcripts.csv', encoding='utf-8')
transcripts_df.head()
# + [markdown] _cell_guid="800b6bc6-d049-435e-b9ff-76543d50cfe3" _uuid="6bb30749ff647b7f9b38c7e4f153e337ae7c8fa0"
# ### 0.2. Stopwords loading
#
# "...stop words are words which are filtered out before or after processing of natural language data (text).[1] Though "stop words" usually refers to the most common words in a language, there is no single universal list of stop words used by all natural language processing tools, and indeed not all tools even use such a list." (Wikipedia)
# + _cell_guid="c95abe50-03ef-4036-98da-c64b94807842" _uuid="275b0f0b67ff8857b5f6749252197691380c4be5"
from sklearn.feature_extraction import text
stopwords = text.ENGLISH_STOP_WORDS  # scikit-learn's built-in English stop-word list
# + [markdown] _cell_guid="fe118ab9-476b-42df-ac84-2dd5864fb657" _uuid="7c225d935047b08e3817df7ad590dfa6ec0417d1"
# ## 1. Text feature extraction with TFIDF
#
#
# First, consider the term-frequency (TF) matrix above, that can be extracted from a list of documents and the universe of terms in such documents.
#
# | | Document 1 | Document 2 | ... | Document N |
# |--------|------------|------------|-----|------------|
# | Term 1 | 3 | 0 | ... | 1 |
# | Term 2 | 0 | 1 | ... | 2 |
# | Term 3 | 2 | 2 | ... | 1 |
# | ... | ... | ... | ... | ... |
# | Term N | 1 | 0 | ... | 0 |
#
#
# This is a huge matrix with all elements' frequency in all documents. Now consider de idf (inverse document frequency) as an operation to transform this frequency into word importance, calculated by:
#
# $$ tfidf_{i,j} = tf_{i,j} \times log(\frac{N}{df_{i}}) $$
#
# Where $i$ refers to term index and $j$ document index. $N$ is the total number of documents and $df_{i}$ is the number of documents containing $i$.
#
#
# + _cell_guid="0664d2fd-070d-4712-95cc-3ce34b5a0d3c" _uuid="eb83b0a6d190cd99b6376071e578dd11660fe333"
from sklearn.feature_extraction.text import TfidfVectorizer
# Document-frequency cutoffs drop very rare and very common words
# before the TF-IDF weighting is applied.
vectorizer = TfidfVectorizer(stop_words=stopwords,
                             min_df = 0.05, # cut words present in less than 5% of documents
                             max_df = 0.5)  # cut words present in more than 50% of documents
t0 = time()
tfidf = vectorizer.fit_transform(transcripts_df['transcript'])
print("done in %0.3fs." % (time() - t0))
## uncomment this to visualize tfidf
# print(' (j,i) tfidf')
# print(tfidf)
# + [markdown] _cell_guid="7fa1c608-f090-4cca-93c4-e791b5dfd639" _uuid="2ff07e6661353a7042fc768086c597ca5ad1ab9d"
# Let's see what we can do with TFIDF...
#
# In our TFIFD definition, min_df and max_df were filtering words with document frequency between 5% and 50%. In another words, we're excluding very rare words (present in less than 5% of documents) and the common ones (present in more than
# 50% of them).
#
# Keeping that in mind, we'll want to see the most important words in our matrix...
# + _cell_guid="61b9d9ad-3624-41c1-b492-9d9855ded2fe" _uuid="9a1fd8b2ceeccdd44d8631ae8d33eb2e2eb48d43"
# Let's make a function to call the top ranked words in a vectorizer
def rank_words(terms, feature_matrix):
    """Rank vocabulary *terms* by their summed TF-IDF weight over all documents.

    feature_matrix is the (documents x terms) TF-IDF matrix; returns a
    DataFrame with columns ['term', 'rank'] sorted by rank, descending.
    """
    column_sums = feature_matrix.sum(axis=0)
    rows = [(term, column_sums[0, col]) for col, term in enumerate(terms)]
    return pd.DataFrame(rows, columns=['term','rank']).sort_values('rank', ascending=False)
# NOTE(review): get_feature_names() is removed in newer scikit-learn;
# get_feature_names_out() is the replacement — confirm the pinned version.
ranked = rank_words(terms=vectorizer.get_feature_names(), feature_matrix=tfidf)
ranked.head()
# + _cell_guid="918b522b-c239-4c5d-9a78-321b197885ed" _uuid="d11a36b552bb49ec9fb01252b927ca28e6084e6c"
# Visualise the top-10 most important words as a bar chart.
n_ranked = 10
y = ranked['term'].values[:n_ranked]
x = ranked['rank'].values[:n_ranked]
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12,4))
index = np.arange(n_ranked)
bar_width = 0.35
rects = plt.bar(index, x, bar_width, color='r')
plt.title('Most important words on TED-Talks')
plt.xticks(index , y)
plt.show()
# + [markdown] _cell_guid="76d35ca7-2814-483f-bb00-986641b13477" _uuid="9e419ac0904aa54c5a3a92c060af3ac79c4f5f16"
# ## 2. Topic modeling (using LDA)
#
# ... In construction
# + _cell_guid="79ecdca2-0be0-4eb2-98b9-d6fcb3d12966" _uuid="5daa58197360e432d7c1258749f3b0e0d3012047"
from sklearn.decomposition import NMF
# NMF factorises TF-IDF into 10 topics. NOTE(review): the section title
# says LDA, but the code actually uses NMF.
n_topics = 10
nmf = NMF(n_topics, random_state=0)
# NMF fitting: topics is the (documents x topics) weight matrix
t0 = time()
topics = nmf.fit_transform(tfidf)
print("done in %0.3fs." % (time() - t0))
# + _cell_guid="5a10e9a6-bd17-4afd-9994-9bdf4421c3ca" _uuid="4f2a089fe5e83b29fe38f7c3085f97f9c22218e0"
# Exploring the topics: print the 5 highest-weighted words of each topic.
top_n_words = 5
for t_id, t in enumerate(nmf.components_):
    t_words = [vectorizer.get_feature_names()[i] for i in t.argsort()[:-top_n_words - 1:-1]]
    print("Topic #{}:".format(t_id),t_words)
# + _cell_guid="6ffba558-1f16-4316-b321-2419e34c08f5" _uuid="eb2e12c8952bf9af31590cdae055e441f8a5f358"
from sklearn.pipeline import Pipeline
# Chain the vectoriser and NMF so a raw transcript maps straight to topic weights.
pipe = Pipeline([
    ('tfidf', vectorizer),
    ('nmf', nmf)
])
document_id = 4
t = pipe.transform([transcripts_df['transcript'].iloc[document_id]])
# Report every topic whose weight exceeds the threshold for this document.
threshold = 0.1
for i, p in enumerate(t[0]):
    if p>threshold:
        print('Document number {} talk about topic #{}'.format(document_id, i))
print('\nTranscript:')
print(transcripts_df['transcript'].iloc[document_id][:500],'...')
# + _cell_guid="e3658c0e-8cc3-4c87-8b40-b16147b86f8a" _uuid="80c9f76fe370f06eccbef7de30979d17fb7ffd3c"
# Compare against the talk's human-assigned tags from the metadata file.
talk = ted_main_df[ted_main_df['url']==transcripts_df['url'].iloc[document_id]]
print(talk['tags'].values)
talk
# -
# # Conclusions
#
# After topic modeling, we note that the topics extracted by NMF are very similar to the TED *tags* attribute. Maybe you can execute this notebook and try it out! Change the **document_id** variable and see what happens! And don't forget to star the repo if it helps you!
| __notebook__.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DeepCTR's SVD
# ________________________
# SVD is matrix factorization algorithm popularly used in recommedation system applications such as movie & product recommendation.
#
# This factorization technique is available for training & testing on recommendation datasets through libraries such as surpriselib which analytically does the factorization & produces decomposed matrices.
# Whereas DeepCTR packages several FM techniques implemented through their DNN equivalents. Here one DeepCTR's method DeepFM is utilised to realise the implementation equivalence of SVD; Since the SVD results are here obtained through underlying Deep Neural Net, therefore DeepCTR's SVD.
# **The following notebook serves as Usage guide**
# _______________________________________________
# * The SVD module requires passing feature_column value (which are nothing but `SparseFeat` instances for each input sparse feature) to obtain a tensorflow model.
# * Towards the end, the obtained model is evaluated against sample test values.
# ## Step 1. Load sample dataset as pandas dataframe
# ___________________________________
# * List `sparse_features` & label encode input dataframe.
# * Perform `train_test_split` to output training/test data and labels for model training.
# +
import os
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from deepctr.models import DeepFM
from deepctr.inputs import SparseFeat
# Tab-separated ratings file — presumably MovieLens-100k `u.data`; confirm.
data_path = os.path.expanduser('u.data')
df= pd.read_csv(data_path, sep='\t',names= 'user_id,movie_id,rating,timestamp'.split(','))#, header=None)#used for DeepCTR
# -
# * List **sparse features** from input dataframe
# ________________________________________________
sparse_features = ["user_id", "movie_id"]
y= ['rating']
print('feature names:',sparse_features, '\nlabel name:',y)
# * Label encoding features of input dataframe
# __________________________________
# +
# Map raw ids to contiguous integer codes, as the embedding layers require.
for feat in sparse_features:
    lbe = LabelEncoder()
    df[feat] = lbe.fit_transform(df[feat])
df.head(3)
# -
# **Preparing training input data & target labels.**
# _____________________________________________
# * Training & test input data should be a list of numpy arrays of `user_ids` & `movie_ids`.
# * Labels as numpy array of target values.
# +
# 80/20 split; model inputs are lists of id arrays, labels are rating values.
train, test = train_test_split(df, test_size=0.2)
train_model_input = [train[name].values for name in sparse_features]#includes values from only data[user_id], data[movie_id]
train_lbl = train[y].values
test_model_input = [test[name].values for name in sparse_features]
test_lbl = test[y].values
# -
print('training data:\n', train_model_input, '\n\ntraining labels:\n', train_lbl)
# ## Step 2. Obtain feature columns
# ________________________________________________
# * Perform required data preparatory operations as described in DeepCtr docs (refer https://deepctr-doc.readthedocs.io/en/latest/Quick-Start.html).
#
# * Defining **feature columns** as list of SparseFeat instances for each sparse feature, here -- `user_id`, `movie_id`, by passing in `feature_name`, `num_unique feature vals` as arguments.
# One SparseFeat per id column: (feature name, vocabulary size).
feature_columns = [SparseFeat(feat, df[feat].nunique()) for feat in sparse_features]
feature_columns
# ## Step 3. Import `SVD` from `mlsquare.layers.deepctr`
# ____________________________________________
# * Instantiate the model.
# * Train the model & evaluate results.
from mlsquare.layers.deepctr import SVD
# * Now Instantiate the model by passing in args-- `feature_columns` & `embedding_size`
model = SVD(feature_columns, embedding_size=100)
model.summary()
# * Compile the model & fit on train data
model.compile("adam", "mse", metrics=['mse'] )
history = model.fit(train_model_input, train_lbl, batch_size=64, epochs=8, verbose=2, validation_split=0.2,)
# * Evaluating model prediction on test data.
user_id = test_model_input[0][1]
item_id = test_model_input[1][1]
true_y= test[y].values[1]
print('For test user id: {} & item id : {} \nTrue rating: {} \nModel prediction is: {}'.format(user_id, item_id, true_y, model.predict(test_model_input)[1]))
| examples/DeepctrSVD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: IPython (Python 3)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
# +
def _trim_float_str(s):
    """Return the shortest string form of a float string, e.g. '0.50' -> '0.5', '10.0' -> '10'."""
    return s.rstrip('0').rstrip('.') if '.' in s else s


def _coph_boxplot(df, by_col):
    """Boxplot of the cophenetic coefficient grouped by column `by_col`."""
    plt.figure()
    df.boxplot(by=by_col, column='coph', figsize=(8, 5), fontsize=15)
    plt.xlabel("Number of factors assigned", size=20)
    plt.ylabel('Cophenetic coefficient', size=20)
    plt.title('')
    plt.show()
    plt.close()


tau = 100
seed = 1
tau_seed_fn = 'tau%d_seed%d' % (tau, seed)
fn = 'output/%s_choose_paras.txt' % tau_seed_fn
# One row per sn-spMF run: hyper-parameters (K, alpha1, lambda1), cophenetic
# coefficient ('coph'), the learned number of factors and the optimal run index.
result = pd.read_csv(fn, sep='\t')
result['nfactor'] = [round(x) for x in result['nfactor']]
# +
## 2.1 Examine the cophenetic coefficient for different number of factors
_coph_boxplot(result, 'K')
_coph_boxplot(result, 'nfactor')
## Narrow down the range of number of factors to have median cophenetic coefficient > 0.9
result = result[result['nfactor'] >= 5]
# +
## 2.2. Filter out implementations with low cophenetic coefficient
## tried 0.85, 0.8 - result in the same optimal solution
res = result[result['coph'] >= 0.9]
# +
## 2.3. Choose the implementation with the most independent factors
res = res.sort_values('correlation')
i = 0
K = str(int(res.iloc[i]['K']))
# Strip trailing zeros so the values match the file names written by the
# factorization runs (used to build `ffn` below).
a1 = _trim_float_str(str(res.iloc[i]['alpha1']))
l1 = _trim_float_str(str(res.iloc[i]['lambda1']))
run_idx = int(res.iloc[i]['run_optimal'])
# +
## Visualize the learned factor matrix
factor = 'K%s_a1%s_l1%s' % (K, a1, l1)
ffn = 'output/%s/sn_spMF_%s/sn_spMF_FactorMatrix_%s_Run%s.txt' % (tau_seed_fn, factor, factor, run_idx)
print(ffn)
fM = pd.read_csv(ffn, sep='\t')
sns.heatmap(fM, cmap='Blues')
plt.show()
plt.close()
| simulation/choose_paras_sn_spMF_simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import Python packages
import pickle
# Third-Party Imports
import matplotlib.pyplot as plt
import numpy as np
# -
def score_p(results, true_coeffs):
    """Attach a relative-error 'score' to each result dict in `results`.

    For each result, the estimated coefficient vector result['p'] is compared
    against the centred window of true_coeffs['p'] of matching length, and
    ``score = ||true - est|| / ||true||`` (Euclidean norms) is stored under
    result['score']. Returns the same list, mutated in place.
    """
    true_p = np.asarray(true_coeffs['p'])
    for result in results:
        est_p = np.asarray(result['p'])
        # The true vector may be longer than the estimate; trim equally on
        # both sides to align the two.
        trim = (len(true_p) - len(est_p)) // 2
        # Use an explicit end index: the previous `[trim:-trim]` slice
        # returned an EMPTY array when trim == 0 (equal lengths).
        ref = true_p[trim:len(true_p) - trim]
        result['score'] = np.linalg.norm(ref - est_p) / np.linalg.norm(ref)
    return results
# +
# Figure 5a: p(x) estimation error as a function of noise magnitude.
file_stem = "./data/Fig5a-NLSL-"
# NOTE(review): open() without a context manager leaves closing the file
# handles to the garbage collector -- consider `with open(...)`.
results = pickle.load(open(file_stem +"results.pickle", "rb"))
true_coeffs = pickle.load(open("./data/S2-NLSL-coeffs.pickle", "rb"))
results = score_p(results, true_coeffs)
# +
# Collect x values (noise magnitude) and y values (relative error) to plot.
plot_nm = []
plots_sc = []
for result in results:
    plot_nm.append(result['noise_mag'])
    plots_sc.append(result['score'])
# +
import matplotlib as mpl
# Legend styling shared with the stand-alone legend figure rendered below.
mpl.rcParams["legend.markerscale"] = 1.5
mpl.rcParams["legend.labelspacing"] = 1.2
mpl.rcParams["legend.handlelength"] = 3.5
mpl.rcParams["legend.handletextpad"] = 20
figsize = (6,4)
# Create figure
plt.figure(figsize=figsize)
# set axes
ax1 = plt.gca()
#ax1.autoscale(False, axis='y')
# Plot the results
#ax1.set_yscale('symlog')
#loss_plot = [losses[idc] for idc in idcs]
# NOTE(review): linestyle=None falls back to matplotlib's default style; if
# "no connecting line" was intended, the string 'None' is required instead.
pltstyle=dict(linestyle=None,marker='o')
ax1.semilogy(plot_nm, plots_sc, color='black', label = "p(x) Error", **pltstyle)
# Place the legend
lines = ax1.get_lines()
labels = [line.get_label() for line in lines]
# Blank the labels here; the real legend is saved as a separate figure below.
labels = ['' for line in lines]
# Turn off all the tick labels
ax1.tick_params(labelbottom=False, labelleft=False)
#plt.hlines(0.15, 0, 0.15, linestyles="--")
plt.xlim([0,0.1])
plt.savefig('./Figs/5a-NLSL-pq-noise-vs-error.svg', dpi=600, transparent=True)
# Create separate axes
# Render the legend on its own wide, short figure with no visible frame/ticks.
legend_figsize = (figsize[0]*2, figsize[1]/5)
plt.figure(figsize=legend_figsize)
ax = plt.gca()
for spine in ax.spines:
    ax.spines[spine].set_visible(False)
ax.tick_params(labelleft=False, labelbottom=False, left=False, bottom=False)
plt.legend(lines, labels, ncol=2, loc='center', frameon=False)
plt.savefig('./Figs/5-legend.svg', dpi=600, transparent=True)
plt.show()
# +
# Figure 5b: estimation error as a function of the number of trials.
file_stem = "./data/Fig5b-NLSL-"
results = pickle.load(open(file_stem +"results.pickle", "rb"))
true_coeffs = pickle.load(open("./data/S2-NLSL-coeffs.pickle", "rb"))
results = score_p(results, true_coeffs)
plot_trials = []
plots_sc = []
for result in results:
    plot_trials.append(result['num_trials'])
    plots_sc.append(result['score'])
# +
import matplotlib as mpl
mpl.rcParams["legend.markerscale"] = 1.5
mpl.rcParams["legend.labelspacing"] = 1.2
mpl.rcParams["legend.handlelength"] = 3.5
mpl.rcParams["legend.handletextpad"] = 20
pltstyle=dict(linestyle=None,marker='o')
figsize = (6,4)
# Create figure
plt.figure(figsize=figsize)
# set axes
ax1 = plt.gca()
# Plot the results
#plt.hlines(0.15, 0, 200, linestyles="--")
plt.semilogy(plot_trials, plots_sc, 'o-', color='k')
# Place the legend
lines = ax1.get_lines()
labels = [line.get_label() for line in lines]
labels = ['' for line in lines]
# Turn off all the tick labels
ax1.tick_params(labelbottom=False, labelleft=False)
# Save figure
plt.savefig('./Figs/5b-NLSL-trials-vs-error.svg', dpi=600, transparent=True)
plt.show()
# -
print(plots_sc)
| Fig 5 - Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral string (e.g. 'MCMXCIV') to its integer value.

        Scans right-to-left: a symbol whose value is smaller than the value
        of the symbol to its right is subtractive (e.g. the I in IV),
        otherwise it is additive. O(len(s)) time, O(1) extra space.
        """
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = 0  # renamed from `sum`, which shadowed the builtin
        prev = 0   # value of the symbol immediately to the right
        for ch in reversed(s):
            value = values[ch]
            if value < prev:
                total -= value
            else:
                total += value
            prev = value
        return total
| easy/roman to integer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # London ASU model
# ## Requirements and module imports
#
# Code in this simulation uses a standard Anaconda Python environment (https://www.anaconda.com/distribution/#download-section). Additionally this model uses SimPy3 (https://simpy.readthedocs.io/en/latest/). Install SimPy3 with `pip install 'simpy<4'`.
import simpy
import inspect
from sim_utils.replication import Replicator
from sim_utils.parameters import Scenario
# ## Set up scenarios
#
# Parameters defined in scenarios will overwrite default values in the parameters python file.
# +
# Set up a dictionary to hold scenarios
scenarios = {}
# Baseline scenario (model defaults)
scenarios['constrained_beds'] = Scenario(
    allow_non_preferred_asu=False)
# -
# ## Run model
# Run every scenario `n_replications` times. (Previously the name
# `replications` held the replication count and was then rebound to the
# Replicator object; distinct names avoid that shadowing.)
n_replications = 30
replicator = Replicator(scenarios, n_replications)
replicator.run_scenarios()
# ## Show model default parameters
#
# Run the code below to model defaults (these are over-ridden by scenario values above).
print(inspect.getsource(Scenario.__init__))
| experiments/centralisation/all_units/london_asu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit (conda)
# language: python
# name: python3
# ---
import pandas as pd

# Small demo Series of single characters for the counting helpers.
letters = pd.Series(list('asedasaeadaeadase'))
letters
# Distinct values, in order of first appearance.
letters.unique()
# Frequency of each distinct value, most frequent first.
letters.value_counts()
# Same helpers applied to a real column of the rental dataset.
dados = pd.read_csv('../data/aluguel/aluguel.csv', sep=';')
dados['Tipo'].unique()
dados['Tipo'].value_counts()
| data/extras/dados/contadores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/pv-912/predict_number/blob/master/Submission1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={} colab_type="code" id="hhIHOHgqEY_U"
import pickle
# Load the previously trained model from disk.
filename = 'finalized_model.sav'
model = pickle.load(open(filename, 'rb'))
# + colab={} colab_type="code" id="YY0jWI-IEjWL"
import pandas as pd
df = pd.read_csv('gdrive/My Drive/learning/predict_number/data/train.csv')
test = pd.read_csv('gdrive/My Drive/learning/predict_number/data/test.csv')
sample = pd.read_csv('gdrive/My Drive/learning/predict_number/data/sample.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="4X2V9WtuEpXT" outputId="8e423712-ff7f-4eee-cb93-9bf91778eda9"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# !pip install mpld3
import mpld3
mpld3.enable_notebook()
import pandas as pd
# 'display.height' was removed in pandas >= 0.25; guard it so this cell also
# runs on modern pandas instead of raising.
try:
    pd.set_option('display.height', 1000)
except Exception:
    pass
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# output all lines
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="eTJL-iiqFMPq" outputId="17dbe974-2ff4-4634-f2ea-f8ebf30ae94c"
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
label_encoder = LabelEncoder()
temp = test.copy()
# NOTE(review): the encoder is fitted on the test set only; verify the
# training pipeline produced the same category-to-code mapping.
temp.iloc[:, 1] = label_encoder.fit_transform(test.iloc[:, 1])
temp.head()
# + colab={} colab_type="code" id="E5VgW6A4G0LG"
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler
X = temp[["Reputation", "Answers", "Views"]].values.astype(np.float64)
min_max_scaler = MinMaxScaler()
# Scale each feature to [0, 1]. (A duplicate fit_transform call whose result
# was discarded has been removed.)
scaled_data = pd.DataFrame(min_max_scaler.fit_transform(X), columns=["Reputation", "Answers", "Views"])
scaled_data.head()
# Replace the raw columns with their scaled versions.
temp.drop(columns=["Reputation", "Answers", "Views"], inplace=True)
temp = pd.concat([temp, scaled_data], axis=1)
# temp.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="reZ6nfukG0IM" outputId="3d0aee26-dffd-42de-ea34-b4625cd01a81"
temp.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" id="oy7NfVLsG0D7" outputId="d475add3-81c1-4806-e593-55f31c7e6ea7"
# One-hot encode the (label-encoded) tag column.
onehot = pd.get_dummies(temp.iloc[:, 1], prefix='tag')
# onehot.head()
temp = pd.concat([temp, onehot], axis=1)
temp.head()
# + colab={} colab_type="code" id="cDH6S6YqG0AC"
# Predict using the feature subset the model expects.
feature_cols = ['Reputation', 'Views']
X = temp[feature_cols]
y_pred = model.predict(X)
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="7tKR4U37Ga7E" outputId="f469be37-d505-4ffb-d3e2-783461533893"
# y_pred
df.Upvotes.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="X7DV5UTHIXOM" outputId="f27face1-224b-4c4a-cdad-324636e85219"
# Rescale predictions back to the original Upvotes range.
# NOTE(review): assumes the training target was normalized by df.Upvotes.max();
# confirm against the training notebook.
maxi = y_pred * df.Upvotes.max()
maxi = pd.DataFrame(maxi, columns=["Upvotes"])
maxi.head()
# + colab={} colab_type="code" id="tCgeaUq6Ie1q"
test.ID
# + colab={} colab_type="code" id="7GpIjaBHJcdn"
submission = pd.concat([test['ID'], maxi['Upvotes']], axis=1, keys=['ID', 'Upvotes'])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="V-_C1ViJJwxO" outputId="8ab6a870-e973-4a75-ec9c-d5cdaa97995d"
submission.head()
# + colab={} colab_type="code" id="CgPMorh-J94D"
# NOTE(review): '/sub.csv' writes to the filesystem root; './sub.csv' was
# probably intended outside of Colab.
submission.to_csv('/sub.csv')
| Submission1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
import time

# +
# Base Vega-Lite spec: a scatter ("point") plot fed by an initially empty
# named data source called "data".
spec_no_data = {
    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
    "mark": "point",
    "data": {
        "name": "data",
        "values": [],
    },
    "encoding": {
        "x": {"type": "quantitative", "field": "x"},
        "y": {"type": "quantitative", "field": "y"},
    },
}

# Same spec, pre-populated with five random (x, y) points tagged with a
# timestep t. Only the top-level "data" entry is replaced, so the shallow
# copy is safe here.
spec_with_data = dict(spec_no_data)
points = []
for step in range(5):
    points.append({"x": random.gauss(0, 1), "y": random.gauss(0, 1), "t": step})
spec_with_data["data"] = {"name": "data", "values": points}
# -
# # Static Vega Plot
from vega import VegaLite
# One-shot render of the pre-populated spec; this output does not update later.
VegaLite(spec=spec_with_data)
# # Vega Plot with dynamic updates
#
# The widget allows updating the plot after it has been displayed. To do so, the widget offers the `update` method that allows adding or removing data from the plot.
# +
from vega.widget import VegaWidget
widget = VegaWidget(spec=spec_no_data)
# Ten random points, all tagged with timestep t=0.
values = [
    dict(
        x=random.gauss(0.0, 1.0),
        y=random.gauss(0.0, 1.0),
        t=0,
    )
    for _ in range(10)
]
# Display the widget first, then stream the points into its 'data' source.
display(widget)
widget.update('data', insert=values)
# -
# The spec can be updated after the widget has been displayed. However, any
# data inserted via update is lost and needs to be re-inserted.
widget.spec = dict(spec_no_data, mark="line")
widget.update('data', insert=values)
# Similarly the options can be updated after the widget has been displayed.
# Again, any data inserted via update is lost and needs to be re-inserted.
widget.opt = {"theme": "dark"}
widget.update('data', insert=values)
print("the current spec / options")
print(widget.spec)
print(widget.opt)
# The VegaWidget can also be embedded into larger ipywidgets layout and use interactive features, such as buttons or similar interactive elements.
from ipywidgets import VBox, Label, Button
# +
plot = VegaWidget(spec=spec_with_data)
button = Button(description="Add new data point")
t = 5  # next timestep; the initial spec already holds points for t = 0..4
@button.on_click
def on_click_handler(*_):
    # Slide a 5-step window: insert one new random point and remove any point
    # older than t - 5.
    global t
    value = dict(
        x=random.gauss(0.0, 1.0),
        y=random.gauss(0.0, 1.0),
        t=t,
    )
    plot.update('data', insert=[value], remove=f'datum.t <= {t - 5}')
    t += 1
VBox([Label("Vega plot embedded in another widget"), plot, button])
# -
# # Errors
# +
from vega.widget import VegaWidget
widget = VegaWidget(spec=spec_no_data)
values = [
    dict(
        x=random.gauss(0.0, 1.0),
        y=random.gauss(0.0, 1.0),
        t=0,
    )
    for _ in range(10)
]
display(widget)
# Deliberately target a data source that is not defined in the spec, to
# demonstrate how the widget reports errors.
widget.update('non_existing', insert=values)
# -
| notebooks/Widget.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python3
# ---
# Goal of this notebook:
#
# - Implement the first approach to tracking connections between vertices over time:
#     - Each new connection is represented by a separate edge with a start and end time.
#     - When querying the graph with a specific timestamp, filter the edges based on the time and get the count. If count > 0, then a connection existed between the two vertices at this time.
# + tags=[]
# Jupyter notebook needs this or else it will crash: nest_asyncio lets
# gremlin-python's event loop run inside Jupyter's already-running loop.
import nest_asyncio
nest_asyncio.apply()
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.traversal import P # NEW!!! Import predicates (gt, gte, lt, lte, etc.)
# Instantiate a Gremlin Graph
graph = Graph()
# Connect to the server, instantiate traversal of graph.
g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin','g'))
# Sentinel 'end' time for connections that are still open: the maximum signed
# 64-bit integer, so any real timestamp compares strictly less than it.
end_placeholder = 2**63 - 1
# +
# Drop all vertices of the graph.
# g.V().drop().iterate()
# g.E().drop().iterate()
# +
# Add the vertices that are to be connected
# Radio frequency over fiber
g.addV().property('name', 'RFoF').next()
# Analog-Digital converter
g.addV().property('name', 'ADC').next()
# Optical fiber
g.addV().property('name', 'OF').next()
# -
def connected(name1: str, name2: str, time: float) -> bool:
    """
    Given two vertices labelled with <name1> and <name2>, determine whether they were connected at time <time>.
    Do so by sending a Gremlin query that counts 'connection' edges between the
    two vertices whose 'start' property is <= <time> and whose 'end' property is
    strictly greater than <time>. Open connections carry the end_placeholder
    sentinel as their 'end', so they satisfy the second condition too.
    TODO: Add sphinx documentation if this will be implemented into the actual Python library.
    """
    # Get the vertices associated with the names
    # v1, v2 = g.V().has('name', name1).next(), g.V().has('name', name2).next()
    # Return whether there are edges that:
    # - connect v1 and v2,
    # - labelled 'connection',
    # - have a start time that is less than or equal to <time>
    # - have an end time that is strictly greater than <time>
    return g.V().has('name', name1).bothE('connection').as_('e').bothV().has('name', name2).select('e').and_(
        __.has('start', P.lte(time)), # want start time to be less than or equal to <time>
        __.has('end', P.gt(time)) # end time must be strictly greater than <time>
    ).count().next() > 0

# Expected to print False on a fresh graph: only vertices have been added so
# far, no 'connection' edges exist yet.
print(connected('RFoF', 'ADC', 5))
def set_connection(name1: str, name2: str, time: float, connection: bool) -> None:
    """
    Given two vertices labelled with <name1> and <name2>, create a new connection or terminate their existing connection, based on the value of <connection>. Label with time <time>.
    TODO: Add sphinx documentation if this will be implemented into the actual Python library.
    """
    if connection:
        # Add an edge labelled 'connection' with a start time of <time> and the
        # end_placeholder sentinel as its end time. The not_() guard skips the
        # insert when a live connection already covers <time>, so no duplicate
        # edge is created.
        g.V().has('name', name1).as_("a").not_( # NEGATE
            __.bothE('connection').as_('e').bothV().has('name', name2).select('e').and_(
                __.has('start', P.lte(time)),
                __.has('end', P.gt(time))
            )
        ).V().has('name', name2).as_("b").addE('connection').from_("a").to("b").property('start', time).property('end', end_placeholder).iterate()
    else:
        # For all edges between v1 and v2 labelled 'connection' (there should only
        # be one) that still carry the end_placeholder sentinel as their 'end'
        # (i.e. are still open), close them by setting 'end' to <time>.
        g.V().has('name', name1).bothE('connection').as_('e').bothV().has('name', name2).select('e').has('end', end_placeholder).property('end', time).iterate()
# +
# Set some connections.
# (time, make/break) event streams per vertex pair: True opens a connection at
# that time, False closes the currently-open one.
rfof_adc = [(1, True), (3, False), (4, True), (5, False), (7, True)]
rfof_of = [(2, True), (3, False), (5, True), (6, False), (9, True)]
for (time, connection) in rfof_adc:
    set_connection(name1='RFoF', name2='ADC', time=time, connection=connection)
for (time, connection) in rfof_of:
    set_connection(name1='RFoF', name2='OF', time=time, connection=connection)
# +
# Go through each combination of element and see if they are connected at all times from 0 to 10.
import itertools
time_min = 0
time_max = 10
names = ['RFoF', 'ADC', 'OF']
combinations = list(itertools.combinations(names, 2))
for (name1, name2) in combinations:
    for time in range(time_min, time_max + 1):
        print(f"({name1}, {name2}) at {time}: {connected(name1, name2, time)}")
# +
# Benchmark: repeat the full sweep (every vertex pair, every timestep from 0 to
# 10) several times and report the mean wall-clock time per connected() call.
import itertools
import datetime
time_min = 0
time_max = 10
attempts = 5
names = ['RFoF', 'ADC', 'OF']
combinations = list(itertools.combinations(names, 2))
now = datetime.datetime.now()
for _ in range(attempts):
    for (name1, name2) in combinations:
        for time in range(time_min, time_max + 1):
            connected(name1, name2, time)
seconds = (datetime.datetime.now() - now).total_seconds()
print("Average time per call:", seconds / (attempts * (time_max + 1) * len(combinations)))