code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Measurements analysis
# +
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from mpl_toolkits.mplot3d import Axes3D
from calculations_annular_valve import MRF
# -
# ## Data collected
# ### Reading the .txt files
# +
# It takes 180 seconds
pressures = ["135kPa", "190kPa", "270kPa", "380kPa", "540kPa"]
currents = ["000mA", "050mA", "100mA", "150mA", "200mA", "250mA", "300mA", "350mA",
"400mA", "450mA", "500mA", "550mA", "600mA", "650mA", "700mA", "750mA",
"800mA", "850mA", "900mA", "950mA", "1000mA"]
measurements_types = ["V", "P"]
number_measures = 3
data = {}
# construction of the data architecture
for counter_P, P in enumerate(pressures):
data[P] = {}
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 and not (P == "540kPa" and I == "400mA"):
print("new I")
data[P][I] = {}
for m_type in measurements_types:
data[P][I][m_type] = [None]*number_measures
# Read the .txt files
for i in range(number_measures):
data[P][I][m_type][i] = np.loadtxt("Sofia_setup_20_12_2021/P_onoff_" + I + "_" + P + "_" + m_type + "_0" + str(i+1) + ".txt",
delimiter=",", skiprows=5)
# -
# ### Plotting the data
# +
# Conversion functions
def conv_P(P):
    """Convert a raw pressure-sensor reading to pascals (affine calibration)."""
    # Calibration constants: gain 202 500 Pa per raw unit, offset -203 200 Pa.
    gain = 202_500
    offset = 203_200
    return P * gain - offset
def conv_V(V):
    """Convert a raw induced-voltage reading to volts."""
    # Remove the sensor offset, then apply the 1/500 gain.
    shifted = V + 0.897
    return shifted / 500
def integ_V(V, t):
    """Cumulatively integrate signal V over time base t (left Riemann sum).

    The (assumed uniform) sample step is estimated from the first ten
    intervals of t.  Returns a list of len(t) running-integral values,
    starting at 0.
    """
    dt = (t[10] - t[0]) / 10
    running = [0]
    for idx in range(len(t) - 1):
        running.append(running[-1] + V[idx] * dt)
    return running
def recover_numbers(string):
    """Return the leading integer of a measurement label such as "135kPa".

    The previous implementation took a fixed 3-character prefix
    (``int(string[:3])``), which silently truncated four-digit labels:
    "1000mA" became 100 instead of 1000, corrupting both the plotted
    x-values and the derived current ``recover_numbers(I)/1000``.
    """
    digits = ""
    for ch in string:
        if not ch.isdigit():
            break
        digits += ch
    return int(digits)
# Filter functions
def gauss_func(f, freq_cut):
    """Gaussian low-pass weight: 1 at f = 0, decaying with scale freq_cut."""
    ratio = f / freq_cut
    return np.exp(-0.5 * ratio * ratio)
def filter_data(X, t, freq_cut):
    """Gaussian low-pass filter of signal X sampled at times t.

    Works in the frequency domain: FFT the signal, weight each bin by a
    Gaussian centred at 0 Hz with width freq_cut, then inverse-FFT and
    keep the real part.
    """
    spectrum = np.fft.fft(X)
    # fftfreq returns cycles/sample; scale by the sample rate to get Hz.
    freqs = np.fft.fftfreq(t.shape[-1]) * (1 / (t[1] - t[0]))
    weighted = gauss_func(freqs, freq_cut) * spectrum
    return np.fft.ifft(weighted).real
def filter_data_pass_high(X, t, freq_cut):
    """Gaussian high-pass filter of signal X sampled at times t.

    Complement of ``filter_data``: each FFT bin is weighted by
    (1 - Gaussian), so the DC component is removed and high frequencies
    pass through unchanged.
    """
    spectrum = np.fft.fft(X)
    # fftfreq returns cycles/sample; scale by the sample rate to get Hz.
    freqs = np.fft.fftfreq(t.shape[-1]) * (1 / (t[1] - t[0]))
    weighted = (1 - gauss_func(freqs, freq_cut)) * spectrum
    return np.fft.ifft(weighted).real
def AC_conversion(X, t):
    """AC-couple signal X: a first-order leaky differentiator.

    Recurrence: y[0] = X[0]; y[k] = r*y[k-1] + (X[k] - X[k-1]), where the
    decay r is set by a fixed 2 Hz cutoff and the sample step estimated
    from the first ten intervals of t.
    """
    cutoff_freq = 2
    dt = (t[10] - t[0]) / 10
    decay = 1 / (2 ** (dt * cutoff_freq))
    out = np.zeros(len(X))
    out[0] = X[0]
    for k in range(1, len(X)):
        out[k] = decay * out[k - 1] + (X[k] - X[k - 1])
    return out
# Plotting function
def plot_data(V, P, t, labels=("Induced voltage [V]", "Differential pressure [Pa]"), t_char=None):
    """Plot V and P against t on twin y-axes and show the figure.

    V is drawn on the left axis (midnight blue), P on the right axis
    (crimson); each axis' ticks and label are coloured to match its curve.

    Parameters
    ----------
    V, P : array-like
        Signals for the left and right axes respectively.
    t : array-like
        Common time base [s].
    labels : sequence of two str
        Y-axis labels for V and P.  (Was a mutable list default; now an
        immutable tuple — indexing behaviour is unchanged.)
    t_char : list or None
        Optional characteristic times; if a list, vertical black marker
        lines spanning the range of P are drawn at those times.
    """
    fig, ax_left = plt.subplots(1, 1, figsize=[10, 6])
    ax_right = ax_left.twinx()
    series = ((ax_left, 'midnightblue', V, labels[0]),
              (ax_right, 'crimson', P, labels[1]))
    for axis, color, signal, label in series:
        axis.plot(t, signal, color=color)
        axis.set_ylabel(label)
        axis.tick_params(axis='y', colors=color)
    if isinstance(t_char, list):
        # Mark the characteristic times on the pressure axis.
        ax_right.vlines(t_char, np.amin(P), np.amax(P), colors="black")
    ax_left.set_xlabel("time [s]")
    plt.show()
# Getting metrics
def integ_metric(V, P, t, sigma_t):
    """Integral of the filtered voltage around the two pressure transitions.

    Low-pass filters V (200 Hz cutoff) and P (5 Hz cutoff), locates the
    steepest rise and fall of the filtered pressure, and integrates the
    filtered voltage over a +/- sigma_t window around each.
    Returns (integral_at_rise, integral_at_fall) in volt-seconds.
    """
    dt = (t[10] - t[0]) / 10
    half_width = int(sigma_t / dt)
    v_smooth = filter_data(V, t, 200)
    p_smooth = filter_data(P, t, 5)
    slope = np.gradient(p_smooth)
    rise = np.argmax(slope)
    fall = np.argmin(slope)
    return (np.sum(v_smooth[rise - half_width:rise + half_width]) * dt,
            np.sum(v_smooth[fall - half_width:fall + half_width]) * dt)
def var_metric(V, P, t, sigma_t):
    """RMS of the mean-removed, filtered voltage around the pressure transitions.

    Same windowing as ``integ_metric``, but returns the root-mean-square of
    the (zero-mean) filtered voltage in each window instead of its integral.
    Returns (rms_at_rise, rms_at_fall).
    """
    dt = (t[10] - t[0]) / 10
    half_width = int(sigma_t / dt)
    v_smooth = filter_data(V, t, 200)
    v_smooth = v_smooth - np.mean(v_smooth)
    p_smooth = filter_data(P, t, 5)
    slope = np.gradient(p_smooth)
    rise = np.argmax(slope)
    fall = np.argmin(slope)
    return (np.mean(v_smooth[rise - half_width:rise + half_width] ** 2) ** 0.5,
            np.mean(v_smooth[fall - half_width:fall + half_width] ** 2) ** 0.5)
# +
# %matplotlib qt
P_string = "135kPa"
I_string = "100mA"
t = data[P_string][I_string]["V"][0][:, 0]
V = conv_V(data[P_string][I_string]["V"][0][:, 1])
V_f = filter_data(V, t, 200)
V_f_AC = AC_conversion(V_f, t)
V_f_integ = integ_V(V_f, t)
V_f_integ_AC = integ_V(V_f_AC, t)
P = conv_P(data[P_string][I_string]["P"][0][:, 1])
P = P - 34458
P_f = filter_data(P, t, 15)
t1 = np.argmax(np.gradient(P_f))
t2 = np.argmin(np.gradient(P_f))
print("t1 : ", t[t1], " t2 : ", t[t1])
#plot_data(V, P, t)
#plot_data(V, P, t)
plot_data(V_f, P_f, t, t_char=[t[t1], t[t2]])
#plot_data(V_f_AC, P_f, t)
#plot_data(V_f_integ, P_f, t)
#plot_data(V_f_integ_AC, P_f, t)
# -
np.amin(P_f)
# ### Calculating the metrics
# +
# It takes 90 seconds
pressures = ["135kPa", "190kPa", "270kPa", "380kPa"] # , "540kPa"
currents = ["000mA", "050mA", "100mA", "150mA", "200mA", "250mA", "300mA", "350mA",
"400mA", "450mA", "500mA", "550mA", "600mA", "650mA", "700mA", "750mA",
"800mA", "850mA", "900mA", "950mA", "1000mA"]
measurements_types = ["V", "P"]
number_measures = 3
metrics_types = ["integ", "var"]
data_metrics = {}
# construction of the data architecture
for counter_P, P in enumerate(pressures):
data_metrics[P] = {}
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 and not (P == "135kPa" and I == "000mA"):
# print(I)
# data_metrics[P][I] = [{}]*number_measures
data_metrics[P][I] = {}
for n in range(number_measures):
data_metrics[P][I][str(n)] = {}
for m_type in metrics_types:
if m_type == "var":
if P == "540kPa":
data_metrics[P][I][str(n)][m_type] = var_metric(data[P][I]["V"][n][:, 1]/500,
conv_P(data[P][I]["P"][n][:, 1]),
data[P][I]["V"][n][:, 0],
0.1)
else:
data_metrics[P][I][str(n)][m_type] = var_metric(conv_V(data[P][I]["V"][n][:, 1]),
conv_P(data[P][I]["P"][n][:, 1]),
data[P][I]["V"][n][:, 0],
0.1)
if m_type == "integ":
if P == "540kPa":
data_metrics[P][I][str(n)][m_type] = integ_metric(data[P][I]["V"][n][:, 1]/500,
conv_P(data[P][I]["P"][n][:, 1]),
data[P][I]["V"][n][:, 0],
0.1)
else:
data_metrics[P][I][str(n)][m_type] = integ_metric(conv_V(data[P][I]["V"][n][:, 1]),
conv_P(data[P][I]["P"][n][:, 1]),
data[P][I]["V"][n][:, 0],
0.1)
# -
# ### Plotting the 3D plot of P and I
# %matplotlib qt
# +
# NORMAL GRAPHS
m_type = "integ"
number_measures = 3
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))), name) for name, color in mcolors.TABLEAU_COLORS.items())
names = [name for hsv, name in by_hsv]
fig, ax = plt.subplots(1, 1, figsize=[10, 6])
# construction of the data architecture
for counter_P, P in enumerate(pressures):
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 and not (P == "135kPa" and I == "000mA"):
for n in range(number_measures):
line = ax.scatter(recover_numbers(I), -1000*data_metrics[P][I][str(n)][m_type][1], c=names[counter_P], alpha=0.7)
if counter_I == counter_P*2 + 1 and n == 0:
line.set_label(P)
ax.legend()
#ax.set_ylim([-0.001, 0.010])
ax.set_xlabel('Current [mA]')
ax.set_ylabel('Integral of the induced voltage [mVs]')
plt.show()
# +
# R VALUE GRAPHS
m_type = "integ"
number_measures = 3
Ns = 174
Ne = 135
g = 0.0005
S = 0.00011
mu0 = 0.000001257
phi = 0.33
alph = 2*g/(Ns*Ne*mu0*S*phi)
r0 = 0.938
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))), name) for name, color in mcolors.TABLEAU_COLORS.items())
names = [name for hsv, name in by_hsv]
fig, ax = plt.subplots(1, 1, figsize=[10, 6])
# construction of the data architecture
for counter_P, P in enumerate(pressures):
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 and not (P == "135kPa" and I == "000mA"):
for n in range(number_measures):
i = recover_numbers(I)/1000
IV = data_metrics[P][I][str(n)][m_type][1]
line = ax.scatter(recover_numbers(I),
1 - (1-r0) / (1-(1-r0)*alph*IV/i),
c=names[counter_P], alpha=0.7)
if counter_I == counter_P*2 + 1 and n == 0:
line.set_label(P)
ax.legend()
#ax.set_ylim([0.93794, 0.93802])
ax.set_xlabel('Current [mA]')
ax.set_ylabel('r value [-]')
plt.show()
# +
m_type = "integ"
number_measures = 3
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))), name) for name, color in mcolors.TABLEAU_COLORS.items())
names = [name for hsv, name in by_hsv]
fig, ax = plt.subplots(1, 1, figsize=[10, 6])
# construction of the data architecture
for counter_P, P in enumerate(pressures):
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 and I == "500mA":
for n in range(number_measures):
line = ax.scatter(recover_numbers(P), 1000*np.abs(data_metrics[P][I][str(n)][m_type][1]), c=names[counter_I%8], alpha=0.7)
ax.legend()
#ax.set_ylim([-0.001, 0.010])
ax.set_xlabel('Current [mA]')
ax.set_ylabel('Integral of the induced voltage [mVs]')
plt.show()
# + [markdown] tags=[]
# ## Reproduction of the results with model based simulations
# -
# ### Single use
def interpolation_20(X):
    """Upsample X by a factor of 20 with linear interpolation.

    Each pair of consecutive input samples is bridged by 20 linearly
    spaced output samples; the final 20 output samples repeat the last
    interpolated value (there is no sample beyond X[-1] to interpolate
    towards).

    The output length was previously hard-coded to 300_000, which is only
    correct for a 15_000-sample input; it is now 20 * len(X), identical
    for the original use case but valid for any input length.
    """
    factor = 20
    nb_step = factor * len(X)
    ret = np.zeros(nb_step)
    for i in range(len(X) - 1):
        for j in range(factor):
            ret[i * factor + j] = X[i] * (factor - j) / factor + X[i + 1] * j / factor
    # Pad the tail with the last interpolated value.
    for i in range(factor):
        ret[-(i + 1)] = ret[-(factor + 1)]
    return ret
# +
nb_step = 300_000
# Constant current
i_max = 0.5/2 # because double gap
i = np.ones(nb_step)*i_max
t = data["270kPa"]["200mA"]["P"][0][:, 0]
V = conv_V(data["270kPa"]["200mA"]["V"][0][:, 1])
V_f = filter_data(V, t, 200)
P = conv_P(data["270kPa"]["200mA"]["P"][0][:, 1])
P_f = filter_data(P, t, 5)
dP_f = P_f - P_f[2500]
dP_f = np.abs(dP_f[2500:17500])
dP_f_resample = interpolation_20(dP_f)
# +
analysis = MRF(step_t=1*10**(-6), nb_step=nb_step,
fi=0.302, d=math.pi*2*(0.0069 + 0.00025), g=0.0005, length=0.0025,
N_e=135.5, N_s=174.5, b=1, # d_eff_disp=0.0001,
i=i, dP=dP_f_resample)
analysis.run()
print("Yield stress pressure drop : ", analysis.tau[0])
print("H : ", analysis.H[0], " B : ", analysis.H[0]*1.25*10**(-6))
# +
t_sim = np.linspace(0, 1, analysis.nb_step)
t_sim = t_sim * (analysis.nb_step - 1) * analysis.step_t
fig, ax = plt.subplots(1, 1, figsize=[7, 4])
ax.plot(t_sim, analysis.v)
ax.plot(t, V_f*10)
ax.set_ylabel("Induced voltage [V]")
ax.set_xlabel("time [s]")
# -
# ### All data simulation
# +
# It takes 90 seconds
pressures = ["135kPa", "190kPa", "270kPa", "380kPa", "540kPa"]
currents = ["000mA", "050mA", "100mA", "150mA", "200mA", "250mA", "300mA", "350mA",
"400mA", "450mA", "500mA", "550mA", "600mA", "650mA", "700mA", "750mA",
"800mA", "850mA", "900mA", "950mA", "1000mA"]
measurements_types = ["V", "P"]
data_sim = {}
nb_step = 300_000
# construction of the data architecture
for counter_P, P in enumerate(pressures):
data_sim[P] = {}
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 \
and not (P == "540kPa" and I == "400mA") and not (P == "135kPa" and I == "000mA"):
print("Current current : ", I)
i_max = recover_numbers(I)/2000 # because double gap and mA
i = np.ones(nb_step)*i_max
t = data[P][I]["P"][0][:, 0]
P_signal = conv_P(data[P][I]["P"][0][:, 1])
P_signal_f = filter_data(P_signal, t, 5)
dP_f = P_signal_f - P_signal_f[2500]
dP_f = np.abs(dP_f[2500:17500])
dP_f_resample = interpolation_20(dP_f)
analysis = MRF(step_t=1*10**(-6), nb_step=nb_step,
fi=0.302, d=math.pi*2*(0.0069 + 0.00025), g=0.0005, length=0.0025,
N_e=135.5, N_s=174.5, b=0.1, # d_eff_disp=0.0001,
i=i, dP=dP_f_resample)
analysis.run()
t_sim = np.linspace(0, 1, analysis.nb_step)
t_sim = t_sim * (analysis.nb_step - 1) * analysis.step_t
print(P, I)
data_sim[P][I] = (t_sim, analysis.v)
# -
# %matplotlib qt
# +
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))), name) for name, color in mcolors.TABLEAU_COLORS.items())
names = [name for hsv, name in by_hsv]
fig, ax = plt.subplots(1, 1, figsize=[10, 6])
# construction of the data architecture
for counter_P, P in enumerate(pressures):
for counter_I ,I in enumerate(currents):
if counter_I >= counter_P*2 and counter_I <= 10 + counter_P*2 \
and not (P == "540kPa" and I == "400mA") and not (P == "135kPa" and I == "000mA"):
# print(recover_numbers(I))
line = ax.scatter(recover_numbers(I), np.sum(data_sim[P][I][1])/10**6, c=names[counter_P], alpha=0.3)
if counter_I == counter_P*2 + 1:
line.set_label(P)
ax.legend()
ax.set_xlabel('Current [mA]')
ax.set_ylabel('metric [Vs]')
plt.show()
# -
plt.plot(data_sim["540kPa"]["700mA"][1])
# ## Test impact of auto induction
# +
def L_evolution(t):
    """Inductance [H] over time: constant, then a linear ramp, then constant.

    Returns 0.001 H for t < 0.25, ramps linearly to 0.003 H at t = 0.75,
    and stays at 0.003 H afterwards (the curve is continuous at both
    breakpoints).
    """
    base = 0.001
    factor = 3
    if t < 0.25:
        return base
    if t > 0.75:
        return base * factor
    # Linear interpolation between base (at t=0.25) and base*factor (at t=0.75).
    return base + (t - 0.25) * 2 * base * (factor - 1)
N = 1000
dt = 1/N
V = np.ones(N)
t = np.linspace(0, 1, N)
R = 4
L = np.array(list(map(L_evolution, t)))
V_ind = np.zeros(N)
V_ind_approx = np.zeros(N)
i = np.zeros(N)
i_cte = V[0]/R
i[0] = V[0]/R
# -
for step in range(1, N):
i[step] = (V[step] + L[step-1]*i[step-1]/dt) / (R + L[step]/dt)
V_ind[step] = (L[step]*i[step] - L[step-1]*i[step-1])/dt
V_ind_approx[step] = (L[step]*i_cte - L[step-1]*i_cte)/dt
plt.plot(V_ind)
plt.plot(V_ind_approx)
# ### Figure for report
| final_measurements_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fetching Twitter and Reddit data
#
# This notebook represents the process followed to fetch Twitter and Reddit data.
BASE_PATH = 'C:/Users/crisp/Projects/Columbia/SEM_1/BDA/Final_Project'
# ### Importing the required custom modules
#
import sys
sys.path.append(BASE_PATH+'/master_code_lib')
from data_fetching import date_range, get_twitter_data, get_reddit_data
# ### Set Date Range
start_date_list = date_range('2020-01-01', '2020-01-05')
end_date_list = date_range('2020-01-02', '2020-01-06')
# Set a path to store the fetched data i.e. data_path
# ### Fetching Twitter Data
#
# Provide company name and number of API calls per data required
twitter_data_df = get_twitter_data(company_name, start_date_list, end_date_list)
# ### Fetching Reddit Data
#
# Provide company name and number of API calls per data required
#
#
reddit_data_df = get_reddit_data(company_name, start_date_list, end_date_list)
| stock_price_analysis/Data_Fetching/Data_fetching_twitter_reddit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Тестирование производительности OLAP-хранилищ Clickhouse и Vertica
#
# В рамках исследования были рассмотрены Clickhouse и Vertica.
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Запуск
# + pycharm={"name": "#%%\n"}
# !docker-compose up -d
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Загрузка тестовых данных
#
# Структура тестовой таблицы
#
#
# | ----------- | ----------- |
# | id | UUID |
# | user_id | UUID |
# | movie_id | UUID |
# | viewed_frame | Int |
# + pycharm={"name": "#%%\n"}
# !export PYTHONPATH="${PYTHONPATH}:${PWD}/../.."
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Загрузка тестовых данных в Clickhouse
# + pycharm={"name": "#%%\n"}
from multiprocessing import Pool
import tqdm as tqdm
from clickhouse_driver import Client
from utils.fake_data_gen import Row, generate_fake_data
from config import CLICKHOUSE_HOST, NUMBER_OF_BATCHES, UPLOAD_BATCH_SIZE
client = Client(CLICKHOUSE_HOST)
def upload_batch(batch):
    """Insert one batch of Row tuples into the Clickhouse ``views`` table."""
    column_list = ', '.join(Row._fields)
    insert_sql = f'INSERT INTO views ({column_list}) VALUES'
    client.execute(insert_sql, batch)
# + pycharm={"name": "#%%\n"}
test_data = generate_fake_data(UPLOAD_BATCH_SIZE, NUMBER_OF_BATCHES)
with Pool() as pool:
r = list(tqdm.tqdm(
pool.imap(upload_batch, test_data),
total=NUMBER_OF_BATCHES
))
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Загрузка тестовых данных в Vertica
# + pycharm={"name": "#%%\n"}
from multiprocessing import Pool
import tqdm
import vertica_python
from utils.fake_data_gen import Row, generate_fake_data
from config import NUMBER_OF_BATCHES, UPLOAD_BATCH_SIZE, VERTICA_CONNECTION_PARAMS
def upload_batch(batch):
    """Insert one batch of Row tuples into the Vertica ``views`` table.

    Opens a fresh connection per call so the function can safely be used
    from multiprocessing worker processes.
    """
    column_list = ', '.join(Row._fields)
    placeholder_list = ', '.join(['%s'] * len(Row._fields))
    insert_sql = f'INSERT INTO views ({column_list}) VALUES ({placeholder_list})'
    with vertica_python.connect(**VERTICA_CONNECTION_PARAMS) as connection:
        cursor = connection.cursor()
        cursor.executemany(insert_sql, batch)
# + pycharm={"name": "#%%\n"}
test_data = generate_fake_data(UPLOAD_BATCH_SIZE, NUMBER_OF_BATCHES)
with Pool() as pool:
r = list(tqdm.tqdm(
pool.imap(upload_batch, test_data),
total=NUMBER_OF_BATCHES
))
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Выполнение тестовых запросов
# + [markdown] pycharm={"name": "#%% md\n"}
# Использовались следующие тестовые запросы:
# + pycharm={"name": "#%%\n"}
from utils.test_queries import QUERIES
for name, query in QUERIES.items():
print(f'{name}')
print(f'{query}')
print("----------")
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Тестирование Clickhouse
# + pycharm={"name": "#%%\n"}
from clickhouse_driver import Client
from utils.test_queries import QUERIES
from utils.timer import timer
from config import BENCHMARK_ITERATIONS, CLICKHOUSE_HOST
client = Client(CLICKHOUSE_HOST)
@timer(BENCHMARK_ITERATIONS)
def execute_query(query: str):
    """Execute *query* on Clickhouse.

    Wrapped by @timer — presumably repeats and times the call
    BENCHMARK_ITERATIONS times (see utils.timer; confirm its semantics).
    """
    client.execute(query)
# + pycharm={"name": "#%%\n"}
for name, query in QUERIES.items():
print(f'{name}')
execute_query(query)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Тестирование Vertica
# + pycharm={"name": "#%%\n"}
import vertica_python
from utils.test_queries import QUERIES
from utils.timer import timer
from config import BENCHMARK_ITERATIONS, VERTICA_CONNECTION_PARAMS
@timer(BENCHMARK_ITERATIONS)
def execute_query(query: str):
    """Execute *query* on Vertica over a fresh connection.

    Wrapped by @timer — presumably repeats and times the call
    BENCHMARK_ITERATIONS times (see utils.timer; confirm its semantics).
    Note: a new connection is opened on every timed call, so connection
    setup is included in the measured time.
    """
    with vertica_python.connect(**VERTICA_CONNECTION_PARAMS) as connection:
        cursor = connection.cursor()
        cursor.execute(query)
# + pycharm={"name": "#%%\n"}
for name, query in QUERIES.items():
print(f'{name}')
execute_query(query)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Вывод
#
# Clickhouse показывает **лучшую производительность** во всех проведенных тестах!
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Остановка
# + pycharm={"name": "#%%\n"}
# !docker-compose down -v
# + pycharm={"name": "#%%\n"}
| services/movies_ugc/benchmarks/olap/BENCHMARKS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Activity 3: Analyzing Airbnb Data with geoplotlib
# In this last activity for geoplotlib, we will use airbnb listing data to determine the most expensive and best-rated regions of accommodations in the New York area.
# We will write a custom layer with which we can switch between the price and the review score of each accommodation.
#
# In the end, we will be able to see the hotspots for the most expensive and best-rated accommodations across New York.
# In theory, we should see a price increase the closer we get to the center of Manhattan. It will be very interesting to see if the ratings for the given accommodations also increase the closer we get to the center of Manhattan.
# #### Loading the dataset
# importing the necessary dependencies
import numpy as np
import pandas as pd
import geoplotlib
# **Note:**
# If your system is a little bit slower, just use `./data/airbnb_new_york_smaller.csv` which has fewer datapoints. The activity stays the same, we just cut down on the number of datapoints.
# loading the Dataset
dataset = pd.read_csv('../../datasets/airbnb_new_york_smaller.csv')
# dataset = pd.read_csv('./data/airbnb_new_york_smaller.csv')
# **Note:**
# If we import our dataset without defining the `dtypes` specifically - like we did in the chapter about geoplotlib - we will get a warning telling us that it has a mixed datatype.
# We can get rid of this warning by explicitly defining the type of the values in this column by using the `dtype` parameter.
# We will ignore this since we are only using a small subset of the data in this dataset.
# Normally you want to define the `dtypes` of each column of the used dataset to avoid errors later on.
# print the first 5 rows of the dataset
dataset.head()
# ---
# ### Data handling
# Before we start plotting our data, we want to *wrangle* our data to fit our needs.
# As with all the previous geoplitlib exercises and activites, we have to map the `latitude` and `longitude` columns to `lat` and `lon`.
#
# Considering the fact, that there might be some missing data points in the `review_scores_rating` and `price` columns, we want to fill them in with data of the same datatype.
# > This is where you would want to apply some data augmentation in real projects.
#
# The last step of our pre-processing is to create a sub-section view of our dataset that is much easier to handle and will be used for plotting.
# #### Mapping `Latitude` and `Longitude` to `lat` and `lon`
# Again, our dataset has a `latitude` and a `longitude` column.
# As we've already discussed in the lesson about geoplotlib, we need them as `lat` and `lon`.
# mapping Latitude to lat and Longitude to lon
dataset['lat'] = dataset['latitude']
dataset['lon'] = dataset['longitude']
# **Note:**
# Geoplotlib's methods expect dataset columns `lat` and `lon` for plotting. This means your dataframe has to be transformed to resemble this structure.
# #### Mapping `price` to `dollar_price` as int type
# When creating a color map that changes color based on the price of an accommodation, we need a value that can easily be compared and checked whether it's smaller or bigger than any other listing.
# Therefore we will create a new column called `dollar_price` that will hold the value of the `price` column as a float.
# convert string of type $<numbers> to <numbers> of type float
def convert_to_float(x):
    """Convert a price string like "$1,234.56" to a float (1234.56).

    Strips the leading currency symbol and thousands separators, and
    returns 0.0 for anything that cannot be parsed — matching the
    previous best-effort behaviour, but without a bare ``except:`` that
    would also swallow KeyboardInterrupt/SystemExit.
    """
    try:
        value = str.replace(x[1:], ',', '')
        return float(value)
    except (TypeError, ValueError, IndexError):
        # Non-string input, empty/garbled string, etc. -> treat as free.
        return 0.0
# +
# create new dollar_price column with the price as a number
# and replace the NaN values by 0 in the ratings column
dataset['price'] = dataset['price'].fillna('$0.0')
dataset['review_scores_rating'] = dataset['review_scores_rating'].fillna(0.0)
dataset['dollar_price'] = dataset['price'].apply(lambda x: convert_to_float(x))
# -
# #### Reducing the amount of columns
# This dataset has 96 columns. When working with such a huge dataset it makes sense to think about what data we really need and create a subsection of our dataset that only holds the data we need.
# Before we can do that, we'll take a look at all the columns available and an example for that column. This will help us decide what information is suitable.
# print the col name and the first entry per column
for col in dataset.columns:
print('{}\t{}'.format(col, dataset[col][0]))
# For now, we want to **only use the fields that help us build the described visualization**.
#
# Those fields are:
# - **id**
# - **latitude (as lat)**
# - **longitude (as lon)**
# - **price (in $)**
# - **review_scores_rating**
# create a subsection of the dataset with the above mentioned columns
columns=['id', 'lat', 'lon', 'dollar_price', 'review_scores_rating']
sub_data=dataset[columns]
# print the first 5 rows of the dataset
sub_data.head()
# **We are now left with only 5 of the 96 columns.**
# ---
# #### Understanding the spatial features of our dataset
# Even though we know that our data holds airbnb listings for New York city, at the moment we have no feeling about the amount, distribution, and character of our dataset.
# The simplest way to get a first glance at the data is to plot every listing with a simple dot map.
# import DataAccessObject and create a data object as an instance of that class
from geoplotlib.utils import DataAccessObject
data = DataAccessObject(sub_data)
# plotting the whole dataset with dots
geoplotlib.dot(data)
geoplotlib.show()
# This gives us a better understanding about the distribution and character of our data.
# ---
# ### Writing the custom layer to map the price and rating to a color
# The last step is to write the custom layer. Here we want to define a `ValueLayer` that extends the `BaseLayer` of geoplotlib.
# For the mentioned interactive feature we need an additional import. `pyglet` provides us with the option to act on key presses.
#
# Given the data, we want to plot each point on the map with a color that is defined by the currently selected attribute, either price or rating.
# To avoid non-descriptive output, we need to also adjust the scale of our color map. Ratings are between 0 and 100, whereas prices can be much higher. Using a linear (`lin`) scale for the ratings and a logarithmic ('log') scale for the price will give us much better insights into our data.
# The view (bounding box) of our visualization will be set to New York and a text information with the currently selected attribute will be displayed in the upper right corner.
# <img src="./assets/colorscale.png" width=500/>
# > The jet color map displays low values as cooler tones and higher values as hotter.
# In order to assign each point a different color, we simply paint each point separately. This is definitely not the most efficient solution, but it will suffice for now.
# We will need the following instance variables:
# - self.data that holds the dataset
# - self.display that holds the currently selected attribute name
# - self.painter holds an instance of the BatchPainter class
# - self.view holds the BoundingBox
# - self.cmap holds a color map with the `jet` color scheme, alpha of 255 and 100 levels
#
# Inside the `invalidate` method, which holds the logic of projecting the data to points on the map, we have to switch between the `lin` and `log` scales, depending on the attribute that is currently selected.
# The color is then determined by "placing" the value between 0/1 and the maximum (`max_val`) value, which also has to be taken from the dataset based on which attribute is currently displayed.
# +
# custom layer creation
import pyglet
import geoplotlib
from geoplotlib.layers import BaseLayer
from geoplotlib.core import BatchPainter
from geoplotlib.colors import ColorMap
from geoplotlib.utils import BoundingBox
class ValueLayer(BaseLayer):
    """Geoplotlib layer colouring each listing by price or rating.

    The left/right arrow keys toggle the displayed attribute between
    'dollar_price' (logarithmic colour scale, since prices span orders of
    magnitude) and 'review_scores_rating' (linear scale, 0-100).
    """

    def __init__(self, dataset, bbox=BoundingBox.WORLD):
        # initialize instance variables
        self.data = dataset                # DataAccessObject with id/lat/lon/price/rating
        self.display = 'dollar_price'      # attribute currently mapped to colour
        self.painter = BatchPainter()
        self.view = bbox                   # initial bounding box of the view
        self.cmap = ColorMap('jet', alpha=255, levels=100)

    def invalidate(self, proj):
        # paint every point with a color that represents the currently
        # selected attribute's value
        self.painter = BatchPainter()
        max_val = max(self.data[self.display])
        scale = 'log' if self.display == 'dollar_price' else 'lin'
        for index, id in enumerate(self.data['id']):
            # log scale can't start at 0, must be 1
            min_val = max(self.data[self.display][index], 1)
            color = self.cmap.to_color(min_val, max_val, scale)
            self.painter.set_color(color)
            # FIX: the original bound these values to the names `lat, lon`
            # in swapped order; the call order (lon first) was already what
            # lonlat_to_screen expects, so only the misleading names changed.
            lon, lat = self.data['lon'][index], self.data['lat'][index]
            x, y = proj.lonlat_to_screen(lon, lat)
            self.painter.points(x, y, 5)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        # display the currently selected attribute in the UI info bar
        ui_manager.info('Use left and right to switch between the displaying of price and ratings. Currently displaying: {}'.format(self.display))
        self.painter.batch_draw()

    def on_key_release(self, key, modifiers):
        # left/right arrow toggles between the two displayable attributes
        if key == pyglet.window.key.LEFT or key == pyglet.window.key.RIGHT:
            self.display = 'dollar_price' if self.display != 'dollar_price' else 'review_scores_rating'
            return True
        return False

    def bbox(self):
        # bounding box that gets used when the layer is created
        return self.view
# -
# Since our dataset only contains data from New York, we want to set the view to New York in the beginning.
# Therefore we need an instance of the `BoundingBox` class with the given parameters.
#
# In addition to a custom `BoundingBox`, we will use the `darkmatter` tile provider we have looked at in lesson 5.
# +
# bounding box for our view on New York
from geoplotlib.utils import BoundingBox
ny_bbox = BoundingBox(north=40.897994, west=-73.999040, south=40.595581, east=-73.95040)
# -
# displaying our custom layer using add_layer
geoplotlib.tiles_provider('darkmatter')
geoplotlib.add_layer(ValueLayer(data, bbox=ny_bbox))
geoplotlib.show()
| Chapter07/Activity7.03/Activity7.03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from sklearn import cross_validation, datasets
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
def write_answer(filename, answer):
    """Write str(answer) to *filename*, overwriting any existing content."""
    with open(filename, "w") as out_file:
        out_file.write(str(answer))
# +
digits = datasets.load_digits()
breast_cancer = datasets.load_breast_cancer()
print(digits.data[:1]) # Целые числа
print(breast_cancer.data[:1]) # Вещественные числа
print
for ds, ds_name in [(digits, "digits"), (breast_cancer, "breast cancer")]:
X = ds.data
y = ds.target
print(ds_name)
for estimator in [BernoulliNB(), MultinomialNB(), GaussianNB()]:
scoring = cross_validation.cross_val_score(estimator, X, y)
print("%s\t%0.8f" % (type(estimator).__name__, scoring.mean()))
print
# -
write_answer("naive_bayes_a1.txt", 0.93674928)
write_answer("naive_bayes_a2.txt", 0.87087715)
write_answer("naive_bayes_a3.txt", "3 4")
| C2W5/naive_bayes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
from bigdl.util.common import *
from image import *
import cv2
import numpy as np
from IPython.display import Image, display
JavaCreator.set_creator_class("com.intel.analytics.zoo.transform.vision.pythonapi.PythonVisionTransform")
init_engine()
# +
test_image = '/home/jxy/data/testdata/test/000019.jpg'
def create_image_feature(path):
    """Load the image at *path* with OpenCV and wrap it in an ImageFeature."""
    loaded = cv2.imread(path)
    return ImageFeature(loaded)
def transform_display(transformer, image_feature):
    """Apply *transformer* to *image_feature* and display the result inline.

    Side effect: writes the transformed image to /tmp/tmp.jpg (overwritten
    on every call) so that IPython's Image display can read it back from disk.
    """
    out = transformer.transform(image_feature)
    # to_chw=False keeps HWC layout, which is what cv2.imwrite expects
    cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False))
    display(Image(filename='/tmp/tmp.jpg'))
# -
image_feature = create_image_feature(test_image)
color = ColorJitter(random_order_prob=1.0, shuffle=True)
transform_display(color, image_feature)
image_feature = create_image_feature(test_image)
resize = Resize(200, 200, 1)
transform_display(resize, image_feature)
# +
# Normalized Roi
image_feature = create_image_feature(test_image)
crop = FixedCrop(0.0, 0.0, 0.5, 1.0)
transform_display(crop, image_feature)
# Non-normalized Roi
image_feature = create_image_feature(test_image)
crop = FixedCrop(0.0, 0.0, 200.0, 200., False)
transform_display(crop, image_feature)
# -
image_feature = create_image_feature(test_image)
crop = CenterCrop(200, 200)
transform_display(crop, image_feature)
image_feature = create_image_feature(test_image)
crop = RandomCrop(200, 200)
transform_display(crop, image_feature)
image_feature = create_image_feature(test_image)
expand = Expand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
transform_display(expand, image_feature)
image_feature = create_image_feature(test_image)
transformer = HFlip()
transform_display(transformer, image_feature)
transformer = Pipeline([ColorJitter(), HFlip(), Resize(200, 200, 1)])
image_feature = create_image_feature(test_image)
transform_display(transformer, image_feature)
transformer = RandomTransformer(HFlip(), 0.5)
image_feature = create_image_feature(test_image)
transform_display(transformer, image_feature)
image_feature = create_image_feature(test_image)
print image_feature
print image_feature.to_sample()
print image_feature.get_image()
print image_feature.get_label()
# +
folder = "/home/jxy/data/testdata/test"
imageFiles = os.listdir(folder)
images = []
labels = []
for f in imageFiles:
image = cv2.imread(folder + '/' + f)
images.append(image)
labels.append(np.array([2]))
img_rdd = sc.parallelize(images)
labels_rdd = sc.parallelize(labels)
transformer = Pipeline([ColorJitter(), HFlip(), Resize(200, 200, 1), MatToFloats(200, 200)])
# create ImageFrame from image ndarray rdd
image_frame = ImageFrame(img_rdd, labels_rdd)
# apply transformer to image_frame
image_frame = transformer(image_frame)
print type(image_frame)
# image_frame to sample
sample_rdd = image_frame.to_sample()
print sample_rdd.take(1)
# get label rdd
label_rdd = image_frame.get_label()
print 'label ', label_rdd.take(1)
# get image rdd
image_rdd = image_frame.get_image()
print image_rdd.take(1)
color = ColorJitter()
hflip = HFlip()
resize = Resize(200, 200, 1)
to_float = MatToFloats(200, 200)
image_frame = ImageFrame(img_rdd)
image_frame = color(image_frame)
image_frame = hflip(image_frame)
image_frame = resize(image_frame)
image_frame = to_float(image_frame)
print type(image_frame)
sample = image_frame.to_sample()
print sample.count()
# +
folder = "/home/jxy/data/testdata/test"
imageFiles = os.listdir(folder)
images = []
for f in imageFiles:
image = cv2.imread(folder + '/' + f)
images.append(image)
img_rdd = sc.parallelize(images)
color = ColorJitter()
hflip = HFlip()
resize = Resize(200, 200, 1)
to_float = MatToFloats(200, 200)
image_frame = ImageFrame(img_rdd)
image_frame = color(image_frame)
image_frame = hflip(image_frame)
image_frame = resize(image_frame)
image_frame = to_float(image_frame)
print type(image_frame)
sample = image_frame.to_sample()
print sample.count()
# -
| apps/feature/image_augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import var
import predict as pre
import utils
print('初始化变量...')
names = ['user_id', 'item_id', 'rating', 'timestamp']
direct = 'dataset/ml-100k/'
trainingset_files = (direct + name for name in ('u1.base', 'u2.base', 'u3.base', 'u4.base', 'u5.base'))
testset_files = (direct + name for name in ('u1.test', 'u2.test', 'u3.test', 'u4.test', 'u5.test'))
if __name__ == '__main__':
i = 0
nums = 5
alpha_set = [0, 0.1, 0.3, 0.5, 0.6, 0.65, 0.7, 0.75, 0.8, 0.9, 1]
rmse_blendCF = {alpha:[] for alpha in alpha_set}
rmse_blendCF_train = {alpha:[] for alpha in alpha_set}
for trainingset_file, testset_file in zip(trainingset_files, testset_files):
i += 1
print('------ 第%d/%d组样本 ------' % (i, nums))
df = pd.read_csv(trainingset_file, sep='\t', names=names)
var.ratings = np.zeros((var.n_users, var.n_items))
print('载入训练集' + trainingset_file)
for row in df.itertuples():
var.ratings[row[1]-1, row[2]-1] = row[3]
print('计算训练集各项统计数据...')
utils.cal_mean()
print('计算相似度矩阵...')
var.user_similarity = utils.cal_similarity(kind='user')
var.item_similarity = utils.cal_similarity(kind='item')
print('计算完成')
predictions_blendCF_train = {alpha:[] for alpha in alpha_set}
targets = []
print('在训练集上测试...')
for row in df.itertuples():
user, item, actual = row[1]-1, row[2]-1, row[3]
for alpha in alpha_set:
predictions_blendCF_train[alpha].append(pre.predict_blend(user, item, alpha=alpha))
targets.append(actual)
for alpha in alpha_set:
rmse_blendCF_train[alpha].append(utils.rmse(np.array(predictions_blendCF_train[alpha]), np.array(targets)))
print('载入测试集' + testset_file)
test_df = pd.read_csv(testset_file, sep='\t', names=names)
predictions_blendCF = {alpha:[] for alpha in alpha_set}
targets = []
print('测试集规模为 %d' % len(test_df))
print('在测试集上测试...')
for row in test_df.itertuples():
user, item, actual = row[1]-1, row[2]-1, row[3]
for alpha in alpha_set:
predictions_blendCF[alpha].append(pre.predict_blend(user, item, alpha=alpha))
targets.append(actual)
for alpha in alpha_set:
rmse_blendCF[alpha].append(utils.rmse(np.array(predictions_blendCF[alpha]), np.array(targets)))
print('测试完成')
print('------ 测试结果 ------')
print('融合模型中,不同alpha在训练集上的RMSE:')
for alpha in sorted(alpha_set):
print('alpha = %.2f: %.4f' % (alpha, np.mean(rmse_blendCF_train[alpha])))
print('融合模型中,不同alpha在测试集上的RMSE:')
for alpha in sorted(alpha_set):
print('alpha = %.2f: %.4f' % (alpha, np.mean(rmse_blendCF[alpha])))
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# alpha_set
rmse_blend_testlist = [np.mean(rmse_blendCF[alpha]) for alpha in alpha_set]
rmse_blend_trainlist = [np.mean(rmse_blendCF_train[alpha]) for alpha in alpha_set]
pal = sns.color_palette("Set2", 2)
plt.figure(figsize=(8, 8))
plt.plot(alpha_set, rmse_blend_trainlist, c=pal[0], label='blend train', alpha=0.5, linewidth=5)
plt.plot(alpha_set, rmse_blend_testlist, c=pal[0], label='blend test', linewidth=5)
plt.legend(loc='best', fontsize=18)
plt.xticks(fontsize=16);
plt.yticks(fontsize=16);
plt.xlabel('alpha', fontsize=25);
plt.ylabel('RMSE', fontsize=25);
| alpha.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: luce_vm
# language: python
# name: luce_vm
# ---
# ### Deploy and Interact with Anthony's contract
# The aim of this notebook is to compile, deploy and interact with Anthony's Smart Contract
# ### Compile contract from file
# + inputHidden=false outputHidden=false
# Import libraries
import json
import web3
from web3 import Web3
from solcx import compile_source
from web3.contract import ConciseContract
# -
# Read in LUCE contract code
with open('./data/anthony_dev.sol', 'r') as file:
contract_source_code = file.read()
# Compile & Store Compiled source code
compiled_sol = compile_source(contract_source_code)
# +
#compiled_sol
# -
# Extract full interface as dict from compiled contract
contract_interface = compiled_sol['<stdin>:LinkedList']
# +
#contract_interface
# -
# Extract abi and bytecode
abi = contract_interface['abi']
bytecode = contract_interface['bin']
# ### Deploy
# Use Ganache for web3 instance
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:8545"))
# +
# Use local Ganache GUI on macOS
#w3 = Web3(Web3.HTTPProvider("HTTP://192.168.72.1:7545"))
# -
# Set pre-funded ganache account #0 as sender
w3.eth.defaultAccount = w3.eth.accounts[0]
w3.eth.accounts[0]
# The default `eth.defaultAccount` address is used as the default "from" property for transaction dictionaries if no other explicit "from" property is specified.
# Create contract blueprint
Contract = w3.eth.contract(abi=abi, bytecode=bytecode)
# Submit the transaction that deploys the contract
tx_hash = Contract.constructor().transact()
tx_hash
# ### Obtain Transcation Receipt
# Wait for the transaction to be mined, and get the transaction receipt
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
# Obtain address of freshly deployed contract
tx_receipt.contractAddress
# ### Interact with contract
# Create python instance of deployed contract
anthony_contract = w3.eth.contract(
address=tx_receipt.contractAddress,
abi=contract_interface['abi'],
)
# Extract default accounts created by ganache
accounts = w3.eth.accounts
accounts
# **UploadData**
# ```solidity
# function UploadData(
# address _address1,
# bool _OpenToGeneralResearch,
# bool _OpenToHMBResearch,
# bool _OpenForClinicalPurpose,
# bool _OpenToProfit
# )
# ```
# +
test_address = '0x92D44e8579620F2Db88A12E70FE38e8CDB3541BA'
test_address = accounts[0]
tx_hash = anthony_contract.functions.UploadData(test_address, True, False, False, False).transact()
# -
tx_hash
# **giveResearchPurpose**
# ```solidity
# function giveResearchPurpose(
# address _address2,
# bool _MethodsDevelopment,
# bool _ReferenceOrControlMaterial,
# bool _Populations,
# bool _Ancestry )
# ```
# +
test_address2 = accounts[1]
tx_hash = anthony_contract.functions.giveResearchPurpose(test_address2, True, True, True, True).transact()
# -
| LUCE-API/luce_vm/jupyter/x) Anthony Contract Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 2.01
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the 2-D "circles" dataset and inspect it before clustering.
data = pd.read_csv("circles.csv")
# Scatter plot of the two coordinate columns.
plt.scatter(data.iloc[:,0], data.iloc[:,1])
plt.show()
# Distribution of the first coordinate only.
plt.hist(data.iloc[:,0])
plt.show()
# ## Exercise 2.02
from sklearn.cluster import KMeans
# Elbow method: fit k-means for k = 1..20 and record the inertia
# (within-cluster sum of squared distances) for each k.
ideal_k = []
for i in range(1,21):
    est_kmeans = KMeans(n_clusters=i, random_state=0)
    est_kmeans.fit(data)
    ideal_k.append([i,est_kmeans.inertia_])
# Convert to an array so the (k, inertia) columns can be sliced for plotting.
ideal_k = np.array(ideal_k)
plt.plot(ideal_k[:,0],ideal_k[:,1])
plt.show()
# A cluster count of 5 was selected (elbow of the inertia curve)
est_kmeans = KMeans(n_clusters=5, random_state=0)
est_kmeans.fit(data)
pred_kmeans = est_kmeans.predict(data)
# Color each point by its predicted cluster label.
plt.scatter(data.iloc[:,0], data.iloc[:,1], c=pred_kmeans)
plt.show()
| Chapter02/Exercise2.02/Exercise2_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="0s5qDoyasFLH"
# <a href="https://colab.research.google.com/github/https-deeplearning-ai/tensorflow-1-public/blob/master/C3/W1/ungraded_labs/C3_W1_Lab_3_sarcasm.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="SdNGfEo2u-r7"
# # Ungraded Lab: Tokenizing the Sarcasm Dataset
#
# In this lab, you will be applying what you've learned in the past two exercises to preprocess the [News Headlines Dataset for Sarcasm Detection](https://www.kaggle.com/rmisra/news-headlines-dataset-for-sarcasm-detection/home). This contains news headlines which are labeled as sarcastic or not. You will revisit this dataset in later labs so it is good to be acquainted with it now.
# + [markdown] id="Twhyfjg0xTkg"
# ## Download and inspect the dataset
#
# First, you will fetch the dataset and preview some of its elements.
# + id="33W129a7xgoJ" colab={"base_uri": "https://localhost:8080/"} outputId="27f36810-1f7a-4322-b1e5-59293345ca3f"
# Download the dataset
# !wget https://storage.googleapis.com/tensorflow-1-public/course3/sarcasm.json
# + [markdown] id="zJHdzh9FyWa2"
# The dataset is saved as a [JSON](https://www.json.org/json-en.html) file and you can use Python's [`json`](https://docs.python.org/3/library/json.html) module to load it into your workspace. The cell below unpacks the JSON file into a list.
# + id="OkaBMeNDwMel"
import json
# Load the JSON file
with open("./sarcasm.json", 'r') as f:
datastore = json.load(f)
# + [markdown] id="D2aSBvJVzRNV"
# You can inspect a few of the elements in the list. You will notice that each element consists of a dictionary with a URL link, the actual headline, and a label named `is_sarcastic`. Printed below are two elements with contrasting labels.
# + id="RiiFcWU2xnMJ" colab={"base_uri": "https://localhost:8080/"} outputId="0597ff83-05a6-4d7d-d2d3-faa09929649d"
# Non-sarcastic headline
print(datastore[0])
# Sarcastic headline
print(datastore[20000])
# + [markdown] id="dPuH0bBiz8LJ"
# With that, you can collect all urls, headlines, and labels for easier processing when using the tokenizer. For this lab, you will only need the headlines but we included the code to collect the URLs and labels as well.
# + id="9pxLUQJCxkNB"
# Initialize lists
sentences = []
labels = []
urls = []
# Append elements in the dictionaries into each list
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
urls.append(item['article_link'])
# + [markdown] id="lBHSXJ5V0qqK"
# ## Preprocessing the headlines
#
# You can convert the `sentences` list above into padded sequences by using the same methods you've been using in the past exercises. The cell below generates the `word_index` dictionary and generates the list of padded sequences for each of the 26,709 headlines.
# + id="5OSTw3uJuvmY" colab={"base_uri": "https://localhost:8080/"} outputId="6090c089-5af7-4aeb-e925-dc7e9813f296"
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Initialize the Tokenizer class
tokenizer = Tokenizer(oov_token="<OOV>")
# Generate the word index dictionary
tokenizer.fit_on_texts(sentences)
# Print the length of the word index
word_index = tokenizer.word_index
print(f'number of words in word_index: {len(word_index)}')
# Print the word index
print(f'word_index: {word_index}')
print()
# Generate and pad the sequences
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding='post')
# Print a sample headline
index = 3
print(f'sample headline: {sentences[index]}')
print(f'padded sequence: {padded[index]}')
print()
# Print dimensions of padded sequences
print(f'shape of padded sequences: {padded.shape}')
# + [markdown] id="4wyLF5T036W8"
# This concludes the short demo on using text data preprocessing APIs on a relatively large dataset. Next week, you will start building models that can be trained on these output sequences. See you there!
| Natural Language Processing Tensorflow/C3_W1_Lab_3_sarcasm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Document retrieval from wikipedia data
# ## Fire up GraphLab Create
# (See [Getting Started with SFrames](../Week%201/Getting%20Started%20with%20SFrames.ipynb) for setup instructions)
# ## To-Dos
# * Find out what SFrame.stack can do.
# * Find out Python dictionary's internal implementation
# * Learn how to do Clustering. There isn't any exercise here.
# * Learn additional features about "distance" parameter
# * Normalize word_count approach and see how it improves the result
# ## Quiz Starts Here
import graphlab
# Limit number of worker processes. This preserves system memory, which prevents hosted notebooks from crashing.
graphlab.set_runtime_config('GRAPHLAB_DEFAULT_NUM_PYLAMBDA_WORKERS', 4)
# +
# "the", "on", "and", "go", "round", "bus", and "wheels"
words = graphlab.SArray(["the wheels on the bus go round and round"])
word_counts = graphlab.text_analytics.count_words(words)
answer = 2111211
word_counts
# -
import numpy
# +
m1 = numpy.array("1 3 2 1 2 1 1".split(), dtype=int)
m2 = numpy.array("7 0 2 1 0 0 1".split(), dtype=int)
m1.dot(m2)
# +
m3 = numpy.array("1 7 0 0 2 0 1".split(), dtype=int)
m1.dot(m3) # CORRECT
# +
m4 = numpy.array("1 0 0 0 7 1 2".split(), dtype=int)
m1.dot(m4)
# +
m5 = numpy.array("0 2 0 0 7 1 1".split(), dtype=int)
m1.dot(m5)
# -
# # Load some text data - from wikipedia, pages on people
people = graphlab.SFrame('people_wiki.gl/')
# Data contains: link to wikipedia article, name of person, text of article.
people.head()
len(people)
# # Explore the dataset and checkout the text it contains
#
# ## Exploring the entry for president Obama
obama = people[people['name'] == '<NAME>']
obama
obama['text']
# ## Exploring the entry for actor <NAME>
clooney = people[people['name'] == '<NAME>']
clooney['text']
# # Get the word counts for Obama article
obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])
print obama['word_count']
# ## Sort the word counts for the Obama article
# ### Turning dictonary of word counts into a table
obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word','count'])
type(obama['word_count'])
type(obama[['word_count']])
type(obama)
# Take word_count column and turn it into an SFrame
obama[['word_count']]
# The entire SFrame
obama
# ### Sorting the word counts to show most common words at the top
obama_word_count_table.head()
obama_word_count_table.sort('count',ascending=False)
# Most common words include uninformative words like "the", "in", "and",...
# # Compute TF-IDF for the corpus
#
# To give more weight to informative words, we weigh them by their TF-IDF scores.
# +
# Do word count for all articles
people['word_count'] = graphlab.text_analytics.count_words(people['text'])
people.head()
# +
# Use word_count in all articles to implement TF-IDF normalizer
tfidf = graphlab.text_analytics.tf_idf(people['word_count'])
# Earlier versions of GraphLab Create returned an SFrame rather than a single SArray
# This notebook was created using Graphlab Create version 1.7.1
if graphlab.version <= '1.6.1':
tfidf = tfidf['docs']
tfidf
# -
people['tfidf'] = tfidf
# ## Convert all TF-IDF into Table from Dictionary
# +
# probably not because it will create nested tables
# -
# ## Examine the TF-IDF for the Obama article
obama = people[people['name'] == '<NAME>']
obama[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
# Words with highest TF-IDF are much more informative.
# # Manually compute distances between a few people
#
# Let's manually compare the distances between the articles for a few famous people.
clinton = people[people['name'] == '<NAME>']
beckham = people[people['name'] == '<NAME>']
# ## Is Obama closer to Clinton than to Beckham?
#
# We will use cosine distance, which is given by
#
# ### (1-cosine_similarity)
# ### The lower the number, the higher the similarity
#
# and find that the article about president Obama is closer to the one about former president Clinton than that of footballer <NAME>.
graphlab.distances.cosine(obama['tfidf'][0],clinton['tfidf'][0])
graphlab.distances.cosine(obama['tfidf'][0],beckham['tfidf'][0])
# # Build a nearest neighbor model for document retrieval
#
# We now create a nearest-neighbors model and apply it to document retrieval.
knn_model = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name')
# # Applying the nearest-neighbors model for retrieval
# ## Who is closest to Obama?
knn_model.query(obama)
# As we can see, president Obama's article is closest to the one about his vice-president Biden, and those of other politicians.
# ## Other examples of document retrieval
swift = people[people['name'] == '<NAME>']
knn_model.query(swift)
jolie = people[people['name'] == '<NAME>']
knn_model.query(jolie)
arnold = people[people['name'] == '<NAME>']
knn_model.query(arnold)
# ## Homework Starts Here
elton_john = people[people['name'] == '<NAME>']
elton_john_tf_idf = elton_john[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
elton_john_word_count = elton_john[['word_count']].stack('word_count',new_column_name=['word','count']).sort('count',ascending=False)
elton_john_tf_idf
elton_john_word_count
victoria_beckham = people[people['name'] == '<NAME>']
graphlab.distances.cosine(elton_john['tfidf'][0],victoria_beckham['tfidf'][0])
paul_mccartney = people[people['name'] == '<NAME>']
graphlab.distances.cosine(elton_john['tfidf'][0],paul_mccartney['tfidf'][0])
billy_joel = people[people['name'] == '<NAME>']
cliff_richard = people[people['name'] == '<NAME>']
roger_daltrey = people[people['name'] == '<NAME>']
george_bush = people[people['name'] == '<NAME>']
rod_steward = people[people['name'] == '<NAME>']
tommy_haas = people[people['name'] == '<NAME>']
stephen_dow_beckham = people[people['name'] == '<NAME>']
louis_molloy = people[people['name'] == '<NAME>']
adrienne_corri = people[people['name'] == '<NAME>']
# +
mary_fitzgerald = people[people['name'] == '<NAME> (artist)']
mary_fitzgerald
# -
carrie = people[people['name'] == '<NAME>']
carrie
david = people[people['name'] == '<NAME>']
david
# +
caroline = people[people['name'] == '<NAME>']
caroline
# +
mel = people[people['name'] >= '<NAME>']
mel = mel[mel['name'] <= '<NAME>']
mel
# -
graphlab.distances.cosine(elton_john['word_count'][0],billy_joel['word_count'][0])
graphlab.distances.cosine(elton_john['word_count'][0],cliff_richard['word_count'][0])
graphlab.distances.cosine(elton_john['word_count'][0],roger_daltrey['word_count'][0])
graphlab.distances.cosine(elton_john['tfidf'][0],roger_daltrey['tfidf'][0])
graphlab.distances.cosine(elton_john['tfidf'][0],rod_steward['tfidf'][0])
graphlab.distances.cosine(elton_john['tfidf'][0],tommy_haas['tfidf'][0])
graphlab.distances.cosine(victoria_beckham['word_count'][0],mary_fitzgerald['word_count'][0])
graphlab.distances.cosine(victoria_beckham['word_count'][0],stephen_dow_beckham['word_count'][0])
graphlab.distances.cosine(victoria_beckham['word_count'][0],louis_molloy['word_count'][0])
graphlab.distances.cosine(victoria_beckham['word_count'][0],adrienne_corri['word_count'][0])
graphlab.distances.cosine(victoria_beckham['tfidf'][0],caroline['tfidf'][0])
graphlab.distances.cosine(victoria_beckham['tfidf'][0],david['tfidf'][0])
graphlab.distances.cosine(victoria_beckham['tfidf'][0],carrie['tfidf'][0])
graphlab.distances.cosine(elton_john['tfidf'][0],paul_mccartney['tfidf'][0])
knn_model_word_count = graphlab.nearest_neighbors.create(people,features=['word_count'],label='name', distance='cosine')
knn_model_tfidf = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name', distance='cosine')
knn_model_word_count.query(victoria_beckham)
knn_model_tfidf.query(victoria_beckham)
knn_model_tfidf.query(elton_john)
knn_model_word_count.query(elton_john)
| homework/course.1.homework.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''mvabyR'': conda)'
# language: python
# name: python37664bitmvabyrconda8543ffede4714687a536e11de6ad4416
# ---
# # 第2章 Rによるデータハンドリングを学びたい <br>---アンケートデータとID-POSデータのハンドリング
# 目次を見た感じだと1章と比較してRの手法メインの章かなという印象
# ## 手法の概要
# 予想という感じ。pandasで似たようなことをやっていこう。
# dataframeでの対応をまとめる
# str | dtypes
import pandas as pd
import numpy as np
# ## 変数の型
# ### 関数strによるデータ構造の把握
df_jhk = pd.read_csv("../data/第02章/人事評価結果.csv", encoding="shift-jis")
# +
# df_jhk[:5]
# -
df_jhk.dtypes
# python では、strがobjectになっている。\
# Rに関して言うと、Factorは嫌いというか使えない。
df_jhk["総合平均"] = (df_jhk["昨年総合"] + df_jhk["総合"])/2
df_jhk.dtypes
# 総合平均がfloatで足されたのがわかる。
# ### 関数factorの使いどころ
S = pd.Series([1,5,2,10,8,2,1,4,3,3])
# すこし強引にやる
pd.cut(S, range(-1,10)).value_counts(sort = False)
# ## 観測対象の情報の抽出
# queryを使えるとかなり便利にDataFrameを使用できる。
mdat = df_jhk.query("性別 == 'M'")
# mdat.head()
mdat2 = df_jhk.query("性別 == 'F'")
# mdat.head()
# いろいろな方法でやってみる
# +
cope1 = df_jhk.query("協調性 < 50") # < 50
cope2 = df_jhk.where( df_jhk["協調性"] <= 50).dropna()# <= 50
cope3 = df_jhk[df_jhk["協調性"] > 50]# > 50
cope4 = df_jhk.query("協調性 >= 50") # >=50
# -
max(cope2["協調性"])
# ちなみにこんなこともできる
cope5 = df_jhk.query("75 > 協調性 > 50")
min(cope5["協調性"]), max(cope5["協調性"])
# +
m1 = df_jhk.query("(性別 == 'M') | (年代 == '熟練')") # M and 熟練
m2 = df_jhk.where( (df_jhk["技能"] >= 50) & (df_jhk["性別"] == "M") & (df_jhk["年代"] == "熟練")).dropna()#
cope3 = df_jhk[(df_jhk["技能"] >= 50) & (df_jhk["性別"] == "M") | (df_jhk["年代"] == "熟練")]
# +
# m2
# -
# ## 欠損値の処理
df_na = pd.read_csv("../data/第02章/欠損データ.csv")
df_na
df_na2 = pd.read_csv("../data/第02章/欠損データ2.csv")
df_na2
# Pythonには、na.stringsのようなものはなさそう。\
# ただ条件に合うものを埋めるのは、where!\
# 実際は、合わないものを埋めるので、条件は、999以下
kesson = df_na2.where( ( 999 > df_na2), other = np.nan )
kesson
# 削除は、dropna
kanzen = kesson.dropna(axis=0)
kanzen
df_na.isnull().any(axis=0)
# ### 一回欠損値の確認の方法(Python用番外編)
df_na.info()
df_na.isnull()
df_na.isnull().any(axis = 0)
#
# ## ソート
score = [1,5,2,10,8,2,1,4,3,3]
sorted(score)
df_s = pd.read_csv("../data/第02章/ソートデータ.csv", encoding="shift-jis")
df_s
df_s.sort_values(by = "協調性")
# indexがpythonは0、Rは1始まりなので、少しずれたように見える
df_s.sort_values(by = ["協調性", "総合"])
# ## マージ
datA = pd.read_csv("../data/第02章/マージデータA.csv", encoding="shift-jis")
datB = pd.read_csv("../data/第02章/マージデータB.csv", encoding="shift-jis")
datA
datB
pd.merge(datA, datB, on = "ID", how = "left")
# ## 数値の置き換え
vec = [2,3,4,5,1,2,3,1,2]
mat = np.array(vec).reshape(3,-1).T
# .Tは転置
mat
# pythonは0始まりなので、Rの結果引く1になっている
loc2 = np.where(mat == 2)
loc2
loc4 = np.where(mat == 4)
mat[loc2] = 4
mat[loc4] = 2
mat
# ## 固定長データのハンドリング
# テキストデータの読み込み
with open("../data/第02章/項目反応固定長.txt") as f:
itermap = f.readlines()
itermap
# 改行コード(\n)も残ってしまうので、replaceする
itermap = [i.replace('\n','') for i in itermap]
itermap
row0 = [[i[:6],i[6],i[7], i[8], i[9], i[10]] for i in itermap]
# 少し強引か
row0
# *はリストを展開する
df_row = pd.DataFrame(row0, columns=["ID",*[f"問{i+1}"for i in range(5)] ])
df_row
with open("../data/第02章/key.txt") as f:
answer = f.readlines()
answer
answer = [a.replace("\n", "") for a in answer]
key = answer[1:]
key_tile = np.tile(key, (len(df_row), 1 ))
key_tile
# pythonは特殊で、True,Falseは元がintである。\
# 少し雑な言い方(笑)
(df_row.iloc[:, 1:] == key_tile).astype(int)
# pythonでboolenがintといったおもしろい例
True + True
True*4
# なんてこともできる
# ## ID-POSデータの読み込み
import glob
fname = glob.glob("../data/第02章/POSフォルダ/*")
fname
pos0 = pd.read_csv(fname[0], encoding="shift-jis")
pos0[:5]
# ### コラム Rの外部エディタとしての"NotePad"
# 個人的に、Notepadは使ったことないが、RStadioはかなり使いやすい。データ解析をする授業で、レポートを書きながら実行して、そのままwordに変換できたのでだれよりも早く終われたので大好き。
pos0.dtypes
list_df = [pd.read_csv(f, encoding="shift-jis") for f in fname]
# + active=""
# つなげるだけなら、リストを作成する必要もない
# -
posall = pd.DataFrame()
for df_one in list_df:
posall = pd.concat([posall, df_one])
posall.describe()
# ## ID-POSデータにおけるソート
posall.sort_values(by = ["顧客ID", "購買日", "購買時間"])[:6]
# ## RFM分析
R = pd.pivot_table(posall, index="顧客ID", values = "購買日", aggfunc=np.max)
F = posall["顧客ID"].value_counts()
M = pd.pivot_table(posall, index="顧客ID", values = "購買金額", aggfunc=np.sum)
rfm = pd.concat([R, F, M], axis = 1)
rfm.columns = ["R", "F","M"]
rfm[:5]
rfm2 = rfm.sort_values(by=["R", "F", "M"], ascending=False)
rfm2[:7]
# ## ID-POSデータにおけるクロス集計表
t1 = pd.crosstab(posall["顧客ID"], posall["商品カテゴリ"])
t1[:5]
t1 = pd.crosstab(posall["顧客ID"], posall["商品カテゴリ"])
# +
# t1
# -
t2 = pd.crosstab([posall["商品カテゴリ"],posall["店舗"]], columns=posall["顧客ID"])
# +
# t2
# -
t3 = pd.crosstab([posall["商品カテゴリ"],posall["購買日"]], columns=posall["顧客ID"])
t3
t1.shape
storeA = posall.query("店舗 == 'A'")
pd.crosstab(storeA["顧客ID"],storeA["商品カテゴリ"]).shape
# 35行になっているので見てみると、ID00036が店舗Aの値が0になっている。\
# queryだとデータがないと削除されてしまうので、クロス集計表を作ってそのなかで顧客IDと商品カテゴリをとる
cross = pd.crosstab(posall["顧客ID"],[posall["商品カテゴリ"], posall["店舗"]])
# columnsが二段階になっているmultiindexになっているので、少し特殊なやり方、slice(None)は一階層目をすべて選択することになっている。
cross.loc[:, (slice(None), 'A')].shape
# ## 顧客ID 別に月ごとの購買金額を求める
cid = posall.顧客ID
posall.columns
buym = posall.購買日.map(lambda x : f"{str(x)[:6]}" )
pd.pivot_table(posall, "購買金額",[cid,buym], np.sum)
# いい方法がわからなかったが、日当たりの合計になってしまった。\
# 月ごとにする
pd.pivot_table(posall, "購買金額",[cid,buym], np.sum).sum(axis=1)
# ## 顧客ID 別に商品名を取得する---自作関数を利用する
# for t in t1
res2 = t1.apply(lambda x : t1.columns[x > 1], axis=1)
# ソートのされ方が異なっているので表示が若干異なる
res2.head(2)
# ## 顧客ID ごとに来店感覚の分布を描画・要約する
from datetime import datetime
posall["ndate"] = posall["購買日"].map(lambda date : datetime((date)//10000, (date - (20130108//10000)*10000)//100,((date)%100)))
# なんだか、numpyの日付の型は、計算するとナノ秒になるらしい
posall.query("顧客ID == 'ID00001'")["ndate"][:5]
np.diff(posall.query("顧客ID == 'ID00001'")["ndate"])[:5]
np.diff(posall.query("顧客ID == 'ID00001'")["ndate"])/(3600*10**9*24)
restime = [ (np.diff(posall.query("顧客ID == 'ID000{:02}'".format(i))["ndate"])/(3600*10**9*24)).astype(int) for i in range(len(posall.顧客ID.unique())+1)][1:]
len(restime)
df_restime = pd.DataFrame(restime, index = posall.顧客ID.unique())
df_restime[:5]
df_restime.index.name = "顧客ID"
# dataframeのdescribeは列方向しかできないので
df_restime.T.describe()
| MyCode/Chapter02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Finteresting-problems&branch=main&subPath=notebooks/digit-sums.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# # Digit Sums
#
# [Watch on YouTube](https://www.youtube.com/watch?v=rvq2nYffo9Y&list=PL-j7ku2URmjZYtWzMCS4AqFS5SXPXRHwf)
#
# The [digit sum](https://en.wikipedia.org/wiki/Digit_sum) of a (natural) number is what you get when you add all of its digits together.
#
# For example, the digit sum of 256 is 2 + 5 + 6 = 13.
#
# We can isolate the digits in a number into a list in order to add them up.
# Demo: compute the digit sum of one example number by walking its
# decimal string representation and totalling the digits.
number = 256
sum_of_digits = sum(int(digit) for digit in str(number))
print('The sum of the digits in', number, 'is', sum_of_digits)
# In order to reuse that code, let's put it in a function.
def digit_sum(number):
    """Return the sum of the decimal digits of *number*.

    E.g. digit_sum(256) == 2 + 5 + 6 == 13.
    """
    return sum(int(digit) for digit in str(number))
# And then we can call that function.
digit_sum(12345567)
# Now we can investigate interesting ideas like graphing digit sums versus numbers, or listing [Harshad numbers](https://en.wikipedia.org/wiki/Harshad_number), which are numbers that are divisible by the sum of their digits.
#
# Here's a scatter plot of digit sums between 1 and 100.
import matplotlib.pyplot as plt
# %matplotlib inline
x_list = []
y_list = []
for x in range(1, 100):
x_list.append(x)
y_list.append(digit_sum(x))
plt.scatter(x_list, y_list)
plt.title('Digit Sums from 1 to 100')
plt.xlabel('Number')
plt.ylabel('Digit Sum')
plt.show()
# Let's generate a list of Harshad numbers between 1 and 256.
# Harshad numbers are those divisible by the sum of their own digits.
harshad_numbers = [candidate for candidate in range(1, 256)
                   if candidate % digit_sum(candidate) == 0]
print(harshad_numbers)
# And just for fun, collecting Harshad numbers, and their digit sum, in a dataframe instead of a list.
import pandas as pd
# Collect the Harshad numbers and their digit sums as a list of records,
# then build the DataFrame once at the end.
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# constructing from a list of dicts is both compatible and avoids the
# quadratic cost of re-copying the frame on every append.
harshad_records = []
for number in range(1, 256):
    sum_of_digits = digit_sum(number)
    if number % sum_of_digits == 0:
        harshad_records.append({'Harshad Number': number, 'Digit Sum': sum_of_digits})
harshad_numbers_dataframe = pd.DataFrame(harshad_records, columns=['Harshad Number', 'Digit Sum'])
harshad_numbers_dataframe
harshad_numbers_dataframe.plot()
# For more information, check out [applications of digit sums](https://en.wikipedia.org/wiki/Digit_sum#Applications) or try [this challenge](https://twitter.com/mathsjem/status/1186597918213660673).
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| notebooks/digit-sums.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Types and Missing Values
#
# One of the most important pieces of information you can have about your DataFrame is the data type of each column. pandas stores its data such that each column is exactly one data type. A large number of data types are available for pandas DataFrame columns. This chapter focuses only on the most common data types and provides a brief summary of each one. For extensive coverage of each and every data type, see the chapter **Changing Data Types** in the **Essential Commands** part.
#
# ## Common data types
#
# The following are the most common data types that appear frequently in DataFrames.
#
# * **boolean** - Only two possible values, `True` and `False`
# * **integer** - Whole numbers without decimals
# * **float** - Numbers with decimals
# * **object** - Typically strings, but may contain any object
# * **datetime** - Specific date and time with nanosecond precision
#
# ### More on the object data type
#
# The object data type is the most confusing and deserves a longer discussion. Each value in an object column can be *any* Python object. Object columns can contain integers, floats, or even data structures such as lists or dictionaries. Anything can be contained in object columns. But, nearly all of the time, columns of the object data type only contain **strings**. When you see that a column is an object data type, you should expect the values to be strings. Unfortunately, pandas does not provide its users with a specific data type for strings. If you do have strings in your columns, the data type will be object.
#
# ### The importance of knowing the data type
#
# Knowing the data type of each column of your pandas DataFrame is very important. The main reason for this is that every value in each column will be of the same type. For instance, if you select a single value from a column that has an integer data type, then you are guaranteed that this value is also an integer. Knowing the data type of a column is one of the most fundamental pieces of knowledge of your DataFrame.
#
# ### A major exception with the object data type
#
# The object data type, is unfortunately, an exception to the information in the previous section. Although columns that have object data type are typically strings, there is no guarantee that each value will be a string. You could very well have an integer, list, or even another DataFrame as a value in the same object column.
#
# ## Missing Value Representation
#
# ### `NaN`, `None`, and `NaT`
#
# pandas represents missing values differently based on the data type of the column.
#
# * `NaN` - Stands for not a number and is a float data type
# * `None` - The literal Python object `None` and only found in object columns
# * `NaT` - Stands for not a time and is used for missing values in datetime columns
#
# ### Missing values for each data type
#
# * **boolean and integer** - No representation for missing values exist for boolean and integer columns. This is an unfortunate limitation.
# * **float** - Uses `NaN` as the missing value.
# * **datetime** - Only uses `NaT` as the missing value.
# * **object** - Can contain any Python object so all three of the missing value representations may appear in these columns, but typically you will encounter `NaN` or `None`.
#
# ### Missing values in boolean and integer columns
#
# Knowing that a column is either a boolean or integer column guarantees that there are no missing values in that column as pandas does not allow for it. If, for instance, you would like to place a missing value in a boolean or integer column, then pandas converts the entire column to float. This is because a float column can accommodate missing values. When booleans are converted to floats, `False` becomes 0 and `True` becomes 1.
#
# ### Integer NaN update for pandas 0.24
#
# With the release of pandas version 0.24 (February 2019), missing value representation was made possible for a special kind of integer data type called **Int64Dtype**. There is still no missing value representation for the default integer data type.
#
# ## Finding the data type of each column
#
# The `dtypes` DataFrame attribute (NOT a method) returns the data type of each column and is one of the first commands you should execute after reading in your data. Let's begin by using the `read_csv` function to read in the bikes dataset.
import pandas as pd
bikes = pd.read_csv('../data/bikes.csv')
bikes.head(3)
# Let's get the data types of each column in our `bikes` DataFrame. The returned object is a Series with the data types as the values and the column names as the index.
bikes.dtypes
# ### Why do `starttime` and `stoptime` have object as the data type?
#
# From the visual display of the bikes DataFrame above, it appears that both the `starttime` and `stoptime` columns are datetimes. The result of the `dtypes` attribute shows that they are objects (strings).
#
# The `read_csv` function requires that you provide a list of columns that are datetimes to the `parse_dates` parameter, otherwise it will read them in as strings. Let's reread the data using the `parse_dates` parameter.
bikes = pd.read_csv('../data/bikes.csv', parse_dates=['starttime', 'stoptime'])
bikes.dtypes.head()
# ### What are all those 64's at the end of the data types?
#
# Booleans, integers, floats, and datetimes all use a particular amount of memory for each of their values. The memory is measured in **bits**. The number of bits used for each value is the number appended to the end of the data type name. For instance, integers can be either 8, 16, 32, or 64 bits while floats can be 16, 32, 64, or 128. A 128-bit float column will show up as `float128`.
#
# Technically a `float128` is a different data type than a `float64` but generally you will not have to worry about such a distinction as the operations between different float columns will be the same.
#
# **Booleans** are always stored as 8-bits. There is no set bit size for values in an **object** column as each value can be of any size.
#
# ## Getting more metadata
#
# **Metadata** can be defined as data on the data. The data type of each column is an example of metadata. The number of rows and columns is another piece of metadata. We find this with the `shape` attribute, which returns a tuple of integers.
bikes.shape
# ### Total number of values with the `size` attribute
# The `size` attribute returns the total number of values (the number of columns multiplied by the number of rows) in the DataFrame.
bikes.size
# ### Get data types plus more with the `info` method
# The `info` DataFrame method provides output similar to `dtypes`, but also shows the number of non-missing values in each column along with more info such as:
#
# * Type of object (always a DataFrame)
# * The type of index and number of rows
# * The number of columns
# * The data types of each column and the number of non-missing (a.k.a non-null)
# * The frequency count of all data types
# * The total memory usage
bikes.info()
# ## More data types
#
# There are several more data types available in pandas. An extensive and formal discussion on all data types is available in the chapter **Changing Data Types** from the **Essential Commands** part.
# ## Exercises
# Use the `bikes` DataFrame for the following:
# ### Exercise 1
# <span style="color:green; font-size:16px">What type of object is returned from the `dtypes` attribute?</span>
# ### Exercise 2
# <span style="color:green; font-size:16px">What type of object is returned from the `shape` attribute?</span>
# ### Exercise 3
# <span style="color:green; font-size:16px">What type of object is returned from the `info` method?</span>
# ### Exercise 4
# <span style="color:green; font-size:16px">The memory usage from the `info` method isn't correct when you have objects in your DataFrame. Read the docstrings from it and get the true memory usage.</span>
| jupyter_notebooks/pandas/mastering_data_analysis/01. Intro to pandas/03. Data Types and Missing Values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="_jGuJWHhCJ8L" outputId="cb97ef70-8591-41f0-f4da-1136fed494b6"
import time
import os
import numpy as np
import google.colab as colab
import random
import json
# %matplotlib inline
import matplotlib.pyplot as plt
from multiprocessing import Pool
import shutil
from pprint import pprint
import pickle
from random import randint
import pandas as pd
import re
import inspect
import torch
from torch import optim
from torch.autograd import Variable
import torch.nn as nn
import nltk
nltk.download('punkt')
import warnings
warnings.filterwarnings('ignore')
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + [markdown] id="lfs2-AL8JD0i"
# # Mount Google Drive
# + id="xykwbqUrJJUg"
def mount_google_drive():
    '''
    # Functionality
    Mount google drive. Since colab does not save files, we want to make it easier to directly access files in google drive.
    # Arguments
    Nothing
    # Returns
    drive_root: the working directory mounted
    '''
    mount_directory = "/content/gdrive"
    colab.drive.mount(mount_directory, force_remount=True)
    # The drive root is the first non-hidden entry under the mount point.
    visible_entries = [entry for entry in os.listdir(mount_directory) if entry[0] != '.']
    return mount_directory + "/" + visible_entries[0]
# + id="hq7hUZxgJK7S" colab={"base_uri": "https://localhost:8080/"} outputId="622addf5-2db6-48d2-9afb-c3d24ca86947"
# Please Set up mounted directories here. Notice whether you want to balance dataset
ROOT_DIR = mount_google_drive() + "/05839-Final-Project/code/"
DATASET_PATH = ROOT_DIR + "quora.csv"
NLI_NET_DIR = ROOT_DIR + "models/NliNetUtils/"
CHECKPOINT_DIR = ROOT_DIR + "checkpoints/"
# + id="wcSJYLrLNL9F"
# Migrate utils from drive to current dir so that we don't need to upload a folder from local every time
shutil.rmtree('utils/', ignore_errors=True)
_ = shutil.copytree(ROOT_DIR +"/utils/", "utils/")
# + id="2j5FQDGmKQ54"
# Load customizable utils here
from utils.file_utils import *
from utils.image_utils import *
from utils.generator_utils import *
from utils.tqdm_utils import *
from utils.keras_utils import *
# + id="PEXNCE090hAn" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7b81ed39-a455-4bf2-8747-a534a0a91d99"
# Load infersent model related files
shutil.rmtree('models.py', ignore_errors=True)
shutil.copy(NLI_NET_DIR + "models.py", "models.py")
shutil.rmtree('data.py', ignore_errors=True)
shutil.copy(NLI_NET_DIR + "data.py", "data.py")
shutil.rmtree('mutils.py', ignore_errors=True)
shutil.copy(NLI_NET_DIR + "mutils.py", "mutils.py")
# + id="Evmmh-AigeML"
# shutil.rmtree('fastText/', ignore_errors=True)
# shutil.copytree(ROOT_DIR + "fastText/", "fastText/")
# + id="XDrpU37uJR3g"
from data import get_nli, get_batch, build_vocab
from mutils import get_optimizer
from models import NLINet
# + id="paLUu7Y5HToJ"
def get_optimizer(s):
    """
    Parse an optimizer specification string into a torch optimizer class
    and its keyword parameters.

    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    Returns:
        (optim_fn, optim_params): the ``torch.optim`` class and a dict of
        float-valued keyword arguments parsed from the string.

    Raises:
        Exception: if the method name is unknown or a parameter does not
        match the optimizer's constructor signature.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # Raw string avoids the invalid-escape-sequence DeprecationWarning
            # that a plain "\d" literal triggers on modern Python.
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    # check that we give good parameters to the optimizer
    # NOTE: inspect.getargspec was deprecated since 3.0 and removed in
    # Python 3.11; getfullargspec is the drop-in replacement here.
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn, optim_params
# + colab={"base_uri": "https://localhost:8080/"} id="ZMwTFPfVZ6G1" outputId="76d08fb8-46f9-4a52-9260-a9929aa8c4b5"
torch.cuda.is_available()
# + id="QWnHB024C6oS"
class InferSent(nn.Module):
    """Bi-directional LSTM sentence encoder (InferSent).

    Encodes sentences into fixed-size vectors by running pretrained word
    embeddings through a BiLSTM and pooling the hidden states (max or mean
    pooling, selected by ``config['pool_type']``).
    """
    def __init__(self, config):
        # config keys: bsize, word_emb_dim, enc_lstm_dim, pool_type,
        # dpout_model, and optionally version (1 or 2).
        super(InferSent, self).__init__()
        self.bsize = config['bsize']
        self.word_emb_dim = config['word_emb_dim']
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.pool_type = config['pool_type']
        self.dpout_model = config['dpout_model']
        self.version = 1 if 'version' not in config else config['version']
        self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
                                bidirectional=True, dropout=self.dpout_model)
        assert self.version in [1, 2]
        if self.version == 1:
            # v1: '<s>'/'</s>' delimiters, plain NLTK tokenization,
            # and padded zeros are kept during max-pooling.
            self.bos = '<s>'
            self.eos = '</s>'
            self.max_pad = True
            self.moses_tok = False
        elif self.version == 2:
            # v2: '<p>'/'</p>' delimiters, Moses-style tokenization,
            # and padded zeros are masked out before max-pooling.
            self.bos = '<p>'
            self.eos = '</p>'
            self.max_pad = False
            self.moses_tok = True
    def is_cuda(self):
        """Return True if the model weights currently live on the GPU."""
        # either all weights are on cpu or they are on gpu
        return self.enc_lstm.bias_hh_l0.data.is_cuda
    def forward(self, sent_tuple):
        """Encode a padded batch of sentences into pooled embeddings.

        sent_tuple is (sent, sent_len): a (seqlen x bsize x worddim) tensor
        and an array of true lengths. Sorting by length (required by
        pack_padded_sequence) and un-sorting is handled internally.
        """
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: (seqlen x bsize x worddim)
        sent, sent_len = sent_tuple
        # Sort by length (keep idx)
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, idx_sort)
        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, idx_unsort)
        # Pooling
        if self.pool_type == "mean":
            # NOTE(review): mean-pooling path calls .cuda() unconditionally,
            # so it presumably assumes a GPU is present — confirm before
            # using pool_type="mean" on CPU.
            sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            if not self.max_pad:
                # Mask padded zeros so they can never win the max.
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
                assert emb.ndimension() == 2
        return emb
    def set_w2v_path(self, w2v_path):
        """Record the path of the word-vector file used by the vocab builders."""
        self.w2v_path = w2v_path
    def get_word_dict(self, sentences, tokenize=True):
        """Return a dict whose keys are all distinct words in *sentences*."""
        # create vocab of words
        word_dict = {}
        sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
        for sent in sentences:
            for word in sent:
                if word not in word_dict:
                    word_dict[word] = ''
        # Sentence delimiters always need vectors too.
        word_dict[self.bos] = ''
        word_dict[self.eos] = ''
        return word_dict
    def get_w2v(self, word_dict):
        """Load vectors (as numpy arrays) for exactly the words in *word_dict*."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with w2v vectors
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if word in word_dict:
                    word_vec[word] = np.fromstring(vec, sep=' ')
        print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
        return word_vec
    def get_w2v_k(self, K):
        """Load vectors for the first K words of the embedding file.

        After the first K words, keeps scanning only to pick up the
        bos/eos delimiter vectors if they were not yet seen.
        """
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with k first w2v vectors
        k = 0
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if k <= K:
                    word_vec[word] = np.fromstring(vec, sep=' ')
                    k += 1
                if k > K:
                    if word in [self.bos, self.eos]:
                        word_vec[word] = np.fromstring(vec, sep=' ')
                if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
                    break
        return word_vec
    def build_vocab(self, sentences, tokenize=True):
        """Build self.word_vec from the words appearing in *sentences*."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        word_dict = self.get_word_dict(sentences, tokenize)
        self.word_vec = self.get_w2v(word_dict)
        print('Vocab size : %s' % (len(self.word_vec)))
    # build w2v vocab with k most frequent words
    def build_vocab_k_words(self, K):
        """Build self.word_vec from the first K words of the embedding file."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        self.word_vec = self.get_w2v_k(K)
        print('Vocab size : %s' % (K))
    def update_vocab(self, sentences, tokenize=True):
        """Extend an existing self.word_vec with words from new *sentences*."""
        assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
        assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
        word_dict = self.get_word_dict(sentences, tokenize)
        # keep only new words
        for word in self.word_vec:
            if word in word_dict:
                del word_dict[word]
        # udpate vocabulary
        if word_dict:
            new_word_vec = self.get_w2v(word_dict)
            self.word_vec.update(new_word_vec)
        else:
            new_word_vec = []
        print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
    def get_batch(self, batch):
        """Convert a batch of tokenized sentences into an embedding tensor.

        Returns a (max_len x bsize x word_emb_dim) FloatTensor; positions
        past a sentence's length stay zero (padding).
        """
        # sent in batch in decreasing order of lengths
        # batch: (bsize, max_len, word_dim)
        embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
        for i in range(len(batch)):
            for j in range(len(batch[i])):
                embed[j, i, :] = self.word_vec[batch[i][j]]
        return torch.FloatTensor(embed)
    def tokenize(self, s):
        """Tokenize one sentence, Moses-style for v2 models, NLTK otherwise."""
        from nltk.tokenize import word_tokenize
        if self.moses_tok:
            s = ' '.join(word_tokenize(s))
            s = s.replace(" n't ", "n 't ")  # HACK to get ~MOSES tokenization
            return s.split()
        else:
            return word_tokenize(s)
    def prepare_samples(self, sentences, bsize, tokenize, verbose):
        """Tokenize, filter OOV words, and sort sentences by decreasing length.

        Returns (sentences, lengths, idx_sort) where idx_sort lets the
        caller undo the sorting afterwards.
        """
        sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
                     [self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
        n_w = np.sum([len(x) for x in sentences])
        # filters words without w2v vectors
        for i in range(len(sentences)):
            s_f = [word for word in sentences[i] if word in self.word_vec]
            if not s_f:
                import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
                               Replacing by "</s>"..' % (sentences[i], i))
                s_f = [self.eos]
            sentences[i] = s_f
        lengths = np.array([len(s) for s in sentences])
        n_wk = np.sum(lengths)
        if verbose:
            print('Nb words kept : %s/%s (%.1f%s)' % (
                n_wk, n_w, 100.0 * n_wk / n_w, '%'))
        # sort by decreasing length
        lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences)[idx_sort]
        return sentences, lengths, idx_sort
    def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
        """Encode a list of raw sentences into a (n_sentences x dim) array."""
        tic = time.time()
        sentences, lengths, idx_sort = self.prepare_samples(
            sentences, bsize, tokenize, verbose)
        embeddings = []
        for stidx in range(0, len(sentences), bsize):
            batch = self.get_batch(sentences[stidx:stidx + bsize])
            if self.is_cuda():
                batch = batch.cuda()
            with torch.no_grad():
                batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
            embeddings.append(batch)
        embeddings = np.vstack(embeddings)
        # unsort
        idx_unsort = np.argsort(idx_sort)
        embeddings = embeddings[idx_unsort]
        if verbose:
            print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
                len(embeddings)/(time.time()-tic),
                'gpu' if self.is_cuda() else 'cpu', bsize))
        return embeddings
    def visualize(self, sent, tokenize=True):
        """Plot how often each word 'wins' the max-pooling for one sentence."""
        sent = sent.split() if not tokenize else self.tokenize(sent)
        sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
        if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
            import warnings
            warnings.warn('No words in "%s" have w2v vectors. Replacing \
                           by "%s %s"..' % (sent, self.bos, self.eos))
        batch = self.get_batch(sent)
        if self.is_cuda():
            batch = batch.cuda()
        output = self.enc_lstm(batch)[0]
        output, idxs = torch.max(output, 0)
        # output, idxs = output.squeeze(), idxs.squeeze()
        idxs = idxs.data.cpu().numpy()
        # Count, per word position, how many hidden dimensions selected it.
        argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
        # visualize model
        import matplotlib.pyplot as plt
        x = range(len(sent[0]))
        y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
        plt.xticks(x, sent[0], rotation=45)
        plt.bar(x, y)
        plt.ylabel('%')
        plt.title('Visualisation of words importance')
        plt.show()
        return output, idxs
# + colab={"base_uri": "https://localhost:8080/"} id="FUq5W5A1C8z0" outputId="a66a7523-ac94-4688-becc-f150dbc00908"
# # !mkdir fastText
# # !curl -Lo fastText/crawl-300d-2M.vec.zip https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip
# # !unzip fastText/crawl-300d-2M.vec.zip -d fastText/
# + colab={"base_uri": "https://localhost:8080/"} id="1gobG5yFNulB" outputId="592f595c-15e1-48b6-a512-78d1250a3688"
# !mkdir encoder
# !curl -Lo encoder/infersent2.pkl https://dl.fbaipublicfiles.com/infersent/infersent2.pkl
# + id="l6r_4BWjDOGU"
def build_nli_net():
    """Instantiate an InferSent v2 encoder and load its pretrained weights."""
    V = 2
    MODEL_PATH = 'encoder/infersent%s.pkl' % V
    params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                    'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
    encoder = InferSent(params_model)
    encoder.load_state_dict(torch.load(MODEL_PATH))
    return encoder
# + id="al33yHLrDR2Z"
infersent = build_nli_net()
# + id="NuvKcywGDSQi"
W2V_PATH = 'fastText/crawl-300d-2M.vec'
infersent.set_w2v_path(W2V_PATH)
# + colab={"base_uri": "https://localhost:8080/"} id="WYxkVTNSDUCj" outputId="c5642eaf-9408-43db-debd-b5ad5763a875"
infersent.build_vocab_k_words(K=500000)
# + id="nSvvyNxnDquG"
def text_prepare(text):
    """Coerce *text* to ``str``.

    The regex-based cleaning pipeline below was deliberately disabled
    during experimentation, so only the string coercion is active.
    """
    # NOTE(review): these patterns used to be compiled on every call but
    # were never applied; kept commented to document the disabled steps.
    # REPLACE_BY_SPACE_RE = re.compile(r'[/(){}\[\]\|@,;#]')
    # BAD_SYMBOLS_RE = re.compile(r'[^0-9a-z #+_]')
    text = str(text)
    # text = " ".join([word for word in text.split(" ") if re.search('[a-zA-Z]', word)])
    # text = text.lower()
    # text = re.sub(REPLACE_BY_SPACE_RE, " ", text)
    # text = re.sub(BAD_SYMBOLS_RE, "", text)
    return text
def cosine(u, v):
    """Row-wise similarity between two batches of embeddings.

    u and v are 2-D arrays of shape (n, dim); returns, per row,
    log(cosine similarity) + 1 (NaN where the cosine is negative).
    """
    dots = np.einsum('ij,ij->i', u, v)
    norms = np.linalg.norm(u, axis=1) * np.linalg.norm(v, axis=1)
    return np.log(dots / norms) + 1
# + colab={"base_uri": "https://localhost:8080/"} id="ghxPfXp5DYwa" outputId="bda1ce82-a75c-4ccc-e75d-00704ad33908"
# Demo: compare one premise tweet against two candidate hypotheses.
# tweet_1/tweet_2 are topically related (WHO COVID updates); tweet_3 is not.
tweet_1 = "Since the start of the pandemic, a total 65 WHO staff stationed in Geneva - working from home and onsite - have tested positive for #COVID19. We have not yet established whether any transmission has occurred on campus, but are looking into the matter."
tweet_2 = "WHO staff who were confirmed positive with #COVID19 in Geneva have received the necessary medical attention. WHO carried out full contact tracing and related protocols. Enhanced cleaning protocols were implemented in relevant offices."
tweet_3 = "Any tweets only my own views. More Guns,Less Crime (Univ Chicago Press, 3rd ed);10 books, 100+academic articles. PhD Econ, Advisor for Research & Science #USDOJ"
# Fixed typo in the printed labels: "hypoetheis" -> "hypothesis".
print("The similarity score between premise and hypothesis 1 is:")
print(cosine(infersent.encode([text_prepare(tweet_1)]), infersent.encode([text_prepare(tweet_2)])).tolist()[0])
print("The similarity score between premise and hypothesis 2 is:")
print(cosine(infersent.encode([text_prepare(tweet_1)]), infersent.encode([text_prepare(tweet_3)])).tolist()[0])
# + [markdown] id="UWRo1K9AXdtr"
# ## Look at twitter data
# + id="GNDrnL63XfZc"
import pandas as pd
# + id="05DvmkGUXh46"
df = pd.read_csv("tweets.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 411} id="RY6fxMQuXmHr" outputId="4bb7c18e-7241-4628-d496-4fc22f3b140c"
df.head()
# + id="sjZVe6kdXuYr"
tweets = df.text.tolist()
# + id="Uj4s_VuwSJBx"
processed_tweets = list(map(text_prepare, tweets))
# + id="KWVM8DHNTWLi"
assert len(tweets) == len(df) == len(processed_tweets)
# + colab={"base_uri": "https://localhost:8080/"} id="bRQP3p9USP-i" outputId="4f66392c-db9a-4c2f-f977-7617ea47b568"
processed_tweets[:5]
# + id="s_ESXxBcOlbE"
infersent = infersent.cuda()
# + colab={"base_uri": "https://localhost:8080/"} id="MV5dO6xIZQ7s" outputId="b981f952-c5ed-4b8b-92ff-c38dfec54d26"
infersent.is_cuda()
# + id="KOd8tZwKS8nv"
all_tweets_emb = infersent.encode(processed_tweets)
# + colab={"base_uri": "https://localhost:8080/"} id="HRxmtKIGTBRV" outputId="451743c7-099b-4a2e-f678-49ce2c210729"
all_tweets_emb.shape
# + colab={"base_uri": "https://localhost:8080/"} id="D8LC_btcSQsL" outputId="44b878d3-52c5-4a96-d37f-66201e81b6e0"
# Build the full pairwise similarity matrix between all tweets.
all_scores = np.zeros((len(tweets), len(tweets)))
for i in range(len(processed_tweets)):
    # Encode one tweet, then tile its embedding with np.repeat so cosine()
    # can compare it row-wise against every pre-computed tweet embedding.
    candidate_emb = infersent.encode([processed_tweets[i]])
    all_scores[i] = cosine(np.repeat(candidate_emb, len(processed_tweets), axis=0), all_tweets_emb)
# cosine() yields NaN where the raw cosine is negative (log of a negative
# number); replace with -inf so those pairs sort last in np.argsort below.
all_scores[np.isnan(all_scores)] = -np.inf
# + colab={"base_uri": "https://localhost:8080/"} id="Ko7_Z-P1TQ4n" outputId="55f8d4e3-1792-4e85-8d05-3100626e5e7e"
all_scores
# + id="HcRCznMoYpRH"
with open('adjacency_matrix.npy', 'wb') as f:
np.save(f, all_scores)
# + colab={"base_uri": "https://localhost:8080/"} id="7__zKF5bQS7T" outputId="2bcf65c3-5c9e-4390-a517-4e48ec644697"
all_scores
# + id="-4WFD2I9QVRo"
with open('adjacency_matrix.npy', 'rb') as f:
all_scores = np.load(f)
# + id="rPy6hFQ5QfKJ"
# Layout of all_scores (left uncommented by mistake; would be a SyntaxError):
# [tweet0: similarity score against every tweet]
# [tweet1: similarity score against every tweet]
# + colab={"base_uri": "https://localhost:8080/"} id="ycDbzx0KW52m" outputId="863c8e7b-3e7e-42f5-e31a-912daead15a4"
all_scores.shape
# + id="urBz6sHTW8k3"
sorted_row_idx = np.argsort(all_scores, axis=1)[:,all_scores.shape[1]-6::]
# + colab={"base_uri": "https://localhost:8080/"} id="7fJZrG6tjobY" outputId="99f60f79-77cb-4a82-9fde-43109527f8c1"
sorted_row_idx
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="Ypq5cvpDjs9b" outputId="71c1a534-74fd-47f9-dc03-4e7159fe4271"
tweets[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="-m5F5jIhj70P" outputId="5ba62109-7107-4786-a10c-01fcd36ff584"
tweets[298]
# + id="nzO3HUxZj9lM"
| research/NLI/pretrained_adjacency.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Appendix 0: Different input types for Hamiltonians
#
# Author: <NAME>
#
# In this Appendix it is shown how Hamiltonian can be specified using different types (numpy array, list, dictionary). We use single orbital example from *tutorial.ipynb* notebook.
# Prerequisites
from __future__ import division, print_function
import numpy as np
import qmeq
# ### Single orbital example
# Let us consider an example of a quantum dot containing one spinful orbital and on-site charging energy $U$ coupled to source ($L$) and drain ($R$) leads:
#
# $H_{\mathrm{one}}=\sum_{\substack{\ell=L,R \\ \sigma=\uparrow,\downarrow}}\varepsilon_{\ell\sigma k}^{\phantom{\dagger}}c_{\ell\sigma k}^{\dagger}c_{\ell\sigma k}^{\phantom{\dagger}}
# +\sum_{\ell\sigma k}\left(t_{\ell\sigma}d_{\sigma}^{\dagger}c_{\ell\sigma k}+\mathrm{H.c.}\right)
# +\sum_{\sigma}\varepsilon_{\sigma}d_{\sigma}^{\dagger}d_{\sigma}
# +(\Omega d_{\uparrow}^{\dagger}d_{\downarrow}+\mathrm{H.c.})
# +Ud_{\uparrow}^{\dagger}d_{\downarrow}^{\dagger}d_{\downarrow}^{\phantom{\dagger}}d_{\uparrow}^{\phantom{\dagger}}$
#
# with $\varepsilon_{\uparrow}=V_{g}+\frac{B}{2}$, $\varepsilon_{\downarrow}=V_{g}-\frac{B}{2}$, where $V_{g}$ is the gate voltage and $B$ is the magnetic field (representing anomalous Zeeman splitting of spinful orbital). We will choose such values for parameters:
# Quantum dot parameters
vgate = 1.0
bfield = 0.5
omega = 2.0
U = 20.0
# Lead parameters
vbias = 0.5
temp = 1.0
dband = 40.0
# Tunneling amplitudes
gam = 0.5
t0 = np.sqrt(gam/(2*np.pi))
# In $H_{\mathrm{one}}$ we have two single particle states $\lvert\uparrow\rangle$ and $\lvert\downarrow\rangle$. The single particle Hamiltonian $H_{\mathrm{single}}$ can be specified using numpy array, a list, or a dictionary:
# +
nsingle = 2
# numpy array up down
hsingle = np.array([[vgate+bfield/2, omega], # up
[omega.conjugate(), vgate-bfield/2]]) # down
# list, 0 is up, 1 is down
hsingle = [[0, 0, vgate+bfield/2],
[1, 1, vgate-bfield/2],
[0, 1, omega]]
# dictionary, 0 is up, 1 is down
hsingle = {(0, 0): vgate+bfield/2,
(1, 1): vgate-bfield/2,
(0, 1): omega}
# -
# **Comment 1.**
# In a dictionary it is enough to specify one element like $\Omega d_{\uparrow}^{\dagger}d_{\downarrow}^{\phantom{\dagger}}$, because the element $\Omega^{*} d_{\downarrow}^{\dagger}d_{\uparrow}^{\phantom{\dagger}}$ is determined by complex conjugation and is included automatically in order to get a Hermitian quantum dot Hamiltonian. If an element like (1,0) is given, it will be added to the Hamiltonian. So specifying {(0, 1):omega, (1,0):omega.conjugate()} will simply double count $\Omega$.
#
# **Comment 2.** QmeQ will convert any numpy array or a list to a dictionary for internal usage. Also all elements in numpy array equal to zero are not included in the Hamiltonian dictionary.
# The Coulomb interaction Hamiltonian $H_{\mathrm{Coulomb}}$ is specified using either a list or a dictionary:
coulomb = [[0,1,1,0,U]]
coulomb = {(0,1,1,0):U}
# The tunneling Hamiltonian $H_{\mathrm{tunneling}}$ can be specified as:
# +
# numpy array up down
tleads = np.array([[t0, 0], # L, up
[t0, 0], # R, up
[0, t0], # L, down
[0, t0]]) # R, down
# list # lead label, lead spin <-- level spin
tleads = [[0, 0, t0], # L, up <-- up
[1, 0, t0], # R, up <-- up
[2, 1, t0], # L, down <-- down
[3, 1, t0]] # R, down <-- down
# dictionary
tleads = {(0, 0):t0,
(1, 0):t0,
(2, 1):t0,
(3, 1):t0}
# -
# **Comment 3.** In dictionary tuple $(\alpha, i)$ the first label $\alpha$ denotes lead quantum numbers, the second label $i$ denotes levels of the quantum dot. We note that effectively we get four leads, i.e., $\alpha\in\{L\uparrow,R\uparrow,L\downarrow,R\downarrow\}$.
# ### Specify the system using numpy arrays and lists
nsingle = 2
nleads = 4
# numpy array up down
hsingle = np.array([[vgate+bfield/2, omega], # up
[omega.conjugate(), vgate-bfield/2]]) # down
# numpy array up down
tleads = np.array([[t0, 0], # L, up
[t0, 0], # R, up
[0, t0], # L, down
[0, t0]]) # R, down
coulomb = [[0,1,1,0,U]]
# L,up R,up L,down R,down
mulst = [vbias/2, -vbias/2, vbias/2, -vbias/2]
tlst = [temp, temp, temp, temp]
system = qmeq.Builder(nsingle, hsingle, coulomb,
nleads, tleads, mulst, tlst, dband,
kerntype='Pauli')
# The numpy arrays *hsingle*, *tleads* and a list *coulomb* get converted to dictionaries in **QmeQ**
print(system.qd.hsingle)
print(system.qd.coulomb)
print(system.leads.tleads)
| appendix/00_types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ocean data handling
#
# In this notebook we show how ocean data is handled.
# +
# NBVAL_IGNORE_OUTPUT
import traceback
from os.path import join
import numpy as np
import iris
import iris.quickplot as qplt
import matplotlib
import matplotlib.pyplot as plt
from openscm.scmdataframe import ScmDataFrame
from netcdf_scm.iris_cube_wrappers import CMIP6OutputCube
# -
# make all log messages appear in the notebook output
import logging
root_logger = logging.getLogger()
root_logger.addHandler(logging.StreamHandler())
plt.style.use('bmh')
# %matplotlib inline
DATA_PATH_TEST = join("..", "tests", "test-data")
DATA_PATH_TEST_CMIP6_OUTPUT_ROOT = join(DATA_PATH_TEST, "cmip6output")
# ## Test data
#
# For this notebook's test data we use CMIP6Output from NCAR's CESM2 model.
# ### 2D data
#
# Some ocean data is 2D. Here we use surface downward heat flux in sea water.
# Firstly we use data which has been regridded by the modelling group.
# Surface downward heat flux (hfds), regridded by the modelling group ("gr").
hfds_file = join(
    DATA_PATH_TEST,
    "cmip6output",
    "CMIP6",
    "CMIP",
    "NCAR",
    "CESM2",
    "historical",
    "r7i1p1f1",
    "Omon",
    "hfds",
    "gr",
    "v20190311",
    "hfds_Omon_CESM2_historical_r7i1p1f1_gr_195701-195703.nc"
)
# We also examine how iris handles data which is provided on the native model grid.
# Same variable on the native model grid: only the grid label ("gr" -> "gn")
# differs in both the directory name and the file name.
hfds_file_gn = hfds_file.replace("gr", "gn")
# ### 3D data
#
# Some ocean data is 3D. NetCDF-SCM currently supports crunching this to iris cubes but will not convert those cubes to SCM timeseries.
# 3D ocean potential temperature (thetao) on the native model grid ("gn").
thetao_file = join(
    DATA_PATH_TEST,
    "cmip6output",
    "CMIP6",
    "CMIP",
    "NCAR",
    "CESM2",
    "historical",
    "r10i1p1f1",
    "Omon",
    "thetao",
    "gn",
    "v20190313",
    "thetao_Omon_CESM2_historical_r10i1p1f1_gn_195310-195312.nc"
)
# ## 2D data handling
# Load the regridded 2D cube and inspect its metadata.
hfds_cube = CMIP6OutputCube()
hfds_cube.load_data_from_path(hfds_file)
print(hfds_cube.cube)
# NBVAL_IGNORE_OUTPUT
# Time-mean map of the regridded data.
time_mean = hfds_cube.cube.collapsed('time', iris.analysis.MEAN)
qplt.contourf(
    time_mean,
    extend='max',
)
plt.gca().coastlines();
# Iris' handling of data on the native model grid is mostly workable, but not yet perfect.
# +
hfds_cube_gn = CMIP6OutputCube()
hfds_cube_gn.load_data_from_path(hfds_file_gn)
print(hfds_cube_gn.cube)
# -
# As we can see, iris doesn't plot data on these native model grids well (unlike the regridded data which it handles without problems).
# NBVAL_IGNORE_OUTPUT
# Same time-mean plot, this time on the native grid, for comparison.
time_mean = hfds_cube_gn.cube.collapsed('time', iris.analysis.MEAN)
qplt.contourf(
    time_mean,
    extend='max',
)
plt.gca().coastlines();
# ## Getting SCM Timeseries
# We cut down to SCM timeseries in the standard way.
# +
# Regions to collapse the gridded data into (SCM region naming convention).
regions_to_get = [
    "World",
    "World|Northern Hemisphere",
    "World|Northern Hemisphere|Ocean",
    "World|Ocean",
    "World|Southern Hemisphere",
    "World|Southern Hemisphere|Ocean",
    "World|North Atlantic Ocean",
    "World|El Nino N3.4",
]
hfds_ts = hfds_cube.get_scm_timeseries(regions=regions_to_get)
hfds_gn_ts = hfds_cube_gn.get_scm_timeseries(regions=regions_to_get)
# Overlay regridded (dotted) and native-grid (dashed) timeseries per region.
ax = hfds_ts.line_plot(color="region", linestyle=":", figsize=(16, 9))
hfds_gn_ts.line_plot(color="region", linestyle="--", dashes=(10, 30), ax=ax);
# -
# Comparing the results of collapsing the native grid and the regridded data reveals a small difference (approx 1%), in particular in the small El Nino N3.4 region.
# +
# Side-by-side: absolute and percentage differences between the two grids.
ax1, ax2 = plt.figure(figsize=(16, 9)).subplots(nrows=1, ncols=2)
ScmDataFrame(
    hfds_ts.timeseries()
    - hfds_gn_ts.timeseries()
).line_plot(color="region", ax=ax1, legend=False)
ax1.set_title("Absolute difference")
ScmDataFrame(
    (
        (hfds_ts.timeseries() - hfds_gn_ts.timeseries())
        / hfds_ts.timeseries()
    ).abs() * 100
).line_plot(color="region", ax=ax2)
ax2.set_title("Percentage difference");
# -
# ## 3D Data Handling
# Load the 3D (depth-resolved) cube.
thetao_cube = CMIP6OutputCube()
thetao_cube.load_data_from_path(thetao_file)
print(thetao_cube.cube)
# If we take a time mean of a cube with 3D spatial data, we end up with a 3D cube, which cannot be plotted on a contour map.
# NBVAL_IGNORE_OUTPUT
time_mean = thetao_cube.cube.collapsed('time', iris.analysis.MEAN)
try:
    qplt.contourf(
        time_mean,
        extend='max',
    )
except ValueError as e:
    # contourf needs 2D data; show a short traceback instead of failing the notebook
    traceback.print_exc(limit=0, chain=False)
# If we take e.g. a depth mean too, then we can plot (although as this data is on the model's native grid iris doesn't do a great job of plotting it).
# NBVAL_IGNORE_OUTPUT
# the depth co-ordinate is labelled as 'generic' for some reason
time_depth_mean = time_mean.collapsed('generic', iris.analysis.MEAN)
qplt.contourf(
    time_depth_mean,
    extend='max',
);
# We can crunch into SCM timeseries cubes.
thetao_ts_cubes = thetao_cube.get_scm_timeseries_cubes(regions=regions_to_get)
# These cubes now have dimensions of time and depth (labelled as 'generic' here). Hence we can plot them.
# +
plt.figure(figsize=(12, 15))
# "World" spans the full top row (3x1 slot 311); the four sub-regions share
# the lower rows of a 3x2 grid. A single loop replaces the original five
# copy-pasted contourf stanzas (same subplots, same order, same styling).
subplot_regions = [
    (311, "World"),
    (323, "World|Northern Hemisphere|Ocean"),
    (324, "World|Southern Hemisphere|Ocean"),
    (325, "World|El Nino N3.4"),
    (326, "World|North Atlantic Ocean"),
]
for subplot_pos, region in subplot_regions:
    plt.subplot(subplot_pos)
    qplt.contourf(
        thetao_ts_cubes[region].cube,
        extend='max',
        levels=range(0, 27, 3)
    )
    plt.title(region)
plt.tight_layout()
# -
# We have also not yet decided on our convention for handling the depth information in `SCMDataFrame`'s, hence attempting to retrieve SCM timeseries will result in an error.
# NBVAL_IGNORE_OUTPUT
# Converting 3D (depth-resolved) cubes to SCM timeseries is not implemented
# yet, so this deliberately demonstrates the NotImplementedError.
try:
    thetao_cube.get_scm_timeseries(regions=regions_to_get)
except NotImplementedError as e:
    traceback.print_exc(limit=0, chain=False)
| notebooks/ocean-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="OADFwxpspAKW"
import pandas as pd
# Instagram account dataset: 14 feature columns followed by 1 label column.
df = pd.read_csv("NewDataset.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 622} colab_type="code" id="FxN7KH9dpGiN" outputId="09968074-58bd-4a62-bcd6-32247f517a9b"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="LMxK66jCqRA2" outputId="80ff9651-eb5a-4d27-a9f7-714790cdc30d"
X = df.iloc[:, 0:14].values  # feature matrix (columns 0..13)
X[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 835} colab_type="code" id="2Zwi4WkEqbX_" outputId="bb85d783-3a8f-46b5-f06b-cf67bf2504a5"
y = df.iloc[:,14].values  # labels (last column)
y
# + colab={} colab_type="code" id="jySq_axeqsuH"
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
# Std of the per-tree importances provides error bars for the plot below.
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]  # column indices, most important first
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" id="pkbxsQWVq3gs" outputId="3f837ccc-89a2-4949-c3fe-9640d08356f6"
def printColumn(number):
    """Return the human-readable name of feature column `number` (0-13).

    Any index outside the known columns maps to "Invalid Column".
    """
    column_names = dict(enumerate((
        "Profile Pic",
        "Nums/length Username",
        "Full Name Words",
        "Bio Length",
        "External URL",
        "Verified",
        "Business",
        "#Post",
        "#Followers",
        "#Following",
        "Last Post Recent",
        "%Post Single Day",
        "Index of Activity",
        "Average of Likes",
    )))
    return column_names.get(number, "Invalid Column")
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
    # rank, column index, importance score, human-readable column name
    print("%d. feature %d (%f) %s" % (f + 1, indices[f], importances[indices[f]], printColumn(indices[f])))
# + colab={"base_uri": "https://localhost:8080/", "height": 281} colab_type="code" id="2mcP7TWdrCRB" outputId="2e803170-77be-454a-86cd-8e0823d7c66c"
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
# Bars sorted by importance; error bars are the per-tree standard deviation.
plt.bar(range(X.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
# + colab={} colab_type="code" id="K7O6Lp1PdUsm"
| Preprocessing/Feature Importance/Features_Importance_Forest_of_Trees_NewDataSet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Sk_8tXrZj1Qy"
# ## Analysis of historical data
# + [markdown] id="L4TBtNmVmC_8"
# ### Data
# Source: EM-DAT, CRED / UCLouvain, Brussels, Belgium ; www.emdat.be (D. Guha-Sapir)
#
# Version: 020-09-01
#
# File creation: Sat, 12 Sep 2020 22:21:30 CEST
#
# Table type: Custom request
#
# Number of records: 24520
# + [markdown] id="crUzSJsJpd1a"
# ### Important Notes
#
# For a disaster to be entered into the EM-DAT database at least one of the following criteria must be fulfilled:
#
# * Ten (10) or more people reported killed
# * Hundred (100) or more people reported affected
# * Declaration of a state of emergency
# * Call for international assistance
#
# source: https://www.emdat.be/explanatory-notes
#
# ⚠️ if we will use different databases, we must have common criteria
#
# + [markdown] id="2gkPSy8Km1ne"
# ### Packages:
# + id="kZzguimuhdUn"
import pandas as pd
import copy as copy
# Plot
import seaborn as sns
import matplotlib.pyplot as plt
import utilities
# + [markdown] id="P6wJ_LjwnEek"
# ### Loading em-dat data
# + colab={"base_uri": "https://localhost:8080/", "height": 909} id="j3LCwltuhyh-" outputId="a771f037-8b82-4857-8cac-f7e33fd7a5e2"
# Load the EM-DAT flood records via the project helper.
flood_table = utilities.dataframe_flood()
flood_table
# + [markdown] id="vcxJLdG-gxtv"
# ### Period
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="V1u4ikHJg0bK" outputId="b900b0fc-407e-4994-f79b-1c5159d42f6b"
# Year range covered by the records.
print('from',flood_table['Year'].min(),'to',flood_table['Year'].max())
# + [markdown] id="gPrIO7YW8mjw"
# ### Types of floods
#
# The types of floods that are considered by EM-DAT are:
# * Coastal flood
# * Riverine flood
# * Flash flood
# * Ice jam flood
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="OHCpkWLM8ltc" outputId="93a9ef13-96fe-4fe9-bc21-0f366bcf4159"
# the types of floods that have occurred
plt.figure(figsize=(5,5))
sns.countplot(x="Disaster Subtype", data=flood_table,palette='RdPu')
plt.show()
flood_table[['Disaster Subtype']].groupby(by=['Disaster Subtype']).count()
# + colab={"base_uri": "https://localhost:8080/", "height": 785} id="lTCzw2sMoUHb" outputId="cf968316-4355-4d49-e284-becafa60b040"
# Regional distribution of events, all flood types combined.
flood_table['Region'].value_counts().plot(kind='barh', figsize=(10,5),cmap='Spectral',title='all types')
# -
# Regional distribution per flood subtype.
flood_table.loc[flood_table['Disaster Subtype']=='Riverine flood','Region'].value_counts().plot(kind='barh', figsize=(10,5),cmap='Spectral',title='Riverine floods')
flood_table.loc[flood_table['Disaster Subtype']=='Flash flood','Region'].value_counts().plot(kind='barh', figsize=(10,5),cmap='Spectral',title='Flash floods')
flood_table.loc[flood_table['Disaster Subtype']=='Coastal flood','Region'].value_counts().plot(kind='barh', figsize=(10,5),cmap='Spectral',title='Coastal floods')
# Yearly event counts for the Asian regions (counting the 'Seq' column).
southernAsiaplt = flood_table.loc[flood_table['Region']=='Southern Asia',['Year','Seq']].groupby(by=['Year']).count()
southernAsiaplt.plot(figsize=(10,5),cmap='Spectral',title='Southern Asia')
flood_table.loc[flood_table['Region']=='South-Eastern Asia',['Year','Seq']].groupby(by=['Year']).count().plot(figsize=(10,5),cmap='Spectral',title='South-Eastern Asia')
flood_table.loc[flood_table['Region']=='Eastern Asia',['Year','Seq']].groupby(by=['Year']).count().plot(figsize=(10,5),cmap='Spectral',title='Eastern Asia')
flood_table.loc[flood_table['Region']=='Western Asia',['Year','Seq']].groupby(by=['Year']).count().plot(figsize=(10,5),cmap='Spectral',title='Western Asia')
# Same series smoothed with a 20-year rolling mean.
flood_table.loc[flood_table['Region']=='Southern Asia',['Year','Seq']].groupby(by=['Year']).count().rolling(20).mean().plot(figsize=(10,5),cmap='Spectral',title='Southern Asia+ rolling mean')
flood_table.loc[flood_table['Region']=='South-Eastern Asia',['Year','Seq']].groupby(by=['Year']).count().rolling(20).mean().plot(figsize=(10,5),cmap='Spectral',title='South-Eastern Asia+ rolling mean')
flood_table.loc[flood_table['Region']=='Eastern Asia',['Year','Seq']].groupby(by=['Year']).count().rolling(20).mean().plot(figsize=(10,5),cmap='Spectral',title='Eastern Asia+ rolling mean')
flood_table.loc[flood_table['Region']=='Western Asia',['Year','Seq']].groupby(by=['Year']).count().rolling(20).mean().plot(figsize=(10,5),cmap='Spectral',title='Western Asia+ rolling mean')
| model_innondations/notebook/emdat_historical_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import os, sys
from os.path import join as pjoin
import json
print(os.getcwd())
# Make the project packages (webapi, mcdose) importable from this notebook.
sys.path.insert(0, pjoin(os.path.pardir))
sys.path.insert(0, pjoin(os.path.pardir, 'webapi'))
sys.path.insert(0, pjoin(os.path.pardir, os.path.pardir, 'mcdose'))
import numpy as np
import h5py
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
from rttypes.volume import Volume
import dicomutils as du
import sparse
from planning.maskio import Mask
from skimage import measure
# +
# Patient test case: CT volume, crop box and several sparse dose matrices.
pdir = '/home/ryan/projects/MCDose-electron/patient_input_test2/'
files = du.find_dicom_files(pjoin(pdir, 'ctdata'))
arr, vsize = du.extract_voxel_data(files['CT'])
vol = Volume.fromDir(pjoin(pdir, 'ctdata'))
with open(pjoin(pdir, 'cropbox.json'), 'r') as fd:
    bbox = json.load(fd)
print(bbox)
ctbox = {"start": vol.frame.start, "size": vol.frame.size, "spacing": vol.frame.spacing}
# Voxel offset of the crop box inside the full CT grid.
offset = np.ceil(np.subtract(bbox['start'], ctbox['start']) / np.array(ctbox['spacing'])).astype(int)
print(offset)
doses = {}
# Load each dose calculation (different beamlet size/spacing) as a dense array.
for fname in ['dose_new_size2.5_space2.5', 'dose_new_size1.0_space2.5', 'dose_new_size5.0_space2.5', 'dose_new_size2.5_space5.0']:
    dose = sparse.SparseMatrixCOO.fromFile(pjoin(pdir, fname+'.h5'))
    doses[fname] = np.array(dose.tocoo().todense())
# -
masks = Mask.load_masks_from_file(pjoin(pdir, 'masks.h5'))
# One 2x2 subplot per dose calculation: CT slice, dose overlay, and the
# contour of the selected structure.
fig = plt.figure(figsize=(8,6), dpi=150)
for ii, (dosename, dose) in enumerate(doses.items()):
    # reshape the flattened dose back to the crop-box volume (z, y, x order)
    densedose = dose.reshape((*bbox['size'][::-1], -1))
    print(densedose.shape)
    sumdose = np.sum(densedose, axis=3)
    idx = sumdose.shape[0]//2-5  # slice a little above the volume centre
    #ptv_name = "PTV_NNreview"
    ptv_name = 'O_parotid_R'
    contours = measure.find_contours(masks[ptv_name].arr[idx], 0.9)
    # crop the CT array to the dose bounding box
    croparr = arr[offset[2]:offset[2]+bbox['size'][2], offset[1]:offset[1]+bbox['size'][1], offset[0]:offset[0]+bbox['size'][0]]
    ax = plt.subplot(2,2,ii+1)
    ax.imshow(croparr[idx], origin='upper', cmap='gray')
    ax.imshow(sumdose[idx], origin='upper', alpha=0.5)
    #plt.imshow(masks[ptv_name].arr[idx], alpha=0.3)
    for contour in contours:
        ax.plot(contour[:,1], contour[:,0], color='red', linewidth=1)
    ax.set_title(dosename)
plt.show()
olddoses = doses
| MC simulation/dosecalc/jupyter_notebooks/Check_electron_dose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Think Bayes
#
# This notebook presents code and exercises from Think Bayes, second edition.
#
# Copyright 2018 <NAME>
#
# MIT License: https://opensource.org/licenses/MIT
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
import math
import numpy as np
from thinkbayes2 import Pmf, Suite, Joint
import thinkbayes2
import thinkplot
# -
class Battleship(Suite, Joint):
    """Joint suite over the hidden ship location (x_actual, y_actual)."""

    # decay rate of the hit probability with distance from the ship
    lam = 1

    def Likelihood(self, hypo, data):
        """Return P(data | hypo).

        hypo: (x_actual, y_actual) hypothetical ship location
        data: (x_guess, y_guess, result) where result is 'hit' or anything else
        """
        x_actual, y_actual = hypo
        x_guess, y_guess, result = data
        d = np.hypot(x_guess-x_actual, y_guess-y_actual)
        # hit probability decays exponentially with distance
        p_hit = np.exp(-self.lam * d)
        # BUG FIX: the original returned the undefined name `p` (NameError);
        # the computed probability is `p_hit`.
        return p_hit if result == 'hit' else 1-p_hit
# NOTE(review): the original ran the metapmf-building loop before `gap` and
# `metapmf` were defined (cells pasted out of order), raising NameError.
# Reordered so the definitions come first; the loop now appears only once.
gap = thinkbayes2.MakeNormalPmf(7, 1, 3)
thinkplot.plot(gap)
# Mixture over gap times: for each possible gap t, a Poisson number of
# arrivals with rate 1.3 passengers/unit time.
metapmf = thinkbayes2.Pmf()
for t, p in gap.Items():
    arrivals = thinkbayes2.MakePoissonPmf(1.3 * t, 25)
    thinkplot.plot(arrivals, color='C0', linewidth=0.1)
    metapmf[arrivals] = p
mix = thinkbayes2.MakeMixture(metapmf)
mix.Mean()
thinkplot.Hist(mix)
thinkplot.decorate(xlabel='Number of passengers',
                   ylabel='PMF')
mix[10]
| solutions/quiz04_soln.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import csv
import cv2
lines = []
# Read every data row of the driving log, skipping the CSV header row.
# (The original used an `i` flag that was 0 only on the first iteration;
# `next(reader)` is the idiomatic equivalent and keeps the same rows.)
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # header row
    lines.extend(reader)
print(lines[0])
print(lines[1])
print(len(lines))
images = []
measurements = []
# Load the centre-camera image and steering angle for each logged sample.
for line in lines:
    source_path = line[0]
    # NOTE(review): assumes no separator is needed between './data' and
    # source_path — confirm against the actual paths stored in the CSV.
    local_path = './data' + source_path
    img = cv2.imread(local_path)
    print(local_path)
    print(img.shape)
    images.append(img)
    # BUG FIX: the steering angle was appended as the raw CSV string;
    # convert to float so the labels are numeric for training.
    measurements.append(float(line[3]))
| .ipynb_checkpoints/model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="SymssfV7IcQ7"
# 
# + [markdown] id="MCjxEZg_IfJo"
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/11.Pretrained_Clinical_Pipelines.ipynb)
# + [markdown] id="v4uN8ZP_Itdo"
# # 11. Pretrained_Clinical_Pipelines
# + [markdown] id="YcrO6BlfIwEV"
# ## Colab Setup
# + id="sDxBKvHjIc5B"
import json
import os
from google.colab import files
# Upload the JSON license file and load its key/value pairs.
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)
# Defining license key-value pairs as local variables
# (locals().update only reliably injects names at notebook/module top level)
locals().update(license_keys)
# Adding license key-value pairs to environment variables
os.environ.update(license_keys)
# + id="R6iawCF1hDtv"
# Installing pyspark and spark-nlp
# ! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
# ! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
# + colab={"base_uri": "https://localhost:8080/", "height": 254} id="xzNgN33jNHTe" outputId="3dc8a533-beb1-4283-c2d6-573296850638"
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
import sparknlp
# Spark session tuning for the licensed Spark NLP for Healthcare runtime.
params = {"spark.driver.memory":"16G",
          "spark.kryoserializer.buffer.max":"2000M",
          "spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
spark
# + [markdown] id="9CzSCbXSKLSC"
#
# <b> if you want to work with Spark 2.3 </b>
# ```
# import os
#
# # Install java
# # # ! apt-get update -qq
# # # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
#
# # # !wget -q https://archive.apache.org/dist/spark/spark-2.3.0/spark-2.3.0-bin-hadoop2.7.tgz
#
# # # !tar xf spark-2.3.0-bin-hadoop2.7.tgz
# # # !pip install -q findspark
#
# os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
# os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# os.environ["SPARK_HOME"] = "/content/spark-2.3.0-bin-hadoop2.7"
# # # ! java -version
#
# import findspark
# findspark.init()
# from pyspark.sql import SparkSession
#
# # # ! pip install --ignore-installed -q spark-nlp==2.7.5
# import sparknlp
#
# spark = sparknlp.start(spark23=True)
# ```
# + [markdown] id="hmhC6kMHKYyh"
# ## Pretrained Pipelines
# + [markdown] id="dLFDvuupKeGH"
# In order to save you from creating a pipeline from scratch, Spark NLP also has a pre-trained pipelines that are already fitted using certain annotators and transformers according to various use cases.
#
# Here is the list of clinical pre-trained pipelines:
#
# > These clinical pipelines are trained with `embeddings_healthcare_100d` and accuracies might be 1-2% lower than `embeddings_clinical` which is 200d.
#
# **1. explain_clinical_doc_carp** :
#
# > a pipeline with `ner_clinical`, `assertion_dl`, `re_clinical` and `ner_posology`. It will extract clinical and medication entities, assign assertion status and find relationships between clinical entities.
#
# **2. explain_clinical_doc_era** :
#
# > a pipeline with `ner_clinical_events`, `assertion_dl` and `re_temporal_events_clinical`. It will extract clinical entities, assign assertion status and find temporal relationships between clinical entities.
#
# **3. recognize_entities_posology** :
#
# > a pipeline with `ner_posology`. It will only extract medication entities.
#
#
# *Since 3rd pipeline is already a subset of 1st and 2nd pipeline, we will only cover the first two pipelines in this notebook.*
#
# **4. explain_clinical_doc_ade** :
#
# > a pipeline for `Adverse Drug Events (ADE)` with `ner_ade_biobert`, `assertiondl_biobert`, `classifierdl_ade_conversational_biobert` and `re_ade_biobert`. It will classify the document, extract `ADE` and `DRUG` entities, assign assertion status to `ADE` entities, and relate them with `DRUG` entities, then assign ADE status to a text (`True` means ADE, `False` means not related to ADE).
#
# **letter codes in the naming conventions:**
#
# > c : ner_clinical
#
# > e : ner_clinical_events
#
# > r : relation extraction
#
# > p : ner_posology
#
# > a : assertion
#
# > ade : adverse drug events
#
# **Relation Extraction types:**
#
# `re_clinical` >> TrIP (improved), TrWP (worsened), TrCP (caused problem), TrAP (administered), TrNAP (avoided), TeRP (revealed problem), TeCP (investigate problem), PIP (problems related)
#
# `re_temporal_events_clinical` >> `AFTER`, `BEFORE`, `OVERLAP`
#
# **5. ner_profiling_clinical and ner_profiling_biobert:**
#
# > pipelines for exploring all the available pretrained NER models at once.
#
# **6. ner_model_finder**
#
# > a pipeline trained with bert embeddings that can be used to find the most appropriate NER model given the entity name.
#
# **7. icd10cm_snomed_mapping**
#
# > a pipeline maps ICD10CM codes to SNOMED codes without using any text data. You’ll just feed a comma or white space delimited ICD10CM codes and it will return the corresponding SNOMED codes as a list.
#
# **8. snomed_icd10cm_mapping:**
#
# > a pipeline converts Snomed codes to ICD10CM codes. Just feed a comma or white space delimited SNOMED codes and it will return the corresponding ICD10CM codes as a list.
#
# + [markdown] id="pK2Tt0ZuRy2B"
# ## 1.explain_clinical_doc_carp
#
# a pipeline with ner_clinical, assertion_dl, re_clinical and ner_posology. It will extract clinical and medication entities, assign assertion status and find relationships between clinical entities.
# + id="Y1di9iuJMiXl"
from sparknlp.pretrained import PretrainedPipeline
# + colab={"base_uri": "https://localhost:8080/"} id="4rg3oe37R1wX" outputId="0ac3ecad-36de-47d1-aee3-147ff235dffa"
# Download the clinical+posology NER / assertion / relation pipeline.
pipeline = PretrainedPipeline('explain_clinical_doc_carp', 'en', 'clinical/models')
# + colab={"base_uri": "https://localhost:8080/"} id="V7OaKtxjvA7f" outputId="f3904a00-0b7d-49fc-c43a-f382cb4efcf4"
pipeline.model.stages
# + id="d9eRK3IKUurq"
# Load pretrained pipeline from local disk:
# >> pipeline_local = PretrainedPipeline.from_disk('/root/cache_pretrained/explain_clinical_doc_carp_en_2.5.5_2.4_1597841630062')
# + colab={"base_uri": "https://localhost:8080/"} id="3xmhwW2MVKYa" outputId="b7c705b3-af10-4c77-a2aa-c3af57fcac7f"
text ="""A 28-year-old female with a history of gestational diabetes mellitus, used to take metformin 1000 mg two times a day, presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting .
She was seen by the endocrinology service and discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals.
"""
annotations = pipeline.annotate(text)
annotations.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 676} id="X4z-RbyZVvgR" outputId="8d466f2e-d396-4fce-8a56-ba878d0390c7"
import pandas as pd
# Token-level view: each token with its clinical/posology NER tags, POS tag
# and dependency label.
rows = list(zip(annotations['tokens'], annotations['clinical_ner_tags'], annotations['posology_ner_tags'], annotations['pos_tags'], annotations['dependencies']))
df = pd.DataFrame(rows, columns = ['tokens','clinical_ner_tags','posology_ner_tags','POS_tags','dependencies'])
df.head(20)
# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="5Hlwc2M2Xl_y" outputId="15452182-08c2-45e3-8f10-51900db54621"
text = 'Patient has a headache for the last 2 weeks and appears anxious when she walks fast. No alopecia noted. She denies pain'
result = pipeline.fullAnnotate(text)[0]
chunks=[]
entities=[]
status=[]
# Pair each clinical NER chunk with its assertion-status prediction.
for n,m in zip(result['clinical_ner_chunks'],result['assertion']):
    chunks.append(n.result)
    entities.append(n.metadata['entity'])
    status.append(m.result)
df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status})
df
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="wF9PCC18Y7eE" outputId="f48d8a28-5564-405f-ef62-09658dfa27d7"
text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also
given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night ,
12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
"""
result = pipeline.fullAnnotate(text)[0]
chunks=[]
entities=[]
begins=[]
ends=[]
# Collect the posology (drug/dosage/frequency) chunks with character offsets.
for n in result['posology_ner_chunks']:
    chunks.append(n.result)
    begins.append(n.begin)
    ends.append(n.end)
    entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'begin':begins, 'end':ends, 'entities':entities})
df
# + [markdown] id="ljp4EmCxaDNd"
# ## **2. explain_clinical_doc_era**
#
# > a pipeline with `ner_clinical_events`, `assertion_dl` and `re_temporal_events_clinical`. It will extract clinical entities, assign assertion status and find temporal relationships between clinical entities.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="BMWEfu1QuXY5" outputId="2a7ea5b6-684a-4908-a285-b319b87d4e08"
# Clinical events + assertion + temporal relation pipeline.
era_pipeline = PretrainedPipeline('explain_clinical_doc_era', 'en', 'clinical/models')
# + colab={"base_uri": "https://localhost:8080/"} id="xWhFRqvpvIOl" outputId="49296920-5b14-47cc-f0e7-00bc74ff066b"
era_pipeline.model.stages
# + id="O9cTLFNUaQNU"
text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She denied pain and any headache.
She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine,
12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge.
"""
result = era_pipeline.fullAnnotate(text)[0]
# + colab={"base_uri": "https://localhost:8080/"} id="1igwWvsgu7z6" outputId="c21f8723-e046-439e-8fee-1fd7038a113d"
result.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="6rejwSLjyW8S" outputId="c5c37458-5a87-4113-f4f5-1ac705ac00b0"
import pandas as pd
chunks=[]
entities=[]
begins=[]
ends=[]
# Clinical-event chunks with their character offsets.
for n in result['clinical_ner_chunks']:
    chunks.append(n.result)
    begins.append(n.begin)
    ends.append(n.end)
    entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'begin':begins, 'end':ends, 'entities':entities})
df
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="2pKbjUxEv9z3" outputId="a532e27f-81b5-49a8-9f67-2f8c0c70cda8"
chunks=[]
entities=[]
status=[]
# Same chunks, paired with their assertion status.
for n,m in zip(result['clinical_ner_chunks'],result['assertion']):
    chunks.append(n.result)
    entities.append(n.metadata['entity'])
    status.append(m.result)
df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status})
df
# + id="2OUKeMndIoK3"
import pandas as pd
def get_relations_df(results, col='relations'):
    """Collect relation-extraction annotations into a tidy DataFrame.

    results: fullAnnotate output (list of dicts of annotation lists)
    col:     key under which the relation annotations are stored
    """
    meta_keys = ('entity1', 'entity1_begin', 'entity1_end', 'chunk1',
                 'entity2', 'entity2_begin', 'entity2_end', 'chunk2',
                 'confidence')
    rel_pairs = [
        (rel.result,) + tuple(rel.metadata[key] for key in meta_keys)
        for rel in results[0][col]
    ]
    rel_df = pd.DataFrame(rel_pairs, columns=['relation', *meta_keys])
    # confidence arrives as a string; make it numeric for sorting/filtering
    rel_df.confidence = rel_df.confidence.astype(float)
    return rel_df
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="VuNbl1nKIpao" outputId="7732f3e6-4474-44df-ab7f-c3e4f01a080e"
annotations = era_pipeline.fullAnnotate(text)
# Temporal relations (AFTER / BEFORE / OVERLAP) between clinical events.
rel_df = get_relations_df (annotations, 'clinical_relations')
rel_df
# + colab={"base_uri": "https://localhost:8080/"} id="BloDyf6ZIxY0" outputId="8e8089de-aa9f-48fa-e52c-de30ce177e04"
annotations[0]['clinical_relations']
# + [markdown] id="TFBWJmXlMue8"
# ## 3.explain_clinical_doc_ade
#
# A pipeline for `Adverse Drug Events (ADE)` with `ner_ade_healthcare`, and `classifierdl_ade_biobert`. It will extract `ADE` and `DRUG` clinical entities, and then assign ADE status to a text(`True` means ADE, `False` means not related to ADE). Also extracts relations between `DRUG` and `ADE` entities (`1` means the adverse event and drug entities are related, `0` is not related).
# + colab={"base_uri": "https://localhost:8080/"} id="oYaFInzXM0Gw" outputId="a2d12a69-1e82-4738-f324-8005ed892e6d"
# Adverse Drug Event pipeline: document classification + ADE/DRUG NER.
ade_pipeline = PretrainedPipeline('explain_clinical_doc_ade', 'en', 'clinical/models')
# + colab={"base_uri": "https://localhost:8080/"} id="D4MaO6YIsbS5" outputId="a1c252f6-0fc9-4b6e-94c4-8495624c046a"
result = ade_pipeline.fullAnnotate("The main adverse effects of Leflunomide consist of diarrhea, nausea, liver enzyme elevation, hypertension, alopecia, and allergic skin reactions.")
result[0].keys()
# + colab={"base_uri": "https://localhost:8080/"} id="ACvoR5uZfV1x" outputId="ac4a6b4d-c1a2-477f-b6a6-1bbda9ce1685"
result[0]['class'][0].metadata
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="uiZWyLFuum9g" outputId="36030cf5-f4ce-42ea-c22e-2c7be1a216ca"
text = "Jaw,neck, low back and hip pains. Numbness in legs and arms. Took about a month for the same symptoms to begin with Vytorin. The pravachol started the pains again in about 3 months. I stopped taking all statin drungs. Still hurting after 2 months of stopping. Be careful taking this drug."
import pandas as pd
chunks = []
entities = []
begin =[]
end = []
print ('sentence:', text)
print()
result = ade_pipeline.fullAnnotate(text)
# Document-level ADE classification and its class probabilities.
print ('ADE status:', result[0]['class'][0].result)
print ('prediction probability>> True : ', result[0]['class'][0].metadata['True'], \
       'False: ', result[0]['class'][0].metadata['False'])
# Collect the ADE/DRUG entity chunks with their character offsets.
for n in result[0]['ner_chunks_ade']:
    begin.append(n.begin)
    end.append(n.end)
    chunks.append(n.result)
    entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'entities':entities,
                   'begin': begin, 'end': end})
df
# + [markdown] id="u3ZaUbUkiF77"
# #### with AssertionDL
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="P9UcxSIBg9C-" outputId="eb2c296f-153c-4d44-bcf4-3ef55567097e"
import pandas as pd
text = """I have an allergic reaction to vancomycin.
My skin has be itchy, sore throat/burning/itchy, and numbness in tongue and gums.
I would not recommend this drug to anyone, especially since I have never had such an adverse reaction to any other medication."""
print (text)
light_result = ade_pipeline.fullAnnotate(text)[0]
chunks=[]
entities=[]
status=[]
# Pair ADE entity chunks with their assertion-status predictions.
for n,m in zip(light_result['ner_chunks_ade_assertion'],light_result['assertion_ade']):
    chunks.append(n.result)
    entities.append(n.metadata['entity'])
    status.append(m.result)
df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status})
df
# + [markdown] id="GhzptcbPvtbw"
# #### with Relation Extraction
# + colab={"base_uri": "https://localhost:8080/", "height": 534} id="FFb1mhTmwEJB" outputId="4ec97f40-207e-49d3-bc4f-15b954f58bec"
import pandas as pd
text = """I have Rhuematoid Arthritis for 35 yrs and have been on many arthritis meds.
I currently am on Relefen for inflamation, Prednisone 5mg, every other day and Enbrel injections once a week.
I have no problems from these drugs. Eight months ago, another doctor put me on Lipitor 10mg daily because my chol was 240.
Over a period of 6 months, it went down to 159, which was great, BUT I started having terrible aching pain in my arms about that time which was radiating down my arms from my shoulder to my hands.
"""
print(text)
results = ade_pipeline.fullAnnotate(text)

# Flatten each ADE<->drug relation annotation into a tuple:
# relation label followed by the metadata fields for both entities.
meta_keys = ['entity1', 'entity1_begin', 'entity1_end', 'chunk1',
             'entity2', 'entity2_begin', 'entity2_end', 'chunk2',
             'confidence']
rel_pairs = [tuple([rel.result] + [rel.metadata[key] for key in meta_keys])
             for rel in results[0]["relations_ade_drug"]]
rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence'])
rel_df
# + [markdown] id="dbws3XaBhGnJ"
# ## 4.Clinical Deidentification
#
# This pipeline can be used to deidentify PHI information from medical texts. The PHI information will be masked and obfuscated in the resulting text. The pipeline can mask and obfuscate `AGE`, `CONTACT`, `DATE`, `ID`, `LOCATION`, `NAME`, `PROFESSION`, `CITY`, `COUNTRY`, `DOCTOR`, `HOSPITAL`, `IDNUM`, `MEDICALRECORD`, `ORGANIZATION`, `PATIENT`, `PHONE`, `PROFESSION`, `STREET`, `USERNAME`, `ZIP`, `ACCOUNT`, `LICENSE`, `VIN`, `SSN`, `DLN`, `PLATE`, `IPADDR` entities.
# + colab={"base_uri": "https://localhost:8080/"} id="8TS6khzVh3RK" outputId="c8071570-802d-4c31-862d-edcff48d4f34"
# Load the pretrained clinical de-identification pipeline (masks/obfuscates
# PHI such as names, dates, IDs, locations, phone numbers).
deid_pipeline = PretrainedPipeline("clinical_deidentification", "en", "clinical/models")
# + id="YRH9Jz-nh6X1"
# Annotate a synthetic clinical note containing several kinds of PHI.
deid_res = deid_pipeline.annotate("Record date : 2093-01-13 , <NAME> , M.D . Name : Hendrickson , Ora MR 25 years-old . # 719435 Date : 01/13/93 . Signed by <NAME> . Record date : 2079-11-09 . Cocke County Baptist Hospital . 0295 Keats Street. Phone 302-786-5227.")
# + colab={"base_uri": "https://localhost:8080/"} id="UbKdDN71xUfm" outputId="539c2faa-f0b9-42c3-d7c4-51247200cdb7"
# Inspect which output columns the pipeline produced.
deid_res.keys()
# + id="FKDyFJvT8BAW" outputId="48dbb8d3-8f37-4219-a599-e106d15664b5" colab={"base_uri": "https://localhost:8080/", "height": 396}
# Show original sentence next to its masked and obfuscated versions.
pd.set_option("display.max_colwidth", 100)
df = pd.DataFrame(list(zip(deid_res['sentence'], deid_res['masked'], deid_res['obfuscated'])),
                  columns = ['Sentence','Masked', 'Obfuscated'])
df
# + [markdown] id="QBDG2m3z_A0M"
# ## 5.NER Profiling Pipelines
#
# We can use pretrained NER profiling pipelines for exploring all the available pretrained NER models at once. In Spark NLP we have two different NER profiling pipelines;
#
# - `ner_profiling_clinical` : Returns results for clinical NER models trained with `embeddings_clinical`.
# - `ner_profiling_biobert` : Returns results for clinical NER models trained with `biobert_pubmed_base_cased`.
#
# For more examples, please check [this notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/11.2.Pretrained_NER_Profiling_Pipelines.ipynb).
#
#
#
#
# + [markdown] id="wUoe5mEEO5Fv"
# **`ner_profiling_clinical` Model List**
#
# | index | model | index | model | index | model | index | model |
# |--------:|:----------------------------------|--------:|:-----------------------------|--------:|:-------------------------------|--------:|:--------------------------------|
# | 1 | [ner_ade_clinical](https://nlp.johnsnowlabs.com/2021/04/01/ner_ade_clinical_en.html) | 13 | [nerdl_tumour_demo](https://nlp.johnsnowlabs.com/2021/04/01/nerdl_tumour_demo_en.html) | 25 | [ner_drugs](https://nlp.johnsnowlabs.com/2021/03/31/ner_drugs_en.html) | 37 | [ner_radiology_wip_clinical](https://nlp.johnsnowlabs.com/2021/04/01/ner_radiology_wip_clinical_en.html) |
# | 2 | [ner_posology_greedy](https://nlp.johnsnowlabs.com/2021/03/31/ner_posology_greedy_en.html) | 14 | [ner_deid_subentity_augmented](https://nlp.johnsnowlabs.com/2021/06/30/ner_deid_subentity_augmented_en.html) | 26 | [ner_deid_sd](https://nlp.johnsnowlabs.com/2021/04/01/ner_deid_sd_en.html) | 38 | [ner_clinical](https://nlp.johnsnowlabs.com/2020/01/30/ner_clinical_en.html) |
# | 3 | [ner_risk_factors](https://nlp.johnsnowlabs.com/2021/03/31/ner_risk_factors_en.html) | 15 | [ner_jsl_enriched](https://nlp.johnsnowlabs.com/2021/10/22/ner_jsl_enriched_en.html) | 27 | [ner_posology_large](https://nlp.johnsnowlabs.com/2021/03/31/ner_posology_large_en.html) | 39 | [ner_chemicals](https://nlp.johnsnowlabs.com/2021/04/01/ner_chemicals_en.html) |
# | 4 | [jsl_ner_wip_clinical](https://nlp.johnsnowlabs.com/2021/03/31/jsl_ner_wip_clinical_en.html) | 16 | [ner_genetic_variants](https://nlp.johnsnowlabs.com/2021/06/25/ner_genetic_variants_en.html) | 28 | [ner_deid_large](https://nlp.johnsnowlabs.com/2021/03/31/ner_deid_large_en.html) | 40 | [ner_deid_augmented](https://nlp.johnsnowlabs.com/2021/02/19/ner_deid_synthetic_en.html) |
# | 5 | [ner_human_phenotype_gene_clinical](https://nlp.johnsnowlabs.com/2021/03/31/ner_human_phenotype_gene_clinical_en.html) | 17 | [ner_bionlp](https://nlp.johnsnowlabs.com/2021/03/31/ner_bionlp_en.html) | 29 | [ner_posology](https://nlp.johnsnowlabs.com/2020/04/15/ner_posology_en.html) | 41 | [ner_events_clinical](https://nlp.johnsnowlabs.com/2021/03/31/ner_events_clinical_en.html) |
# | 6 | [jsl_ner_wip_greedy_clinical](https://nlp.johnsnowlabs.com/2021/03/31/jsl_ner_wip_greedy_clinical_en.html) | 18 | [ner_measurements_clinical](https://nlp.johnsnowlabs.com/2021/04/01/ner_measurements_clinical_en.html) | 30 | [ner_deidentify_dl](https://nlp.johnsnowlabs.com/2021/03/31/ner_deidentify_dl_en.html) | 42 | [ner_posology_small](https://nlp.johnsnowlabs.com/2021/03/31/ner_posology_small_en.html) |
# | 7 | [ner_cellular](https://nlp.johnsnowlabs.com/2021/03/31/ner_cellular_en.html) | 19 | [ner_diseases_large](https://nlp.johnsnowlabs.com/2021/04/01/ner_diseases_large_en.html) | 31 | [ner_deid_enriched](https://nlp.johnsnowlabs.com/2021/03/31/ner_deid_enriched_en.html) | 43 | [ner_anatomy_coarse](https://nlp.johnsnowlabs.com/2021/03/31/ner_anatomy_coarse_en.html) |
# | 8 | [ner_cancer_genetics](https://nlp.johnsnowlabs.com/2021/03/31/ner_cancer_genetics_en.html) | 20 | [ner_radiology](https://nlp.johnsnowlabs.com/2021/03/31/ner_radiology_en.html) | 32 | [ner_bacterial_species](https://nlp.johnsnowlabs.com/2021/04/01/ner_bacterial_species_en.html) | 44 | [ner_human_phenotype_go_clinical](https://nlp.johnsnowlabs.com/2020/09/21/ner_human_phenotype_go_clinical_en.html) |
# | 9 | [jsl_ner_wip_modifier_clinical](https://nlp.johnsnowlabs.com/2021/04/01/jsl_ner_wip_modifier_clinical_en.html) | 21 | [ner_deid_augmented](https://nlp.johnsnowlabs.com/2021/03/31/ner_deid_augmented_en.html) | 33 | [ner_drugs_large](https://nlp.johnsnowlabs.com/2021/03/31/ner_drugs_large_en.html) | 45 | [ner_jsl_slim](https://nlp.johnsnowlabs.com/2021/08/13/ner_jsl_slim_en.html) |
# | 10 | [ner_drugs_greedy](https://nlp.johnsnowlabs.com/2021/03/31/ner_drugs_greedy_en.html) | 22 | [ner_anatomy](https://nlp.johnsnowlabs.com/2021/03/31/ner_anatomy_en.html) | 34 | [ner_clinical_large](https://nlp.johnsnowlabs.com/2021/03/31/ner_clinical_large_en.html) | 46 | [ner_jsl](https://nlp.johnsnowlabs.com/2021/06/24/ner_jsl_en.html) |
# | 11 | [ner_deid_sd_large](https://nlp.johnsnowlabs.com/2021/04/01/ner_deid_sd_large_en.html) | 23 | [ner_chemprot_clinical](https://nlp.johnsnowlabs.com/2021/03/31/ner_chemprot_clinical_en.html) | 35 | [jsl_rd_ner_wip_greedy_clinical](https://nlp.johnsnowlabs.com/2021/04/01/jsl_rd_ner_wip_greedy_clinical_en.html) | 47 | [ner_jsl_greedy](https://nlp.johnsnowlabs.com/2021/06/24/ner_jsl_greedy_en.html) |
# | 12 | [ner_diseases](https://nlp.johnsnowlabs.com/2021/03/31/ner_diseases_en.html) | 24 | [ner_posology_experimental](https://nlp.johnsnowlabs.com/2021/09/01/ner_posology_experimental_en.html) | 36 | [ner_medmentions_coarse](https://nlp.johnsnowlabs.com/2021/04/01/ner_medmentions_coarse_en.html) | 48 | [ner_events_admission_clinical](https://nlp.johnsnowlabs.com/2021/03/31/ner_events_admission_clinical_en.html) |
#
# **`ner_profiling_biobert` Model List**
#
#
#
# | index | model | index | model | index | model |
# |--------:|:-----------------------|--------:|:---------------------------------|--------:|:------------------------------|
# | 1 | [ner_cellular_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_cellular_biobert_en.html) | 8 | [ner_jsl_enriched_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_jsl_enriched_biobert_en.html) | 15 | [ner_posology_large_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_posology_large_biobert_en.html) |
# | 2 | [ner_diseases_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_diseases_biobert_en.html) | 9 | [ner_human_phenotype_go_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_human_phenotype_go_biobert_en.html) | 16 | [jsl_rd_ner_wip_greedy_biobert](https://nlp.johnsnowlabs.com/2021/07/26/jsl_rd_ner_wip_greedy_biobert_en.html) |
# | 3 | [ner_events_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_events_biobert_en.html) | 10 | [ner_deid_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_deid_biobert_en.html) | 17 | [ner_posology_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_posology_biobert_en.html) |
# | 4 | [ner_bionlp_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_bionlp_biobert_en.html) | 11 | [ner_deid_enriched_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_deid_enriched_biobert_en.html) | 18 | [jsl_ner_wip_greedy_biobert](https://nlp.johnsnowlabs.com/2021/07/26/jsl_ner_wip_greedy_biobert_en.html) |
# | 5 | [ner_jsl_greedy_biobert](https://nlp.johnsnowlabs.com/2021/08/13/ner_jsl_greedy_biobert_en.html) | 12 | [ner_clinical_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_clinical_biobert_en.html) | 19 | [ner_chemprot_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_chemprot_biobert_en.html) |
# | 6 | [ner_jsl_biobert](https://nlp.johnsnowlabs.com/2021/09/05/ner_jsl_biobert_en.html) | 13 | [ner_anatomy_coarse_biobert](https://nlp.johnsnowlabs.com/2021/03/31/ner_anatomy_coarse_biobert_en.html) | 20 | [ner_ade_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_ade_biobert_en.html) |
# | 7 | [ner_anatomy_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_anatomy_biobert_en.html) | 14 | [ner_human_phenotype_gene_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_human_phenotype_gene_biobert_en.html) | 21 | [ner_risk_factors_biobert](https://nlp.johnsnowlabs.com/2021/04/01/ner_risk_factors_biobert_en.html) |
# + colab={"base_uri": "https://localhost:8080/"} id="y5VEZIdBINmn" outputId="821f8354-1e0f-44f8-b13e-ea2c4fb20881"
from sparknlp.pretrained import PretrainedPipeline
# Load the profiling pipeline that runs ~48 clinical NER models at once.
clinical_profiling_pipeline = PretrainedPipeline("ner_profiling_clinical", "en", "clinical/models")
# + id="8GYCnYmMnU-H"
# Sample clinical note used to compare all NER models side by side.
text = '''A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation , associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting .'''
# + colab={"base_uri": "https://localhost:8080/"} id="5GJFg2AKpxam" outputId="31e2a70e-858e-4ae2-e4c4-ba3ea7f984b7"
# One output key per NER model, plus 'sentence' and 'token'.
clinical_result = clinical_profiling_pipeline.fullAnnotate(text)[0]
clinical_result.keys()
# + id="C99PTQiNKZtw"
import pandas as pd


def get_token_results(light_result):
    """Merge per-token predictions from every NER model in a profiling result.

    Builds one row per token (sentence index, begin/end offsets, token text)
    and appends one column per model key found in `light_result`, holding
    that model's predicted label for each token.
    """
    token_anns = light_result["token"]
    merged = pd.DataFrame({
        'sentence': [ann.metadata["sentence"] for ann in token_anns],
        'begin': [ann.begin for ann in token_anns],
        'end': [ann.end for ann in token_anns],
        'token': [ann.result for ann in token_anns],
    })
    # Model outputs are every key except the raw sentence/token columns
    # and the "*_chunks" aggregates.
    model_names = [key for key in light_result.keys()
                   if key not in ("sentence", "token") and "_chunks" not in key]
    for model_name in model_names:
        # Column 0 holds the annotation objects; extract their label strings.
        labels = pd.DataFrame(light_result[model_name]).iloc[:, 0].apply(
            lambda ann: ann.result)
        merged = pd.concat([merged, labels.rename(model_name)], axis=1)
    return merged
# + colab={"base_uri": "https://localhost:8080/", "height": 557} id="trBEKiZRLiff" outputId="a3a42ed6-6ece-47<PASSWORD>"
get_token_results(clinical_result)
# + [markdown] id="P40wIaZ3GDwI"
# ## 6.NER Model Finder Pretrained Pipeline
# `ner_model_finder` pretrained pipeline trained with bert embeddings that can be used to find the most appropriate NER model given the entity name.
# + colab={"base_uri": "https://localhost:8080/"} id="SLqkp-9NGJG5" outputId="4d064739-9c05-4501-fe37-92dced879af5"
from sparknlp.pretrained import PretrainedPipeline
# Pipeline that maps an entity name to the most relevant pretrained NER models.
finder_pipeline = PretrainedPipeline("ner_model_finder", "en", "clinical/models")
# + colab={"base_uri": "https://localhost:8080/"} id="5PGY9hNAGJrI" outputId="3df25ce2-87aa-4c92-ff96-36ddbfd97d10"
# Query for models relevant to the "oncology" entity.
result = finder_pipeline.fullAnnotate("oncology")[0]
result.keys()
# + [markdown] id="aR5Y9XU7Gjg1"
# From the metadata in the 'model_names' column, we'll get to the top models to the given 'oncology' entity and oncology related categories.
# + id="ARzyEhjUGQba"
# The metadata packs ":::"-delimited parallel lists: matched categories
# and the recommended models for each.
df= pd.DataFrame(zip(result["model_names"][0].metadata["all_k_resolutions"].split(":::"),
                     result["model_names"][0].metadata["all_k_results"].split(":::")), columns=["category", "top_models"])
# + colab={"base_uri": "https://localhost:8080/", "height": 319} id="rY5UVmpyGQXF" outputId="c011e61c-b853-42bf-d32d-fc13ba0e696b"
df.head()
# + [markdown] id="WJUJhRsDN0P4"
# ## 7.ICD10CM to Snomed Code
#
# This pretrained pipeline maps ICD10CM codes to SNOMED codes without using any text data. You’ll just feed a comma or white space delimited ICD10CM codes and it will return the corresponding SNOMED codes as a list. For the time being, it supports 132K Snomed codes and will be augmented & enriched in the next releases.
# + colab={"base_uri": "https://localhost:8080/"} id="URlYyaQnPA1E" outputId="5072c183-4db3-43d5-c7c2-625c5e13c920"
# Code-mapping pipeline: ICD10CM codes -> SNOMED codes (no free text involved).
icd_snomed_pipeline = PretrainedPipeline("icd10cm_snomed_mapping", "en", "clinical/models")
# + colab={"base_uri": "https://localhost:8080/"} id="jhqXGDTSTaEW" outputId="323e8bfd-cf2c-4dfe-92ea-fc4cde25ce70"
# Inspect the pipeline's internal stages.
icd_snomed_pipeline.model.stages
# + colab={"base_uri": "https://localhost:8080/"} id="pExgipi5O7vY" outputId="28d5a9b2-d124-41f4-a087-ad689734da7b"
# Input is a whitespace-delimited list of ICD10CM codes.
icd_snomed_pipeline.annotate('M89.50 I288 H16269')
# + [markdown] id="hrgvx5lTXwEt"
# |**ICD10CM** | **Details** |
# | ---------- | -----------:|
# | M89.50 | Osteolysis, unspecified site |
# | I288 | Other diseases of pulmonary vessels |
# | H16269 | Vernal keratoconjunctivitis, with limbar and corneal involvement, unspecified eye |
#
# | **SNOMED** | **Details** |
# | ---------- | -----------:|
# | 733187009 | Osteolysis following surgical procedure on skeletal system |
# | 449433008 | Diffuse stenosis of left pulmonary artery |
# | 51264003 | Limbal AND/OR corneal involvement in vernal conjunctivitis |
# + [markdown] id="kG1whe5KPVH5"
# ## 8.Snomed to ICD10CM Code
# This pretrained pipeline maps SNOMED codes to ICD10CM codes without using any text data. You'll just feed a comma or white space delimited SNOMED codes and it will return the corresponding candidate ICD10CM codes as a list (multiple ICD10 codes for each Snomed code). For the time being, it supports 132K Snomed codes and 30K ICD10 codes and will be augmented & enriched in the next releases.
# + colab={"base_uri": "https://localhost:8080/"} id="Gk5fU0J0Rzw0" outputId="89b2b50c-5df7-4523-9162-54d9c4e3d241"
# Reverse mapping pipeline: SNOMED codes -> candidate ICD10CM codes.
snomed_icd_pipeline = PretrainedPipeline("snomed_icd10cm_mapping","en","clinical/models")
# + colab={"base_uri": "https://localhost:8080/"} id="m85pQn8lSJ8l" outputId="7e9a59f8-8e4f-4b53-d00e-0612ecb9c1bb"
# Inspect the pipeline's internal stages.
snomed_icd_pipeline.model.stages
# + colab={"base_uri": "https://localhost:8080/"} id="AivemfSaRdjD" outputId="d3492ae8-f18b-4048-83ee-af385371e9ac"
# Input is a whitespace-delimited list of SNOMED codes; each may map to
# several candidate ICD10CM codes.
snomed_icd_pipeline.annotate('733187009 449433008 51264003')
# + [markdown] id="W618yxZYbs5U"
# | **SNOMED** | **Details** |
# | ------ | ------:|
# | 733187009| Osteolysis following surgical procedure on skeletal system |
# | 449433008 | Diffuse stenosis of left pulmonary artery |
# | 51264003 | Limbal AND/OR corneal involvement in vernal conjunctivitis|
#
# | **ICDM10CM** | **Details** |
# | ---------- | ---------:|
# | M89.59 | Osteolysis, multiple sites |
# | M89.50 | Osteolysis, unspecified site |
# | M96.89 | Other intraoperative and postprocedural complications and disorders of the musculoskeletal system |
# | Q25.6 | Stenosis of pulmonary artery |
# | I28.8 | Other diseases of pulmonary vessels |
# | H10.45 | Other chronic allergic conjunctivitis |
# | H10.1 | Acute atopic conjunctivitis |
# | H16.269 | Vernal keratoconjunctivitis, with limbar and corneal involvement, unspecified eye |
# + [markdown] id="ubrDs3qa7gab"
# Also you can find these healthcare code mapping pretrained pipelines here: [Healthcare_Codes_Mapping](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/11.1.Healthcare_Code_Mapping.ipynb)
#
# - ICD10CM to UMLS
# - Snomed to UMLS
# - RxNorm to UMLS
# - RxNorm to MeSH
# - MeSH to UMLS
# - ICD10 to ICD9
# + id="ZeTD6qHwGJn8"
| tutorials/Certification_Trainings/Healthcare/11.Pretrained_Clinical_Pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tfgpu
# language: python
# name: tfgpu
# ---
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.applications import imagenet_utils
# %config Completer.use_jedi = False
tf.__version__


def _load_rgb(path, size=(224, 224)):
    """Read an image from disk, convert to RGB, and resize for the model.

    Bug fix: cv2.imread returns BGR channel order, but ImageNet-trained
    Keras models expect RGB, so convert before feeding the network.
    """
    img = cv2.imread(path, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return cv2.resize(img, size)


def _classify(img, model):
    """Batch, preprocess, and classify one (224, 224, 3) RGB image.

    Returns the decoded top-5 ImageNet predictions. Bug fix: the model is
    MobileNetV2, so use mobilenet_v2.preprocess_input consistently (the
    original mixed V1 and V2 preprocess calls across images).
    """
    batch = np.expand_dims(np.array(img), axis=0)
    batch = tf.keras.applications.mobilenet_v2.preprocess_input(batch)
    predictions = model.predict(batch)
    return imagenet_utils.decode_predictions(predictions)


image1 = _load_rgb("dog.jpg")
plt.imshow(image1)
image2 = _load_rgb("cat.jpg")
plt.imshow(image2)
image3 = _load_rgb("traffic.jpg")
plt.imshow(image3)

# ImageNet-pretrained MobileNetV2 (default weights, 224x224 input).
model = tf.keras.applications.mobilenet_v2.MobileNetV2()

results1 = _classify(image1, model)
print(results1)
results2 = _classify(image2, model)
print(results2)
results3 = _classify(image3, model)
print(results3)
| image_classfication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dianafvx17/daa_2021_1/blob/master/07Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KkE7nL56ShLg"
# #Busqueda lineal
# dado un conjunto de datos no ordenados, la busqueda lineal consiste en recorrer el conjunto de datos desde el inicio al final, moviendose de uno en uno hasta encontrar el elemento o llegar al final del conjunto
#
# datos = [ 4,18,47,2,34,14,78,12,48,21,31,19,1,3,5]
#
# #Busqueda Binaria
# funciona sobre un conjunto de datos lineal ordenado; consiste en dividir el conjunto en mitades y buscar el elemento buscado en esa mitad. Si no está en la mitad, preguntas si el elemento está a la derecha o a la izquierda, haces la lista igual a la mitad correspondiente y repites el proceso.
# L = [ 4,18,47,2,34,14,78,12,48,21,31,19,1,3,5]
# derecha = longitud(L)-1
# izquierda = 0
# medio = apuntara a la mitad del segmento de busqueda
#
# buscado: es el valor a buscar
# 1. hacer derecha = longitud(L)-1
# 2. hacer izquierda=0
# 3. si izquierda > derecha significa que hay un error en los datos
# 4. calcular medio = int((izquierda + derecha)/2)
# 5. mientras L[medio] != buscado hacer
# 6. - preguntar L[medio] > buscado
# - hacer derecho = medio
# - de lo contrario
# - hacer izquierda = medio
# - preguntar (derecho- izquierdo) % 2
# - medio = (izquierdo+((derecho- izquierdo)/2))+1
# - de lo contrario
# - medio=izquierdo + ((derecho-izquierdo)/2)
# 7. return medio
# + id="BBdT5L-aSfVp" outputId="afde981d-240d-4b25-a434-ca202ec6363b" colab={"base_uri": "https://localhost:8080/"}
"""busqueda lineal: regresa la posicion del elemento buscando si se encuentra dentro de la lista.
regresa-1 si el elemnto buscado no existe dentro de la lista"""
def busq_lineal(L, buscado):
    """Linear search: return the index of `buscado` in L, or -1 if absent.

    Also prints how many element comparisons were performed.
    """
    indice = -1
    contador = 0  # bug fix: was incremented without being initialized (UnboundLocalError)
    for idx in range(len(L)):
        contador += 1
        if L[idx] == buscado:
            indice = idx
            break
    print(f"numero de comparaciones realizadas={contador}")
    return indice
"""Busqueda lineal"""
def main():
    """Demo driver: prompt for a value, then linear-search it in the
    unsorted list and again after sorting."""
    datos = [ 4,18,47,2,34,14,78,12,48,21,31,19,1,3,5]
    dato = int(input("¿qué valor deseas buscar? "))
    resultado = busq_lineal(datos,dato)
    print("resultado: ", resultado)
    print("busqueda lineal en una lista ordenada")
    # Sorting changes element positions, so the second search may report
    # a different index for the same value.
    datos.sort()
    print(datos)
    resultado=busq_lineal(datos, dato)
    print(resultado)
main()
# + id="xhkuWPTMe1Fz"
"""Busqueda Binaria"""
def busq_binaria( L , buscado):
    """Iterative binary search over a sorted list.

    Returns the index of `buscado` in L, or -1 when it is not present.
    Prints each comparison so the halving process can be followed.
    """
    lo, hi = 0, len(L) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        # Shrink the half that cannot contain the target.
        if L[mid] < buscado:
            lo = mid + 1
        else:
            hi = mid - 1
        print(f"Comparar buscado:{buscado} con {L[mid]}")
        if L[mid] == buscado:
            return mid
    return -1
def main():
    """Demo driver: prompt for a value and locate it with binary search."""
    datos = [ 4,18,47,2,34,14,78,12,48,21,31,19,1,3,5]
    dato = int(input("¿qué valor deseas buscar? "))
    print("busqueda binaria")
    # Bug fix: binary search requires sorted input; the original searched
    # the unsorted list and could miss present elements. The reported
    # position is therefore the index within the sorted list.
    datos.sort()
    posicion = busq_binaria(datos, dato)
    # Bug fix: the f-string referenced the undefined name 'posicision'
    # (NameError at runtime).
    print(f"el elemento {dato} esta en la posicion {posicion} de la lista")
main()
# + id="7-q3Eer9jZfc"
"Busqueda binaria recursiva"
def busqueda_binaria_recursiva( L , buscado, IZQ , DER):
    """Recursive binary search on the half-open range [IZQ, DER).

    Call with IZQ=0, DER=len(L) on a sorted list. Returns the index of
    `buscado` or -1 when the range is exhausted without a match.
    """
    # Empty range: the element is not present.
    if IZQ >= DER:
        return -1
    MID = (IZQ + DER) // 2
    if L[MID] == buscado:
        return MID
    # Recurse into whichever half can still contain the target.
    if L[MID] < buscado:
        return busqueda_binaria_recursiva(L, buscado, MID + 1, DER)
    return busqueda_binaria_recursiva(L, buscado, IZQ, MID)
| 07Octubre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Libraries
import os
import pandas as pd
from tqdm.notebook import tqdm
import shutil
import librosa
import numpy as np
import librosa.display
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import keras
from keras.models import Model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, Conv2D, MaxPooling2D
import time
from keras.utils import np_utils
from keras import backend as K
from sklearn.manifold import TSNE
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
import seaborn as sns
from sklearn.decomposition import PCA
import gzip
from io import StringIO
import glob
from sklearn.model_selection import train_test_split
# +
# Load precomputed MFCC feature arrays and their labels for the
# DCASE 2017 train/validation splits (files produced by an earlier step).
train_mfcc_features = np.load('train_mfccs_DCASE_2017.npy')
validation_mfcc_features = np.load('validation_mfccs_DCASE_2017.npy')
train_labels = np.load('train_labels_DCASE_2017.npy')
validation_labels =np.load('validation_labels_DCASE_2017.npy')
# Encode string labels as integer class ids; keep the class-name order
# for later reporting. NOTE(review): fitting separate encoders on train
# and validation assumes both splits contain the same label set — confirm.
train_label_encoder = LabelEncoder().fit(train_labels)
train_labels = train_label_encoder.transform(train_labels)
classes_train = list(train_label_encoder.classes_)
validation_label_encoder = LabelEncoder().fit(validation_labels)
validation_labels = validation_label_encoder.transform(validation_labels)
classes_validation = list(validation_label_encoder.classes_)
# -
# Display the class names discovered by the encoder.
classes_train
# Standardize each sample's MFCC matrix independently (per-sample
# zero mean / unit variance along the fitted axis).
train_scaled = []
for i in range(len(train_mfcc_features)):
    scaler_train = StandardScaler().fit(train_mfcc_features[i])
    scaled_train = scaler_train.transform(train_mfcc_features[i])
    train_scaled.append(scaled_train)
validation_scaled = []
for i in range(len(validation_mfcc_features)):
    scaler_validation = StandardScaler().fit(validation_mfcc_features[i])
    scaled_validation = scaler_validation.transform(validation_mfcc_features[i])
    validation_scaled.append(scaled_validation)
# Sanity check: shape of one scaled validation sample.
validation_scaled[1].shape
# +
# One-hot encode the integer labels for categorical cross-entropy.
X_train = train_scaled
y_train = np_utils.to_categorical(train_labels)
X_val = validation_scaled
y_val = np_utils.to_categorical(validation_labels)
# +
# Summarize each variable-length MFCC matrix into a fixed-size vector:
# per-coefficient mean and std over time, concatenated.
print(len(X_train))
print(X_train[1].shape)
print(X_train[1].mean(axis=1).shape)
print(X_train[1].std(axis=1).shape)
X_train_mean = []
X_train_std = []
for i in range(len(X_train)):
    X_train_mean.append(X_train[i].mean(axis=1))
    X_train_std.append(X_train[i].std(axis=1))
print(len(X_train_mean))
print(len(X_train_std))
X_train_new = np.concatenate((X_train_mean, X_train_std), axis=1)
# -
X_train_new.shape
# +
# Same mean/std summarization for the validation split.
print(len(X_val))
print(X_val[1].shape)
print(X_val[1].mean(axis=1).shape)
print(X_val[1].std(axis=1).shape)
X_val_mean = []
X_val_std = []
for i in range(len(X_val)):
    X_val_mean.append(X_val[i].mean(axis=1))
    X_val_std.append(X_val[i].std(axis=1))
print(len(X_val_mean))
print(len(X_val_std))
X_val_new = np.concatenate((X_val_mean, X_val_std), axis=1)
# -
# # 1D Convolutional Parameters
# +
NB_ROWS = 26 # corresponds to number of mfccs
NB_COLS = 1 # correponds to the size of mfccs[i] (one mfcc)
X_train = np.array(X_train_new)
X_val = np.array(X_val_new)
# Reshaping to perform 1D convolution: (samples, features, channels=1).
X_train_1D_cnn = X_train.reshape(X_train.shape[0], NB_ROWS, NB_COLS)
X_test_1D_cnn = X_val.reshape(X_val.shape[0], NB_ROWS, NB_COLS)
# Labels are already one-hot encoded above; just alias them.
y_train_1D_cnn_hot = y_train
y_test_1D_cnn_hot = y_val
print("[INFO] X_train_1D_cnn.shape = {}, y_train_1D_cnn_hot.shape = {} ".format(X_train_1D_cnn.shape,
                                                                                y_train_1D_cnn_hot.shape))
print("[INFO] X_test_1D_cnn.shape = {}, y_test_1D_cnn_hot.shape = {} ".format(X_test_1D_cnn.shape,
                                                                              y_test_1D_cnn_hot.shape))
# debug
print("[DEBUG] y_train_1D_cnn_hot.shape = {}, y_train_1D_cnn_hot[0] = {}".format(y_train_1D_cnn_hot.shape,
                                                                                 y_train_1D_cnn_hot[0]))
print("[DEBUG] y_test_1D_cnn_hot.shape = {}, y_test_1D_cnn_hot[0] = {}".format(y_test_1D_cnn_hot.shape,
                                                                               y_test_1D_cnn_hot[0]))
# -
def construct_model(input_shape=(26, 1), num_classes=3):
    """Build and compile a small 1-D CNN for fixed-length MFCC feature vectors.

    Parameters
    ----------
    input_shape : tuple
        (n_features, n_channels) shape of one input sample.
    num_classes : int
        Number of output classes (softmax units).

    Returns
    -------
    A compiled keras ``Sequential`` model (categorical cross-entropy, Adam).
    """
    model = Sequential()
    model.add(Conv1D(32, kernel_size=2, activation='relu', input_shape=input_shape))
    model.add(Conv1D(32, kernel_size=2, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=2, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax', name="output_layer"))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.summary()
    # NOTE: the original version also built K.function evaluators for every
    # layer here; they were never used or returned, so that dead code (and
    # the commented-out experimental dense layers) has been removed.
    return model
# +
# Training hyperparameters.
epochs = 100
batch_size = 32
verbose = 1
num_classes = 3
input_shape = (NB_ROWS, NB_COLS)
# Destination for the checkpointed weights (Windows path).
outputFolder = "D:\\Extrasensory_Journal"
# Create the model and fit it, timing the whole training run.
start = time.time()
model = construct_model(input_shape=input_shape, num_classes=num_classes)
filepath = outputFolder + "\\DCASE_2017_1D-mfccs-weights.hdf5"
# Keep only the best weights (lowest validation loss) on disk.
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=verbose,
                             save_best_only=True,
                             save_weights_only=True,
                             mode='auto')
# Stop once validation loss has not improved for 7 epochs.
earlystop = EarlyStopping(monitor='val_loss',
                          patience=7,
                          verbose=verbose,
                          mode='auto')
callbacks_list = [checkpoint, earlystop]
history = model.fit(X_train_1D_cnn,
                    y_train_1D_cnn_hot,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=callbacks_list,
                    verbose=verbose,
                    validation_data=(X_test_1D_cnn, y_test_1D_cnn_hot))
end = time.time()
# NOTE(review): (end-start) is in seconds, not ms, despite the message text.
print("[INFO] Model trained in {} ms".format((end-start)))
# Save the final (last-epoch) model alongside the best-weights checkpoint.
model.save('DCASE_2017_mfccs_1D_k4.h5')
# +
# list all data in history
print(history.history.keys())
# summarize history for accuracy (train vs. validation per epoch)
plt.figure(dpi=120)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss (train vs. validation per epoch)
plt.figure(dpi=120)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# Report the final-epoch accuracies as percentages.
print("Training accuracy: %.2f%% / Validation accuracy: %.2f%%" %
      (100*history.history['accuracy'][-1], 100*history.history['val_accuracy'][-1]))
# -
# # Confusion Matrix DCASE_2017 Development 1D MFCC (avg+std)
# Reload a previously trained model for evaluation.
model = load_model('DCASE_2017_mfccs_1D.h5')
# Reshape flat feature vectors into (samples, 26, 1) for the 1-D CNN.
# NOTE(review): 1173 is hard-coded to this validation split's size — confirm.
X_val_new = X_val.reshape(1173,26,1)
Y_prediction = model.predict(X_val_new)
# Reduce predicted class probabilities to class indices.
Y_pred_classes = np.argmax(Y_prediction,axis = 1)
# Reduce one-hot validation labels to class indices.
Y_true = np.argmax(y_val,axis = 1)
# compute the confusion matrix
target_names = ['indoor', 'outdoor', 'vehicle']
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# Display the raw probability matrix.
Y_prediction
# Quick heatmap view of the confusion matrix.
sns.heatmap(confusion_mtx, annot=True, fmt="d", xticklabels=target_names, yticklabels=target_names, cbar=False)
plt.autoscale()
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix for *y_true* vs *y_pred*.

    Parameters
    ----------
    y_true, y_pred : array-like of int
        Ground-truth and predicted class indices.
    classes : sequence of str
        Display names for the classes, in label-index order.
    normalize : bool
        If True, each row is normalized to sum to 1 (per-class recall view).
    title : str or None
        Plot title; a sensible default is chosen when None.
    cmap : matplotlib colormap
        Colormap for the matrix image.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the matrix was drawn on.
    """
    if not title:
        title = ('Normalized confusion matrix' if normalize
                 else 'Confusion matrix, without normalization')
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize; rows with zero support would divide by zero here.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # Show every tick and label it with the class name.
    # FIX: the class names were previously commented out, so the axes showed
    # bare integer indices even though every caller passes `classes`.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each cell with its value, choosing text color for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
cm = confusion_matrix(Y_true, Y_pred_classes)
cm
# Plot non-normalized confusion matrix
plot_confusion_matrix(Y_true, Y_pred_classes, classes=target_names,
                      title='Confusion matrix, without normalization')
plt.autoscale()
# Plot normalized confusion matrix
plot_confusion_matrix(Y_true, Y_pred_classes, classes=target_names, normalize=True,
                      title='Normalized confusion matrix')
plt.autoscale()
# Macro-averaged metrics weight the three classes equally regardless of support.
print("F1 Score: ", f1_score(Y_true, Y_pred_classes, average="macro"))
print("Precision Score: ", precision_score(Y_true, Y_pred_classes, average="macro"))
print("Recall Score: ", recall_score(Y_true, Y_pred_classes, average="macro"))
print(classification_report(Y_true, Y_pred_classes, target_names=target_names, digits=4))
# # PCA plots
new_model = load_model('DCASE_2017_mfccs_1D_k4.h5')
# Round-trip the weights through disk (also leaves a reusable weights file).
new_model.save_weights('weights_same_filter.h5')
new_model.load_weights('weights_same_filter.h5')
new_model.summary()
# NOTE(review): `layers[5].output` is a *symbolic* tensor, not an array of
# activations, so wrapping it in a DataFrame below likely does not export the
# learned features as intended — confirm (a predict()/Model(inputs, outputs)
# call is usually required to materialize activations).
auto_features = new_model.layers[5].output[0]
auto_features.shape
auto_feats_df = pd.DataFrame(auto_features)
print(auto_feats_df.head())
auto_feats_df.to_csv('auto_train_features_same' + '.csv.gz',compression='gzip', index=False, header=None)
# Flatten validation samples back to 2-D (samples x features) for PCA/t-SNE.
X_val = X_val.reshape(X_val.shape[0], -1)
X_val.shape
# +
# 3-component PCA of the validation features, scattered in two 2-D views
# (PC1 vs PC2 and PC1 vs PC3), colored by coarse scene class.
pca = PCA(n_components=3)
pca_result = pca.fit_transform(X_val)
print('Variance PCA: {}'.format(np.sum(pca.explained_variance_ratio_)))
Xax=pca_result[:,0]
Yax=pca_result[:,1]
Zax=pca_result[:,2]
labels=['indoor', 'outdoor', 'vehicle']
cdict={0:'blue', 1:'orange', 2:'green'}      # class index -> marker color
labl={0:'indoor', 1:'outdoor', 2:'vehicle'}  # class index -> legend name
marker={0:'*', 1:'o', 2:'+'}                 # NOTE: marker/alpha maps are unused below
alpha={0:.3, 1:.5, 2:.7}
# Collapse one-hot labels to class indices (rebinds y_val for later cells).
y_val = np.argmax(y_val, axis = 1)
fig, (ax1, ax2) = plt.subplots(1, 2)
for g in np.unique(y_val):
    ix = np.where(y_val == g)
    ax1.scatter(Xax[ix], Yax[ix], c = cdict[g], label = labl[g], s = 10)
    ax2.scatter(Xax[ix], Zax[ix], c = cdict[g], label = labl[g], s = 10)
ax1.legend()
ax2.legend()
ax1.set_xlabel("First Principal Component",fontsize=10)
ax1.set_ylabel("Second Principal Component",fontsize=10)
ax2.set_xlabel("First Principal Component",fontsize=10)
ax2.set_ylabel("Third Principal Component",fontsize=10)
plt.tight_layout()
# -
time_start = time.time()
# 2-component t-SNE embedding of the validation features for visualization.
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(X_val)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
# FIX: plot the t-SNE embedding, not the first two raw feature columns —
# tsne_results was computed above but never used in the original plot call.
sns.scatterplot(x=tsne_results[:, 0], y=tsne_results[:, 1], legend='full')
# # 2D Convolution Parameters
# +
NB_ROWS = 13 # corresponds to number of mfccs
NB_COLS = 431 # correponds to the size of mfccs[i] (one mfcc)
CHANNEL = 1 # the number of channels
X_train = np.array(X_train)
X_val = np.array(X_val)
# Reshaping to perform 2D convolution: (samples, mfcc bins, frames, channel)
X_train_2D_cnn = X_train.reshape(X_train.shape[0], NB_ROWS, NB_COLS, CHANNEL)
X_test_2D_cnn = X_val.reshape(X_val.shape[0], NB_ROWS, NB_COLS, CHANNEL)
# Labels are already one-hot encoded upstream; just alias them.
y_train_2D_cnn_hot = y_train
y_test_2D_cnn_hot = y_val
print("[INFO] X_train_2D_cnn.shape = {}, y_train_2D_cnn_hot.shape = {} ".format(X_train_2D_cnn.shape,
                                                                                y_train_2D_cnn_hot.shape))
print("[INFO] X_test_2D_cnn.shape = {}, y_test_2D_cnn_hot.shape = {} ".format(X_test_2D_cnn.shape,
                                                                              y_test_2D_cnn_hot.shape))
# debug
print("[DEBUG] y_train_2D_cnn_hot.shape = {}, y_train_2D_cnn_hot[0] = {}".format(y_train_2D_cnn_hot.shape,
                                                                                 y_train_2D_cnn_hot[0]))
print("[DEBUG] y_test_2D_cnn_hot.shape = {}, y_test_2D_cnn_hot[0] = {}".format(y_test_2D_cnn_hot.shape,
                                                                               y_test_2D_cnn_hot[0]))
# -
def construct_model(input_shape=(13, 431, 1), num_classes = 3):
    """
    Build and compile the 2D-CNN classifier for MFCC "images".

    Parameters
    ----------
    input_shape : tuple
        (rows, cols, channels) of one input sample; defaults to
        13 MFCC bins x 431 frames x 1 channel.
    num_classes : int
        Number of output classes (softmax units).

    Returns
    -------
    keras.Sequential
        Compiled model (categorical cross-entropy, Adam, accuracy metric).
    """
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=input_shape))
    model.add(Conv2D(48, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(120, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax', name="output_layer"))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.summary()
    # NOTE: the original also built per-layer K.function evaluators here
    # (TF1-style, using K.learning_phase()); they were never used or
    # returned, so that dead code has been removed.
    return model
# +
# Hyper-parameters for the 2D-CNN training run.
epochs = 100
batch_size = 32
verbose = 1
num_classes = 3
input_shape = (NB_ROWS, NB_COLS, CHANNEL)
outputFolder = "D:\\Extrasensory_Journal"
# Create the model and fit it
start = time.time()
model = construct_model(input_shape=input_shape, num_classes=num_classes)
# Checkpoint the best-val_loss weights and stop early after 7 flat epochs,
# mirroring the 1D-CNN training cell above.
filepath = outputFolder + "\\DCASE_2017-mfccs-weights.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=verbose,
                             save_best_only=True,
                             save_weights_only=True,
                             mode='auto')
earlystop = EarlyStopping(monitor='val_loss',
                          patience=7,
                          verbose=verbose,
                          mode='auto')
callbacks_list = [checkpoint, earlystop]
history = model.fit(X_train_2D_cnn,
                    y_train_2D_cnn_hot,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=callbacks_list,
                    verbose=verbose,
                    validation_data=(X_test_2D_cnn, y_test_2D_cnn_hot))
end = time.time()
# NOTE(review): (end-start) is in seconds, not ms — the message label looks wrong.
print("[INFO] Model trained in {} ms".format((end-start)))
model.save('DCASE_2017_mfccs.h5')
# +
# Training curves for the 2D-CNN run (same plots as the 1D model above).
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.figure(dpi=120)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.figure(dpi=120)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# Final-epoch values (EarlyStopping may have run past the best epoch).
print("Training accuracy: %.2f%% / Validation accuracy: %.2f%%" %
      (100*history.history['accuracy'][-1], 100*history.history['val_accuracy'][-1]))
# -
# # Extrasensory Dataset
# +
# Collect the per-user label archives and derive each user's UUID from the
# file name. FIX: the original sliced hard-coded character offsets
# ([31:67]), which silently breaks if the directory path changes; derive
# the UUID from the basename instead (files are <uuid>.features_labels.csv.gz).
labels_dir = r"D:\Extrasensory_Journal\labels"
total_files = glob.glob(labels_dir + "\\" + "*.gz")
uuid_list = [os.path.basename(f).split(".")[0] for f in total_files]
# One features/labels archive path per user. FIX: the original formatted the
# *entire list* into a single '%s' string, producing an unusable path.
user_data_file = [labels_dir + '\\' + '%s.features_labels.csv.gz' % u for u in uuid_list]
# -
user_data_file
file_names = os.listdir(labels_dir + '\\')
user_ids = [fn.split(".")[0] for fn in file_names]
user_ids
# Extrasensory context labels considered for mapping onto the three coarse
# acoustic-scene classes (indoor / outdoor / vehicle) further below.
activity_labels_name = ['label:BICYCLING', 'label:SLEEPING', 'label:LAB_WORK',
                        'label:IN_CLASS', 'label:IN_A_MEETING', 'label:LOC_main_workplace',
                        'label:OR_indoors', 'label:OR_outside', 'label:IN_A_CAR',
                        'label:ON_A_BUS', 'label:DRIVE_-_I_M_THE_DRIVER',
                        'label:DRIVE_-_I_M_A_PASSENGER', 'label:LOC_home',
                        'label:FIX_restaurant', 'label:COOKING', 'label:SHOPPING',
                        'label:BATHING_-_SHOWER', 'label:CLEANING', 'label:DOING_LAUNDRY',
                        'label:WASHING_DISHES', 'label:WATCHING_TV',
                        'label:SURFING_THE_INTERNET', 'label:AT_A_PARTY', 'label:AT_A_BAR',
                        'label:LOC_beach', 'label:COMPUTER_WORK', 'label:EATING', 'label:TOILET',
                        'label:AT_THE_GYM', 'label:ELEVATOR', 'label:AT_SCHOOL']
def clean_labels(labels_df):
    """
    Collapse a one-hot / multi-label activity frame to one label per row.

    For each row, the *column name* holding the maximum value is taken as
    the row's label, and a 'label:' prefix (e.g. 'label:SLEEPING') is
    stripped when present.

    Parameters
    ----------
    labels_df : pandas.DataFrame
        One column per activity (e.g. 'label:SLEEPING'); values are
        typically 0/1 indicators and may contain NaN.

    Returns
    -------
    pandas.Series
        One cleaned label per row of *labels_df* (NaN for all-NaN rows).
    """
    labels = []
    for ind, row in labels_df.iterrows():
        row_max = np.max(row)
        if np.isnan(row_max):
            # Row carries no information at all; keep a NaN placeholder
            # (dropped later by the callers' dropna step).
            label = np.nan
        else:
            # FIX: use idxmax() to obtain the *column name* of the maximum.
            # np.argmax(row) returns a positional integer in modern pandas,
            # which made the `":" in max_label` test raise a TypeError.
            max_label = row.idxmax()
            if ":" in max_label:
                label = max_label.split(":")[1]
            else:
                label = max_label
        labels.append(label)
    return pd.Series(labels)
# +
# Load every user's features/labels archive, keep only the naive-audio
# feature columns and the activity-label columns, and build one DataFrame
# per user with a single cleaned 'label' column.
total_files = []
for i in tqdm(range(len(user_ids))):
    with gzip.open(labels_dir + '\\' + '%s.features_labels.csv.gz' % uuid_list[i], 'rb') as fid:
        csv_str = fid.read()
    csv_str = csv_str.decode("utf-8")
    df = pd.read_csv(StringIO(csv_str), sep=",")
    # Column subsets: naive-audio features and every label column.
    aud_col_name = [col for col in df.columns if "audio_naive" in col]
    label_col_name = [col for col in df.columns if "label" in col]
    aud_features_df = df[df.columns.intersection(aud_col_name)]
    aud_labels_df = df[df.columns.intersection(activity_labels_name)]
    null_ind = []
    for ind, row in aud_features_df.iterrows():
        if row.isnull().sum() > 0:
            null_ind.append(ind)
    # NOTE(review): drop() returns a new frame and the result is discarded,
    # so this statement has no effect; the dropna() below is what actually
    # removes rows with missing audio features.
    aud_features_df.drop(aud_features_df.index[null_ind])
    features_df = df[df.columns.intersection(aud_col_name)]
    labels_df = clean_labels(df[df.columns.intersection(activity_labels_name)])
    user_df = pd.concat((features_df, labels_df), axis=1)
    # clean_labels returns an unnamed Series, so its column arrives as 0.
    user_df = user_df.rename(columns={0:'label'})
    user_df = user_df.dropna(subset=aud_col_name)
    total_files.append(user_df)
# -
len(total_files)
total_files[12]
# Partition users into those whose frames still contain NaNs and those
# that are clean, then keep only the clean ones.
df_with_nan = []
df_without_nans = []
for i, df in enumerate(total_files):
    if df.isnull().values.any():
        df_with_nan.append(i)
    else:
        df_without_nans.append(i)
df_with_nan
total_files = [total_files[i] for i in df_without_nans]
for i in range(len(total_files)):
    print(total_files[i].isnull().values.any())  # sanity check: expect all False
# Stack all remaining users into one evaluation frame.
final_files = pd.concat(total_files)
final_files['label'].unique()
len(final_files['label'].unique())
# +
# Map each fine-grained Extrasensory activity label onto one of the three
# coarse acoustic-scene classes used by the DCASE models.
_indoor_labels = ['IN_A_MEETING', 'LOC_main_workplace', 'SLEEPING', 'OR_indoors', 'LOC_home',
                  'IN_CLASS', 'EATING', 'COOKING', 'LAB_WORK', 'COMPUTER_WORK',
                  'AT_SCHOOL', 'SURFING_THE_INTERNET', 'WATCHING_TV', 'DOING_LAUNDRY',
                  'WASHING_DISHES', 'CLEANING', 'FIX_restaurant', 'AT_A_PARTY',
                  'ELEVATOR', 'TOILET']
_vehicle_labels = ['BICYCLING', 'ON_A_BUS', 'IN_A_CAR', 'DRIVE_-_I_M_A_PASSENGER',
                   'DRIVE_-_I_M_THE_DRIVER']
_outdoor_labels = ['OR_outside', 'SHOPPING', 'LOC_beach']
_coarse_map = {name: 'indoor' for name in _indoor_labels}
_coarse_map.update({name: 'vehicle' for name in _vehicle_labels})
_coarse_map.update({name: 'outdoor' for name in _outdoor_labels})
final_files['label'] = final_files['label'].replace(_coarse_map)
# -
final_files['label'].unique()
# Split the combined frame into features (first 26 audio columns) and
# the coarse label column.
y_test = final_files.iloc[:,-1]
y_test
X_test = final_files.iloc[:, 0:26]
X_test
# Standardize features (zero mean, unit variance) before feeding the CNN.
X_test_scaled = StandardScaler().fit_transform(X_test.values)
X_test_scaled
X_test_scaled.shape
test_label_encoder = LabelEncoder().fit(y_test)
# NOTE(review): this transforms with *train_label_encoder* even though
# test_label_encoder was just fitted above. Reusing the training encoder is
# correct only if both datasets contain the same label set (and therefore
# the same alphabetical class order) — confirm.
test_labels = train_label_encoder.transform(y_test)
classes_test = list(test_label_encoder.classes_)
classes_test
y_test_new = np_utils.to_categorical(test_labels)
# Evaluate on Extrasensory features reshaped to (samples, 26, 1).
# NOTE(review): `model` is whichever model was trained most recently in
# this notebook — confirm it is the intended 1D model here.
X_test_new = X_test_scaled.reshape(len(X_test_scaled),26,1)
Y_prediction = model.predict(X_test_new)
Y_pred_classes = np.argmax(Y_prediction, axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(y_test_new, axis = 1)
# compute the confusion matrix
target_names = ['indoor', 'outdoor', 'vehicle']
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
sns.heatmap(confusion_mtx, annot=True, fmt="d", xticklabels=target_names, yticklabels=target_names, cbar=False)
plt.autoscale()
print("F1 Score: ", f1_score(Y_true, Y_pred_classes, average="macro"))
print("Precision Score: ", precision_score(Y_true, Y_pred_classes, average="macro"))
print("Recall Score: ", recall_score(Y_true, Y_pred_classes, average="macro"))
print(classification_report(Y_true, Y_pred_classes, target_names=target_names, digits=4))
print("Accuracy: ", accuracy_score(Y_true, Y_pred_classes))
# Plot normalized confusion matrix
plot_confusion_matrix(Y_true, Y_pred_classes, classes=target_names, normalize=True,
                      title='Normalized confusion matrix')
plt.autoscale()
# Adaptation experiment: fine-tune the saved 1D model on 90% of the
# Extrasensory data and evaluate on the held-out 10% (no shuffling, so the
# split is by row order, i.e. roughly by user).
x_train, x_test, y_train, y_test = train_test_split(X_test_scaled, y_test_new, test_size=0.1, shuffle=False)
# NOTE(review): X_test_scaled has 26 feature columns, but NB_ROWS*NB_COLS
# is 13*431 = 5603 — these reshapes cannot succeed as written, and the 1D
# model expects (samples, 26, 1). Confirm the intended shapes.
x_train = x_train.reshape(x_train.shape[0], NB_ROWS, NB_COLS)
x_test = x_test.reshape(x_test.shape[0], NB_ROWS, NB_COLS)
model_new = load_model("DCASE_2017_mfccs_1D.h5")
# Performance before fine-tuning...
score = model_new.evaluate(x_test, y_test, verbose=0)
print('Test loss 1:', score[0])
print('Test accuracy 1:', score[1])
# ...and after one epoch of fine-tuning on the new domain.
model_new.fit(x_train, y_train,batch_size=32,epochs=1,verbose=0)
score = model_new.evaluate(x_test, y_test, verbose=0)
print('Test loss 2:', score[0])
print('Test accuracy 2:', score[1])
Y_prediction = model_new.predict(x_test)
Y_pred_classes = np.argmax(Y_prediction, axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(y_test, axis = 1)
# compute the confusion matrix
target_names = ['indoor', 'outdoor', 'vehicle']
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
sns.heatmap(confusion_mtx, annot=True, fmt="d", xticklabels=target_names, yticklabels=target_names, cbar=False)
plt.autoscale()
print("F1 Score: ", f1_score(Y_true, Y_pred_classes, average="macro"))
print("Precision Score: ", precision_score(Y_true, Y_pred_classes, average="macro"))
print("Recall Score: ", recall_score(Y_true, Y_pred_classes, average="macro"))
print(classification_report(Y_true, Y_pred_classes, target_names=target_names))
print("Accuracy: ", accuracy_score(Y_true, Y_pred_classes))
# Plot normalized confusion matrix
plot_confusion_matrix(Y_true, Y_pred_classes, classes=target_names, normalize=True,
                      title='Normalized confusion matrix')
plt.autoscale()
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# Classical baseline: random forest on the same train/test split.
clf_RF = RandomForestClassifier()
len(x_train)
# NOTE(review): y_train is one-hot here, so the forest is fitted as a
# multi-output problem; the argmax below recovers single-class predictions.
# Also x_train was reshaped to 3-D in the previous cell — sklearn expects
# 2-D input; confirm the data actually reaching this cell.
clf_RF.fit(x_train, y_train)
y_pred = clf_RF.predict(x_test)
clf_RF.score(x_test, y_test)
Y_pred_classes = np.argmax(y_pred, axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(y_test, axis = 1)
# compute the confusion matrix
target_names = ['indoor', 'outdoor', 'vehicle']
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
print(classification_report(Y_true, Y_pred_classes, target_names=target_names))
sns.heatmap(confusion_mtx, annot=True, fmt="d", xticklabels=target_names, yticklabels=target_names, cbar=False)
plt.autoscale()
# Plot normalized confusion matrix
plot_confusion_matrix(Y_true, Y_pred_classes, classes=target_names, normalize=True,
                      title='Normalized confusion matrix')
plt.autoscale()
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import GradientBoostingClassifier
# Hard-voting ensemble of a gradient-boosting model and the random forest.
rc_rf = GradientBoostingClassifier()
eclf = VotingClassifier(estimators=[('1D_CNN', rc_rf), ('rf', clf_RF)], voting='hard')
eclf
y_test1 = np.argmax(y_test, axis=1)
np.unique(y_test1)
# FIX: the original line was a dangling `.fit(x_train, y_train)` with no
# receiver object, which is a SyntaxError. Fit the voting ensemble explicitly.
# NOTE(review): y_train is one-hot; sklearn classifiers usually expect 1-D
# class labels (cf. y_test1 above) — confirm the intended target array.
eclf.fit(x_train, y_train)
def read_raw_user_data(uuid, timestamps):
    """Load every raw ``<timestamp>.sound.mfcc`` file under the *uuid* folder.

    Each file is read as a flat float64 array via np.fromfile; arrays are
    returned in the same order as *timestamps*.
    """
    suffix = ".sound.mfcc"
    loaded = []
    for ts in timestamps:
        path = "{}\\{}{}".format(uuid, ts, suffix)
        loaded.append(np.fromfile(path))
    return loaded
# Raw MFCC files live under audio_naive/<uuid>/<timestamp>.sound.mfcc
audio_features = r"D:\Extrasensory_Journal\audio_naive"
uuid = audio_features + '\\' + '%s' % uuid_list[0]
# NOTE(review): df_final is not defined anywhere in this notebook — confirm
# where the per-user timestamp frame is supposed to come from.
timestamps = df_final['timestamp']
final_files = read_raw_user_data(uuid, timestamps)
# +
# Load the 5-fold cross-validation user splits shipped with Extrasensory
# (separate uuid lists for iPhone and Android users, merged per fold).
fold = 0
print("Starting fold: ", fold)
train_df_fold_iphone = pd.read_csv("es_cv_5_folds/fold_" + str(fold) + "_train_iphone_uuids.txt", names=['UUID'])
train_df_fold_android = pd.read_csv("es_cv_5_folds/fold_" + str(fold) + "_train_android_uuids.txt", names=['UUID'])
train_df_fold = pd.concat([train_df_fold_iphone, train_df_fold_android])
train_uuids = train_df_fold['UUID'].values.tolist()
test_df_fold_iphone = pd.read_csv("es_cv_5_folds/fold_" + str(fold) + "_test_iphone_uuids.txt", names=['UUID'])
test_df_fold_android = pd.read_csv("es_cv_5_folds/fold_" + str(fold) + "_test_android_uuids.txt", names=['UUID'])
test_df_fold = pd.concat([test_df_fold_iphone, test_df_fold_android])
test_uuids = test_df_fold['UUID'].values.tolist()
# -
test_uuids
len(final_files)
# Sanity check: one raw file presumably holds 13 mfccs x 431 frames = 5603
# float64 values (the 431*13 expression below checks the product).
test_file = np.fromfile("D:\\Extrasensory_Journal\\audio_naive\\00EABED2-271D-49D8-B599-1D4A09240601\\1444079161.sound.mfcc")
test_file.shape
431*13
len(final_files[124])
| AUDIO/Inference on Extrasensory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(R.matlab)
library(wavelets)
library(tuneR)
library(seewave)
library(data.table)
library(constellation)
library(GENEAread)
# +
# Build the (initially empty) feature table: one row per generated frame,
# with one column group per feature domain (time, Fourier, wavelet stats,
# zero crossings, autocorrelation, constellation shape, S-transform stats).
baseName <- "frame"
wavesName = c("8PSK", "16QAM", "64QAM", "BPSK", "CPFSK", "GFSK", "QPSK", "PAM4")
dffile <- data.frame(modulation=wavesName)
# 1024 time-domain samples per frame
timeDomain_columns <- paste('timeDomain', c(1:1024), sep='_')
dffile[, timeDomain_columns] <- NA
# 1024 FFT magnitude bins
fourierDomain_columns <- paste('fourierDomain', c(1:1024), sep='_')
dffile[, fourierDomain_columns] <- NA
# Summary statistics (max/min/mean/quartiles) of the 3 DWT detail series
waveletsDomain_columns <- c("wavelets_max_serie_1", "wavelets_min_serie_1" ,"wavelets_mean_serie_1", "wavelets_1st_Qu_serie_1" , "wavelets_2nd_Qu_serie_1", "wavelets_3rd_Qu_serie_1",
                            "wavelets_max_serie_2", "wavelets_min_serie_2" ,"wavelets_mean_serie_2", "wavelets_1st_Qu_serie_2" , "wavelets_2nd_Qu_serie_2", "wavelets_3rd_Qu_serie_2",
                            "wavelets_max_serie_3", "wavelets_min_serie_3" ,"wavelets_mean_serie_3", "wavelets_1st_Qu_serie_3" , "wavelets_2nd_Qu_serie_3", "wavelets_3rd_Qu_serie_3")
dffile[, waveletsDomain_columns] <- NA
# NOTE(review): these prefixes already end with '_', so sep='_' produces a
# double underscore (e.g. 'zeroCross_X__1') — confirm that is intended.
zeroCrossX_columns <- paste('zeroCross_X_', c(1:1024), sep='_')
dffile[, zeroCrossX_columns] <- NA
zeroCrossY_columns <- paste('zeroCross_Y_', c(1:1024), sep='_')
dffile[, zeroCrossY_columns] <- NA
autoCorrelation_columns <- paste('autoCorrelation_', c(1:1024), sep='_')
dffile[, autoCorrelation_columns] <- NA
cs_current_columns <- paste('cs_current_', c(1:1024), sep='_')
dffile[, cs_current_columns] <- NA
cs_prior_columns <- paste('cs_prior_', c(1:1024), sep='_')
dffile[, cs_prior_columns] <- NA
# Summary statistics of the S-transform (reassigned STFT)
s_ww_columns <- c("sDomain_max", "sDomain_min" ,"sDomain_mean", "sDomain_1st_Qu" , "sDomain_2nd_Qu", "sDomain_3rd_Qu")
dffile[, s_ww_columns] <- NA
names_idx=1   # cycles through wavesName for the modulation column
row_idx = 1   # next row of dffile to fill
# For every frame index (1..1000) and every modulation, load the complex
# baseband samples from the .mat file and derive all feature groups,
# filling one row of dffile per frame.
for(i in 1:1000) {
    for (w in wavesName) {
        n <- formatC(i, width=3, flag="0")
        name <- paste(baseName, w, n, ".mat", sep="")
        pathfile <- paste("../dataset/origin/", name, sep="")
        signal <- readMat(pathfile)
        values <- signal['frame']$frame
        # NOTE(review): modulation is only (re)assigned for i > 1 (the first
        # 8 rows keep the values set at data.frame creation), and names_idx
        # is reset to 1 as soon as it reaches 8, so wavesName[8] ("PAM4") is
        # never written by this branch — confirm the intended cycling.
        if(i > 1){
            dffile[row_idx, 'modulation'] <- wavesName[names_idx]
            names_idx = names_idx + 1
            if(names_idx == 8) names_idx=1
        }
        #Time Domain: real projection of the complex samples
        t=seq(0,1023)
        s = Mod(values) * cos((pi*2.5*t) + Arg(values))
        dffile[row_idx, timeDomain_columns] <- s
        #Frequency Domain (Fourier): magnitude spectrum
        f_s <- fft(s)
        f_s_abs <- abs(f_s)
        dffile[row_idx, fourierDomain_columns] <- f_s_abs
        #Frequency Domain (Wavelets): summary stats of 3-level DWT details
        s_wavelets <- dwt(s, filter="la8", n.levels=3, boundary="periodic", fast=TRUE)
        s_ww <- c(max(s_wavelets@W$W1), min(s_wavelets@W$W1), mean(s_wavelets@W$W1), quantile(s_wavelets@W$W1, probs = c(0.25, 0.5, 0.75), names=FALSE),
                  max(s_wavelets@W$W2), min(s_wavelets@W$W2), mean(s_wavelets@W$W2), quantile(s_wavelets@W$W2, probs = c(0.25, 0.5, 0.75), names=FALSE),
                  max(s_wavelets@W$W3), min(s_wavelets@W$W3), mean(s_wavelets@W$W3), quantile(s_wavelets@W$W3, probs = c(0.25, 0.5, 0.75), names=FALSE))
        dffile[row_idx, waveletsDomain_columns] <- s_ww
        #Zero-Crossing positions (x) and values (y) via seewave::zc
        s_wave <- Wave(left=s, samp.rate=2500000000, bit=8)
        s_zc <- zc(s_wave, plot=FALSE)
        s_zc_x <- s_zc[,1]
        s_zc_y <- s_zc[,2]
        dffile[row_idx, zeroCrossX_columns] <- s_zc_x
        dffile[row_idx, zeroCrossY_columns] <- s_zc_y
        #autocorrelation up to lag 1023
        ac <- acf(s, lag = 1023, plot=FALSE)
        dffile[row_idx, autoCorrelation_columns] <- ac$acf
        #Constellation Shape Features: value_change over a synthetic time axis
        tt <- seq(from = as.POSIXct("00:00:00", format="%H:%M:%S", tz="UTC"), length.out = 1024, by = "mins")
        DF = data.table( ID = 1, TIME = tt, VALUE = s)
        DT_C_SHAPE <- value_change(DF, value = 1.5, direction = "up", window_hours = 18, join_key = "ID", time_var = "TIME", value_var = "VALUE.V1", mult = "all")
        cs_current <- DT_C_SHAPE$CURRENT_VALUE[c(1:1024)]
        cs_prior <- DT_C_SHAPE$PRIOR_VALUE[c(1:1024)]
        dffile[row_idx, cs_current_columns] <- cs_current
        dffile[row_idx, cs_prior_columns] <- cs_prior
        #S Transform: summary stats of the reassigned STFT
        s_s <- stft(s, plot = FALSE, reassign = TRUE, win = 100)
        s_s_vector <- as.vector(s_s[[1]])
        s_s_statista <- c(max(s_s_vector), min(s_s_vector), mean(s_s_vector), quantile(s_s_vector, probs = c(0.25, 0.5, 0.75), names=FALSE))
        dffile[row_idx, s_ww_columns] <- s_s_statista
        row_idx <- row_idx + 1
    }
}
# FIX: the NA back-fill referenced `data` — there is no object named `data`
# here, so `is.na(data)` hit the base-R function instead of the assembled
# table. Replace remaining NAs in dffile itself with 0 before writing.
dffile[is.na(dffile)] <- 0
write.csv(dffile, "../dataset/transform/data.csv", row.names = FALSE)
# -
# NOTE(review): this reads "data100.csv" although the loop above writes
# "data.csv" — confirm which file is intended.
dfData <- read.csv("../dataset/transform/data100.csv")
dfData
| notebook/transformData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="xu2SVpFJjmJr"
# # DeepDreaming with TensorFlow
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": []} colab_type="code" executionInfo={"elapsed": 371, "status": "ok", "timestamp": 1457963606294, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "1269ead540f76ce5", "userId": "108092561333339272254"}, "user_tz": -60} id="jtD9nb-2QgkY" outputId="b935629b-8608-45c1-942f-612b7dbb13d3"
# boilerplate code
from __future__ import print_function
import os
from io import BytesIO
import numpy as np
from functools import partial
import PIL.Image
from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
# +
# #!wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip inception5h.zip
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": []} colab_type="code" executionInfo={"elapsed": 2264, "status": "ok", "timestamp": 1457962713799, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "761b412462cda2d0", "userId": "108092561333339272254"}, "user_tz": -60} id="1kJuJRLiQgkg" outputId="d2aaf8cc-91e1-4864-8cf8-0aef612db1d6"
# Load the frozen Inception-5h GraphDef into an interactive TF1 session.
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(model_fn, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
# Subtract the ImageNet mean and add a batch dimension before the network.
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
# + [markdown] colab_type="text" id="eJZVMSmiQgkp"
# To take a glimpse into the kinds of patterns that the network learned to recognize, we will try to generate images that maximize the sum of activations of particular channel of a particular convolutional layer of the neural network. The network we explore contains many convolutional layers, each of which outputs tens to hundreds of feature channels, so we have plenty of patterns to explore.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}]} colab_type="code" executionInfo={"elapsed": 1198, "status": "ok", "timestamp": 1457962715078, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "761b412462cda2d0", "userId": "108092561333339272254"}, "user_tz": -60} id="LrucdvgyQgks" outputId="5936270b-5da8-4825-b2e9-145c494d36e6"
# Enumerate the imported Conv2D ops and count their output feature channels.
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
def strip_consts(graph_def, max_const_size=32):
    """Return a copy of graph_def with large Const tensor payloads replaced
    by a short "<stripped N bytes>" marker (keeps visualizations light)."""
    stripped = tf.GraphDef()
    for node in graph_def.node:
        node_copy = stripped.node.add()
        node_copy.MergeFrom(node)
        if node_copy.op != 'Const':
            continue
        tensor = node_copy.attr['value'].tensor
        n_bytes = len(tensor.tensor_content)
        if n_bytes > max_const_size:
            tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>"%n_bytes)
    return stripped
def rename_nodes(graph_def, rename_func):
    """Return a copy of graph_def with every node name (and every input
    reference) mapped through rename_func; the control-input marker '^'
    is preserved on renamed inputs."""
    renamed = tf.GraphDef()
    for node in graph_def.node:
        node_copy = renamed.node.add()
        node_copy.MergeFrom(node)
        node_copy.name = rename_func(node_copy.name)
        for idx, inp in enumerate(node_copy.input):
            if inp[0] != '^':
                node_copy.input[idx] = rename_func(inp)
            else:
                node_copy.input[idx] = '^' + rename_func(inp[1:])
    return renamed
def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline via the TensorBoard graph widget.

    Accepts either a Graph (converted with as_graph_def) or a GraphDef;
    large constants are stripped first to keep the embedded HTML small.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # FIX: the inner HTML is embedded in the iframe's srcdoc attribute, so
    # double quotes must be escaped as &quot;. The original called
    # code.replace('"', '"') — a no-op that left the attribute broken.
    iframe = """
        <iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# Visualizing the network graph. Be sure expand the "mixed" nodes to see their
# internal structure. We are going to visualize "Conv2D" nodes.
# Split node names at the first underscore ("conv2d_1" -> "conv2d/1") so
# TensorBoard groups related ops under a common prefix.
tmp_def = rename_nodes(graph_def, lambda s:"/".join(s.split('_',1)))
#show_graph(tmp_def)
# + [markdown] colab_type="text" id="Nv2JqNLBhy1j"
# <a id='naive'></a>
# ## Naive feature visualization
# + [markdown] colab_type="text" id="6LXaGEJkQgk4"
# Let's start with a naive way of visualizing these. Image-space gradient ascent!
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}]} colab_type="code" executionInfo={"elapsed": 3, "status": "ok", "timestamp": 1457962479327, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "761b412462cda2d0", "userId": "108092561333339272254"}, "user_tz": -60} id="ZxC_XGGXQgk7" outputId="1c971a74-bf65-4069-cfd0-1473aa909a83"
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed5a_1x1_pre_relu'
channel = 134 # picking some feature channel to visualize
# start with a gray image with a little noise
# (values near the 100-128 gray range expected after mean subtraction)
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
def showarray(a, fmt='jpeg'):
    """Render a float image array (expected in [0, 1]) inline in the notebook."""
    pixels = np.uint8(np.clip(a, 0, 1)*255)
    buf = BytesIO()
    PIL.Image.fromarray(pixels).save(buf, fmt)
    display(Image(data=buf.getvalue()))
def visstd(a, s=0.1):
    '''Normalize the image range for visualization'''
    scale = max(a.std(), 1e-4)       # floor the std to avoid division by ~0
    centered = a - a.mean()
    return centered / scale * s + 0.5
def T(layer):
    '''Return the output tensor of the named layer in the imported graph.'''
    tensor_name = "import/{}:0".format(layer)
    return graph.get_tensor_by_name(tensor_name)
def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
    """Image-space gradient ascent maximizing mean(t_obj).

    Parameters
    ----------
    t_obj : tf.Tensor
        Activation tensor (e.g. one feature channel) to maximize.
    img0 : np.ndarray
        Starting image; the gray noise image by default.
    iter_n : int
        Number of ascent steps.
    step : float
        Step size applied to the std-normalized gradient.
    """
    t_score = tf.reduce_mean(t_obj) # defining the optimization objective
    t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
    img = img0.copy()
    for i in range(iter_n):
        # Evaluate gradient and score on the current image (TF1 session API).
        g, score = sess.run([t_grad, t_score], {t_input:img})
        # normalizing the gradient, so the same step size should work
        g /= g.std()+1e-8         # for different layers and networks
        img += g*step
        print(score, end = ' ')
    clear_output()
    showarray(visstd(img))
#render_naive(T(layer)[:,:,:,channel])
# + [markdown] colab_type="text" id="ZroBKE5YiDsb"
# <a id="multiscale"></a>
# ## Multiscale image generation
#
# Looks like the network wants to show us something interesting! Let's help it. We are going to apply gradient ascent on multiple scales. Details formed on smaller scale will be upscaled and augmented with additional details on the next scale.
#
# With multiscale image generation it may be tempting to set the number of octaves to some high value to produce wallpaper-sized images. Storing network activations and backprop values will quickly run out of GPU memory in this case. There is a simple trick to avoid this: split the image into smaller tiles and compute each tile gradient independently. Applying random shifts to the image before every iteration helps avoid tile seams and improves the overall image quality.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 464, "status": "ok", "timestamp": 1457963844162, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "1269ead540f76ce5", "userId": "108092561333339272254"}, "user_tz": -60} id="2iwWSOgsQglG" outputId="221dae81-914b-4167-eb49-26ef2d431a66"
def tffunc(*argtypes):
    '''Helper that transforms TF-graph generating function into a regular one.
    See "resize" function below.

    *argtypes* are the numpy dtypes of the wrapped function's positional
    arguments; one tf.placeholder is created per argument, the graph is
    built once against the placeholders, and each call to the returned
    wrapper evaluates that pre-built output with concrete values fed in.
    '''
    placeholders = list(map(tf.placeholder, argtypes))
    def wrap(f):
        # Build the graph once, against the shared placeholders.
        out = f(*placeholders)
        def wrapper(*args, **kw):
            # Evaluate with concrete values (a session can be passed via kw).
            return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
        return wrapper
    return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
    img = tf.expand_dims(img, 0)  # add a batch dimension for resize_bilinear
    return tf.image.resize_bilinear(img, size)[0,:,:,:]
# Wrap into a regular numpy-in/numpy-out function (see tffunc above).
resize = tffunc(np.float32, np.int32)(resize)
def calc_grad_tiled(img, t_grad, tile_size=512):
    '''Compute the value of tensor t_grad over the image in a tiled way.
    Random shifts are applied to the image to blur tile boundaries over
    multiple iterations.'''
    sz = tile_size
    h, w = img.shape[:2]
    # Random roll so tile seams land in different places on every call.
    sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    grad = np.zeros_like(img)
    # max(h-sz//2, sz) guarantees at least one tile even for small images.
    for y in range(0, max(h-sz//2, sz),sz):
        for x in range(0, max(w-sz//2, sz),sz):
            sub = img_shift[y:y+sz,x:x+sz]
            g = sess.run(t_grad, {t_input:sub})
            grad[y:y+sz,x:x+sz] = g
    # Undo the roll so the gradient lines up with the input image.
    return np.roll(np.roll(grad, -sx, 1), -sy, 0)
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}]} colab_type="code" executionInfo={"elapsed": 127, "status": "ok", "timestamp": 1457963487829, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "1269ead540f76ce5", "userId": "108092561333339272254"}, "user_tz": -60} id="GRCJdG8gQglN" outputId="7e21352d-9131-4f81-a52f-912b2e299475"
def render_multiscale(t_obj, img0=img_noise, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):
    """Gradient-ascent feature visualization over several spatial scales.

    Maximizes the mean of `t_obj` w.r.t. the input image, upscaling the image
    by `octave_scale` between octaves so both coarse and fine structure emerge.
    """
    t_score = tf.reduce_mean(t_obj) # defining the optimization objective
    t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
    img = img0.copy()
    for octave in range(octave_n):
        if octave>0:
            # Grow the image before each new octave (coarse-to-fine).
            hw = np.float32(img.shape[:2])*octave_scale
            img = resize(img, np.int32(hw))
        for i in range(iter_n):
            g = calc_grad_tiled(img, t_grad)
            # normalizing the gradient, so the same step size should work
            g /= g.std()+1e-8         # for different layers and networks
            img += g*step
            print('.', end = ' ')
    clear_output()
    #showarray(visstd(img))
#render_multiscale(T(layer)[:,:,:,channel])
# + [markdown] colab_type="text" id="mDSZMtVYQglV"
# <a id="laplacian"></a>
# ## Laplacian Pyramid Gradient Normalization
#
# This looks better, but the resulting images mostly contain high frequencies. Can we improve it? One way is to add a smoothness prior into the optimization objective. This will effectively blur the image a little every iteration, suppressing the higher frequencies, so that the lower frequencies can catch up. This will require more iterations to produce a nice image. Why don't we just boost lower frequencies of the gradient instead? One way to achieve this is through the [Laplacian pyramid](https://en.wikipedia.org/wiki/Pyramid_%28image_processing%29#Laplacian_pyramid) decomposition. We call the resulting technique _Laplacian Pyramid Gradient Normalization_.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 512, "status": "ok", "timestamp": 1457963876373, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "1269ead540f76ce5", "userId": "108092561333339272254"}, "user_tz": -60} id="Do3WpFSUQglX" outputId="99835b80-ed6f-47a5-85c3-c77bd55d7b17"
# 5-tap binomial filter [1, 4, 6, 4, 1]; its outer product with itself is a
# 5x5 Gaussian-like blur kernel.
binomial = np.float32([1, 4, 6, 4, 1])
k = np.outer(binomial, binomial)
# Normalize to unit sum and expand to conv-filter shape (5, 5, 3, 3):
# multiplying by np.eye(3) blurs each RGB channel independently (no mixing).
k5x5 = (k[:, :, None, None] / k.sum()) * np.eye(3, dtype=np.float32)
def lap_split(img):
    '''Split the image into lo and hi frequency components'''
    with tf.name_scope('split'):
        # Stride-2 blur = blur + downsample in one conv.
        lo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')
        # Upsample `lo` back to the input size (transpose conv, kernel x4 to
        # compensate for the stride-2 spreading), then take the residual.
        lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])
        hi = img-lo2
    return lo, hi
def lap_split_n(img, n):
    '''Build Laplacian pyramid with n splits.

    Returns n+1 levels ordered coarsest-first: [lowpass, hi_n, ..., hi_1].
    '''
    levels = []
    for i in range(n):
        img, hi = lap_split(img)
        levels.append(hi)
    levels.append(img)
    return levels[::-1]
def lap_merge(levels):
    '''Merge Laplacian pyramid (inverse of lap_split_n).'''
    img = levels[0]
    for hi in levels[1:]:
        with tf.name_scope('merge'):
            img = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi
    return img
def normalize_std(img, eps=1e-10):
    '''Normalize image by making its standard deviation = 1.0

    Note: this is actually the root-mean-square (no mean subtraction);
    `eps` guards against division by zero for an all-zero level.
    '''
    with tf.name_scope('normalize'):
        std = tf.sqrt(tf.reduce_mean(tf.square(img)))
        return img/tf.maximum(std, eps)
def lap_normalize(img, scale_n=4):
    '''Perform the Laplacian pyramid normalization.

    Splits the (H, W, C) image into scale_n+1 frequency bands, normalizes each
    band to unit RMS, and merges them back — equalizing energy across scales.
    '''
    img = tf.expand_dims(img,0)                 # add batch dim for the convs
    tlevels = lap_split_n(img, scale_n)
    tlevels = list(map(normalize_std, tlevels))
    out = lap_merge(tlevels)
    return out[0,:,:,:]                         # drop the batch dim again
# Showing the lap_normalize graph with TensorBoard
lap_graph = tf.Graph()
with lap_graph.as_default():
    lap_in = tf.placeholder(np.float32, name='lap_in')
    lap_out = lap_normalize(lap_in)
#show_graph(lap_graph)
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}]} colab_type="code" executionInfo={"elapsed": 17273, "status": "ok", "timestamp": 1457964054088, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "12341152118244997759", "photoUrl": "https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/s128/photo.jpg", "sessionId": "1269ead540f76ce5", "userId": "108092561333339272254"}, "user_tz": -60} id="zj8Ms-WqQgla" outputId="aa54c6c3-bf38-4054-f3f4-a5c82218e251"
def render_lapnorm(t_obj, img0=img_noise, visfunc=visstd,
                   iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):
    """Multiscale feature visualization with Laplacian-normalized gradients.

    Like render_multiscale, but each tile gradient is passed through
    lap_normalize so low frequencies are boosted relative to high ones.
    Returns the final image with a leading batch axis (shape (1, H, W, C)).
    """
    t_score = tf.reduce_mean(t_obj) # defining the optimization objective
    t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
    # build the laplacian normalization graph
    lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
    img = img0.copy()
    for octave in range(octave_n):
        #print('resizing', img, img.shape)
        if octave>0:
            hw = np.float32(img.shape[:2])*octave_scale
            #print(np.int32(hw))
            # NOTE(review): `resize` here is the tffunc-wrapped TF version
            # defined above, whose wrapper ignores unknown keyword args —
            # order/clip/preserve_range have no effect (they look like
            # leftovers from a skimage.transform.resize call). Confirm.
            img = resize(img, np.int32(hw), order=3,
                         clip=False, preserve_range=True)
        for i in range(iter_n):
            g = calc_grad_tiled(img, t_grad)
            g = lap_norm_func(g)
            img += g*step
            print('.', end = ' ')
    clear_output()
    showarray(visfunc(img))
    return img[np.newaxis]
# 195 & 188
# channel & 189
#render_lapnorm(T(layer)[:,:,:,channel]+T(layer)[:,:,:,189])
# + [markdown] colab_type="text" id="YzXJUF2lQgln"
# <a id="playing"></a>
# ## Playing with feature visualizations
#
# We got a nice smooth image using only 10 iterations per octave. In case of running on GPU this takes just a few seconds. Let's try to visualize another channel from the same layer. The network can generate wide diversity of patterns.
# -
import matplotlib.pyplot as plt
# %matplotlib inline
# Visualize feature channel 195 of the chosen layer.
img2 = render_lapnorm(T(layer)[:, :, :, 195])
def fractal_zoom(img0=img_noise, visfunc=visstd,
                 n_iterations=50, gif_step=5, crop=1, neuron_1=195, neuron_2=188, gif_name='6-fractal.gif'):
    """Produce a "fractal zoom" GIF by repeated render / crop / upscale.

    Each iteration optimizes the image for the sum of two feature channels
    (`neuron_1` + `neuron_2`), crops a `crop`-pixel border and resizes back to
    the original size, which creates the zooming-in effect. Frames are
    collected every `gif_step` iterations and written to `gif_name`.
    """
    # Bug fix: start from the caller-supplied img0 — the original ignored it
    # and always restarted from the module-level img_noise.
    img_copy = img0.copy()
    img_copy = img_copy[np.newaxis]
    imgs = []
    n_img, height, width, ch = img_copy.shape
    for it_i in range(n_iterations):
        print(it_i, end=', ')
        img_copy = render_lapnorm(T(layer)[:, :, :, neuron_1] + T(layer)[:, :, :, neuron_2], img0=img_copy[0])
        # Crop a `crop`-pixel border from height and width, then resize back
        # to (height, width) so the next iteration zooms slightly in.
        img_copy = img_copy[:, crop:-crop, crop:-crop, :]
        img_copy = resize(img_copy[0], (height, width), order=3,
                          clip=False, preserve_range=True
                          )[np.newaxis].astype(np.float32)
        # NOTE(review): `it_i < 5` caps frame collection at the first five
        # iterations (a single frame with the default gif_step=5) — looks
        # like leftover debugging; confirm intent before changing.
        if it_i % gif_step == 0 and it_i < 5:
            # Bug fix: honor the visfunc parameter (original hard-coded visstd).
            imgs.append(visfunc(img_copy[0]))
    gif.build_gif(imgs, saveto=gif_name)
# NOTE(review): `img_copy` is local to fractal_zoom, so at module level it is
# presumably a leftover from an earlier interactive session — verify it is
# defined before running this cell.
fractal_zoom(img0=img_copy[0], gif_name="1.gif", n_iterations=2000)
fractal_zoom(neuron_1=channel, neuron_2=189, gif_name="2.gif", n_iterations=2000)
# + [markdown] colab_type="text" id="ka6RyOMEnrB5"
# Lower layers produce features of lower complexity.
# -
import IPython.display as ipyd
# Display the previously rendered GIF inline in the notebook.
ipyd.Image(url='6-fractal.gif', height=300, width=300)
| deepdream.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import nltk
nltk.download("vader_lexicon")
import nltk.sentiment.util
import nltk.sentiment.vader
vader = nltk.sentiment.vader.SentimentIntensityAnalyzer()
# Tab-separated ground-truth tweets: human mean rating, tweet text, and the
# score previously produced by the Java VADER implementation.
df = pd.read_csv("tweets_GroundTruth+java.txt", delimiter="\t", header=None, index_col=0)
df.columns = ["mean", "text", "javaVader_value"]
# Python VADER compound score per tweet.
df["vader"] = df["text"].apply(lambda x: vader.polarity_scores(x)["compound"])
# Rescale the human mean rating to roughly [-1, 1] for comparison.
df["norm_mean"] = df["mean"] / max(abs(df["mean"].min()), df["mean"].max())
# Visual agreement checks between the two implementations and the humans.
plt.scatter(x=df["vader"], y=df["mean"])
plt.scatter(x=df["vader"], y=df["norm_mean"])
plt.scatter(x=df["javaVader_value"], y=df["norm_mean"])
plt.scatter(x=df["javaVader_value"], y=df["vader"])
# +
# Pearson r (and r^2) of Python VADER vs. human mean.
r = np.corrcoef(x=df["vader"], y=df["mean"])[0,1]
print(r, r**2)
# +
# Pearson r (and r^2) of Java VADER vs. human mean.
r = np.corrcoef(x=df["javaVader_value"], y=df["mean"])[0,1]
print(r, r**2)
# -
# Tweets where the two implementations disagree by more than 0.1.
df["diffs"] = df["javaVader_value"] - df["vader"]
df[df.diffs.abs() > 0.1]
| validation/Vader_JavaVsPython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
pd.__version__
# import os
# os.path.abspath(pd.__file__)
# %matplotlib inline
from sklearn import datasets, linear_model
import numpy as np
import matplotlib.pyplot as plt
# Set plot font to Times New Roman
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams['font.size'] = 18
# One color per sampling strategy (same order as sampling_types below).
colors = ['#1f78b4', '#7d4d4c','#fcc2c1']
outdir = 'graphs/'
date_prefix='06142018'
# Strategy codes as they appear in the CSV, and their display names.
sampling_types = ['bandit False', 'uniform True', 'uniform False']
sampling_type_names = ['MAB','Revise always', 'Revise if different']
combinedDfFile = '/Users//banditalgorithms/matlabScripts/CombinedDfMatlabProcessedSwitchToBest.csv'
df = pd.read_csv(combinedDfFile)
# Quick inspection of the MAB / normal-reward rows, sorted by sample size.
mask = (df.loc[:,'overall_sampling_type'] == 'bandit False') & (df.loc[:,'reward_type'] == 'normal')
df.loc[(df.overall_sampling_type == 'bandit False') & (df.reward_type == 'normal'),:].sort_values('total_size')
# +
def plot_avg_reward_gain(df, reward_type, ax, series_name, ylabel="", hasLegend=True, legend_anchor=(0.55, 1.55), stdError=False):
    """Draw grouped bars of mean +/- SEM for one reward type on `ax`.

    Parameters
    ----------
    df : DataFrame with 'reward_type', 'overall_sampling_type', 'total_size',
        and 'mean_<series_name>' / 'sem_<series_name>' columns.
    reward_type : 'binary' or 'normal'; rows are filtered to this value.
    ax : matplotlib Axes to draw on; returned for chaining.
    series_name : suffix of the mean/sem column pair to plot.
    ylabel : label for the y axis.
    hasLegend, legend_anchor, stdError : kept for interface compatibility;
        currently unused (legend drawing is done by the calling cells).
    """
    df = df.loc[df.reward_type == reward_type, :]
    width = 0.2                    # the width of the bars
    ind = np.arange(4) - width     # the x locations for the four size groups
    # One bar series per sampling strategy, offset so the groups sit side by side.
    for index, sampling_type in enumerate(sampling_types):
        cur_series = df.loc[(df.overall_sampling_type == sampling_type), :].sort_values('total_size')
        ax.bar(ind + width * index, cur_series.loc[:, 'mean_' + series_name],
               width, color=colors[index],
               yerr=cur_series.loc[:, 'sem_' + series_name])
    ax.set_xlabel('Number of students')
    ax.set_xticks(ind + width)
    # Group labels: multiples of the base sample size m.
    xticks = ['m', '2m', '6m', '11m']
    ax.set_xticklabels(xticks)
    ax.set_ylabel(ylabel)
    return ax
# +
# 1x4 figure: average reward per student (binary / normal) and proportion of
# students assigned the less-effective condition (binary / normal).
figure = plt.figure(figsize = (16, 4))
legend_height = 1.85
stdError = True
# Bar for Binary Reward
combinedDfFile = '/Users//banditalgorithms/matlabScripts/CombinedDfMatlabProcessedSwitchToBest.csv'
df = pd.read_csv(combinedDfFile)
# Derived column: fraction of students routed to arm 2 (SEM unavailable -> 0).
df['mean_proportion_arm_2'] = df.mean_sample_size_2 / (df.mean_sample_size_2 + df.mean_sample_size_1)
df['sem_proportion_arm_2'] = 0
ax = figure.add_subplot(1, 4, 1)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student',hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Binary rewards")
ax.legend(sampling_type_names,ncol=3, bbox_to_anchor=(2.4, 1.57))
plt.gcf().text(0.15,.96,'Average Rewards per Student', fontsize=24)
ax = figure.add_subplot(1, 4, 2)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student', hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Normally-dist. rewards")
ax = figure.add_subplot(1, 4, 3)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students',hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Binary rewards")
ax.set_ylim([0, .6])
plt.gcf().text(.63,.96,'Students in Less Effective Condition', fontsize=24)
ax = figure.add_subplot(1, 4, 4)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students', hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Normally-dist. rewards")
ax.set_ylim([0, .6])
# figure.subplots_adjust(wspace = .3)
# Panel labels (a)-(d) under the subplots.
starting_label_x = 0.005
label_y = .1
label_x_increment = 0.245
for i in range(4):
    plt.gcf().text(starting_label_x+i*label_x_increment, label_y, \
                   '(' + chr(ord('a')+i) + ')', fontsize=18)
figure.tight_layout()
figure.show()
figure.savefig(outdir+date_prefix+'SwitchToBestGraph.pdf', bbox_inches='tight')
print(outdir+date_prefix+'SwitchToBestGraph.pdf')
# +
# 2x2 variant of the same four panels, saved under a different date prefix.
figure = plt.figure(figsize = (12, 8))
legend_height = 1.85
stdError = True
# Bar for Binary Reward
combinedDfFile = '/Users//banditalgorithms/matlabScripts/CombinedDfMatlabProcessedSwitchToBest.csv'
df = pd.read_csv(combinedDfFile)
# Derived column: fraction of students routed to arm 2 (SEM unavailable -> 0).
df['mean_proportion_arm_2'] = df.mean_sample_size_2 / (df.mean_sample_size_2 + df.mean_sample_size_1)
df['sem_proportion_arm_2'] = 0
ax = figure.add_subplot(2, 2, 1)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student',
                          hasLegend=True, legend_anchor=(0.55, 0))
ax.set_title("Binary rewards")
ax.legend(sampling_type_names,ncol=3, bbox_to_anchor=(2, 1.75))
# plt.gcf().text(0.15,.96,'Average Rewards per Student', fontsize=24)
ax = figure.add_subplot(2, 2, 2)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student',
                          hasLegend=False, legend_anchor=(0.55, 1.55))
ax.set_title("Normally distributed rewards")
ax = figure.add_subplot(2, 2, 3)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students',
                          hasLegend=False, legend_anchor=(0.55, 1.55))
ax.set_title("Binary rewards")
ax.set_ylim([0, .6])
# plt.gcf().text(.63,.96,'Students in Less Effective Condition', fontsize=24)
ax = figure.add_subplot(2, 2, 4)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students',
                          hasLegend=False, legend_anchor=(0.55, 1.55))
ax.set_title("Normally distributed rewards")
ax.set_ylim([0, .6])
starting_label_x = 0.005
label_y = .1
label_x_increment = 0.245
# for i in range(4):
#     plt.gcf().text(starting_label_x+i*label_x_increment, label_y, \
#                    '(' + chr(ord('a')+i) + ')', fontsize=18)
figure.tight_layout()
figure.subplots_adjust(wspace = .5, hspace = .7)
figure.show()
save_date_prefix = "190617"
figure.savefig(outdir+save_date_prefix+'SwitchToBestGraph.pdf', bbox_inches='tight')
print(outdir+save_date_prefix+'SwitchToBestGraph.pdf')
# +
# NOTE(review): this cell is an exact duplicate of the 1x4 figure cell above
# and overwrites the same output PDF — possibly just an interactive re-run;
# confirm before deleting.
figure = plt.figure(figsize = (16, 4))
legend_height = 1.85
stdError = True
# Bar for Binary Reward
combinedDfFile = '/Users//banditalgorithms/matlabScripts/CombinedDfMatlabProcessedSwitchToBest.csv'
df = pd.read_csv(combinedDfFile)
df['mean_proportion_arm_2'] = df.mean_sample_size_2 / (df.mean_sample_size_2 + df.mean_sample_size_1)
df['sem_proportion_arm_2'] = 0
ax = figure.add_subplot(1, 4, 1)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student',hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Binary rewards")
ax.legend(sampling_type_names,ncol=3, bbox_to_anchor=(2.4, 1.57))
plt.gcf().text(0.15,.96,'Average Rewards per Student', fontsize=24)
ax = figure.add_subplot(1, 4, 2)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'avg_reward_per_step', ylabel='Reward per student', hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Normally-dist. rewards")
ax = figure.add_subplot(1, 4, 3)
ax = plot_avg_reward_gain(df, 'binary', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students',hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Binary rewards")
ax.set_ylim([0, .6])
plt.gcf().text(.63,.96,'Students in Less Effective Condition', fontsize=24)
ax = figure.add_subplot(1, 4, 4)
ax = plot_avg_reward_gain(df, 'normal', ax, series_name = 'proportion_arm_2', ylabel='Proportion of students', hasLegend=True, legend_anchor=(0.55, 1.55))
ax.set_title("Normally-dist. rewards")
ax.set_ylim([0, .6])
# figure.subplots_adjust(wspace = .3)
starting_label_x = 0.005
label_y = .1
label_x_increment = 0.245
for i in range(4):
    plt.gcf().text(starting_label_x+i*label_x_increment, label_y, \
                   '(' + chr(ord('a')+i) + ')', fontsize=18)
figure.tight_layout()
figure.show()
figure.savefig(outdir+date_prefix+'SwitchToBestGraph.pdf', bbox_inches='tight')
print(outdir+date_prefix+'SwitchToBestGraph.pdf')
| PostDiffMixture/simulations_folder/simulation_analysis_saves/banditsGraphs/BN_three_arms_switchToBest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Monthly mobility CSVs: one row per region per day, with percent change in
# recreation ('rec') and workplace ('work') mobility.
march = pd.read_csv('march.csv', names = ['region', 'day', 'rec', 'work'])
april = pd.read_csv('april.csv', names = ['region', 'day', 'rec', 'work'])
may = pd.read_csv('may.csv', names = ['region', 'day', 'rec', 'work'])
june = pd.read_csv('june.csv', names = ['region', 'day', 'rec', 'work'])
# +
# Sanity check: all months should have the same number of rows, and the row
# count should be divisible by 7 (one 7-day chunk per region).
print(np.shape(march))
print(np.shape(april))
print(np.shape(may))
print(np.shape(june))
print(np.shape(march.region))
print(357./7.)
# +
# Classification thresholds (percent change from baseline mobility).
zerolow = 0.
onelow = -26.
twolow = -53.

def _mobility_state(avg):
    """Map a weekly average mobility change to a discrete state 0-3.

    3: avg >= 0, 2: [-26, 0), 1: [-53, -26), 0: below -53.

    Bug fixes vs. the original hand-unrolled version: an April branch
    appended to a misspelled list (`aprrecstates` -> NameError), and the
    strict-inequality elif chains matched NO branch when an average fell
    exactly on a threshold, silently desynchronizing the state lists.
    """
    if avg >= zerolow:
        return 3
    elif avg >= onelow:
        return 2
    elif avg >= twolow:
        return 1
    return 0

# Weekly averages and discrete states, one list per (month, series).
marrecavgs, marworkavgs, marrecstate, marworkstate = [], [], [], []
aprrecavgs, aprworkavgs, aprrecstate, aprworkstate = [], [], [], []
mayrecavgs, mayworkavgs, mayrecstate, mayworkstate = [], [], [], []
junrecavgs, junworkavgs, junrecstate, junworkstate = [], [], [], []

_month_series = [
    (march, marrecavgs, marworkavgs, marrecstate, marworkstate),
    (april, aprrecavgs, aprworkavgs, aprrecstate, aprworkstate),
    (may, mayrecavgs, mayworkavgs, mayrecstate, mayworkstate),
    (june, junrecavgs, junworkavgs, junrecstate, junworkstate),
]

# Process each month in 7-row chunks (one week per region).
i = 0
while i < np.shape(march)[0]:
    for month_df, recavgs, workavgs, recstate, workstate in _month_series:
        wk = month_df.iloc[i:i+7]
        recavg = np.mean(wk.rec)
        workavg = np.mean(wk.work)
        recavgs.append(recavg)
        workavgs.append(workavg)
        recstate.append(_mobility_state(recavg))
        workstate.append(_mobility_state(workavg))
    i += 7

# Sanity check: every list should have the same length (number of regions).
for lst in (marrecavgs, aprrecavgs, mayrecavgs, junrecavgs,
            marworkavgs, aprworkavgs, mayworkavgs, junworkavgs,
            marrecstate, aprrecstate, mayrecstate, junrecstate,
            marworkstate, aprworkstate, mayworkstate, junworkstate):
    print(len(lst))
# -
# Assemble one DataFrame (one row per region) from the 16 per-month lists.
# All lists must be the same length or this raises ValueError.
mobility_data_andstates = pd.DataFrame(data={'marrec': marrecavgs, 'aprrec' : aprrecavgs, 'mayrec':mayrecavgs, 'junrec':junrecavgs
                                   ,'marwork':marworkavgs, 'aprwork':aprworkavgs, 'maywork':mayworkavgs, 'junwork':junworkavgs
                                   ,'marrecstates':marrecstate, 'aprrecstates':aprrecstate, 'mayrecstates':mayrecstate, 'junrecstates':junrecstate
                                  ,'marworkstates':marworkstate, 'aprworkstates':aprworkstate, 'mayworkstates':mayworkstate, 'junworkstates':junworkstate})
print(mobility_data_andstates)
# +
#mobility_data_andstates.to_csv('mobility_data_andstates.csv')
# -
| data/make_mobility_states.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
# Small demo matrix: values 0..14 in a 3x5 grid (contains one zero at [0,0]).
a = np.arange(15).reshape(3,5)
# -
print(a)
# +
import numpy as np
def zeroMatrix(inputMatrix):
    """Zero out every row and column of `inputMatrix` that contains a 0.

    The 2-D numpy array is modified in place; nothing is returned (same
    interface as before).

    Bug fix: the original zeroed rows/columns while still scanning, so the
    zeros it wrote were themselves treated as hits and the whole matrix
    cascaded to all zeros whenever any element was 0. We now record the
    target rows/columns in a first pass, then zero them in a second pass.
    (An unreachable `print` after the `return` was also removed.)
    """
    n_rows, n_cols = inputMatrix.shape
    zero_rows = set()
    zero_cols = set()
    # Pass 1: find the zeros in the original data.
    for row in range(n_rows):
        for col in range(n_cols):
            if inputMatrix[row][col] == 0:
                zero_rows.add(row)
                zero_cols.add(col)
    # Pass 2: zero out the recorded rows and columns.
    for row in zero_rows:
        for icol in range(n_cols):
            inputMatrix[row][icol] = 0
    for col in zero_cols:
        for irow in range(n_rows):
            inputMatrix[irow][col] = 0
    return
# -
# Demo: b[0,0] == 0, so zeroMatrix should clear row 0 and column 0.
b = np.arange(12).reshape(4,3)
print(b)
zeroMatrix(b)
print(b)
| zeromatrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# How to create a Deployment
# ==========================
#
# In this notebook, we show you how to create a Deployment with 3 ReplicaSets. These ReplicaSets are owned by the Deployment and are managed by the Deployment controller. We would also learn how to carry out RollingUpdate and RollBack to new and older versions of the deployment.
# + deletable=true editable=true
from kubernetes import client, config
# + [markdown] deletable=true editable=true
# ### Load config from default location
# + deletable=true editable=true
# Load credentials/cluster info from ~/.kube/config and build the
# extensions/v1beta1 API client (a pre-1.9 Kubernetes Deployment API).
config.load_kube_config()
extension = client.ExtensionsV1beta1Api()
# + [markdown] deletable=true editable=true
# ### Create Deployment object
# + deletable=true editable=true
deployment = client.ExtensionsV1beta1Deployment()
# + [markdown] deletable=true editable=true
# ### Fill required Deployment fields (apiVersion, kind, and metadata)
# + deletable=true editable=true
deployment.api_version = "extensions/v1beta1"
deployment.kind = "Deployment"
deployment.metadata = client.V1ObjectMeta(name="nginx-deployment")
# + [markdown] deletable=true editable=true
# ### A Deployment also needs a .spec section
# + deletable=true editable=true
spec = client.ExtensionsV1beta1DeploymentSpec()
spec.replicas = 3
# + [markdown] deletable=true editable=true
# ### Add Pod template in .spec.template section
# + deletable=true editable=true
spec.template = client.V1PodTemplateSpec()
# Pod labels must match the Deployment's (implicit) selector.
spec.template.metadata = client.V1ObjectMeta(labels={"app": "nginx"})
spec.template.spec = client.V1PodSpec()
# + [markdown] deletable=true editable=true
# ### Pod template container description
# + deletable=true editable=true
container = client.V1Container()
container.name="nginx"
container.image="nginx:1.7.9"
# (Stray space after the dot below is legal Python, just unconventional.)
container. ports = [client.V1ContainerPort(container_port=80)]
# + deletable=true editable=true
spec.template.spec.containers = [container]
deployment.spec = spec
# + [markdown] deletable=true editable=true
# ### Create Deployment
# + deletable=true editable=true
# Create the Deployment in the 'default' namespace.
extension.create_namespaced_deployment(namespace="default", body=deployment)
# + [markdown] deletable=true editable=true
# ### Update container image
# + deletable=true editable=true
deployment.spec.template.spec.containers[0].image = "nginx:1.9.1"
# + [markdown] deletable=true editable=true
# ### Apply update (RollingUpdate)
# + deletable=true editable=true
# Replacing the Deployment spec triggers a rolling update of the pods.
extension.replace_namespaced_deployment(name="nginx-deployment", namespace="default", body=deployment)
# + [markdown] deletable=true editable=true
# ### Create DeploymentRollback object
# This object is used to rollback to a previous version of the deployment.
# + deletable=true editable=true
rollback = client.ExtensionsV1beta1DeploymentRollback()
rollback.api_version = "extensions/v1beta1"
rollback.kind = "DeploymentRollback"
rollback.rollback_to = client.ExtensionsV1beta1RollbackConfig()
# revision 0 means "roll back to the previous revision".
rollback.rollback_to.revision = 0
rollback.name = "nginx-deployment"
# + [markdown] deletable=true editable=true
# ### Execute RollBack
# + deletable=true editable=true
# NOTE(review): the doubled "_rollback_rollback" suffix matches some old
# generated client versions, but in current clients the method is
# create_namespaced_deployment_rollback — verify against the pinned
# kubernetes-client version before running.
extension.create_namespaced_deployment_rollback_rollback(name="nginx-deployment", namespace="default", body=rollback)
# + [markdown] deletable=true editable=true
# ### Delete Deployment
# + deletable=true editable=true
# Foreground propagation deletes the owned ReplicaSets/Pods before the
# Deployment object itself is removed.
extension.delete_namespaced_deployment(name="nginx-deployment", namespace="default", body=client.V1DeleteOptions(propagation_policy="Foreground", grace_period_seconds=5))
# + deletable=true editable=true
| examples/notebooks/create_deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Copyright (c) Cornac Authors. All rights reserved.*
#
# *Licensed under the Apache 2.0 License.*
#
# # Hyperparameter Search for VAECF
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/PreferredAI/cornac/blob/master/tutorials/param_search_vaecf.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/PreferredAI/cornac/blob/master/tutorials/param_search_vaecf.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# This notebook describes how to perform hyperparameter searches in Cornac. As a running example, we consider the VAECF model and MovieLens 100K dataset.
# ## Setup
import numpy as np
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.hyperopt import Discrete, Continuous
from cornac.hyperopt import GridSearch, RandomSearch
# ## Prepare an experiment
# First, we load our data and instantiate the necessary objects for running an experiment.
# +
# Load MovieLens 100K ratings
ml_100k = movielens.load_feedback(variant="100K")
# Define an evaluation method to split feedback into train, validation and test sets
ratio_split = RatioSplit(data=ml_100k, test_size=0.1, val_size=0.1, seed=123, verbose=False)
# Instantiate Recall@100 for evaluation
rec100 = cornac.metrics.Recall(100)
# Instantiate VAECF with fixed hyperparameters (k: latent dim, h: hidden dim)
vaecf = cornac.models.VAECF(k=20, h=40, learning_rate=0.005, n_epochs=100, seed=123, verbose=False)
# -
# ## Perform searches for the *beta* parameter
# Assume for now we are interested in determining a good value for the hyperparameter $\beta$, the weight of the KL term in VAECF's objective:
#
# $$ \mathcal{L}(\theta,\phi) = \mathbb{E}_{q_{\phi}(z|r)}[\log{p_{\theta}(r|z)}] - \beta \cdot KL(q_{\phi}(z|r)||p(z)), $$
#
# where $z$ is the latent variable (user representation), and $r$ is the observed user-item feedback.
# ### Wrap vaecf into search methods
# All we need to do is to wrap our instantiated model ``vaecf`` into a search method and specify a search space for *beta*, a metric of interest, as well as the evaluation method. Cornac supports two types of searching methods, namely random search and grid search, we consider both of them here.
# +
# GridSearch: exhaustively evaluate beta over 11 evenly spaced values in [0, 2].
gs_vaecf = GridSearch(
    model=vaecf,
    space=[
        Discrete("beta", np.linspace(0.0, 2.0, 11)),
    ],
    metric=rec100,
    eval_method=ratio_split,
)
# RandomSearch: 20 random draws of beta from the continuous range [0, 2].
# NOTE: 'n_trails' (sic) appears to be cornac.hyperopt's actual parameter
# spelling — verify against the pinned cornac version before "fixing".
rs_vaecf = RandomSearch(
    model=vaecf,
    space=[
        Continuous("beta", low=0.0, high=2.0),
    ],
    metric=rec100,
    eval_method=ratio_split,
    n_trails=20,
)
# -
# As evident from above, there are two types of parameter search domain, namely `Discrete` and `Continuous`. More details in the [documentation](https://cornac.readthedocs.io/en/latest/hyperopt.html).
# ### Run an experiment
# Next, we put everything into an experiment and run it. The results on the validation set, as well as the test results corresponding to the best value of *beta* found by each search method (as measured on the validation set in terms of Recall@100) will be displayed. One can print out more information during this step by setting `verbose=True` when instantiating `VAECF`.
# Run both searches; validation results select the best beta per method and
# test results are reported for the selected models.
cornac.Experiment(
    eval_method=ratio_split,
    models=[gs_vaecf, rs_vaecf],
    metrics=[rec100],
    user_based=False,
).run()
# The best *beta* values found by our search methods are as follows,
print('Grid search: beta =', gs_vaecf.best_params.get('beta'))
print('Random search: beta = {:.2f}'.format(rs_vaecf.best_params.get('beta')))
# It is also possible to access the best model through the attribute `best_model`.
# ## Perform searches for multiple parameters
# We can also perform a joint search for multiple parameters. For instance, in addition to *beta*, lets include the number of epochs.
# +
# Instantiate VAECF with fixed hyperparameters (n_epochs now searched over)
vaecf = cornac.models.VAECF(k=20, h=40, learning_rate=0.005, seed=123, verbose=False)
# GridSearch over the cross product of n_epochs and beta values.
gs_vaecf = GridSearch(
    model=vaecf,
    space=[
        Discrete("n_epochs", [20, 50, 100]),
        Discrete("beta", [0.0, 0.4, 0.8, 1.0, 1.4, 1.8, 2.0]),
    ],
    metric=rec100,
    eval_method=ratio_split,
)
# RandomSearch: discrete draw for n_epochs, continuous draw for beta.
# NOTE: 'n_trails' (sic) appears to be cornac.hyperopt's parameter spelling.
rs_vaecf = RandomSearch(
    model=vaecf,
    space=[
        Discrete("n_epochs", [20, 50, 100]),
        Continuous("beta", low=0.0, high=2.0),
    ],
    metric=rec100,
    eval_method=ratio_split,
    n_trails=20,
)
# Put everything into an experiment and run it
cornac.Experiment(
    eval_method=ratio_split,
    models=[gs_vaecf, rs_vaecf],
    metrics=[rec100],
    user_based=False,
).run()
# -
# The best hyperparameter settings are:
print('Grid search: ', gs_vaecf.best_params)
print('Random search: ', rs_vaecf.best_params)
# The output of this experiment is different from the previous one, due to a "correlation" between the effect of the considered parameters on training. Recall that *beta* can be thought of as a regularization coefficient controlling the effect of the KL term in VAECF. Previously, we fixed the number of epochs to 100, and more regularization seemed to be necessary to avoid overfitting (our searches selected higher beta values). However, the latter case reveals that we may achieve competitive performance with a smaller *beta* if we reduce the number of training iterations.
| tutorials/param_search_vaecf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Atlas API For Data Science Demo
# +
import requests
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import DateFormatter
register_matplotlib_converters()
# -
# ## API Setup
# +
api_key = 'YOUR_API_KEY_HERE'
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# -
# ### API Parameters for Crop Health Fetch
geo_id = '19071'
country_iso = 'USA'
start_date = '2018-01-01'
end_date = '2020-03-25'
crop = 'corn'
geo_level = 'level_3'
# Crop health index and the 5 year median
metric = 'TELLUSCHIN,TLCHIN5YMN'
# ### API Request using 'metrics' Endpoint
# You can use this method if you know which metric(s) you want.
crop_health_request = requests.get('https://api.kernel.telluslabs.com/api/v1/metrics/',
params={'geo_id': geo_id,
'country_iso': country_iso,
'geo_level': geo_level,
'start_date': start_date,
'end_date': end_date,
'crop': crop,
'metric_code': metric,
'api_key': api_key},
headers={'Accept': 'text/csv'})
# ### Get Results
# +
# pandas.compat.StringIO was removed in pandas 1.0 — use the stdlib io module instead.
from io import StringIO

# Parse the CSV response body into a DataFrame and convert the date column
# to datetimes so it can be plotted on a time axis.
crop_health = pd.read_csv(StringIO(crop_health_request.text), sep=",")
crop_health['metric_date'] = pd.to_datetime(crop_health['metric_date'])
crop_health.head(10)
# -
# ### Crop Health Curve
# +
# Create figure and plot space
fig, ax = plt.subplots(figsize=(10, 10))
ax.plot("metric_date", 'TELLUSCHIN', data=crop_health, color='teal')
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Crop Health Index",
title="Crop Health Fremont, Iowa"
)
# Define the date format
date_form = DateFormatter("%b-%Y")
ax.xaxis.set_major_formatter(date_form)
plt.show()
# -
# ### Outcomes - Example using the 'metrics' Endpoint
country_iso = 'BRA'
start_date = '2019-12-01'
end_date = '2020-03-31'
crop = 'corn'
geo_level = 'level_1'
in_yield = 'INBRCRM1WK'
# ### API Request
outcomes_request = requests.get('https://api.kernel.telluslabs.com/api/v1/metrics/',
params={'country_iso': country_iso,
'geo_level': geo_level,
'start_date': start_date,
'end_date': end_date,
'crop': crop,
'metric_code': in_yield,
'api_key': api_key},
headers={'Accept': 'text/csv'})
# ### Check which data are returned
# outcomes_request.json()['result']
# pandas.compat.StringIO was removed in pandas 1.0 — use the stdlib io module instead.
from io import StringIO

# Parse the CSV response body and convert dates for time-axis plotting.
indigo_yield = pd.read_csv(StringIO(outcomes_request.text), sep=",")
indigo_yield['metric_date'] = pd.to_datetime(indigo_yield['metric_date'])
indigo_yield.head(10)
# ### Get the Indigo 2020 Yield Model for Brazil Corn
# ### Plot
# +
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(indigo_yield['metric_date'],indigo_yield['INBRCRM1WK'], color = 'purple')
plt.xticks(rotation=45)
plt.title('BRAZIL Corn Predictions 2020')
plt.show()
# -
# ## Zambia Corn
# ##### Getting data from a specific endpoint and parsing it a little differently as another option.
country_iso = 'ZMB'
start_date = '2010-01-01'
end_date = '2020-04-09'
crop = 'corn'
geo_level = 'level_1'
metric = 'TELLUSCHIN'
# +
crop_health_request = requests.get(' https://api.kernel.telluslabs.com/api/v1/plant_health/',
params={'country_iso': country_iso,
'geo_level': geo_level,
'start_date': start_date,
'end_date': end_date,
'crop': crop,
'metric_code': metric,
'api_key': api_key},
headers = headers)
# -
crop_health = pd.DataFrame.from_records(crop_health_request.json()['result'][0]['data'][metric], columns=['metric_date', 'CHI'])
crop_health['metric_date']= pd.to_datetime(crop_health['metric_date'])
crop_health.head(10)
# +
# Create figure and plot space
fig, ax = plt.subplots(figsize=(10, 10))
ax.plot("metric_date", 'CHI', data=crop_health, color='teal')
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Crop Health Index",
title="Crop Health - Zambia Corn"
)
# Define the date format
date_form = DateFormatter("%b-%Y")
ax.xaxis.set_major_formatter(date_form)
plt.show()
| notebooks/Atlas_API_DataScience_Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
import math
import random
import pandas as pd
import numpy as np
#import plotnine as p9
# -
# Specify path and file name of file containing list of file names with metadata:
# +
filenames_path = r"C:\Users\m229246\OneDrive - AZCollaboration\Documents\!Other Data\!2015 - Pharm Dev\!Work Items\Brilinta Particle Size\Data\!TEXT FILES - RAW DATA/"
#file_path = r"C:\Users\m229246\OneDrive - AZCollaboration\Documents\!Other Data\!2015 - Pharm Dev\!Work Items\Brilinta Particle Size\Data\!TEXT FILES - RAW DATA/"
filenames_name = "Raw data & meta data.xlsx"
df_names = pd.read_excel(filenames_path + filenames_name,sheet_name='Data for Python DEMO')
#print(df_names)
# +
#print(df_names)
# -
# # Create one stacked file combining all individual files
# See list of the file names:
for i, j in df_names.iterrows():
file_name = j['Filename']
#print(j)
print(file_name)
# Loop through the rows of file "df_names":
# Collect one DataFrame per file and concatenate once at the end — repeated
# pd.concat inside the loop re-copies all prior rows (quadratic cost).
frames = []
for i, j in df_names.iterrows():
    file_name = j['Filename']
    # Count data records (header excluded). `with` guarantees the file handle
    # is closed promptly (the previous bare open() leaked the handle).
    with open(file_name) as fh:
        n = sum(1 for line in fh) - 1
    s = 5000  # desired sample size
    # Random 0-indexed rows to skip so exactly `s` records remain;
    # row 0 (the header) is never in the skip list.
    skip = sorted(random.sample(range(1, n + 1), n - s))
    # Read each file skipping the random rows and tag it with its batch id:
    df = pd.read_csv(file_name, sep='\t', encoding='latin1', skiprows=skip)
    df['Batch'] = j['Batch']
    frames.append(df)
df = pd.concat(frames, ignore_index=True)
print(df)
# +
#df.to_excel(filenames_path + 'df_test_all.xlsx')
# -
# # Summarise the data by binning
# +
# Create a new variable which is log cylinder volume:
df["cyl_vol"]=math.pi*df["Length (µm)"]*(df["Width (µm)"]/2)**2
df["log_cyl_vol"]=np.log10(df["cyl_vol"])
# Create new variable "bin centres":
df["bins"] = pd.cut(df['log_cyl_vol'], bins=np.linspace(0, 10, 41))
df["bin_centres"] = df["bins"].apply(lambda x: x.mid)
# Create a df with medians by bin:
df_summ = df.groupby(['Batch','bin_centres'])[["Circularity","HS Circularity","Convexity","Solidity","Aspect Ratio","Elongation","Intensity Mean","Intensity SD"]].median()
df_summ = df_summ.reset_index()
df_summ = df_summ.dropna(subset=['Circularity'])
# Create a df with particle counts by bin:
df_summ2 = df.groupby(['Batch','bin_centres'])[["Id"]].count()
df_summ2 = df_summ2.reset_index()
df_summ2 = df_summ2.dropna(subset=['Id'])
# Create a df with sum of cylinder volume by bin:
df_summ3 = df.groupby(['Batch','bin_centres'])[["cyl_vol"]].sum()
df_summ3 = df_summ3.reset_index()
df_summ3 = df_summ3.dropna(subset=['cyl_vol'])
# Merge together: df_summ, df_summ2 and df_summ3:
df_comb = df_summ2.merge(df_summ, how='outer', on=['Batch','bin_centres'])
df_comb2 = df_comb.merge(df_summ3, how='outer', on=['Batch','bin_centres'] )
df_comb2 = df_comb2.rename(columns = {'Id':'Count'})
df_comb2 = df_comb2.rename(columns = {'cyl_vol':'Sum of cylinder volume'})
# -
print(df_comb2)
# +
#df_comb2['cum_sum_cyl_vol'] = df_comb2.groupby('Batch')['Sum of cylinder volume'].transform('cumsum')
#df_comb2['cum_perc_cyl_vol_1'] = 100*df_comb2['cum_sum_cyl_vol'] / df_comb2.groupby('Batch')['Sum of cylinder volume'].transform('sum')
# -
# # Create 1st Excel file for Spotfire - binned data
# Calculate % cylinder volume and cumulative % per batch
# Similarly for count of particles
df_comb2['perc_cyl_vol'] = 100*df_comb2['Sum of cylinder volume'] / df_comb2.groupby('Batch')['Sum of cylinder volume'].transform('sum')
df_comb2['cum_perc_cyl_vol'] = df_comb2.groupby('Batch')['perc_cyl_vol'].transform('cumsum')
df_comb2['perc_N'] = 100*df_comb2['Count'] / df_comb2.groupby('Batch')['Count'].transform('sum')
df_comb2['cum_perc_N'] = df_comb2.groupby('Batch')['perc_N'].transform('cumsum')
print(df_comb2)
# Merge with df_names to join with meta data:
df_comb2 = df_comb2.merge(df_names, how='outer', on=['Batch'])
df_comb2 = df_comb2.drop(['Filename'], axis=1)
# Output to Excel file
df_comb2.to_excel(filenames_path + 'df_comb2b DEMO.xlsx')
# # Create 2nd Excel file for Modde - pivot of binned data
# Count data:
df_count = pd.pivot_table(df_comb2, values='Count', index=['Batch'],columns='bin_centres')
df_count = df_count.add_prefix('Count_')
# Shape data:
df_shape1 = pd.pivot_table(df_comb2, values='Circularity', index=['Batch'],columns='bin_centres')
df_shape1 = df_shape1.add_prefix('Circularity_')
df_shape2 = pd.pivot_table(df_comb2, values='HS Circularity', index=['Batch'],columns='bin_centres')
df_shape2 = df_shape2.add_prefix('HS_Circularity_')
df_shape3 = pd.pivot_table(df_comb2, values='Convexity', index=['Batch'],columns='bin_centres')
df_shape3 = df_shape3.add_prefix('Convexity_')
df_shape4 = pd.pivot_table(df_comb2, values='Solidity', index=['Batch'],columns='bin_centres')
df_shape4 = df_shape4.add_prefix('Solidity_')
df_shape5 = pd.pivot_table(df_comb2, values='Aspect Ratio', index=['Batch'],columns='bin_centres')
df_shape5 = df_shape5.add_prefix('Aspect_Ratio_')
df_shape6 = pd.pivot_table(df_comb2, values='Elongation', index=['Batch'],columns='bin_centres')
df_shape6 = df_shape6.add_prefix('Elongation_')
df_shape7 = pd.pivot_table(df_comb2, values='Intensity Mean', index=['Batch'],columns='bin_centres')
df_shape7 = df_shape7.add_prefix('Intensity_Mean_')
df_shape8 = pd.pivot_table(df_comb2, values='Intensity SD', index=['Batch'],columns='bin_centres')
df_shape8 = df_shape8.add_prefix('Intensity_SD_')
from functools import partial, reduce
dfshapes = [df_shape1, df_shape2, df_shape3, df_shape4, df_shape5, df_shape6, df_shape7, df_shape8]
merge = partial(pd.merge, on=['Batch'], how='outer')
dfshapes = reduce(merge, dfshapes)
dfshapes = dfshapes.merge(df_names, how='outer', on=['Batch'])
dfshapes = dfshapes.drop(['Filename'], axis=1)
# Export pivoted data to Excel:
dfshapes.to_excel(filenames_path + 'df_shape DEMO.xlsx')
| challenge/RobertShaw/G3 example - DEMO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
using Pkg,Statistics,Random,Printf,GZip,Knet,Plots,LinearAlgebra
Random.seed!(1);
Range=3.0; # range of the x values for the target Gaussian function
Incr = 0.3; # determines the number of samples from which we'll learn
Noise_std=0.1; # add noise on the Gaussian
# +
# generate the data from which we'll learn the Gaussian function
# obligatory arguments listed before ";" while optional arguments come after ";".
# Generate noisy samples of the target Gaussian y = exp(-x^2).
#   noise: std-dev of the additive Gaussian noise on y.
# NOTE(review): the `range` keyword is currently unused — x is built from the
# globals `Range` and `Incr`, so passing range=... has no effect; confirm intent.
function gen_noisy_gaussian(;range=1.0,noise=0.1)
    # x = sort(randn(len)*range) # randn(): normal distributed
    x = collect(-Range:Incr:Range)  # evenly spaced sample locations (globals Range/Incr)
    # y = exp.(-x.^2).*(-noise*2*(rand(length(x)).-0.5).+1) # fractional noise
    y = exp.(-x.^2) + randn(length(x))*noise; # additive gaussian noise
    return (x,y)
end
# -
(x_train,y_train) = gen_noisy_gaussian(range=Range,noise=Noise_std);
N_train =length(x_train) # number of training data points
plot(x_train,[y_train,exp.(-x_train.^2)])
# transpose for easier manipulation during training
x_train = permutedims(x_train);
y_train = permutedims(y_train);
Layersize = 50; # number of neurons in the hidden layer
# +
# output = 1
# input = 1
# hidden = 50
# batchsize = 1
# one layer: tahn.(w[hidden,input] * x[input,batchsize] .+ b[hidden,1])
# -
Random.seed!(2); # Modify weight initialization w/o changing the training data after kernel resets.
# initialize weights
w = [0.1*rand(Layersize,1),0.1*rand(Layersize,1),0.1*rand(1,Layersize),0.1*rand(1,1)]
# +
# dimensions:
# w[1]: (hidden x input) - input->hidden weights
# x: (input x batchsize) - input
# w[2]: (hidden x 1) - input->hidden bias
# w[3]: (hidden x output) - hidden-> output weights
# w[4]: (output x 1) - hidden->output bias
# Mean squared error of the 1-hidden-layer tanh network on inputs x, targets y.
# Weight layout: w[1] input->hidden weights, w[2] hidden biases,
#                w[3] hidden->output weights, w[4] output bias.
function loss(w,x,y)
    guesses = sum(w[3] * tanh.(w[1]*x.+w[2]) .+ w[4],dims=1) # w[1]=w, w[2]=w0, w[3]=w', w[4]=w0'
    return mean(abs2,y-guesses)  # MSE over the batch
end
# -
# construct the gradient-calculating function
lossgradient = grad(loss)
dw = lossgradient(w,[x_train[1]],[y_train[1]]) # dw has dimnensions of w
# output is the gradient w.r.t. the corresponding weight
# One epoch of per-sample (batch size 1) SGD over the training set, in fixed order.
# Mutates `w` in place and returns it; `lr` is the learning rate.
# Reads the globals N_train, x_train, y_train and lossgradient.
function mytrain!(w;lr=0.1)
    for n=1:N_train
        # for n=1:randperm(N_train)
        # NOTE(review): the commented shuffle above would need `for n in randperm(N_train)`.
        dw = lossgradient(w,[x_train[n]],[y_train[n]]);  # gradient for one training sample
        for i=1:length(w)
            for j=1:length(w[i])
                w[i][j] -= lr*dw[i][j]  # elementwise gradient-descent update
            end
        end
    end
    return w
end
Nepoch = 200000; # This needs to be determined by trial and error, depending on the data size.
Learning_Rate = 0.01;
# collect weights after each epoch in an array (trajectory)
@time weights = [ deepcopy(mytrain!(w,lr=Learning_Rate)) for epoch=1:Nepoch ]; # copy only copies the top layer, does not descend.
xplot=collect(-Range:0.01:Range);
y_pred = permutedims(sum(w[3] * tanh.(w[1]*permutedims(xplot).+w[2]) .+ w[4],dims=1));
# plot the converged function, the initial gaussian and the noisy training samples
plot(xplot,[y_pred,exp.(-xplot.^2)]); scatter!(x_train,y_train,leg=false)
# +
SamplingRate=10;
x = collect(1:SamplingRate:Nepoch);
y = [loss(weights[i],x_train,y_train) for i in x];
plot(x,y;yscale=:log,xscale=:log)
# +
# STOP HERE AND GUESS THE "GOOD" MINIMUM FROM THE PLATEAU IN THE LOSS CURVE
# -
Ngoodmin = 1000;
# +
# check the predicted function after Ngoodmin epochs
# -
y_pred = permutedims(sum(weights[Ngoodmin][3] * tanh.(weights[Ngoodmin][1]*permutedims(xplot).+weights[Ngoodmin][2]) .+ weights[Ngoodmin][4],dims=1));
# plot the converged function, the initial gaussian and the noisy training samples
plot(xplot,[y_pred,exp.(-xplot.^2)]); scatter!(x_train,y_train,leg=false)
# +
# Look at the loss on (w0 -> wg), (wg -> w*) and (w0 -> w*) segments (wg is the "good" solution)
# +
x = collect(1:100);
y = [loss(weights[1]+0.01*i*(weights[Ngoodmin]-weights[1]),x_train,y_train) for i in x];
plot(x,y)
# +
x = collect(1:100);
y = [loss(weights[Ngoodmin]+0.01*i*(weights[Nepoch]-weights[Ngoodmin]),x_train,y_train) for i in x];
plot(x,y)
# +
x = collect(1:100);
y = [loss(weights[1]+0.01*i*(weights[Nepoch]-weights[1]),x_train,y_train) for i in x];
plot(x,y)
# -
# sgd steps
deltaweights = [ vcat(weights[i][1],weights[i][2],weights[i][3]',weights[i][4])-vcat(weights[i-1][1],weights[i-1][2],weights[i-1][3]',weights[i-1][4]) for i=2:Nepoch ];
# coarse sgd steps
Ncoarse = 100;
coarse_dw = [ sum(deltaweights[1+(i-1)*Ncoarse:i*Ncoarse]) for i=1:floor(Int,(Nepoch-1)/Ncoarse)];
coarse_normdw = [ v/norm(v) for v in coarse_dw];
dot_normdw = dot.(coarse_normdw,coarse_normdw');
heatmap(dot_normdw) # would be better if I could plot this heatmap in log-scale. I need to sample dw's accordingly.
# Note that each increment in x and y axes corresponds to Ncoarse epochs (the "good" prediction is already there at 10)
Nzoom = floor(Int,5*Ngoodmin/Ncoarse);
heatmap(dot_normdw[1:Nzoom,1:Nzoom])
# +
# now try to find the dimensionality of the GD trajectory
# -
# Component of `v` orthogonal to the span of the orthonormal columns of `basis`.
function project_out(v,basis) # returns the component of v orthogonal to the support of orthonormal cols of "basis"
    if length(basis)==0
        return v  # empty basis: nothing to project out
    else
        dots = v'*basis  # projection coefficient of v onto each basis column
        return (v-sum(basis*diagm(0=>dots[:]),dims=2))  # subtract the in-span part
    end
end
# Greedy estimate of the dimensionality of a gradient-descent trajectory.
# Walks the (normalized) GD steps; whenever a step's component orthogonal to the
# basis collected so far exceeds `minnorm`, that direction becomes a new basis vector.
# Returns (basis matrix, per-direction step counts); length of the strides list
# is used by the caller as the effective dimension of the trajectory.
function gd_support(gdsteps;minnorm=0.5) # minnorm = threshold beyond which perp. gradient component is considered new
    mybasis = Array{Float64}(undef,length(gdsteps[1]),0) # records the basis vectors for the past steps
    mystrides = []; # records the number of gd steps taken in the current manifold
    nsteps = 1;
    for v in gdsteps
        vperp = project_out(v,mybasis)  # part of this step not explained by the current basis
        if (norm(vperp) > minnorm)
            mybasis = hcat(mybasis,vperp/norm(vperp))  # new direction discovered
            mystrides = push!(mystrides,nsteps)
            nsteps = 1
        else
            nsteps += 1  # still moving inside the current subspace
        end
    end
    push!(mystrides,nsteps)  # record the final stride
    return(mybasis,mystrides)
end
(mybasis,mystrides) = gd_support(coarse_normdw);
length(mystrides) # this is the effective dimension of the gd trajectory
# +
# If the gd steps were random vectors what would be the calculated dimension?
# -
rand_dw = [ randn(length(coarse_dw[1])) for i=1:floor(Int,(Nepoch-1)/100)];
rand_normdw = [ v/norm(v) for v in rand_dw];
(randbasis,randstrides) = gd_support(rand_normdw);
length(randstrides) # effective dimension of the "random" gd steps (same number of steps, same vector size)
| archive/20181019-noisy-gaussian_sgd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Usage
#
# ## Installation
#
# To Install please run:
#
# ```bash
# pip install pydatafaker
# ```
#
# ## Business
#
# The business module allows you to create fake business data. Calling `business.create_business()` will return a dictionary of related tables.
import pandas as pd
from pydatafaker import business
biz = business.create_business()
biz.keys()
# Each value inside the dictionary contains a Pandas DataFrame.
biz["vendor_table"]
biz["employee_table"]
biz["po_table"]
biz["invoice_summary_table"]
biz["invoice_line_item_table"]
# Tables can be joined together to add additional details.
# +
invoice_summary = biz['invoice_summary_table']
vendors = biz['vendor_table']
pd.merge(invoice_summary, vendors, how='left', on='vendor_id')
# -
# ## School
#
# The school module allows you to generate fake school data
import pandas as pd
from pydatafaker import school
skool = school.create_school()
skool.keys()
skool['student_table']
skool['teacher_table']
skool['room_table']
skool['grade_table']
| docs/source/usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Pytorch
# language: python
# name: pytorch
# ---
# ## Part 3.6. Load a pretrained weights and report the accuracy (<span style="color:green">3.0 points</span>)
# import necessary packages
import os, time
import torch
import requests, zipfile, sys
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from utils import download_fm
import torchvision
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, utils
import random, matplotlib
import pandas as pd
# +
class FashionMNISTDataset(Dataset):
    """Fashion-MNIST dataset backed by a headerless CSV of (image path, label) rows."""
    def __init__(self, csv_file, transform=None):
        # Column 0: path to the image file; column 1: integer class label.
        self.frame = pd.read_csv(csv_file, header=None)
        self.transform = transform
        # Human-readable names for the 10 integer labels (index == label value).
        self.label_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                            'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    def __getitem__(self, idx):
        """Return {'image': PIL image (or transformed result), 'label': int} for row idx."""
        image_name = self.frame.iloc[idx, 0]
        image = Image.open(image_name)
        label = self.frame.iloc[idx, 1]
        if self.transform:
            image = self.transform(image)
        sample = {'image': image, 'label': label}
        return sample
    def __len__(self):
        """Number of samples (CSV rows)."""
        return len(self.frame)
def getTestData(csv_file='', batch_size=64):
    """Build a DataLoader over the Fashion-MNIST test CSV with ToTensor + Normalize.

    NOTE(review): despite the name, these are not ImageNet statistics — a
    single-channel mean/std of 0.5 simply rescales pixels to [-1, 1].
    """
    __imagenet_stats = {'mean': [0.5], 'std': [0.5]}
    # TODO: fill these blanks
    transformed_test = FashionMNISTDataset(csv_file=csv_file,
                                           transform=transforms.Compose([
                                               transforms.ToTensor(),
                                               transforms.Normalize(__imagenet_stats['mean'],
                                                                    __imagenet_stats['std'])
                                           ]))
    # NOTE(review): shuffle=True on a *test* loader is unusual — confirm intent.
    dataloader_test = DataLoader(transformed_test, batch_size, shuffle=True, num_workers=0)
    return dataloader_test
# -
# ### Define this model
# 
# ### Fill in the model (<span style="color:green">1.0 point</span>)
class Network(nn.Module):
    """Small CNN for single-channel Fashion-MNIST images.

    Architecture: [Conv(1->6) + BN + ReLU + MaxPool/2] ->
                  [Conv(6->12) + BN + ReLU + MaxPool/2] ->
                  FC(588->128) + ReLU -> FC(128->10).
    Attribute names (conv1, bn1, ...) are the state_dict keys and must match
    the checkpoint loaded later ('./net.pth') — do not rename.
    """
    def __init__(self, num_classes):
        super().__init__()
        # define layers
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(6)
        self.relu1 = nn.ReLU()
        self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # TODO: fill the rest part (0.5 points)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(12)
        self.relu2 = nn.ReLU()
        self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # 588 = 12 channels * 7 * 7, i.e. a 28x28 input halved by two poolings.
        self.fc3 = nn.Linear(588, 128)
        self.relu3 = nn.ReLU()
        # NOTE(review): the output layer is hard-wired to 10 units; the
        # `num_classes` argument is unused — confirm it should replace the 10.
        self.fc4 = nn.Linear(128, 10)
    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.max_pool1(x)
        # TODO: fill these blanks (0.5 points)
        # fill the rest part, you may need more lines like x = ...
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.max_pool2(x)
        x = x.reshape(x.shape[0], -1)  # flatten to (batch, 588) for the linear layers
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        return x
# ### Load the pretrained weights (<span style="color:green">1.0 point</span>)
# +
# TODO: Create test_loader with no shuffling
# NOTE(review): getTestData builds its DataLoader with shuffle=True, which
# contradicts the "no shuffling" instruction above — verify.
test_loader = getTestData(csv_file='./fashion_mnist_extracted/test.csv', batch_size=64)
net = Network(num_classes=10)
pretrained = True  # toggle to compare constant-initialized vs pretrained weights
if not pretrained:
    # Deterministic constant initialization, used to report the no-weights baseline.
    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.constant_(m.weight, 1.5)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, -1.5)
            nn.init.constant_(m.bias, 0)
    net.apply(weights_init)
else:
    # TODO: Load the model and report the accuracy of model with and without pretrained weights (1 point)
    # Load the checkpoint saved by the training notebook (keys must match Network's attributes).
    net.load_state_dict(torch.load('./net.pth'))
    pass
if torch.cuda.is_available():
    net = net.cuda()
# evaluation function
def eval(net, data_loader):
    """Compute classification accuracy of `net` over all batches in `data_loader`.

    Each batch is a dict with keys 'image' (inputs) and 'label' (targets), as
    produced by FashionMNISTDataset. Returns the accuracy as a 0-d float tensor.
    Assumes the loader yields at least one batch.
    NOTE: the name shadows the builtin `eval`; kept for caller compatibility.
    """
    net.eval()  # inference mode: freezes BatchNorm statistics, disables dropout
    # Run inputs on the same device as the model — previously the inputs stayed
    # on the CPU, so a model moved to CUDA (see net.cuda() above) would crash.
    first_param = next(net.parameters(), None)
    device = first_param.device if first_param is not None else torch.device('cpu')
    correct = 0.0
    num_images = 0.0
    with torch.no_grad():  # no gradient bookkeeping needed for evaluation
        for i, sample in enumerate(data_loader):
            images = sample['image'].to(device)
            labels = sample['label'].to(device)
            outs = net(images)
            _, preds = outs.max(1)  # predicted class = argmax over logits
            correct += preds.eq(labels).sum()
            num_images += len(labels)
    acc = correct.float() / num_images
    return acc
# TODO: test your network here on testing data
acc_test = eval(net, test_loader)
print('Accuracy on testing data: {:.05f}'.format(acc_test))
# -
# ### Report the accuracy (<span style="color:green">1.0 point</span>)
# TODO:
# + Not load weights (<span style="color:green">0.5 point</span>)
# ```
# pretrained = False
# ```
# Accuracy on testing data: 0.10000
#
#
# + Correctly load model weights (<span style="color:green">0.5 point</span>)
# ```
# pretrained = True
# ```
# Accuracy on testing data: 0.89260
| assignment4/.ipynb_checkpoints/Assignment_4_load_model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_context(context="talk",
rc={'xtick.labelsize': 14,
'ytick.labelsize': 14})
colors = sns.color_palette()
# Original data: https://www.cdc.gov.tw/
# Aggregated data: https://www.worldometers.info/coronavirus/country/taiwan
start = "2020-02-17"
daily_cases = [2,0,1,1,2,0,2,2,1,1,0,2,5,1,1,1,0,2,1,0,0,0,2,1,1,1,3,6,8,10,23,8,27,18,16,26,21,19,17,15,16,15,8,16,7,10,9,7,8,10,3,3,1,2,3,3,5,0,2,0,0,3,22,2,3,1,1,1,1,0,0,0,0,0,0,3,0,6,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,2,0,0,0,0,0,1,2,0,1,0,0,0,0,3,0,0,4,5,0,0,0,7,1,0,1,0,1,0,2,1,0,0,1,0,0,1,2,1,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,2,1,1,1,0,1,2,0,0,1,0,1,3,0,3,1,2,0,0,0,1,0,0,3,0,1,1,2,0,0,1,3,2,1,3,0,0,2,1,0,1,4,0,0,5,3,1,4,0,2,0,0,0,0,3,1,1,3,5,4,1,1,4,0,4,1,2,4,5,8,3,2,1,2,2,2,2,0,6,1,0,5,2,14,9,3,24,4,6,1,4,3,23,0,2,2,4,1,8,3,4,2,7,8,2,4,3,0,4,6,0,4,3,2,8,2,2,2,3,6,4,3,2,2,3,3,3,0,6,4,4,0,1,8,4,7,6,2,3,8,3,5,0,1,3,2,4,10,2,1,3,2,2,4,1,3,1,5,2,1,1,0,0,0,0,1,2,1,1,0,0,0,4,5,0,3,1,0,0,3,2,0,7,2,7,1,1,0,6,0,1,5,0,0,8,6,1,1,0,1,2,3,1,7,2,1,1,6,6,3,6,2,1,2,0,0,4,2,1,1,4,5,1,2,2,1,3,2,4,4,4,7,3,4,6,6,5,8,3,5,8,8,7,13,5,5,1,15,11,21,25,25,113,287,539,466,543,492,438,477,497,512,503,536,537,419,429,425,394,342,488,419,343,483,314,198,218,263,257,282,251,175,185,133,170,174,185,128,109,73,78,103,130,76,80,89,60,54,55,50,58,80,39,29,29,40,21,36,33,31,24,28,27,18,32,12,18,21,24,25,33,22,25,13,11,17,20,18,25,12,14,14,19,21,11,12,10,7,8,8,16,4,18,7,9,10,18,11,6]
daily_deaths = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,0,2,6,6,6,11,13,19,21,10,15,13,12,17,21,37,36,26,22,25,28,24,26,26,15,8,18,19,21,20,11,20,6,24,6,5,13,9,3,8,5,13,15,10,2,1,17,9,3,12,6,4,1,6,6,6,4,1,4,1,4,5,4,2,2,0,0,1,0,0,0,0,2,0,2,0,0,3,12,3,4,1,2,1,2,2,0,0,0,0,5]
taiwan_population = 23_570_000  # total population, used for the per-100k twin axes
# Build a daily-indexed frame from the raw lists and keep only the 2021
# outbreak period (from May 1 onward).
date = pd.date_range(start=start, periods=len(daily_cases), name="Date")
df = pd.DataFrame({"Daily Cases": daily_cases, "Daily Deaths": daily_deaths},
                  index=date)
df = df["2021-05-01":]
def makeplot():
    """Plot daily cases (top) and deaths (bottom) with annotated outbreak phases.

    Left axes show raw counts; twin right axes show the same data per 100k
    people, kept in sync via y-limit callbacks. Reads the module-level `df`,
    `taiwan_population` and `colors`.
    """
    fig, (ax1,ax2) = plt.subplots(2,1, sharex=True, figsize=(10,6))
    ax1t = ax1.twinx()
    ax2t = ax2.twinx()
    # Rescale a twin axis so its limits always equal the raw axis per 100k people.
    def convert_to_100k(ax_raw, ax_100k):
        ratio = 100_000 / taiwan_population
        ymin, ymax = ax_raw.get_ylim()
        ax_100k.set_ylim(ymin * ratio, ymax * ratio)
        ax_100k.figure.canvas.draw()
    # Re-sync the twin axes whenever the raw y-limits change.
    ax1.callbacks.connect("ylim_changed", lambda ax: convert_to_100k(ax, ax1t))
    ax2.callbacks.connect("ylim_changed", lambda ax: convert_to_100k(ax, ax2t))
    fig.autofmt_xdate()
    ax1.set_ylim(0, 600)
    ax2.set_ylim(0, 40)
    # NOTE(review): `_get_lines.prop_cycler` is private matplotlib API (removed
    # in matplotlib 3.8); presumably meant to share the color cycle between the
    # two axes — verify against the pinned matplotlib version.
    ax2._get_lines.prop_cycler = ax1._get_lines.prop_cycler
    sns.lineplot(data=df["Daily Cases"], ax=ax1, color=colors[0])
    sns.lineplot(data=df["Daily Deaths"], ax=ax2, color=colors[1])
    ax1t.set_ylabel("Per 100k people", fontsize=14, labelpad=30)
    ax2t.set_ylabel("Per 100k people", fontsize=14, labelpad=10)
    # Annotation geometry: arrows/text sit just above the top axis (ylim max 600).
    annheight = 620
    textheight = 660
    # Phase boundaries as matplotlib date numbers (positions in the daily index).
    p0 = mdates.date2num(df.index[15])
    p1 = mdates.date2num(df.index[29])
    p2 = mdates.date2num(df.index[75])
    ax1.annotate(text='curve flattened\nquickly',
                 xy=((p0+p1)/2, textheight),
                 ha='center', size=12,
                 annotation_clip=False)
    ax1.annotate(text='', xy=(p0,annheight), xytext=(p1, annheight),
                 annotation_clip=False,
                 arrowprops=dict(color=colors[2], arrowstyle='<->'))
    ax1.annotate(text='lockdown rapidly effective',
                 xy=((p1+p2)/2, textheight),
                 ha='center', size=12,
                 annotation_clip=False)
    ax1.annotate(text='', xy=(p1,annheight), xytext=(p2, annheight),
                 annotation_clip=False,
                 arrowprops=dict(color=colors[3], arrowstyle='<->'))
    fig.suptitle("Taiwan's handling of 2021 COVID Outbreak", y=1.05, fontsize=22)
    plt.show()
# ## Taiwan's handling of 2021 COVID Outbreak
#
# The initial outbreak was country-wide, due to various social interactions and travel plans of the first to be infected. The case load jumped to only about 2 cases per 100k people, thanks to the vigilance of the population prior to the outbreak.
#
# The government rapidly implemented a strict, but not total, shutdown. The effect of the shutdown is seen two weeks later, in line with the incubation period of the virus. Within 8 weeks from the initial outbreak, cases and deaths had dropped substantially.
makeplot()
| taiwan_coronavirus_response.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### K-Nearest Neighbor Algorithmus:
#
# #### Parameter:
#
# dataset = (X, Y)
# mit X := Features
# und Y := Classes
#
# K := Hyperparameter für die nächsten k Nachbarn
# sample := Neuer Datenpunkt zum Klassifizieren
#
# #### Pseudo-Code:
#
# kNN (dataset, K, sample):
# - Bestimme von jedem Punkt $p \in dataset$ die Distanz, mit der geg. Distanzfunktion.
# - Bestimme die $K$ nächst gelegenen Nachbarn und bestimme die zugehörige Klasse von $sample$.
import numpy as np
np.random.seed(42)
def generate_dataset():
    """Draw three isotropic 2-D Gaussian clusters and return (features, labels).

    Class sizes are 10, 6 and 13; labels are 0, 1, 2 in concatenation order.
    """
    # (cluster mean, sample count) per class, in class order 0..2.
    cluster_specs = [((0, 0), 10), ((-10, 4), 6), ((10, 10), 13)]
    identity_cov = np.array([[1, 0], [0, 1]])
    # Same sequence of multivariate_normal calls as before, so the draws are
    # identical for a given RNG state.
    clusters = [
        np.random.multivariate_normal(np.array(mean), identity_cov, count)
        for mean, count in cluster_specs
    ]
    data = np.concatenate(clusters, axis=0)
    classes = np.repeat(np.arange(len(cluster_specs)),
                        [count for _, count in cluster_specs])
    return data, classes
x, y = generate_dataset()
# +
print(f"x shape: {x.shape}")
print(f"y shape: {y.shape}")
print(f"x:\n{x}")
print(f"y:\n{y}")
# +
import matplotlib.pyplot as plt
def plot_dataset(x, y):
    """Scatter-plot the 2-D points in `x`, coloured by their class label in `y`."""
    class_colors = ["red", "blue", "green"]  # one colour per class 0..2
    for point, label in zip(x, y):
        plt.scatter(point[0], point[1], color=class_colors[label])
    plt.show()
# -
plot_dataset(x, y)
# ### KNN Implementation
class KNeighborsClassifier:
    """Minimal k-nearest-neighbours classifier (Euclidean distance, majority vote).

    The interface mirrors sklearn's conventions: fit / predict / score.
    """

    def __init__(self, n_neighbors: int = 5):
        self.n_neighbors = n_neighbors  # the "k" hyperparameter
        self.X = None            # training features, set by fit()
        self.y = None            # training class labels, set by fit()
        self.num_classes = None  # number of distinct classes, set by fit()

    def _distance(self, p1: np.ndarray, p2: np.ndarray) -> float:
        """Euclidean (L2) distance between two points."""
        return np.linalg.norm(p1 - p2)

    def kneighbors(self, X_samples: np.ndarray) -> np.ndarray:
        """Return, per sample, the indices of the k nearest training points.

        Output shape: (len(X_samples), n_neighbors).
        """
        neighbors_idxs = np.array(
            [np.argsort([self._distance(sample, x_i) for x_i in self.X])[:self.n_neighbors]
             for sample in X_samples]
        )
        return neighbors_idxs

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Memorize the training data (kNN is a lazy learner — no training step)."""
        self.X = X
        self.y = y
        self.num_classes = len(np.unique(self.y))

    def _vote_class(self, neighbors_idxs: np.ndarray) -> int:
        """Majority vote over the neighbours' classes (ties -> lowest class index)."""
        # bincount replaces the manual vote loop; minlength keeps a slot for
        # every known class even if it received no votes.
        votes = np.bincount(self.y[neighbors_idxs], minlength=self.num_classes)
        return int(np.argmax(votes))

    def predict(self, X_samples: np.ndarray) -> np.ndarray:
        """Predict a class label for each row of X_samples."""
        neighbors_idxs = self.kneighbors(X_samples)
        return np.array([self._vote_class(idxs) for idxs in neighbors_idxs])

    def score(self, X_samples: np.ndarray, y_samples: np.ndarray) -> float:
        """Mean accuracy of the predictions on (X_samples, y_samples)."""
        y_pred = self.predict(X_samples)
        return float(np.mean(y_pred == y_samples))
# +
# Sanity-check the classifier on a single hand-picked query point.
x_test = np.array([[0.0, 4.0]])  # query feature vector
y_test = np.array([0])           # expected class for the query
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(x, y)
neighbors_idxs = clf.kneighbors(x_test)
print(f"NeighborsIdxs:\n{neighbors_idxs}")
print(f"NeighborsPoints:\n{x[neighbors_idxs]}")
print(f"NeighborsClasses:\n{y[neighbors_idxs]}")
print(f"TestSamples:\n{x_test}")
print(f"Predictions:\n{clf.predict(x_test)}")
print(f"Score:\n{clf.score(x_test, y_test)}")
| Chapter3_MLIntroduction/KnnAlgorithm/KnnImplementation_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="wlYslPD1uRp2"
# # OC4IDS Database - Check and Import
#
# Use this notebook to check data using the OC4IDS Data Review Tool and to import the data and check results into the OC4IDS database.
#
# If your data is formatted as project package, edit the `source_id` and download url then run all cells in the notebook (`Ctrl+F9`) and enter your database credentials at the prompt.
#
# Otherwise, you need to reformat your data into a project package and save it as `project_package.json` before running the notebook.
# + [markdown] id="z4-iWuZRhoEe"
# Enter database credentials.
#
# > **Helpdesk analysts:** See [CRM-6335](https://crm.open-contracting.org/issues/6335).
# + id="IVy70_JfeV9C"
import getpass
user = 'postgres'
password = getpass.getpass('Password:')
# + [markdown] id="pV9HIDVHxpKS"
# Set `source_id`:
# + id="3Uzdauk6xsIe"
source_id = 'example'
# + [markdown] id="veSGp6SIwYRt"
# Download a project package:
# + id="RjdRITkBsid3"
# %%shell
curl -L https://standard.open-contracting.org/infrastructure/latest/en/_static/example.json > project_package.json
# + [markdown] id="3PU3KAsPuYP7"
# ## Setup
# + [markdown] id="N7eihXsOqxiZ"
# Install `psql` client:
# + id="TnDPv6EXquxF"
# %%shell
sudo apt-get update
sudo apt-get install -y postgresql-client
# + [markdown] id="UjTbla1Xq7jJ"
# Create a `.pgpass` file with database credentials:
# + id="zhYmhNnwqxHB"
# !touch ~/.pgpass
# !chmod 0600 ~/.pgpass
# !echo database-1.cmc8bohiuyg3.us-east-1.rds.amazonaws.com:5432:postgres:{user}:{password} > ~/.pgpass
# + [markdown] id="Xycifs7is8Dt"
# Install `jq`:
# + id="VcCYPKJbtALB"
# %%shell
sudo apt-get install jq
# + [markdown] id="femBJjzTsQtO"
# Connect notebook to database:
# + id="bIYcnkQ7rl6B"
# Build the PostgreSQL connection URL used by the ipython-sql magic.
# NOTE(review): the password is embedded in the URL; avoid printing
# `connection_string` in any cell output.
connection_string = 'postgresql://' + user + ':' + password + '@database-1.cmc8bohiuyg3.us-east-1.rds.amazonaws.com/postgres'
# %load_ext sql
# %sql $connection_string
# + [markdown] id="-q3kWP4a7bIW"
# Install lib-cove-oc4ids:
# + id="Ic5au-Fq7dMn"
# %%shell
pip install jsonschema>=3.0.0 --upgrade
pip install libcoveoc4ids
# + [markdown] id="s7w1j3t9wTPM"
# ## Check data
# + [markdown] id="fz9ZqgSk7q6V"
# Check data using `libcoveoc4ids`:
# + id="kBxOdYUg7CiD"
# %%shell
libcoveoc4ids project_package.json > results.json
# + [markdown] id="PpKqvkB_8BWd"
# ## Import data and check results
# + [markdown] id="KP5uMmSuMapZ"
# Use `jq` to generate a new-line delimited JSON file from the project package:
# + id="SEwoDVukMapa"
# %%shell
# cat project_package.json | jq -crM .projects[] > projects.json
# + [markdown] id="aZCclPabw2Cv"
# Import data to `temp_data` table:
# + id="qOouDNlD8tVF" language="sql"
#
# delete from temp_data;
# + id="dA8Z8ZLbtElG"
# !cat projects.json | psql -h "database-1.cmc8bohiuyg3.us-east-1.rds.amazonaws.com" -U {user} -d "postgres" -c "COPY temp_data (data) FROM STDIN WITH escape '\' quote e'\x01' delimiter e'\x02' CSV"
# + [markdown] id="AZhfpWPO8zri"
# Import check results to `temp_checks`:
# + id="7iutDHJQ84mH" language="sql"
#
# delete from temp_checks;
# + id="vAi0SC7684mn"
# !cat results.json | jq -crM . | psql -h "database-1.cmc8bohiuyg3.us-east-1.rds.amazonaws.com" -U {user} -d "postgres" -c "COPY temp_checks (cove_output) FROM STDIN WITH escape '\' quote e'\x01' delimiter e'\x02' CSV"
# + [markdown] id="bBj3VI2Ew5jb"
# Create collection, copy data to `projects` table, copy check results to `collection_check` table, populate `field_counts` and `project_fields` tables:
# + id="rmTiK5xft12d" language="sql"
#
# INSERT INTO collection (source_id, data_version)
# VALUES (:source_id, CURRENT_TIMESTAMP);
#
# INSERT INTO projects (collection_id, project_id, data)
# SELECT
# (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1) AS collection_id,
# trim(BOTH '"' FROM (data -> 'id')::text) AS project_id,
# data AS data
# FROM
# temp_data;
#
# DELETE FROM temp_data;
#
# INSERT INTO collection_check (collection_id, cove_output)
# SELECT
# (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1) AS collection_id,
# cove_output AS cove_output
# FROM
# temp_checks;
#
# DELETE FROM temp_checks;
#
# INSERT INTO field_counts
# SELECT
# (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1) AS collection_id,
# path,
# regexp_split_to_array(path, '/') AS path_array,
# sum(object_property) object_property,
# sum(array_item) array_count,
# count(DISTINCT id) distinct_projects
# FROM
# projects
# CROSS JOIN flatten (data)
# WHERE
# collection_id = (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1)
# GROUP BY
# collection_id,
# path;
#
# WITH RECURSIVE paths (
# project_id,
# path,
# "value"
# ) AS (
# SELECT
# project_id,
# (key_value).KEY "path",
# (key_value).value "value",
# 'true'::boolean "use_path"
# FROM (
# SELECT
# project_id,
# jsonb_each(data) key_value
# FROM
# projects
# WHERE
# collection_id = (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1)) a
# UNION ALL (
# SELECT
# project_id,
# CASE WHEN key_value IS NOT NULL THEN
# path || '/'::text || (key_value).KEY::text
# ELSE
# path
# END "path",
# CASE WHEN key_value IS NOT NULL THEN
# (key_value).value
# ELSE
# array_value
# END "value",
# key_value IS NOT NULL "use_path"
# FROM (
# SELECT
# project_id,
# path,
# jsonb_each(
# CASE WHEN jsonb_typeof(value) = 'object' THEN
# value
# ELSE
# '{}'::jsonb
# END) key_value,
# jsonb_array_elements(
# CASE WHEN jsonb_typeof(value) = 'array'
# AND jsonb_typeof(value -> 0) = 'object' THEN
# value
# ELSE
# '[]'::jsonb
# END) "array_value"
# FROM
# paths) a))
# INSERT INTO project_fields
# SELECT
# (
# SELECT
# id
# FROM
# collection
# ORDER BY
# id DESC
# LIMIT 1) AS collection_id,
# project_id,
# array_agg(path) AS paths
# FROM
# paths
# WHERE
# use_path
# GROUP BY
# project_id;
#
# + id="Gu8onHSw0bHq"
| OC4IDS_Database_Data_Import.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# <hr>
# # PREDICTING THE STOCK MARKET WITH WATSON
#
# ## Part I: Introduction
#
# In this Jupyter Notebook you will learn step-by-step how to extract financial data from one of the most popular public databases for econometric data, as well as the cleansing and preparation processes of the data science traditional workflow.
#
# After preparing the data into an adequate format, structured into a pandas dataframe, we train a predictive model utilizing techniques such as curve-fitting and ARIMA (AutoRegressive Integrated Moving Average).
#
# Lastly, we create interactive plots to visualize the collected data, as well as the results of the forecasters trained.
#
# ## Table of Contents
#
# #### 1. Mining Stock Market Data
# * 1.1. Quandl API Setup
# * 1.2. Downloading the WIKI/PRICES Table
# * 1.3. Visualizing the Collected Data
#
# #### 2. Training a Predictive Model using Open-Source Libraries
# * 2.1. Fbprophet Setup
# * 2.2. Data Preparation
# * 2.3. The Machine Learning
#
# #### 3. Analyzing the Results
# * 3.1. Analyzing Seasonalities
# * 3.2. Overlapping of Actual Values with Expected Values
# * 3.3. Trained Model Evaluation
#
# #### 4. Exporting Data
# * 4.1. Exporting Data to IBM Cloud Object Storage
# <hr>
# # 1: Mining Stock Market Data
# The data mining step is done using the Quandl API, a financial database (with both public and private tables) purchased by NASDAQ in 2018. It is not mandatory to use an API key to extract data, however it is recommended to generate one for free <a href="https://www.quandl.com/">at the Quandl website</a>.
# ### 1.1: Quandl API Setup
# Install quandl package (this can take some minutes)
# !pip install --user quandl==3.4.8 --upgrade
# Import quandl package
import quandl
print("Quandl package imported.")
# If you registered and generated an API key for free on the Quandl web page, type it in the variable in the cell below.
quandl.ApiConfig.api_key = ""
# ### 1.2: Downloading the WIKI/PRICES Table
# Quandl has several public and private data tables available.
#
# In this project we will use the table <a href="https://www.quandl.com/databases/WIKIP"> WIKI/PRICES</a>. This table has information on the daily closing and opening values of the shares of more than 3000 companies.
#
# The data download is done via an API call executed with the quandl library, using the `get_table` function. The arguments of this function are: the table name, the <a href="https://en.wikipedia.org/wiki/Ticker_symbol"> ticker symbol </a> of the desired stock, and the desired columns of the table. The `paginate=True` parameter allows more than 10000 rows of data to be transferred with a single call.
#
# The result of this function call is a pandas dataframe with the desired stock data, ready to be transformed.
# API Call example - Downloading financial data from IBM stocks
data = quandl.get_table('WIKI/PRICES',
ticker='IBM',
qopts={'columns':['date', 'open', 'high', 'low', 'close']},
paginate=True)
# At the <a href="https://www.nasdaq.com/symbol/">NASDAQ page</a> you can see the ticker symbols for various stocks.
#
# You can also choose a ticker from the ones listed in the `nasdaq-ticker-table.csv` file, which can be accessed <a href ="https://github.com/vanderleipf/ibmdegla-ws-projects/blob/master/EN-US/forecasting-the-stock-market/data/stock-ticker-table.csv">here</a>.
#
# Choose the ticker of another company, or use the data already configured and saved in the dataframe `date` by the API call executed in the above code cell.
# +
# Renaming the pandas dataframe columns
data.columns = ['Date', 'Open', 'High', 'Low', 'Close']
# Reset the pandas dataframe index
data = data.reset_index(drop=True)
# Show the last five rows of the pandas dataframe
data.tail()
# -
# ### 1.3: Visualizing the Collected Data
# To view the data, the `bokeh` package is used. This module is capable of generating interactive Javascript charts.
# !pip install --user bokeh==1.0.4 --upgrade
# +
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource
from bokeh.embed import components
from bokeh.io import output_notebook
print('Packages imported.')
# -
# Load bokeh
output_notebook()
# +
# Bokeh Figure
p = figure(plot_width=1200, plot_height=550, title='Stock Value Historical Data', x_axis_type="datetime")
# Plot Lines
p.line(data.Date, data.Open, line_width=2, line_color="#0099ff", legend='Open')
p.line(data.Date, data.Close, line_width=2, line_color="#ff6699", legend='Close')
p.line(data.Date, data.High, line_width=1, line_color="#000000", legend='High')
p.line(data.Date, data.Low, line_width=1, line_color="#000000", legend='Low')
# Axis and Labels
p.legend.orientation = "vertical"
p.xaxis.axis_label = "Date"
p.xaxis.axis_label_text_font_style = 'bold'
p.xaxis.axis_label_text_font_size = '16pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = "Value ($ USD)"
p.yaxis.axis_label_text_font_style = 'bold'
p.yaxis.axis_label_text_font_size = '16pt'
p.yaxis.major_label_text_font_size = '12pt'
# -
# In the figure below we can see and interact with the data extracted from the WIKI/PRICES table. In blue we have the opening value of the stock; in red the closing value of the stock; in black we have the maximum and minimum values (daily).
show(p)
# <hr>
# # 2: Training a Predictive Model using Open-Source Libraries
# In the training process of our predictive model, we will use the `fbprophet` package, developed by Facebook for time series analysis.
#
# Fbprophet follows the same style of objects as `sklearn`, an extremely popular machine learning python library. An instance of the Prophet class is created and then the `fit` and` predict` methods are used.
#
# The training set of a Prophet model is a two-column pandas dataframe with the columns `ds` and` y`. The `ds` (datestamp) column must be a date in the format YYYY-MM-DD, or a timestamp in the format YYYY-MM-DD HH:MM:SS. The `y` column must be numeric, and represents the variable we wish to model in the future.
# ### 2.1: Fbprophet Setup
# Install required packages (this can take some minutes)
# UPDATE(vnderlev): setuptools-git is now required for fbprophet 0.5.
# !pip install --user pystan==2.17.1.0 holidays==0.9.8 setuptools-git==1.2 matplotlib==3.0.2 --upgrade
# Install fbprophet from pip (this will take some time, please be patient)
# !pip install fbprophet==0.5
# +
# Import packages
import fbprophet
import datetime as dt
print('Packages imported.')
# -
# ### 2.2: Data Preparation
# First of all, the previously collected dataframe needs to be prepared.
#
# The preparation includes the selection of a subset of the data for model training. In this Jupyter notebook we will only sample the latest data, starting from 2008.
#
# Of the four columns with stock values, only one column will be chosen as the variable of interest. We will choose the daily opening value of the shares (the `Open` column).
# Select train sample
df_train = data.copy()
df_train = df_train[(df_train.Date > dt.datetime(2008,1,1))]
df_train.tail()
# Format the dataframe for FBProphet
df_train.rename(columns={'Open':'y', 'Date':'ds'}, inplace=True)
df_train = df_train.filter(items=['ds', 'y'])
df_train.tail()
# ### 2.3: The Machine Learning
# As previously mentioned, Prophet uses the same interface style as the sklearn library.
#
# A Prophet class is instantiated, with the desired types of seasonality. For this work, we will consider annual and monthly seasonalities in stock values, and disregard daily and weekly effects.
#
# Usually the presented settings are the most adequate for stock prediction, but feel free to test different seasonalities.
# Instantiate a fbprophet model
model = fbprophet.Prophet(daily_seasonality=False,
weekly_seasonality=False,
yearly_seasonality=True,
changepoint_prior_scale=0.05,
changepoints=None)
model.add_seasonality(name='monthly', period=30.5, fourier_order=5)
# Train model
model.fit(df_train)
# In the following cell, the prediction runs in a period of 365 days (1 year) in the future. The result is a multi-column dataframe with information about trends, tolerances, and forecasts.
# Execute forecasting algorithm (1 year into the future)
future_data = model.make_future_dataframe(periods=365, freq='D')
future_data = model.predict(future_data)
future_data.tail()
# <hr>
# ## 3: Results
# ### 3.1: Analyzing Seasonalities
# In the figure below we have a quick plot of the results using the Prophet class `plot` method.
#
# The black dots are the actual data, the dark blue line is the modeled trend, and in light blue we have the tolerance (minimum and maximum values modeled).
fig1 = model.plot(future_data)
# Using the `plot_components` function of the Prophet object, we can also analyze trend information, as well as the monthly and yearly seasonalities considered in the modeling.
fig2 = model.plot_components(future_data)
# ### 3.2: Overlapping of Actual Values with Expected Values
# +
# Bokeh Figure
p = figure(plot_width=1200, plot_height=550, title='Stock Value Historical Data', x_axis_type="datetime")
# Plot Lines
p.line(data[data['Date'] > dt.datetime(2008,1,1)].Date, data[data['Date'] > dt.datetime(2008,1,1)].Open, line_width=2, line_color="#0099ff", legend='Observed Open Value')
p.line(future_data.ds, future_data.yhat, line_width=2, line_color="#2B0000", legend='Modeled Open Value')
p.line(future_data.ds, future_data.yhat_upper, line_width=0.5, line_color="#000099", legend='Upper Estimates')
p.line(future_data.ds, future_data.yhat_lower, line_width=0.5, line_color="#000099", legend='Lower Estimates')
# Axis and Labels
p.legend.orientation = "vertical"
p.xaxis.axis_label = "Date"
p.xaxis.axis_label_text_font_style = 'bold'
p.xaxis.axis_label_text_font_size = '16pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = "Value ($ USD)"
p.yaxis.axis_label_text_font_style = 'bold'
p.yaxis.axis_label_text_font_size = '16pt'
p.yaxis.major_label_text_font_size = '12pt'
# -
# In the next cell we can thoroughly analyze the quality of the trained model by comparing the modeled values (in black) with the actual observed values (light blue).
show(p)
# ### 3.3: Trained Model Evaluation
# The Prophet class also provides the means to perform an evaluation of the created model.
#
# The `cross_validation` diagnostic function will be used to evaluate our model.
from fbprophet.diagnostics import cross_validation
df_cv = cross_validation(model, initial='730 days', period='180 days', horizon='365 days')
df_cv.head()
from fbprophet.diagnostics import performance_metrics
df_p = performance_metrics(df_cv)
df_p.head()
from fbprophet.plot import plot_cross_validation_metric
fig = plot_cross_validation_metric(df_cv, metric='mape')
# Analyzing the `MAPE` criterion projected in the cell above, it is noticed that the error for a forecast of up to 50 days turns around 5%, reaching 15% for a forecast of 365 days. This error is a relatively good metric if the objective is to construct an image of the market trends. For day-trading purposes, even 95% accuracy is not really adequate.
# <hr>
# # 4: Exporting Data
# ### 4.1: Exporting Data to IBM Cloud Object Storage
# The mined data, as well as the data produced by the model, can be exported as CSV files to IBM Cloud Object Storage, and eventually be used or published in other applications.
#
# Next we present the code necessary to perform this task in an automated way.
# +
from ibm_botocore.client import Config
import ibm_boto3, os
print('Packages imported.')
# -
# Set up your IBM Cloud Object Storage credentials in the next cell.
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
cos_credentials = {
'IAM_SERVICE_ID': '',
'IBM_API_KEY_ID': '',
'ENDPOINT': '',
'IBM_AUTH_ENDPOINT': '',
'BUCKET': '',
'FILE': ''
}
# The `upload_data_to_ibm_cos` is a function created to upload pandas dataframes as CSV files in IBM COS.
def upload_data_to_ibm_cos(credentials, df, df_future, tick):
    """Upload the mined and forecast dataframes to IBM Cloud Object Storage.

    Both dataframes are written to local CSV files named after the ticker,
    uploaded to the configured bucket, and the local files are removed
    afterwards (even if the upload fails).

    Args:
        credentials (dict): IBM COS credentials: 'IBM_API_KEY_ID',
            'IAM_SERVICE_ID', 'IBM_AUTH_ENDPOINT', 'ENDPOINT', 'BUCKET'.
        df (pandas.DataFrame): Historical stock data.
        df_future (pandas.DataFrame): Forecast produced by the model.
        tick (str): Stock ticker symbol; used to name the CSV files.
    """
    cos = ibm_boto3.client(service_name='s3',
                           ibm_api_key_id=credentials['IBM_API_KEY_ID'],
                           ibm_service_instance_id=credentials['IAM_SERVICE_ID'],
                           ibm_auth_endpoint=credentials['IBM_AUTH_ENDPOINT'],
                           config=Config(signature_version='oauth'),
                           endpoint_url=credentials['ENDPOINT'])
    history_file = '{}.csv'.format(tick)
    future_file = '{}_future.csv'.format(tick)
    df.to_csv(history_file, sep=',', encoding='utf-8')
    df_future.to_csv(future_file, sep=',', encoding='utf-8')
    try:
        cos.upload_file(Filename=history_file, Bucket=credentials['BUCKET'], Key=history_file)
        cos.upload_file(Filename=future_file, Bucket=credentials['BUCKET'], Key=future_file)
    except Exception as e:
        # Report the failure instead of raising so the notebook keeps running.
        print("Upload of {} data failed: {}".format(tick, e))
    else:
        print("{} data uploaded to IBM COS.".format(tick))
    finally:
        # Always remove the local temporary CSVs; the previous version
        # leaked them whenever the upload raised.
        os.remove(history_file)
        os.remove(future_file)
# Upload mined and modeled data to IBM COS
upload_data_to_ibm_cos(cos_credentials, data, future_data, 'IBM')
# You can now access the `<stock_ticker>.csv` and `<stock_ticker>_future.csv` file anywhere using the IBM COS API.
# <hr>
#
#
# ## Want to Learn More?
#
# Watch the public video on how to replicate this analysis with SPSS Modeler, an analytics platform that has several embedded machine learning algorithms. This platform is designed to facilitate data analysis through a graphical programming language, and is integrated with Watson Studio.
#
# This notebook and its source code is made available under the terms of the <a href = "https://github.com/IBM/watson-stock-market-predictor/blob/master/LICENSE">Apache License 2.0</a>.
#
# <hr>
# ### Thank you for completing this journey!
| notebooks/forecasting-the-stock-market.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.3 64-bit
# language: python
# name: python3
# ---
# +
# Read the puzzle input and strip the trailing newline from every line.
lines = []
with open('day4.txt', 'r') as f:
    lines = f.readlines()
    lines = [line.strip() for line in lines]

# Alternative input for the worked example from the puzzle text:
# with open('test.txt', 'r') as f:
#     lines = f.readlines()
#     lines = [line.strip() for line in lines]
# +
# Bingo is played on a set of boards each consisting of a 5x5 grid of numbers. Numbers are chosen at random, and the chosen number is marked on all boards on which it appears. (Numbers may not appear on all boards.) If all numbers in any row or any column of a board are marked, that board wins. (Diagonals don't count.)
#check board has row or column
def check_win(board):
    """Return True when any full row or column of *board* is marked 'X'.

    Diagonals do not count, matching the bingo rules of the puzzle.
    """
    size = 5
    # A row wins when every one of its cells is marked.
    for row in board:
        if row.count('X') == size:
            return True
    # A column wins when every cell at that index, across rows, is marked.
    for col in range(size):
        column = [board[row][col] for row in range(size)]
        if column.count('X') == size:
            return True
    return False
# The submarine has a bingo subsystem to help passengers (currently, you and the giant squid) pass the time. It automatically generates a random order in which to draw numbers and a random set of boards (your puzzle input). For example:
# 7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
# 22 13 17 11 0
# 8 2 23 4 24
# 21 9 14 16 7
# 6 10 3 18 5
# 1 12 20 15 19
# 3 15 0 2 22
# 9 18 13 17 5
# 19 8 7 25 23
# 20 11 10 24 4
# 14 21 16 12 6
# 14 21 17 24 4
# 10 16 15 9 19
# 18 8 23 26 20
# 22 11 13 6 5
# 2 0 12 3 7
# The first input line holds the drawn numbers, comma separated.
# NOTE(review): the name `num_boards` is misleading -- it is the list of
# drawn numbers, not a count of boards; later cells iterate over it as such.
num_boards = lines[0].split(',')
# The remaining lines hold the boards.
boards = lines[1:]
# Each board occupies 6 lines: one blank separator line plus 5 rows.
boards = [boards[i:i+6] for i in range(0,len(boards),6)]
# Drop the leading blank separator line of every board.
boards = [board[1:] for board in boards]
# Split every row on spaces; the column-aligned input pads single-digit
# numbers with extra spaces, producing empty strings that are removed
# until exactly 5 cells remain per row.
for board in boards:
    for i in range(len(board)):
        board[i] = board[i].split(' ')
        while(len(board[i]) != 5):
            board[i].remove('')
# +
# After the first five numbers are drawn (7, 4, 9, 5, and 11), there are no winners, but the boards are marked as follows (shown here adjacent to each other to save space):
# 22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
# 8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
# 21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
# 6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
# 1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
# After the next six numbers are drawn (17, 23, 2, 0, 14, and 21), there are still no winners:
# 22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
# 8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
# 21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
# 6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
# 1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
# Finally, 24 is drawn:
# 22 13 17 11 0 3 15 0 2 22 14 21 17 24 4
# 8 2 23 4 24 9 18 13 17 5 10 16 15 9 19
# 21 9 14 16 7 19 8 7 25 23 18 8 23 26 20
# 6 10 3 18 5 20 11 10 24 4 22 11 13 6 5
# 1 12 20 15 19 14 21 16 12 6 2 0 12 3 7
# At this point, the third board wins because it has at least one complete row or column of marked numbers (in this case, the entire top row is marked: 14 21 17 24 4).
#mark number on the boards
def mark_numbers(boards, num):
    """Mark every occurrence of *num* (a string) on every board with 'X'.

    Boards are modified in place; the same list is also returned for
    convenient reassignment by the caller.
    """
    for board in boards:
        for row_idx in range(5):
            row = board[row_idx]
            if num not in row:
                continue
            board[row_idx] = ['X' if cell == num else cell for cell in row]
    return boards
def which_board(boards):
    """Return the index of the first winning board, or -1 if none has won."""
    for idx, board in enumerate(boards):
        if check_win(board):
            return idx
    return -1
# The score of t
# he winning board can now be calculated. Start by finding the sum of all unmarked numbers on that board; in this case, the sum is 188. Then, multiply that sum by the number that was just called when the board won, 24, to get the final score, 188 * 24 = 4512.
#get all unmarked 'X' numbers on the board
def get_unmarked_numbers(board):
    """Return all unmarked cells of *board* as ints, in row-major order."""
    unmarked = []
    for row in board:
        for cell in row:
            if cell != 'X':
                unmarked.append(int(cell))
    return unmarked
#get the sum of all unmarked numbers on the board
def get_sum(board):
    """Return the sum of all unmarked numbers on *board*."""
    unmarked = get_unmarked_numbers(board)
    return sum(unmarked)
# Part 1: draw numbers in order until some board completes a row or column.
for num in num_boards:
    sums = -1
    # Mark the freshly drawn number on every board.
    boards = mark_numbers(boards, num)
    board_index = which_board(boards)
    if board_index != -1:
        # First board to win; its score is the part 1 answer.
        print(boards[board_index],board_index, num)
        nums = get_unmarked_numbers(boards[board_index])
        # Final score = sum of unmarked numbers * the number just called.
        sums = sum(nums) * int(num)
        print(num,sums,sum(nums) ,nums)
        break
# To guarantee victory against the giant squid, figure out which board will win first. What will your final score be if you choose that board?
# +
# On the other hand, it might be wise to try a different strategy: let the giant squid win.
# You aren't sure how many bingo boards a giant squid could play at once, so rather than waste time counting its arms, the safe thing to do is to figure out which board will win last and choose that one. That way, no matter which boards it picks, it will win for sure.
# In the above example, the second board is the last to win, which happens after 13 is eventually called and its middle column is completely marked. If you were to keep playing until this point, the second board would have a sum of unmarked numbers equal to 148 for a final score of 148 * 13 = 1924.
# Part 2: keep playing and remove each board as soon as it wins; the last
# score printed belongs to the final board to win.
# NOTE(review): the part 1 loop above already marked some numbers; replaying
# the draws from the start is harmless because marking is idempotent.
for num in num_boards:
    sums = -1
    boards = mark_numbers(boards, num)
    # Several boards can win on the same draw, so drain all winners before
    # moving on to the next number.
    while which_board(boards) != -1:
        board_index = which_board(boards)
        nums = get_unmarked_numbers(boards[board_index])
        # Score of the board that just won.
        sums = sum(nums) * int(num)
        print(board_index, num,sums, len(boards))
        # Remove the finished board so later winners can be detected.
        # NOTE(review): list.remove() deletes the first *equal* board; with
        # duplicate boards `del boards[board_index]` would be safer.
        boards.remove(boards[board_index])
| 2021/day04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import networkx as nx
import numpy as np
from collections import Counter
# # Get the karate club example graph
G = nx.karate_club_graph()
# ## Auxiliary functions to help spidering process
# +
def merge_dols(list_of_dicts):
    """
    Merge a list of dictionaries of lists.

    Args:
        - list_of_dicts (list) - List containing a set of dictionaries to merge.
                                 Each dictionary value should be list-type.

    Returns:
        - A dictionary where the unique values for each key have been merged.
    """
    # Build the result without mutating the caller's argument: the previous
    # implementation emptied `list_of_dicts` via pop(), a surprising side
    # effect for callers that reuse the list.
    merged = {}
    for d in list_of_dicts:
        for key, values in d.items():
            if key in merged:
                # Union the value lists, keeping each item only once.
                merged[key] = list(set(merged[key]) | set(values))
            else:
                # Copy so the result never aliases an input list.
                merged[key] = list(values)
    return merged
def flatten(l):
    """
    Flatten a list of lists one level.

    Args:
        - l (list) - List of lists

    Returns:
        - A flattened list.
    """
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
# -
# ## Function to spider out from focal nodes
def spider_nodes(nodes, graph, neighborhood_size):
    """
    Spider out from a set of base nodes to return a fixed-sized neighborhood.

    Args:
        - nodes (list) - List of all nodes from which to spider out.
        - graph (networkx.Graph) - Graph of the full covalent structure.
        - neighborhood_size (int) - Size of neighborhood to be returned (including `nodes`).

    Returns:
        - Set of node ids forming the local neighborhood around the focal
          nodes specified in the `nodes` argument. (Callers build the
          subgraph themselves, e.g. `graph.subgraph(result)`.)

    Raises:
        - ValueError - If the focal nodes alone exceed `neighborhood_size`.
    """
    # Get the successor trees for all of the focal nodes.
    # BUG FIX: previously this walked the module-level global `G` instead of
    # the `graph` parameter, silently ignoring the caller's graph.
    successors = merge_dols([dict(nx.bfs_successors(graph, node)) for node in nodes])
    # Seed the neighborhood with the focal nodes themselves.
    nodeset = {*nodes}
    # Ensure that the neighborhood is large enough to fit the focal nodes.
    if len(nodeset) > neighborhood_size:
        raise ValueError(f'Neighborhood size ({neighborhood_size}) not large enough to fit focal nodes ({len(nodeset)}).')
    # The frontier of nodes whose successors are expanded next.
    path = nodeset
    # Keep growing the neighborhood stepwise until the size limit is reached
    # or there are no more nodes to add.
    while len(nodeset) < neighborhood_size and path:
        # All descendents (successors) of the current frontier.
        node_dec = [successors[node] for node in path if node in successors]
        node_dec = flatten(node_dec)
        new_nodeset = set(node_dec)
        # Candidate neighborhood if every new node were accepted.
        nodeset_union = nodeset.union(new_nodeset)
        path = new_nodeset.difference(nodeset)
        # Accept all new nodes when there is space for them.
        if len(nodeset_union) <= neighborhood_size:
            nodeset = nodeset_union
        # Otherwise sample the new nodes, weighted by how many edges connect
        # them to nodes already in the neighborhood.
        else:
            edge_count = Counter(node_dec)
            sample_list = list(new_nodeset.difference(nodeset))
            p = np.array([edge_count[node] for node in sample_list])
            p = p / p.sum()
            size = neighborhood_size - len(nodeset)
            sample_set = set(np.random.choice(sample_list, size=size, replace=False, p=p))
            nodeset = nodeset.union(sample_set)
    return nodeset
# %%timeit
np.random.seed(0)
nodeset2 = spider_nodes([24, 25, 7, 8], G, 20)
nodeset2 = spider_nodes([24, 25, 7, 8], G, 20)
print(len(nodeset2))
print(nodeset2)
H = G.subgraph(nodeset2)
nx.draw_networkx(H, with_labels=True)
# # Tests
# +
# Test merge_dols
dict1 = {
1: [2, 3],
2: [4, 5],
}
dict2 = {
2: [4, 5, 6],
6: [7]
}
manual_combine = {
1: [2, 3],
2: [4, 5, 6],
6: [7]
}
out_dict = merge_dols([dict1, dict2])
assert manual_combine == out_dict
# Test flatten
lols = [[1, 2, 3], [4, 5, 6]]
assert flatten(lols) == [1, 2, 3, 4, 5, 6]
# Only flattens one level
lols = [[1, 2, 3], [4, 5, 6, [7, 8]]]
assert flatten(lols) == [1, 2, 3, 4, 5, 6, [7, 8]]
# Test spider
np.random.seed(0)
nodeset = spider_nodes([24, 25, 7, 8], G, 20)
assert len(nodeset) == 20
np.random.seed(0)
nodeset = spider_nodes([24, 25, 7, 8], G, 30)
assert len(nodeset) == 30
# Test beyond number of nodes in graph
np.random.seed(0)
nodeset = spider_nodes([24, 25, 7, 8], G, 40)
assert len(nodeset) == 34
# -
| notebooks/module_sandbox/spider_nodes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# Connect to a local Spark instance via sparklyr and run a smoke test.
library(sparklyr)
library(dplyr)

# Spark session & context (local master, no cluster required)
sc <- spark_connect(master = "local")

# Do something to prove it works: copy the iris data frame into Spark and
# run a simple dplyr pipeline against the Spark table.
iris_tbl <- copy_to(sc, iris)

iris_tbl %>%
filter(Petal_Width > 0.2) %>%
head()
# -
| work/local_sparklyr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # XDAWN Denoising
#
#
# XDAWN filters are trained from epochs, signal is projected in the sources
# space and then projected back in the sensor space using only the first two
# XDAWN components. The process is similar to an ICA, but is
# supervised in order to maximize the signal to signal + noise ratio of the
# evoked response.
#
# WARNING: As this denoising method exploits the known events to
# maximize SNR of the contrast between conditions it can lead to overfit.
# To avoid a statistical analysis problem you should split epochs used
# in fit with the ones used in apply method.
#
# References
# ----------
# [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2009). xDAWN
# algorithm to enhance evoked potentials: application to brain-computer
# interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
#
# [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011,
# August). Theoretical analysis of xDAWN algorithm: application to an
# efficient sensor selection in a P300 BCI. In Signal Processing Conference,
# 2011 19th European (pp. 1382-1386). IEEE.
#
#
# +
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from mne import (io, compute_raw_covariance, read_events, pick_types,
Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.viz import plot_epochs_image
print(__doc__)
data_path = sample.data_path()
# -
# Set parameters and read data
#
#
# +
# Paths to the filtered sample dataset shipped with MNE.
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3          # epoch window around each event (seconds)
event_id = dict(vis_r=4)        # right visual field stimulus

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, method='iir')  # replace baselining with high-pass
events = read_events(event_fname)

raw.info['bads'] = ['MEG 2443']  # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                   exclude='bads')

# Epoching
# NOTE(review): `add_eeg_ref` was removed in later MNE releases; this
# example targets the MNE version current at publication -- verify before
# reusing with a modern MNE.
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                picks=picks, baseline=None, preload=True,
                add_eeg_ref=False, verbose=False)

# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)

# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)

# Xdawn instance: keep only the first two supervised components.
xd = Xdawn(n_components=2, signal_cov=signal_cov)

# Fit xdawn
xd.fit(epochs)

# Denoise epochs by projecting to source space and back with 2 components.
epochs_denoised = xd.apply(epochs)

# Plot image epoch after xdawn
plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
| 0.12/_downloads/plot_xdawn_denoising.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf

# Hyper-parameters shared by the input pipeline and the training loop.
batch_size = 32
img_size = 224
data_dir = "train_images"

# Training split (90% of the images); labels are inferred from the
# sub-directory names under `data_dir`.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="training",
    seed=123,
    image_size=(img_size, img_size),
    batch_size=batch_size)  # was a magic 32 duplicating the constant above

# Matching validation split (10%); the seed must match the training call so
# the two subsets are disjoint.
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="validation",
    seed=123,
    image_size=(img_size, img_size),
    batch_size=batch_size)
# -
# Class labels as discovered from the directory names.
class_names = train_ds.class_names
print(class_names)
# +
import matplotlib.pyplot as plt

# Show a 3x3 grid of sample images from the first batch with their labels.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
plt.show()
# -
# Number of batches in the training dataset.
len(train_ds)
# Inspect one batch's tensor shapes (images and labels), then stop.
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
# +
import numpy as np
from tensorflow.keras import layers, regularizers

# NOTE(review): `input_shape` is never read below (the Rescaling layer
# receives its own input_shape) — looks like dead code; confirm before removing.
input_shape = ()
num_classes = len(class_names)

# Small CNN: three conv/pool stages followed by two L2-regularised dense
# layers; Dropout and L2 are both used against over-fitting.
model = tf.keras.Sequential([
    # Normalise pixel values from [0, 255] to [0, 1] inside the model.
    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_size, img_size, 3)),
    layers.Conv2D(32, (3, 3), padding='same', activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.5),
    layers.Flatten(),
    layers.Dense(128, kernel_regularizer=regularizers.l2(0.001), activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(128, kernel_regularizer=regularizers.l2(0.001), activation='relu'),
    layers.Dropout(0.5),
    # One probability per class.
    layers.Dense(num_classes, activation='softmax')
])
# -
model.summary()

# Sparse categorical cross-entropy matches the integer labels produced by
# image_dataset_from_directory (no one-hot encoding needed).
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# NOTE(review): `batch_size` is presumably ignored here because `train_ds`
# already yields batches — confirm against the Keras Model.fit docs.
baseline_history = model.fit(
    train_ds,
    batch_size=batch_size,
    validation_data=val_ds,
    epochs=10
)
def plot_history(histories):
    """Plot training/validation loss and accuracy curves for one or more runs.

    `histories` is a list of (name, Keras History) pairs; each run is drawn
    with a dashed validation curve and a solid training curve sharing the
    same colour.
    """
    plt.figure(figsize=(10, 10))

    # Top panel: loss curves.
    plt.subplot(2, 1, 1)
    for run_name, run in histories:
        label = run_name.title()
        dashed = plt.plot(run.epoch, run.history['val_loss'], '--',
                          label=label + ' Val')
        plt.plot(run.epoch, run.history['loss'],
                 color=dashed[0].get_color(), label=label + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()

    # Bottom panel: accuracy curves.
    plt.subplot(2, 1, 2)
    for run_name, run in histories:
        label = run_name.title()
        dashed = plt.plot(run.epoch, run.history['val_accuracy'], '--',
                          label=label + ' Val')
        plt.plot(run.epoch, run.history['accuracy'],
                 color=dashed[0].get_color(), label=label + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    # Clamp the x-axis of the bottom panel to the last run's epoch range.
    plt.xlim([0, max(run.epoch)])


plot_history([('baseline', baseline_history)])
| CNN/.ipynb_checkpoints/TrainWithCNN-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimizing Python Code with [Cython](https://cython.org/)
# +
import Cython
from random import random
import math
import numpy as np
import matplotlib.pyplot as plt
# This is needed to load the Cython magic
# %load_ext Cython
# -
# ## Calculating $\pi$ using Monte Carlo Integration
# ### Pure Python
def pi_mc(n=1000):
    """Estimate pi by Monte Carlo integration.

    Draws `n` uniform points in the unit square and returns 4 times the
    fraction that lands inside the quarter unit circle.
    """
    hits = sum(
        1 for _ in range(n)
        if random() ** 2 + random() ** 2 <= 1.0
    )
    return 4.0 * hits / n
# %time pi_mc(10000000)
# ### Cython
# #### 1. Use `cython` cell magic without making any code change
# + language="cython"
#
# from random import random
#
# def pi_mc(n=1000):
# '''Calculate PI using Monte Carlo method'''
# in_circle = 0
# for i in range(n):
# x, y = random(), random()
# if x ** 2 + y ** 2 <= 1.0:
# in_circle += 1
#
# return 4.0 * in_circle / n
# -
# %time pi_mc(10000000)
# #### 2. Static type declarations in Cython with `cdef`
#
# Static type declarations allow Cython to step out of the dynamic nature of the Python code and produce efficient **C** code.
# + language="cython"
#
# from random import random
#
# def pi_mc(n=1000):
# '''Calculate PI using Monte Carlo method'''
# cdef int in_circle = 0
# cdef int i
# cdef double x, y
# for i in range(n):
# x, y = random(), random()
# if x ** 2 + y ** 2 <= 1.0:
# in_circle += 1
#
# return 4.0 * in_circle / n
# -
# %time pi_mc(10000000)
# #### 3. Using Cython `annotate` option and inspect the generated C code
# + magic_args="--annotate" language="cython"
#
# from random import random
#
# def pi_mc(int n=1000):
# '''Calculate PI using Monte Carlo method'''
# cdef:
# int in_circle = 0
# int i
# double x, y
# for i in range(n):
# x, y = random(), random()
# if x ** 2 + y ** 2 <= 1.0:
# in_circle += 1
#
# return 4.0 * in_circle / n
# -
# %time pi_mc(10000000)
# ## Types of Cython functions
#
# Cython offers three different types of function declared with `def`, `cdef`, `cpdef`:
#
# 1. Functions declared with `def` can be called from Python and Cython code.
# 2. Functions declared with `cdef` can be only called from Cython code.
# 3. `cpdef` causes Cython to create two versions of the function. One which is used when the function is called from Cython code and one when it is called from Python.
# + language="cython"
#
#
# cdef double cube(double x):
# return x * x * x
#
#
# cpdef double cube_sum(double x, double y):
# return cube(x) + cube(y)
# -
# ### Function Inlining
#
# We can use the `cdef inline` for small functions that are used often and Cython inlines them reducing overhead
# + magic_args="--annotate" language="cython"
#
#
# cdef inline double cube(double x):
# return x * x * x
#
#
# cpdef double cube_sum(double x, double y):
# return cube(x) + cube(y)
# -
# ## Typed Memory Views
#
# Cython allows access to the contents of NumPy arrays by **memory views**.
# + magic_args="--annotate" language="cython"
#
# def my_sum(double[:] x):
# cdef int i, nx = x.shape[0]
# cdef double s = 0.0
# for i in range(nx):
# s += x[i]
#
# return s
# -
# `my_sum` comes from the Cython cell above; compare it against NumPy's
# built-in reduction on a 1-D array of ones (both should print 1000.0).
x = np.ones(1000, dtype=np.float64)
s1 = my_sum(x)
s2 = x.sum()
print(s1, s2)
# + magic_args="--annotate" language="cython"
#
# def my_sum2d(double[:, :] x):
# cdef int i, j, nx = x.shape[0], ny = x.shape[1]
# cdef double s = 0.0
# for i in range(nx):
# for j in range(ny):
# s += x[i, j]
#
# return s
# -
# Same sanity check for the 2-D memory-view version from the previous cell.
x = np.ones((1000, 1000), dtype=np.float64)
s1 = my_sum2d(x)
s2 = x.sum()
print(s1, s2)
# ## Disable bounds checking and wraparound
#
# For safety reasons, Cython checks if we try to access elements out of the array boundaries.
# Furthermore it allows using negative array indices. We can exhange safety with performance disabling both of them.
#
# + magic_args="--annotate" language="cython"
#
# from cython cimport wraparound, boundscheck
#
# @wraparound(False)
# @boundscheck(False)
# cpdef double my_sum2d(double[:, :] x):
# cdef int i, j, nx = x.shape[0], ny = x.shape[1]
# cdef double s = 0.0
# for i in range(nx):
# for j in range(ny):
# s += x[i, j]
#
# return s
# -
# ## Releasing the GIL and parallelizing loops
# ### Calculating Julia Sets
# + language="cython"
#
# from cython cimport boundscheck, wraparound
#
# @wraparound(False)
# @boundscheck(False)
# def julia_set_cython(const double[:, :] X, const double[:, :] Y,
# const double cx, const double cy,
# const int iter_max, const double radius2,
# int[:, :] julia):
# cdef:
# int i, j, k, nx, ny
# double x, y
# nx = X.shape[0]
# ny = Y.shape[1]
# for i in range(nx):
# for j in range(ny):
# x = X[i, j]
# y = Y[i, j]
# k = 0
# while x * x + y * y < radius2 and k < iter_max:
# x, y = x * x - y * y + cx, 2.0 * x * y + cy
# k = k + 1
#
# julia[i, j] = k
# -
# Benchmark the serial Cython implementation on a 5000x5000 grid of the
# complex plane covering [-2, 2] x [-2, 2].
X, Y = np.meshgrid(np.linspace(-2.0 , 2.0, 5000), np.linspace(-2.0, 2.0, 5000))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
# Iteration counts are written in place into this int32 output buffer.
julia = np.zeros_like(X, dtype=np.int32)
c = -0.9 + 0.22143j  # Julia-set parameter
radius2 = 4.0  # squared escape radius
# %timeit julia_set_cython(X, Y, c.real, c.imag, 100, radius2, julia)
ax.set_aspect('equal')
ax.imshow(julia, extent=[-2, 2, -2, 2]);
# + language="cython"
#
# from cython cimport boundscheck, wraparound
# from cython.parallel cimport prange
#
# @boundscheck(False)
# @wraparound(False)
# def julia_set_cython(const double[:, :] X, const double[:, :] Y,
# const double cx, const double cy,
# const int iter_max, const double radius2,
# int[:, :] julia):
# cdef:
# int i, j, k, nx, ny
# double x, y
# nx = X.shape[0]
# ny = Y.shape[1]
# for i in prange(nx, nogil=True):
# for j in range(ny):
# x = X[i, j]
# y = Y[i, j]
# k = 0
# while x * x + y * y < radius2 and k < iter_max:
# x, y = x * x - y * y + cx, 2.0 * x * y + cy
# k = k + 1
#
# julia[i, j] = k
# -
# Benchmark the prange() version — per the note further down, the OpenMP
# compile/link flags are still missing here, so it presumably runs serially.
X, Y = np.meshgrid(np.linspace(-2.0 , 2.0, 5000), np.linspace(-2.0, 2.0, 5000))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
# Iteration counts are written in place into this int32 output buffer.
julia = np.zeros_like(X, dtype=np.int32)
c = -0.9 + 0.22143j  # Julia-set parameter
radius2 = 4.0  # squared escape radius
# %timeit julia_set_cython(X, Y, c.real, c.imag, 100, radius2, julia)
ax.set_aspect('equal')
ax.imshow(julia, extent=[-2, 2, -2, 2]);
# + language="cython"
#
# from cython cimport boundscheck, wraparound
# from cython.parallel cimport prange
#
# @boundscheck(False)
# @wraparound(False)
# def julia_set_cython(const double[:, :] X, const double[:, :] Y,
# const double cx, const double cy,
# const int iter_max, const double radius2,
# int[:, :] julia):
# cdef:
# int i, j, k, nx, ny
# double x, y
# nx = X.shape[0]
# ny = Y.shape[1]
# for i in prange(nx, nogil=True):
# for j in range(ny):
# x = X[i, j]
# y = Y[i, j]
# k = 0
# while x * x + y * y < radius2 and k < iter_max:
# x, y = x * x - y * y + cx, 2.0 * x * y + cy
# k = k + 1
#
# julia[i, j] = k
# -
# Repeat of the previous prange() benchmark (same grid and parameters).
X, Y = np.meshgrid(np.linspace(-2.0 , 2.0, 5000), np.linspace(-2.0, 2.0, 5000))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
# Iteration counts are written in place into this int32 output buffer.
julia = np.zeros_like(X, dtype=np.int32)
c = -0.9 + 0.22143j  # Julia-set parameter
radius2 = 4.0  # squared escape radius
# %timeit julia_set_cython(X, Y, c.real, c.imag, 100, radius2, julia)
ax.set_aspect('equal')
ax.imshow(julia, extent=[-2, 2, -2, 2]);
# ### !!! WE FORGOT TO PUT THE CORRECT COMPILATION/LINKING OPTIONS !!!
# + language="cython"
#
# # distutils: extra_compile_args = -fopenmp -march=native
# # distutils: extra_link_args = -fopenmp
# from cython cimport boundscheck, wraparound
# from cython.parallel cimport prange
#
# @boundscheck(False)
# @wraparound(False)
# def julia_set_cython(const double[:, :] X, const double[:, :] Y,
# const double cx, const double cy,
# const int iter_max, const double radius2,
# int[:, :] julia):
# cdef:
# int i, j, k, nx, ny
# double x, y
# nx = X.shape[0]
# ny = Y.shape[1]
# for i in prange(nx, nogil=True):
# for j in range(ny):
# x = X[i, j]
# y = Y[i, j]
# k = 0
# while x * x + y * y < radius2 and k < iter_max:
# x, y = x * x - y * y + cx, 2.0 * x * y + cy
# k = k + 1
#
# julia[i, j] = k
# -
# Benchmark the prange() version now compiled with the -fopenmp flags from
# the cell above, so the outer loop can actually run in parallel.
X, Y = np.meshgrid(np.linspace(-2.0 , 2.0, 5000), np.linspace(-2.0, 2.0, 5000))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
# Iteration counts are written in place into this int32 output buffer.
julia = np.zeros_like(X, dtype=np.int32)
c = -0.9 + 0.22143j  # Julia-set parameter
radius2 = 4.0  # squared escape radius
# %timeit julia_set_cython(X, Y, c.real, c.imag, 100, radius2, julia)
ax.set_aspect('equal')
ax.imshow(julia, extent=[-2, 2, -2, 2]);
| cython/notebooks/Cython1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# # Data Munging: Strings
#
# Data munging, the process of wrestling with data to make it into something clean and usable, is an important part of any job analyzing data.
#
# Today we're going to focus on some data that has information we want, but the information is not properly *structured*. In particular, it comes as a single column with a string value, and we want to turn it into a series of boolean columns.
#
# To do that, we're going to use the powerful built-in methods Python provides us to work with strings. You can read all about the available methods here:
#
# https://docs.python.org/3/library/string.html
#
# In particular, we're going to use `.split()`, which is a method that turns a string into a list of strings, and `.strip()`, which removes the "whitespace" from a string.
# +
# Play:
#
# Take a look at the official Python documentation for the
# "split" and "strip" methods. Play around with them now
# to make sure you understand how they work:
import pandas as pd
# -
#
# 1)
# Read the data in a csv called "jobs.csv" into a DataFrame.
# This data is from a site that posts job ads online.
# Each row represents an ad for a job on the site; the `pay` column holds
# zero or more comma-separated contract-type keywords.
jobs = pd.read_csv('jobs.csv')
# +
#
# Take a look at your data and note that you have
# a column called `pay`. That column is a string,
# as far as Python is concerned. However, to us
# humans, we notice that the information is more
# structured than that. It seems like a "collection
# of keywords," where each job can have zero or more
# keywords such as "Part-Time" or "Contract" which
# describe the type of contract.
#
# There are 6 different contract types.
# There are 6 different contract types.
contract_types = ['Part-time', 'Temporary', 'Internship', 'Contract', 'Commission', 'Other']
# Split the comma-separated `pay` string into a list of keywords; str() turns
# missing values (NaN) into the literal string 'nan' before splitting.
jobs['paysplit'] = pd.Series([str(n).split(', ') for n in jobs.pay])
# One boolean column per contract type: True when the keyword appears in `pay`.
for contract in contract_types:
    jobs[contract] = pd.Series([contract in n for n in jobs.paysplit])
def fulltime(listo):
    """Return True when the job's `pay` field was missing.

    A missing `pay` value (NaN) becomes the single-element list ['nan']
    after str() + split above; such jobs are treated as Full-time.
    """
    # Membership test replaces the verbose if/return True/return False chain.
    return 'nan' in listo
# Jobs with no contract keywords in `pay` are assumed to be Full-time.
jobs['Full-time'] = pd.Series([fulltime(n) for n in jobs.paysplit])
# Constant True column so per-type counts can be compared against the total.
jobs['alljobs'] = pd.Series([True for n in jobs.paysplit])
jobs.head(30)
# +
#
# 2)
# Break down your tasks, write a "pipeline" function
# called "add_contract_types".
#
# HINT: last time, each "step" returned a DataFrame
# object. This might not be the case this time, the
# steps can return any data type that is helpful
# to move the to next step!
#Did simplified code first...
# +
#
# 3)
# Now write all the "steps" (functions) needed
# by your pipeline function (add_contract_types)
#Did simplified code first...
# +
#
# 4)
# Now add the needed columns by using your function
# add_contract_types. You will want the returned
# DataFrame for some of the further exercises.
#Did simplified code first...
# +
#
# 5)
# Assume that all jobs that don't specify a contract
# type in "pay" are Full-time. Create a new column,
# called "Full-time", which is a boolean that
# should be True if the job is Full-time, false otherwise.
# Added to original code
# +
#
# 6)
# Get the percentage of jobs for each contract type
# i.e. number of jobs of X type / number of jobs
# Share of each contract type as a percentage of all contract-type flags.
proportions = jobs.loc[:,'Part-time':'Full-time'].sum()/jobs.loc[:,'Part-time':'Full-time'].sum().sum()*100
proportions
# +
#
# 7)
# Which industries ('category') have the highest
# percentage of part-time jobs posted?
# The lowest?
# Per-category part-time share (percent of all contract-type flags), sorted
# descending so head() shows the highest shares and tail() the lowest.
share_parttime = (jobs.groupby('category').sum()['Part-time']/jobs.loc[:,'Part-time':'Full-time'].sum().sum()*100).sort_values(ascending= False)
print('Highest\n', share_parttime.head(5))
print('Lowest\n', share_parttime.tail(5))
# +
#
# 8)
# Which industries ('category') have the highest
# percentage of Internship jobs posted?
# The lowest?
# Note: this question is very similar to the last.
# make a function that can answer both questions
def industry_share(df, job_type):
    """Print the five categories with the highest and lowest share of
    `job_type` jobs, as a percentage of all contract-type flags in `df`."""
    total_flags = df.loc[:, 'Part-time':'Full-time'].sum().sum()
    shares = df.groupby('category').sum()[job_type] / total_flags * 100
    shares = shares.sort_values(ascending=False)
    print('Highest\n', shares.head(5))
    print('Lowest\n', shares.tail(5))
industry_share(jobs,'Internship')
# -
#
# 9)
# Use your function to ask the same question about
# Commission jobs
industry_share(jobs, 'Commission')
# +
#
# 10)
# Let's call jobs that are either Temporary,
# Part-time or Internships "precarious".
#
# Order the industries (category) by the
# percentage of precarious jobs
#
# HINT: can you modify some previous function
# to make this question easy to answer?
#
# HINT: Make sure your variables reflect their
# content. Collections should be plural, single
# elements should be singular.
# "Precarious" = Temporary + Part-time + Internship; shares are percentages
# of all contract-type flags, summed per category and sorted descending.
precarious_shares = (jobs.groupby('category').sum()[['Internship', 'Part-time', 'Temporary']]/jobs.loc[:,'Part-time':'Full-time'].sum().sum()*100)
precarious = (precarious_shares['Internship'] + precarious_shares['Part-time'] + precarious_shares['Temporary']).sort_values(ascending = False)
precarious
# +
#
# 11)
# Get the 5 companies who post the most jobs
# in each category, along with the number of
# jobs listed by each company.
# Count jobs per company, then print each type's five biggest posters.
job_counts = jobs.groupby('company').sum()
job_types = ['Part-time', 'Temporary', 'Internship', 'Contract', 'Commission', 'Other', 'Full-time', 'alljobs']
for n in job_types:
    print(job_counts[n].sort_values(ascending=False)[0:5])
# +
# 12)
# Is any company in the top 5 across more than one categories??
# Return the companies who are, along with the categories
# in which they appear in the top 5.
#
# FORMAT: Dataframe with 3 columns: company, category, number of jobs
# Rebuild per-company counts and collect each job type's top-5 as frames.
job_counts = jobs.groupby('company').sum()
job_types = ['Part-time', 'Temporary', 'Internship', 'Contract', 'Commission', 'Other', 'Full-time', 'alljobs']
top_jobs = []
for n in job_types:
    top_jobs += [pd.DataFrame(job_counts[n].sort_values(ascending=False)[0:5])]
top_jobs = pd.concat(top_jobs).reset_index()
# A company's count of non-null columns = how many top-5 lists it is in.
appearances = pd.DataFrame(top_jobs.groupby('company').count().sum(axis = 1)).reset_index()
appearances.columns = ['company', 'appearances']
# Keep only companies that show up in more than one top-5 list.
appearances = appearances[appearances.appearances > 1]
# Long format: one row per (company, job_type) pairing with its job count.
appearances = appearances.merge(top_jobs, on = 'company', how = 'inner').melt(id_vars = ['company', 'appearances'], var_name = 'job_type', value_name = 'no_jobs')
appearances[pd.notnull(appearances.no_jobs)].sort_values('company')
# HINT: take a look at the `.filter` method on GroupBy:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.core.groupby.DataFrameGroupBy.filter.html
| exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Alignment Mark Model Training
#
# Make sure that the paths below are correct before continuing
# Path to the json file created by Labelbox
JSON_PATH = 'labels.json'
# path to the folder containing the training images
dataset_path = 'C:\\Users\\cell_ml\\Downloads\\images'
# name of generated model
model_path = "alignment_50_v2.h5"
# +
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import json
import pandas as pd
from skimage.io import imread, imshow, imread_collection, concatenate_images
import skimage.draw
from smartscope.source.maskrcnn import utils
from smartscope.source.maskrcnn import model as modellib
from smartscope.source.maskrcnn import visualize
from smartscope.source.maskrcnn.config import Config
from smartscope.source.maskrcnn.model import log
# %matplotlib inline
# Root directory of the project
ROOT_DIR = os.getcwd()

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
# +
# Split the labels into training and validation subsets.
# Read the Labelbox export via the configured JSON_PATH (the literal
# 'labels.json' previously duplicated that setting) and close the file
# promptly instead of leaking the handle.
with open(JSON_PATH, 'r') as labelfile:
    annotations = json.load(labelfile)
number_of_annotations = len(annotations)
# Use 4/5ths of the data for training and 1/5 for validation
split_at = number_of_annotations * 4/5
training_data = []
val_data = []
for i, a in enumerate(annotations):
    if i < split_at:
        training_data.append(a)
    else:
        val_data.append(a)
# Persist the two subsets for MarkDataset.load_shapes to pick up.
with open('train.json', 'w+') as trainfile:
    json.dump(training_data, trainfile)
with open('val.json', 'w+') as valfile:
    json.dump(val_data, valfile)
# +
class MarkConfig(Config):
    """Configuration for training on the cell dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "mark"

    # One image per GPU on a single GPU => effective batch size of 1.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + cell

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 150

    # set validation steps
    VALIDATION_STEPS = 50

    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9


config = MarkConfig()
config.display()
# -
def get_ax(rows=1, cols=1, size=8):
    """Create and return a Matplotlib Axes array for the notebook's plots.

    Keeping figure creation in one place gives a single knob (`size`) to
    control how large rendered images appear.
    """
    width = size * cols
    height = size * rows
    _, axes = plt.subplots(rows, cols, figsize=(width, height))
    return axes
class MarkDataset(utils.Dataset):
    """Dataset of Labelbox-annotated 'Mark' images for Mask R-CNN."""

    def load_shapes(self, subset):
        """Register the 'Mark' class and the images of the given subset.

        subset: 'train' reads train.json, anything else reads val.json
        (both produced by the split cell above).
        """
        # Add classes. We have only one class to add.
        self.add_class("Mark", 1, "Mark")

        # Train or validation dataset?
        dataset_dir = dataset_path
        split_file = 'train.json' if subset == 'train' else 'val.json'
        # Use a context manager so the file handle is not leaked.
        with open(split_file, 'r') as f:
            annotations = json.load(f)

        # Add images annotated with Labelbox
        for a in annotations:
            print(a['Label'])
            # Skip images that carry no 'Mark' polygons at all.
            if 'Mark' in a['Label']:
                polys = [r['geometry'] for r in a['Label']['Mark']]
                # Convert each polygon from [{'x':..,'y':..}, ...] points to
                # the all_points_x/all_points_y layout load_mask expects.
                polygons = []
                for i, p in enumerate(polys):
                    x_points = [x['x'] for x in p]
                    y_points = [y['y'] for y in p]
                    polygons.append({'all_points_x': x_points, 'all_points_y': y_points})

                # Get Image Size
                image_path = os.path.join(dataset_dir, a['External ID'])
                image = imread(image_path)
                height, width = image.shape[:2]

                self.add_image(
                    "Mark",
                    image_id=a['External ID'],  # use file name as a unique image id
                    path=image_path,
                    width=width, height=height,
                    polygons=polygons)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
          masks: A bool array of shape [height, width, instance count] with
              one mask per instance.
          class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a Mark-source image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "Mark":
            return super(self.__class__, self).load_mask(image_id)

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1

        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID only, we return an array of 1s.
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement (np.bool was merely an alias for it).
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "Mark":
            return info["path"]
        # Fix: the parent-class result was previously computed but not
        # returned, so non-Mark images silently yielded None.
        return super(self.__class__, self).image_reference(image_id)
# +
# Training dataset
dataset_train = MarkDataset()
dataset_train.load_shapes("train")
dataset_train.prepare()

# Validation dataset
dataset_val = MarkDataset()
dataset_val.load_shapes("val")
dataset_val.prepare()
# -
# Load and display random samples to eyeball the masks before training.
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)
# +
# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last()[1], by_name=True)
# -
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=1,
            layers='heads')
# +
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern. Note the 10x lower learning rate.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=50,
            layers="all")

# Save the fine-tuned weights to the path configured at the top.
model.keras_model.save_weights(model_path)
# +
class InferenceConfig(MarkConfig):
    # Inference runs one image at a time on a single GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


inference_config = InferenceConfig()

# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)

# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
# model_path = model.find_last()[1]

# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# +
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset_val, inference_config,
                           image_id, use_mini_mask=False)

# Log shapes/dtypes of the ground-truth tensors for sanity checking.
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

# Ground-truth boxes/masks, for visual comparison with the detections below.
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset_train.class_names, figsize=(8, 8))
# +
results = model.detect([original_image], verbose=1)
r = results[0]
# Detected boxes, masks, class ids and confidence scores on the same image.
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset_val.class_names, r['scores'], ax=get_ax())
# -
| SmartScope/smartscope/notebook/training/alignment_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to Python 3!
# In this [Jupyter Notebook](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html) we will cover some [Python 3](https://en.wikipedia.org/wiki/Python_(programming_language)) basics.
#
# I recommend you take this one step at a time and try to practice along the way. Every explanation will contain an example that you can run within the notebook.
#
# ### Here are the things we will be covering: <a name = "index"></a>
# - [Jupyter Notebook:](#jupyter)
# - What is Jupyter?
# - What can I use Jupyter Notebooks for?
#
#
# - [GitHub:](#github)
# - What is version control?
# - What is Git?
# - What is GitHub?
# - What are we going to use GitHub for?
#
#
# - [Python 3:](#python3)
# - What is Python 3?
# - Why Python 3?
#
#
# - [Syntax:](#syntax)
# - Comments
# - Print
# - Variables
# - [Strings](#section-syntax-strings)
# - [Integers and Floats](#section-variables-numbers)
# - [Mathematical Operations](#variables-math)
# - [String Math](#section-variables-stringmath)
#
#
# - [Loops:](#loops)
# - What are loops?
# - Types of loops:
# - For
# - While
#
#
# - [Conditionals:](#conditionals)
# - What are conditionals?
# - Types of conditionals:
# - If
# - Elif
# - Else
#
#
# - [Functions:](#functions)
# - What are functions?
#
#
# ---
# ---
# ## 1. Jupyter Notebooks: <a name = "jupyter"></a>
#
# #### - What are Jupyter Notebooks?
# Jupyter Notebooks is an online notebook that allows developers to weave together computational information (code, data, statistics) with a narrative, multimedia or graphs.
#
# #### - What can I use Jupyter Notebooks for?
# In this case, we are going to be using it to set up an interactive textbook, full of explanations and examples which you can test out right from your browser.
#
# You could use it to explain the reasoning behind one of your programs, show your work, etc.
#
# Scientists, journalists, and researchers often use it to open up their data to the public, share the stories behind their computations, and enable future collaboration and innovation.
# In short, there are endless uses for Jupyter Notebooks.
# ---
# ---
# ## 2. GitHub: <a name = "github"></a>
# In broad, GitHub is a website and cloud-based service that helps developers store and manage their code, as well as track and control changes to it. To understand exactly what GitHub is, you need to understand two principles, version control and git.
#
# #### - What is version control?
# Version control helps developers track and manage changes to a project’s code. As a software project grows, version control becomes essential. It lets developers safely work through branching and merging.
#
# - **Branching:** With branching, a developer clones part of the source code to a safe location. Where the developer can then make changes without affecting the rest of the project.
# - **Merging:** Once the developer gets their version of the code working properly, they can merge that code back into the main source code to make it official.
#
# All of these changes are then tracked and can be undone if anything breaks or a feature's implementation is deemed unnecessary.
#
# #### - What is Git?
# Git is a specific open-source version control system created by <NAME> (the creator of the Linux kernel) in 2005.
#
# Specifically, Git is a distributed version control system, which means that the entire codebase and history is available on every developer’s computer, which allows for seamless branching and merging by everyone in the project.
#
# #### - What is GitHub?
# GitHub is a company that offers a cloud-based Git repository hosting service. Basically, it makes it a lot easier for individuals and teams to use Git for version control and collaboration.
#
# GitHub’s interface is so user-friendly that beginner developers can take advantage of Git. Without GitHub, using Git generally requires a bit more technical knowledge and use of the command line.
#
# Additionally, anyone can sign up and host a public code repository for free, which makes GitHub especially popular with open-source projects.
#
# #### What are we going to use GitHub for?
# We will mainly use GitHub to store project files that will be covered during each session of the Coding Club.
# That will allow us to keep track of the file's history and provide you, the user, a place to conveniently access all project files.
# ---
# ---
# ## 3. Python 3: <a name = "python3"></a>
#
# Python is one of the most widely used languages out there. Be it web development, machine learning and AI, or even micro-controller programming, Python has found its place just about everywhere.
#
# #### - What is Python 3?
# Python is an extremely powerful, yet beginner friendly, programming language. It was created by <NAME>, and released in 1991. Python 3 is ideal for scripting and rapid application development. It is used in areas such as web development (Django, Bottle...), scientific and mathematical computing (Orange, SymPy, NumPy...) or desktop graphical user Interfaces (Pygame, Panda3D, Processing.py...).
#
# Python also supports an extremely wide range of modules, sets of pre-made code that you can easily include in your own projects, which speed up your code building sessions significantly, and help make it be even more beginner friendly.
#
# Python has consistently been one of the most used programming languages since the early 2000s.
#
# #### - Why Python 3?
# There are many things that make Python 3 stand out from other languages. For example:
# - Python is seamlessly integrated into many different platforms such as MacOS, Windows or most Linux distributions
#
# - Python's syntax is fairly similar to the English language, which allows for easy readability and understanding of the code.
#
# - Python runs on an interpreter system, meaning that code can be executed as soon as it is written. This means that prototyping can be very quick.
#
# - Python can be treated in a procedural way or an object-orientated way. [Extra Resources](https://www.youtube.com/watch?v=pTB0EiLXUC8)
#
# #### Extra information:
# Python was designed for readability, and has some similarities to the English language with influence from mathematics.
# Python uses new lines to complete a command, as opposed to other programming languages which often use semicolons or parentheses, it also relies on indentation, using whitespace, to define [scope](#word-scope).
# ---
# ---
# ## 4. Syntax: <a name = "syntax"></a>
# We tell the computer what we want it to do by writing commands inside a text file using a programming language. These files are called programs. Running a program means telling a computer to read the text file, translate it to the set of operations that it understands, and perform those actions.
#
# Complete the next example by writing what your favorite food is within the quotes ("") and hitting run.
# Fill in your favorite food between the quotes, then run the cell.
fav_food = " "
print("My favorite food is " + fav_food + "!")
# #### - Comments:
# The first thing we’re going to do is learn how to tell a computer to ignore a part of a program. Text within a program but not run by the computer is called a comment. Python interprets anything after a `#` as a comment.
#
# - ***Some uses for comments are:***
# - Provide context for why something is written the way it is:
# ```python
# # This variable will be used to count the number of times anyone steps on a banana peel
# slip_n_slide_count = 0
# ```
# - Improve the readability of your code so that future you or someone else can easily understand what it does:
# ```python
# # This function stores all the times I have said XD into a text file
# xd_to_textfile()
# ```
# - Ignore a line of code that is proving to be buggy or unnecessary:
# ```python
# # superstar_name = get_superstar_name_old()
# superstar_name = get_superstar_name_new()
# ```
#
# Complete the following example by writing the description of a really cool superhero within a comment.
# Write your comment below this one...
# #### - Print: <a name='section-syntax-print'> </a>
# Now what we’re going to do is teach our computer to share information. Getting information from the computer is very important, as it can answer questions about “how” or “why” or “what” it is doing. In Python, the `print()` function is used to get some information from the computer. The message to be printed should be surrounded by quotes (""):
# ```python
# # from Karen circa 2020
# print("Live, laugh, love.")
# ```
# In the example above, we get our program to `print()` a quote from an entitled person. The printed words that appear as a result of the `print()` function are referred to as *output*. The output of this example program would be:
#
# ⠀⠀⠀⠀⠀⠀⠀⠀⠀`Live, laugh, love.`
#
# For this example, you will need to print out your favorite quote, remember to put it between the quotes:
# Print your favorite quote below:
print("")  # <-- type your favorite quote between the quotes
# #### - Strings: <a name='section-syntax-string'> </a>
# In programming, blocks of text are called strings. In our last exercise, we created the string `"Hello world!"`. To tell Python a group of letters is a string, we surround it in either double quotes `("Hello world")` or single quotes `('Hello world')`. They both do the same, so it doesn't matter which ones you use, just make sure to be consistent.
#
# To complete the following example exercise print your name:
# #### - Variables:
# All programming languages offer a way to store data and reuse it later. If, for example, there is a greeting we want to say, a date we need to reuse, or a username we need to save we can create a variable which will store that value. In Python, we assign variables by using the equals sign `=`. Look at the following example:
# ```python
# user_name = "jarredthejellyfish"
# # Prints "jarredthejellyfish"
# print(user_name)
# ```
# In the last example, we stored my username into a variable called `user_name`. Variables can’t have spaces or symbols in their names other than an underscore `_`. They can’t begin with numbers but they can have numbers after the first letter (e.g., `cool_variable_5` is OK).
#
# We call these objects `variables` because the values they can hold can vary throughout the program we are running. If you look at the following example, you will see how the value stored within `favorite_song` is changed.
# ```python
# # Favorite song
# favorite_song = "Eventually, Tame Impala"
# print(favorite_song)
#
# # Change of mind
# favorite_song = "New Person, Same Old Mistakes, Tame Impala"
# print(favorite_song)
# ```
#
# In the example above, we create a variable, `favorite_song`, and assign the string `"Eventually, Tame Impala"` to it and after that, we use `print()` to output it to the console.
#
# In the second part, we assign a new value to `favorite_song`, `New Person, Same Old Mistakes, Tame Impala`. Finally we print the last value using `print()`.
#
# Complete the following practice exercise:
# +
# Below you have the variable 'movie_title'
movie_title = 'Interstellar'
# Printing out movie_title
print("Gerard's favorite movie is:")
print(movie_title)
# Reassign the title of another movie to movie title below (remember to use quotes)
# (the next line is intentionally incomplete — the cell errors until you complete it)
movie_title =
# Printing out movie_title
print('A good movie to watch is:')
print(movie_title)
# And now do it again but with a horror movie!
# (reassign movie_title here before the prints below)
# Printing out 'movie_title'
print('Hey, you should watch the following movie:')
print(movie_title)
# -
# Up to this point we have been working with strings, but there are other types of variables such as:
# - **Integer (or int):** Used for whole numbers, numbers that have no decimal points.
# - **Float (or Real):** Used for numbers that contain decimal points, or for fractions.
# - **Boolean (or bool):** Used where data is restricted to binary options (True/False, yes/no).
#
# We will work with these types of variables throughout the following sections.
# #### - Variables: Integers and Floats: <a name='section-variables-numbers'> </a>
# A computer is capable of handling much more than a group of letters. Python has a few numeric [data types](#word-data-type), it has multiple ways of storing numbers. Which one you use depends on your intended purpose for the number you are saving.
#
# Integers, or `ints`, are numbers without a decimal point, whole numbers. Integers contain all natural numbers (`0, 1, 2 ,3...`) and their negative counterparts (`-1, -2, -3...`). As a practical example of how to use an integer, let's take counting the number of people in a room. If you try to do that, you should always get a whole number, if you don't, please make sure you didn't split Jonny in half, he doesn't deserve it.
#
# Floating-point numbers, or `floats`, are a decimal numbers (`420.69`). Floats can be used for a wide range of things such as holding a fractional number or a measurement.
#
# If you were measuring the length of your leg, storing a chess player’s average time between moves, or the average score of your friend group's rice purity test, you would likely use a float.
#
# Numbers can be assigned to variables or used for mathematical operations in a program:
# ```python
# an_interger = 4
# a_float = 20.6
#
# print(an_interger + 3) # There is a mathematical operation within the print!
# # prints 7
# ```
# Above we defined an `integer` and a `float` as the variables `an_interger` and `a_float`. We printed out the sum of the variable `an_interger` with the number `3`. `We call the number 3 here a literal`, meaning it’s actually the number 3 and not a variable with the number 3 assigned to it.
#
# There are some limitations that floating point numbers face, they can affect their functioning which can lead to some unexpected behaviour. If you'd like to read more about their limitations visit [Python’s documentation on their limitations](https://docs.python.org/3/tutorial/floatingpoint.html).
#
# Complete the next exercise by following the instructions within it:
# +
# Create a new variable below that stores your phone number (without any formatting or country code eg. 605660522)
# Create a new variable below that holds any decimal number you want
# Print both of your variables using the print() function below
# -
# #### - Variables: Mathematical Operations <a name='variables-math'> </a>
# Computers are absolutely amazing at doing math. Their name literally comes from "compute", which means to calculate. Python can perform most basic operations; addition, subtraction, multiplication, and division by using +, -, *, and /.
# ```python
# # Prints "420"
# print(430 - 124 + 1)
#
# # Prints "666"
# print(333 * 2)
#
# # Prints "69.0"
# print(345 / 5)
# ```
# If you look at the output from `print(345/5)` you will see that it has a decimal place. That happens because Python converts integer numbers to floats before performing a division so that it can store decimals, in case that your division results in any.
#
# Division can throw its own special error: `ZeroDivisionError`. Python will raise this error when attempting to divide by 0. Quick reminder, dividing by zero has no definite answer because you can't split something in 0 parts.
#
# Mathematical operations in Python follow the standard mathematical order of operations. Here is a [Wikipedia article](https://en.wikipedia.org/wiki/Order_of_operations) in case you need a refresher.
#
# Complete the following exercise by following the instructions within it:
# +
# Print out the result of: 64500/150 - 10.31 + 1
# -
# <a name='section-plus-equals'> </a> Python offers an easier way to update variables. When you have a number saved in a variable and want to add to the current value of the variable, you can use the `+=` (plus-equals) operator.
# ```python
# # First we have a variable with a number saved
# number_of_headshots = 120
#
# # Then we need to update that variable
# # Let's say we headshot another 2 players
# number_of_headshots += 2
#
# # The new value is the old value
# # Plus the number after the plus-equals
# print(number_of_headshots)
# # Prints 122
# ```
# Above, we keep a running count of the number of headshots a player has landed over time. Instead of recalculating from the start, we keep a grand total and update it whenever we land more headshots.
# Variables with numbers assigned to them can be treated as [literals](#word-literal). Basically, we can perform any arithmetic operation on variables and Python will not be able to tell the difference between them and a literal. Performing arithmetic on variables does not change their value. You can only update a variable using the `=` sign.
# ```python
# concer_ticket_price = 231.50
# number_of_people = 2000
#
# # Prints "463000"
# print(concer_ticket_price * number_of_people)
# # Prints "231.5"
# print(concer_ticket_price)
# # Prints "2000"
# print(number_of_people)
#
# # We haven't sold any, so we update the price to make it more competitive
# concer_ticket_price = 104
#
# # Prints "208000"
# print(concer_ticket_price * number_of_people)
# # Prints "104"
# print(concer_ticket_price)
# # Prints "2000"
# print(number_of_people)
# ```
# We assign numeric values to 2 variables, `concer_ticket_price` and `number_of_people`. After that, we use them to perform a mathematical operation. This doesn’t affect their values.
# When we update the `concer_ticket_price` variable and perform the calculations again, they use the updated values for the variable!
#
# Complete the following exercise:
# +
# You are a professional stunt person, your hourly rate is $250.4, and you usually do 15h of work per day.
# Define a variable that holds your usual hourly rate (omit the $ sign)
# Define another variable that holds the usual amount of hours you work in a day
# Print out how much money you make in a day (remember to use the print() function)
# -
# Python can also raise numbers to any power. When doing math on paper you usually write your exponents in superscript (`2²`), but typing superscript numbers isn’t always easy on modern keyboards since you have to do some weird key combinations. Seeing as this operation is so closely related to multiplication, we use the notation **.
# ```python
# # 3 to the 4th power, or 81
# print(3 ** 4)
#
# # 2 squared, or 4
# print(2 ** 2)
#
# # 9 * 9 * 9, 9 cubed, or 729
# print(9 ** 3)
#
# # We can even perform fractional exponents using floating-point numbers
# # 16 to the half power, or 4
# print(16 ** 0.5)
# ```
# In the above example we can see how we calculated some simple exponents. We calculate 3 to the 4th power, 2 to the 2nd power, 9 to the 3rd power, and 16 to the 0.5th power.
#
# Complete the next exercise:
# +
# You have decided to refloor your kitchen, it is a perfectly square room which measures 6x6 meters
# Each of the tiles you have is 1 by 1 meter
# Print how many tiles you will need to refloor your kitchen
# -
# When you divide two numbers that are coprime, you will always get a remainder. Python makes the job of getting that remainder easy by allowing you to use the modulo operator `%`. The modulo operator gives the remainder of a division calculation. If the numbers are not coprime (one is divisible by the other), the result of the modulo operator will be 0.
# ```python
# # Prints 1 because 5/2 equals 2 with a remainder of 1
# print(5%2)
#
# # Prints 2 because 12/5 equals 2 with a remainder of 2
# print(12%5)
# ```
# In the above example we used the modulo operator to get the remainder of two divisions.
#
# The modulo operator is useful in programming when we want to perform an action every nth-time the code is run.
# +
# You have 529 grains of rice and you want to feed 5 children.
# You decide to divide them evenly and take the remainder for yourself.
# Calculate how many grains of rice you would get using the modulo operator and print the value out
# -
# #### - Variables: String Math <a name='section-variables-stringmath'> </a>
#
# The `+` operator isn't only used for numbers, it can also be used for strings. Adding two strings together is called `string concatenation`.
# When you concatenate two strings you create a new string, that string contains the first string’s contents followed by the second string’s contents (without any added space in-between).
# ```python
# danny_phantom_quote = "If somebody catches me, I go from geek to freak around here!"
# quote_date = "Circa April 3, 2004"
# full_quote = danny_phantom_quote + quote_date
#
# # Prints "If somebody catches me, I go from geek to freak around here!Circa April 3, 2004"
# print(full_quote)
# ````
#
# In the example above we create two variables that hold strings, which we then concatenate. If you look at the output you will see that there is no space between the strings we added together. That is because concatenation does not add spaces. Let's fix that issue by concatenating a space between our variables.
# ```python
# full_quote = danny_phantom_quote + " " + quote_date
#
# # Prints "If somebody catches me, I go from geek to freak around here! Circa April 3, 2004"
# print(full_quote)
# ```
# After that little addition, the code prints the message we wanted it to print.
#
# In Python you can only concatenate strings with other strings. In the event that you want to concatenate a string with anything other than a string, you will need to turn that non-string object into a string. To do that you can use the `str()` function. If you’re trying to `print()` a numeric variable you can use commas to pass it as a different argument rather than converting it to a string.
# ```python
# eating_competition = "I have eaten "
# hotdogs_eaten = 32
# eating_competition_2 = " hotdogs in 60 seconds!"
#
# # Concatenating an integer with strings is possible if we turn the integer into a string first
# full_competition_string = eating_competition + str(hotdogs_eaten) + eating_competition_2
#
# # Prints "I have eaten 32 hotdogs in 60 seconds!"
# print(full_competition_string)
#
# # If we just want to print an integer
# # we can pass a variable as an argument to
# # print() regardless of whether
# # it is a string.
#
# # This also prints "I have eaten 32 hotdogs in 60 seconds!"
# print(eating_competition, hotdogs_eaten, eating_competition_2)
# ```
# Using `str()` we can convert variables that are not strings to strings and then concatenate them. But we don’t need to convert a number to a string for it to be an argument to a print statement.
#
# The [plus-equals operator](#section-plus-equals) also can be used for string concatenation, like so:
# ```python
# quirky_caption = "I licked it, so it's mine."
#
# # Almost forgot the hashtags! (notice the use of spacing)
# quirky_caption += " #not_like_other_girls "
# quirky_caption += " #quirky"
#
# # This prints "I licked it, so it's mine. #not_like_other_girls #quirky"
# print(quirky_caption)
# ```
# In the above example we created a caption for a photograph of me licking an orange I found on the street. But then we added some hashtags for me to get instafamous.
#
# Complete the following exercise please:
# +
# Our close friends are trying to hide from the coding mafia and have decided to skip country
# They tasked us with making their fake passport. We initially quoted them 3200€
quoted_price = 3200
# Last minute they decided to add some stuff to the deal. A fake ID, some food for the trip, and plane tickets
fake_id = 150
food = 50
plane_tickets = 2309
# Update the quoted price using the += operator
# We need to send them a message to tell them the new price
# Use the next sentence bits and the += operator to build the message
message_part_1 = "Hi Heathers, we know we quoted you 3200€ but the extra stuff has made the price go up to "
message_part_2 = "sorry if that complicates things a little... hugs, totally not the coding mafia."
# Print the message.
# Make sure that the spacing between words is correct!
print()  # <-- print the assembled message here
# -
# ---
# ---
#
#
# ### Word definitions:
#
# - **Scope:** The visibility of variables and methods in one part of a program to another part of that program. The importance and meaning of scope varies between languages, but there are two general scope concepts many languages have in common: local and global scope. [Wikiversity](https://en.wikiversity.org/wiki/Introduction_to_Programming/Scope) <a name = "word-scope"></a>
#
# - **Data type:** A particular kind of data item, as defined by the values it can take, the programming language used, or the operations that can be performed on it. [Oxford](https://www.lexico.com/definition/data_type) <a name = "word-data-type"></a>
#
# - **Literal:** In programming, a value written exactly as it's meant to be interpreted (eg: 3, 2, cat, mouse, etc). In contrast, a variable is a name that can represent different values during the execution of the program. [Webopedia](https://www.webopedia.com/TERM/L/literal.html) <a name= "word-literal"></a>
| Jupyter Notebook/Coding Workshop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DenysNunes/data-examples/blob/main/spark/basic/read_save.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="mGNVvW3yHbaD"
# # **Init spark**
# + colab={"base_uri": "https://localhost:8080/"} id="JmSpDN-CHTMM" outputId="dc4c7fba-31b0-4f18-87fc-0ab97c975c12"
# !pip install -q pyspark==3.1.1
# !sudo apt install tree
# !rm -rf /tmp/read-save-example/
from pyspark.sql import SparkSession
# Local session with Kryo serialization and Hive support; the Hudi and Avro
# bundles are pulled in here for the "external format" examples further down.
spark = (
    SparkSession.builder
    .master('local[*]')
    .appName("New Session Example")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .config("spark.jars.packages", "org.apache.hudi:hudi-spark3-bundle_2.12:0.9.0,org.apache.spark:spark-avro_2.12:3.0.1")
    .enableHiveSupport()
    .getOrCreate()
)
# + colab={"base_uri": "https://localhost:8080/"} id="uuPNLERcHd8h" outputId="7663327b-92bd-4c77-e654-c039655869cb"
from pyspark.sql.types import Row
from datetime import date
import random
def get_random():
    """Return a random salary: a multiple of 100 in [2000, 5000)."""
    return random.randrange(2000, 5000, 100)
# (name, hire_date) pairs; sequential ids are assigned in the comprehension.
_employees = [
    ('John', date(2020, 1, 1)), ('Joana', date(2020, 1, 1)),
    ('Maria', date(2020, 1, 2)), ('Sandra', date(2020, 1, 2)),
    ('Ben', date(2020, 1, 3)), ('Carl', date(2020, 1, 3)),
    ('Joseph', date(2020, 1, 4)), ('Oliver', date(2020, 1, 4)),
]
raw_rows = [
    Row(id=i, name=name, salary=get_random(), hire_date=hired)
    for i, (name, hired) in enumerate(_employees, start=1)
]
df = spark.createDataFrame(raw_rows)
df.show()
# + [markdown] id="I5nRtaAbWxrG"
# # **Saving Mode 1**
# + id="ojFeV9TUVrPe"
# Save as CSV via the generic DataFrameWriter.save(), passing format options as kwargs
df.write.save(path='/tmp/read-save-example/df1/', format='csv', delimiter=',', header=True)
# + colab={"base_uri": "https://localhost:8080/"} id="pqyGbVKyVke4" outputId="106aa7bf-4fcf-4b3d-9a93-fbe7312e931e"
# !ls /tmp/read-save-example/df1/
# + colab={"base_uri": "https://localhost:8080/"} id="gXZaZubbWQBH" outputId="16972ecb-6d9d-4756-874d-532459c44a07"
# !cat /tmp/read-save-example/df1/*.csv
# + [markdown] id="DO7uZzp6XhRm"
# # **Saving Mode 2**
# + id="XDaj1toKWXFc"
# Same CSV write, but options set via .option() and the format-specific .csv() shortcut
df.write.option('delimiter', ',').option('header', True).csv(path='/tmp/read-save-example/df2/')
# + colab={"base_uri": "https://localhost:8080/"} id="gK2eH_QwdArZ" outputId="842023d2-4ed6-4a51-b9e3-219d1d19aeb2"
# !cat /tmp/read-save-example/df2/*.csv
# + [markdown] id="SocNma7KgPWU"
# # **Saving Mode 3 - Partitioned**
# + id="xyyB0m3tgOtc"
# Partitioned write: one sub-directory per distinct hire_date value
df.write.save(path='/tmp/read-save-example/df3/', format='parquet', partitionBy=['hire_date'])
# + colab={"base_uri": "https://localhost:8080/"} id="yOp3BvPZghmt" outputId="fa313c94-eb24-4cdd-9318-fb1c6c3f710b"
# !tree /tmp/read-save-example/df3/
# + [markdown] id="4ALEoO4jgUB9"
# # **Saving Mode 4 - As Table**
# + id="z6o291yigTWM"
# Register the data as a Hive table; the 'path' option makes it external at that location.
# NOTE(review): this writes into the same directory as the partitioned df3 example
# above — confirm that overlap is intended.
df.write.saveAsTable(path='/tmp/read-save-example/df3/', name='tb_parquet_salaries')
# + colab={"base_uri": "https://localhost:8080/"} id="dtEKiFkShQIC" outputId="40c05208-3618-4810-ebc2-373675c91afc"
# !tree /tmp/read-save-example/df3/
# + [markdown] id="_t2aVSLfYmhm"
# # **Saving with diferent formats**
# + id="iyigqkypYiVf"
# mode='overwrite' -> replace any previously saved data
# mode='append'    -> add the new data to what is already there
for fmt in ('parquet', 'orc'):
    df.write.save(path=f'/tmp/read-save-example/otherformat/{fmt}/',
                  format=fmt,
                  mode='overwrite')
# + colab={"base_uri": "https://localhost:8080/"} id="q6rsJuxWZkns" outputId="54340716-15c8-4363-bf8f-b3a34fa3c797"
# !tree /tmp/read-save-example/otherformat/
# + [markdown] id="lhfYu1YNaDZr"
# # **Saving with a external format**
#
# **Notice, HUDI jar already added on session !!!**
#
# More information about hudi [here](https://hudi.apache.org/).
# + id="24kU4N4RY2ym"
# Hudi writer configuration — the Hudi bundle jar was added to the session above.
hudi_options = {
    'hoodie.table.name': 'tb_hudi_salaries',
    'hoodie.datasource.write.recordkey.field': 'id',
    'hoodie.datasource.write.partitionpath.field': 'hire_date',
    'hoodie.datasource.write.table.name': 'tb_hudi_salaries',
    'hoodie.datasource.write.operation': 'insert',
    # Bug fix: the precombine field must be a column of the DataFrame. df has
    # columns (id, name, salary, hire_date) and no 'ts' column, so 'ts' would
    # make the write fail; hire_date is the natural ordering field here.
    'hoodie.datasource.write.precombine.field': 'hire_date',
    'hoodie.insert.shuffle.parallelism': 2
}
df.write.format("hudi"). \
    options(**hudi_options). \
    mode("overwrite"). \
    save('/tmp/read-save-example/otherformat/hudi/')
# + colab={"base_uri": "https://localhost:8080/"} id="npTrHGM9aOda" outputId="2e014c92-2f10-4451-cfa4-4205d5e991e7"
# !tree /tmp/read-save-example/otherformat/hudi/
# + [markdown] id="PkuIAUZcetsi"
# # **Reading mode 1 - All params like arguments**
# + colab={"base_uri": "https://localhost:8080/"} id="AlacMdECer_A" outputId="e447e2f6-3a43-40cf-beab-38bba244764e"
# Generic loader: format and path passed as keyword arguments
df_load = spark.read.load(format='parquet', path='/tmp/read-save-example/otherformat/parquet/')
df_load.show()
# + [markdown] id="TgmqPSPLfG-O"
# # **Reading mode 2 - Implicit format method**
# + colab={"base_uri": "https://localhost:8080/"} id="nOJ7_gCLctSI" outputId="637c94de-5320-4a01-bdff-7cea4e5c546a"
# Format-specific shortcut reader
df_load2 = spark.read.parquet('/tmp/read-save-example/otherformat/parquet/')
df_load2.show()
# + [markdown] id="CSUMrYMefVR6"
# # **Reading mode 3 - Using wildcards**
# + colab={"base_uri": "https://localhost:8080/"} id="bWHYF4-EfVDr" outputId="7dc970a5-d5f9-4e60-cd72-59051fca8bdb"
# Wildcards select only the matching part-files in the directory
df_load3 = spark.read.parquet('/tmp/read-save-example/otherformat/parquet/part*.parquet')
df_load3.show()
# + [markdown] id="935leJVNfJDK"
# # **Reading mode 4 - Spark SQL direct path**
# + colab={"base_uri": "https://localhost:8080/"} id="zp2eTyOLfD5I" outputId="0546ab9f-ce94-473d-8d0e-162152f9ce81"
# SQL over files: query a directory directly with format.`path`
df_load4 = spark.sql('select * from parquet.`/tmp/read-save-example/otherformat/parquet/`')
df_load4.show()
# + [markdown] id="J5dxKB-WhVsH"
# # **Reading mode 5 - Spark SQL Table**
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="sz13mmGOfQmJ" outputId="181b28f6-c641-411f-916c-63112458d949"
# Query the Hive table created by saveAsTable earlier
df_load5 = spark.sql('select * from tb_parquet_salaries')
df_load5.show()
# + id="6Q5G29bUhhck"
| spark/1 - basic/read_save.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="5NOCVQLXLMzE"
# # Anticipez les besoins en consommation électrique de bâtiments
# + [markdown] id="VGaXePggLMzL"
# * Ville neutre en émissions de carbone en 2050
# * Prédire les émission de co2 et la consommation totale d'énergie des batiments pour lesquelles pas de mesure
# * Évaluer l'interet de l'ENERGY STAR SCORE pour la prédiction d'émissions
# + [markdown] id="q1QJ_qG4LMzM"
# * **1. Réaliser une courte analyse exploratoire**
# * **2. Tester différents modèles de prédiction afin de répondre au mieux à la problématique**
# * **3. Attention à la fuite de données**
# + [markdown] id="Dz_J80ffLMzM"
# * choisir les variables à utiliser
# * data leak
# * regresseur differents (min 1 linéaire, 1 non linéaire
# * validation croisée lors de l'optimisation des hyperparams
# * justifier le choix de l'un des regresseurs
# * choisir métrique poour l'optimisation
# * Explorer differents score et indic de performances
# * présentation du plus simple au plus complexe
# * encodage adapté des var catégorielles
# * Normalisation des variables ou transformation de leur distrib si besoin
# -
import dill
# Restore a previously saved notebook session (all variables and dataframes).
# NOTE(review): this fails if 'kernels/p4_explor_kernel.db' does not exist —
# in that case run the import / load cells below instead.
dill.load_session('kernels/p4_explor_kernel.db')
# + executionInfo={"elapsed": 2785, "status": "ok", "timestamp": 1607545546941, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00853056097421135069"}, "user_tz": -60} id="a0wU8AiKLMzM"
import os
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
from scipy.stats import shapiro
from sklearn.preprocessing import LabelEncoder
# + executionInfo={"elapsed": 1168, "status": "ok", "timestamp": 1607545558637, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00853056097421135069"}, "user_tz": -60} id="lN0Kc_uWLMzN"
# Show up to 1000 characters per column when displaying DataFrames
pd.set_option("display.max_colwidth", 1000)
# + [markdown] id="A7q6MK5lLMzO"
# ## 1. IMPORT DES DONNÉES
# + id="aPVmDYhKLc73"
# NOTE(review): the next three cells each load the same two CSVs from a
# different machine-specific path; run only the one matching your machine
# (later cells overwrite the df_2015/df_2016 loaded by earlier ones).
df_2015 = pd.read_csv(r'D:\Work\OC_DS_P4\2015-building-energy-benchmarking.csv')
df_2016 = pd.read_csv(r'D:\Work\OC_DS_P4\2016-building-energy-benchmarking.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 391} executionInfo={"elapsed": 1258, "status": "error", "timestamp": 1607545560016, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00853056097421135069"}, "user_tz": -60} id="OcYStq4vLMzO" outputId="3f87f6cf-328d-4b2e-c5d6-60769dfd7b1d"
df_2015 = pd.read_csv(r'E:\t\Cours Open Classeroom\DATA SCIENCE\Projet 4\data\2015-building-energy-benchmarking.csv')
df_2016 = pd.read_csv(r'E:\t\Cours Open Classeroom\DATA SCIENCE\Projet 4\data\2016-building-energy-benchmarking.csv')
# -
df_2015 = pd.read_csv(r'C:\Users\t.favrel\Downloads\data p4\2015-building-energy-benchmarking.csv')
df_2016 = pd.read_csv(r'C:\Users\t.favrel\Downloads\data p4\2016-building-energy-benchmarking.csv')
# + id="VxldSU69LMzO"
# Load the Socrata metadata (column descriptions, etc.) shipped with each CSV
with open(r'D:\Work\OC_DS_P4\socrata_metadata_2015-building-energy-benchmarking.json') as json_data:
    metaData_2015 = json.load(json_data)
with open(r'D:\Work\OC_DS_P4\socrata_metadata_2016-building-energy-benchmarking.json') as json_data:
    metaData_2016 = json.load(json_data)
# + id="RG3lBCAMLMzP"
# Column-level metadata (name, description, ...) as DataFrames
meta_col_2015 = pd.DataFrame.from_dict(metaData_2015['columns'])
meta_col_2016 = pd.DataFrame.from_dict(metaData_2016['columns'])
# + [markdown] id="HywoQjGoLMzP"
# ## 2. APERÇU DES DONNÉES
# + id="L-94MSXYLMzP"
def df_display_shape(df, name='_'):
    """Print the number of rows and columns of a DataFrame, optionally labelled with `name`."""
    rows, cols = df.shape
    # '_' is the sentinel default meaning "no label was supplied".
    label = 'Le DataFrame' if name == '_' else name
    print(f'{label} contient {rows} lignes et {cols} colonnes')
# + id="10HuzuaoLMzP" outputId="41ab5cd6-3d5a-4981-c295-eaeec8ebfefa"
# Quick look at the 2015 data: dimensions and first rows
df_display_shape(df_2015)
df_2015.head(4)
# + id="vRcpLl7LLMzR" outputId="ce5fbbe8-e82c-4bef-f2d5-f9066c83edea"
# Same quick look for the 2016 data
df_display_shape(df_2016)
df_2016.head(4)
# + [markdown] id="VUyudRUuLMzR"
# ### 2.a Colonnes
# + id="B1BC774fLMzR"
# Column sets of both years, to compare the two schemas
col_2015 = set(df_2015.columns)
col_2016 = set(df_2016.columns)
# + id="6nIiUe96LMzS"
inter = col_2015.intersection(col_2016) # columns present in both 2015 and 2016
only_2015 = col_2015.difference(col_2016) # columns only in 2015
only_2016 = col_2016.difference(col_2015) # columns only in 2016
# + id="ien58E1xLMzS" outputId="bc6351e8-0ef8-422f-904e-aa6a796399b5"
print(f"Il y a {len(inter)} colonnes présentes en 2015 et 2016\n\
Il y a {len(only_2015)} colonnes uniquement en 2015 et {len(only_2016)} colonnes uniquement en 2016.")
# + id="Ruli2pxoLMzT" outputId="e07963fe-f6f5-4371-fa19-acbb6219f3ad"
# Display the three column sets
inter, only_2015, only_2016
# + id="668Vn5Z8LMzT" outputId="156228c3-8788-43c1-a9bb-98ddc7b9b40a"
# Inspect the 2015-only 'Zip Codes' column alongside the building id
df_2015.loc[:, ['Zip Codes', 'OSEBuildingID']]
# + id="9IM0RCSFLMzT" outputId="ebe185bf-ff9d-47b5-f310-5d13978c18b6"
# What the metadata says about 'Zip Codes'
meta_col_2015[meta_col_2015['name'] == 'Zip Codes']
# + id="W2dDdipSLMzT" outputId="fe8d7060-097b-4195-beb7-03653d37f22e"
# The 2016 equivalent is called 'ZipCode'
df_2016.loc[:3339, ['ZipCode', 'OSEBuildingID']]
# -
# Align the 2015 emissions column name with the 2016 schema.
# NOTE(review): a later cell performs this same rename again (together with
# GHGEmissionsIntensity) — one of the two is redundant; confirm which to keep.
df_2015.rename(columns={'GHGEmissions(MetricTonsCO2e)' : 'TotalGHGEmissions'}
,inplace=True)
# +
# Split the 'Location' blob into six columns:
# 0=latitude, 1=longitude, 2=address, 3=city, 4=state, 5=zip
Adresse = df_2015['Location'].str.split(',', expand=True)
# Ordered scrubbing passes (column, pattern): each pattern is stripped from
# its column in sequence — prefixes must go before the generic quote strips.
scrub_passes = [
    (0, "{'latitude': '"), (0, "'"),
    (1, "'longitude': '"), (1, "'"),
    (2, "'human_address':"), (2, "'"), (2, '{"address": "'), (2, '"'),
    (3, '"'), (3, 'city: '), (3, ' '),
    (4, '"state": "'), (4, '"'),
    (5, '"zip": "'), (5, '"}'), (5, "'}"),
]
for col, pattern in scrub_passes:
    Adresse[col].replace(pattern, '', regex=True, inplace=True)
# City names arrive fully upper-cased; normalise them.
Adresse[3] = Adresse[3].str.capitalize()
Adresse.head(3)
# -
# Cast latitude (0), longitude (1) and zip code (5) to float64;
# values that cannot be parsed become NaN ('coerce').
for col in (0, 1, 5):
    Adresse[col] = pd.to_numeric(Adresse[col], errors='coerce')
Adresse.info()
# +
#ON réintègre notre adresse séparée en plusieurs champs dans notre df_2015 et on renomme les variables de gaz a effet de serre
df_2015['Latitude'] = Adresse[0]
df_2015['Longitude'] = Adresse[1]
df_2015['Address'] = Adresse[2]
df_2015['City'] = Adresse[3]
df_2015['State'] = Adresse[4]
df_2015['ZipCode'] = Adresse[5]
df_2015.drop(columns =["Location"], inplace = True)
df_2015.rename(columns={'GHGEmissions(MetricTonsCO2e)': 'TotalGHGEmissions', \
'GHGEmissionsIntensity(kgCO2e/ft2)': 'GHGEmissionsIntensity',\
'Comment':'Comments'}, inplace=True)
df_2015.head(1)
# + [markdown] id="jEELrXuPLMzU"
# ### 2.b Lignes - ID
# + id="lz3P1pQOLMzU"
ID_2015 = set(df_2015['OSEBuildingID'])
ID_2016 = set(df_2016['OSEBuildingID'])
# + id="ErvQgh3kLMzU"
ID_inter = ID_2015.intersection(ID_2016) # Dans 2015 et 2016
ID_only_2015 = ID_2015.difference(ID_2016) # Uniquement dans 2015
ID_only_2016 = ID_2016.difference(ID_2015) # Uniquement dans 2016
# + id="zuq_rckMLMzU" outputId="1f69f259-2759-4a48-fb45-3981f5ccf22a"
meta_col_2015.loc[meta_col_2015['name'] == 'OSEBuildingID', 'description']
# + id="raEn0VGzLMzV" outputId="1a67e4aa-a1ba-4169-f401-87baa2da2f5e"
print(f"Il y a {len(ID_inter)} ID présentes en 2015 et 2016\n\
Il y a {len(ID_only_2015)} ID uniquement en 2015 et {len(ID_only_2016)} ID uniquement en 2016.")
# + [markdown] id="VUR8yjz4LMzV"
# ## 3. COMPLÉTUDE
# + id="HeG9dgdeLMzV"
def show_values_on_bars(axs, vertical=True, space=0.4):
    """
    Annotate every bar of a barplot with its (integer) value.

    Parameters
    ----------
    axs : matplotlib Axes or ndarray of Axes
        Axes whose bar patches should be annotated.
    vertical : bool, default True
        True for vertical bars (value printed above the bar),
        False for horizontal bars (value printed to the right).
    space : float, default 0.4
        Gap between the end of the bar and the printed value.
    """
    def _show_on_single_plot(ax):
        # Idiomatic truthiness test instead of `== True` / `== False`.
        if vertical:
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height() + space
                value = int(p.get_height())
                ax.text(_x, _y, value, ha="center")
        else:
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + space
                _y = p.get_y() + p.get_height() / 2
                value = int(p.get_width())
                ax.text(_x, _y, value, ha="left")
    # An ndarray of Axes (from plt.subplots) is annotated element-wise.
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)
# + id="WyW2dyhPLMzV"
def display_filling_of_col(df, line=0, color='#3556C8', figsize=(8, 5), show_values=False):
    """
    Horizontal barplot of the filling rate (%) of each column of *df*,
    most filled first.

    Parameters
    ----------
    df : DataFrame
    line : int, default 0
        If > 0, only the first `line` columns are plotted.
    color : str
        Bar color (hex).
    figsize : tuple
        Figure size passed to plt.subplots.
    show_values : bool, default False
        If True, annotate each bar with its value.
    """
    # df.count() already gives the non-null count per column — no need to
    # build a one-row DataFrame and transpose it.
    counts = df.count().sort_values(ascending=False)
    x = list(counts / df.shape[0] * 100)
    y = list(counts.index)
    fig, ax = plt.subplots(figsize=figsize)
    if line == 0:
        sns.barplot(x=x,
                    y=y,
                    orient='h', color=color)
    else:
        sns.barplot(x=x[:line],
                    y=y[:line],
                    orient='h', color=color)
    if show_values:
        show_values_on_bars(ax, vertical=False)
# + id="uEfbQQK7LMzW"
def filling_rate(dataframe, name='Dataframe'):
    """Calculate and display the global filling rate of *dataframe*.

    Prints the rate and draws a pie chart (filled vs missing data).

    Parameters
    ----------
    dataframe : DataFrame
    name : str, default 'Dataframe'
        Label used in the printed message.
    """
    sns.set()
    nb_rows, nb_columns = dataframe.shape
    nb_data = dataframe.count().sum()
    # Renamed local so it no longer shadows the function itself.
    rate = nb_data / (nb_rows * nb_columns)
    print(f"The global filling rate of the {name} is : {rate:.2%}")
    frequencies = [rate, 1 - rate]
    labels = ["Filled data", "Missing data"]
    fig, ax = plt.subplots(figsize=(5, 5))
    plt.title("Filling of the DataFrame", fontsize=25)
    ax.axis("equal")
    ax.pie(frequencies,
           autopct='%1.2f%%',
           colors=['#6C5DD8', '#94E5E2']
           )
    plt.legend(labels)
# + [markdown] id="kNBhRhjTLMzW"
# ### 3.a 2015
# + id="tCwxmBWFLMzX" outputId="a20c0ec7-d955-4adf-bbf0-fd253843213b"
filling_rate(df_2015)
# + id="rls7YoCrLMzX" outputId="c86cda79-aeed-41bb-a77c-56d01d6f63a8"
display_filling_of_col(df_2015, show_values=True, figsize=(8, 12))
# + [markdown] id="NLtu0EdeLMzX"
# ### 3.b 2016
# + id="NwobomQXLMzX" outputId="bbf50a30-1ef5-4893-9a9f-597b94d4b84c"
filling_rate(df_2016)
# + id="IGgcWvvfLMzY" outputId="e19f035f-6d8f-4e12-ba29-f548ecfa990d"
display_filling_of_col(df_2016, show_values=True, figsize=(8, 12))
# + [markdown] id="ea1CEfyrLMzY"
# ## 4. EXPLICATIONS DES VARIABLES
# + id="f9ZOwnTZLMzY" outputId="22697856-fe86-44a1-fc0c-06ffb5b8e3fe"
inter, only_2015, only_2016
# + [markdown] id="JQVfSFAELMzZ"
# **1. ENERGYSTARScore :
# Note de 1 à 100 qui évalue la performance énergétique globale d'une propriété. Un score de 50 représente la médiane nationale.**
# + id="x9YM4agsLMzZ" outputId="36089ffa-4556-4cc5-865c-eccdacd6aa78"
meta_col_2015[meta_col_2015['name'] == 'ENERGYSTARScore']
# + [markdown] id="SysrRP__LMzZ"
# **2. Electricity(kBtu) :
# La quantité annuelle d'électricité consommée par la propriété sur place, y compris l'électricité achetée au réseau et produite par des systèmes renouvelables sur place, mesurée en milliers d'unités thermiques britanniques (kBtu).**
# + id="MMkbV4dbLMza" outputId="d21a7cd7-fd0c-41db-a672-c68a2e4bdc74"
meta_col_2015[meta_col_2015['name'] == 'Electricity(kBtu)']
# + [markdown] id="lZAN2c0jLMza"
# **3. Electricity(kWh):
# Electricity(kBtu) en kWh.**
# + id="cCScbqtbLMza" outputId="90f94a04-704c-4583-b150-661b944d07a2"
meta_col_2015[meta_col_2015['name'] == 'Electricity(kWh)']
# + [markdown] id="K8XkCIr4LMzb"
# **4. NaturalGas(therms) :
# La quantité annuelle de gaz naturel fourni par les services publics consommée par la propriété, mesurée en thermes.**
# + id="Qkd31EzTLMzb" outputId="53a31649-4a4b-4a4c-b48a-72cdcce4bf4c"
meta_col_2015[meta_col_2015['name'] == 'NaturalGas(therms)']
# + [markdown] id="DnbhBkLDLMzb"
# **5. PropertyGFABuilding(s) :
# Surface totale au sol en pieds carrés entre les surfaces extérieures des murs d’enceinte d’un bâtiment. Cela comprend toutes les zones à l'intérieur du ou des bâtiments, telles que l'espace des locataires, les espaces communs, les cages d'escalier, les sous-sols, le stockage, etc.**
# + id="-ONt3OwRLMzc" outputId="e042a4db-da06-48c2-c895-594d8e3f5a86"
meta_col_2015[meta_col_2015['name'] == 'PropertyGFABuilding(s)']
# + [markdown] id="DuopneOvLMzc"
# **6. PropertyGFAParking :
# Espace total en pieds carrés de tous les types de stationnement (entièrement clos, partiellement clos et ouvert).**
# + id="oMgbnXv5LMzc" outputId="f777cf4e-d8b1-4fb5-a216-070cd27f20f4"
meta_col_2015[meta_col_2015['name'] == 'PropertyGFAParking']
# + [markdown] id="c8NkpAnELMzc"
# **7. SiteEUI(kBtu/sf) :
# L'intensité énergétique du site (IUE) est la consommation énergétique du site d'un établissement divisée par sa surface de plancher brute. La consommation d'énergie du site est la quantité annuelle de toute l'énergie consommée par la propriété sur place, comme indiqué sur les factures de services publics. L'IUE du site est mesurée en milliers d'unités thermiques britanniques (kBtu) par pied carré.**
# + id="wUhx-EVwLMzc" outputId="871f3bab-15df-4f20-b5bd-0f61ea7558d6"
meta_col_2015[meta_col_2015['name'] == 'SiteEUI(kBtu/sf)']
# + [markdown] id="x_ZNQWEYLMzd"
# **8. SiteEUIWN(kBtu/sf) :
# L'intensité d'utilisation de l'énergie du site normalisée selon les conditions météorologiques (WN) correspond à l'énergie du site WN d'une propriété divisée par sa surface de plancher brute (en pieds carrés). L'énergie du site WN est la consommation d'énergie du site que la propriété aurait consommée pendant 30 ans dans des conditions météorologiques moyennes. WN Site EUI est mesuré en mesuré en milliers d'unités thermiques britanniques (kBtu) par pied carré.**
# + id="zjw1I3_wLMzd" outputId="c50df3dc-8185-4898-a929-ec3873b8a037"
meta_col_2015[meta_col_2015['name'] == 'SiteEUIWN(kBtu/sf)']
# + [markdown] id="60_VugbkLMzd"
# **9. SiteEnergyUse(kBtu) :
# La quantité annuelle d'énergie consommée par la propriété à partir de toutes les sources d'énergie.**
# + id="MK4oPYN6LMzd" outputId="f934be35-36e3-4bb0-dca7-8610534aee1f"
meta_col_2015[meta_col_2015['name'] == 'SiteEnergyUse(kBtu)']
# + [markdown] id="AKf5T-i9LMze"
# **10. SourceEUI(kBtu/sf) :
# L'intensité d'utilisation de l'énergie à la source (IUE) est la consommation d'énergie à la source d'une propriété divisée par sa surface de plancher brute. La consommation d'énergie à la source est l'énergie annuelle utilisée pour faire fonctionner la propriété, y compris les pertes liées à la production, au transport et à la distribution. La source EUI est mesurée en milliers d'unités thermiques britanniques (kBtu) par pied carré.**
# + id="UfBpgKkrLMze" outputId="a6fe4c6c-a423-4ec8-fb1f-233cba0a10e2"
meta_col_2015[meta_col_2015['name'] == 'SourceEUI(kBtu/sf)']
# + [markdown] id="OFW0nVElLMze"
# **11. SteamUse(kBtu) :
# La quantité annuelle de vapeur consommée par la propriété sur place, mesurée en milliers d'unités thermiques britanniques (kBtu).**
# + id="aUPoeGTeLMze" outputId="eaf11279-64cb-465e-935d-39915bc726f5"
meta_col_2015[meta_col_2015['name'] == 'SteamUse(kBtu)']
# + [markdown] id="pMScK5wjLMzf"
# **12 . (2015) GHGEmissions(MetricTonsCO2e) // (2016) TotalGHGEmissions :
# La quantité totale d'émissions de gaz à effet de serre, y compris le dioxyde de carbone, le méthane et les gaz d'oxyde nitreux rejetés dans l'atmosphère à la suite de la consommation d'énergie de la propriété, mesurée en tonnes métriques d'équivalent dioxyde de carbone. Ce calcul utilise un facteur d'émissions de GES du portefeuille de ressources de production de Seattle CIty Light. Cela utilise le facteur d'émissions 2015 de Seattle City Light de 52,44 lbs CO2e / MWh. Facteur de vapeur Enwave = 170,17 lb CO2e / MMBtu. Facteur gaz provenant de l'EPA Portfolio Manager = 53,11 kg CO2e / MBtu.**
# + id="Zkzs0oLMLMzf" outputId="cd1af2df-d78a-4c8e-f115-3a8384256920"
meta_col_2015[meta_col_2015['name'] == 'GHGEmissions(MetricTonsCO2e)']
# + [markdown] id="s1KyUxg7LMzf"
# **13. (2015) GHGEmissionsIntensity(kgCO2e/ft2) // (2016) GHGEmissionsIntensity:
# Émissions totales de gaz à effet de serre divisées par la superficie de plancher brute de la propriété, mesurées en kilogrammes d'équivalent dioxyde de carbone par pied carré. Ce calcul utilise un facteur d'émissions de GES du portefeuille de ressources génératrices de Seattle City Light**
# + id="8Ge43du2LMzf" outputId="fa38e222-06e7-4516-d8ec-a68ccca0f8a8"
meta_col_2015[meta_col_2015['name'] == 'GHGEmissionsIntensity(kgCO2e/ft2)']
# + [markdown] id="NXTmqwo_LMzg"
# ## 5. DOUBLONS?
# + id="-AG1xecDLMzg" outputId="c1abad83-49f7-44d9-c2cc-02bafc08298e"
print(f"Il y a {df_2015.shape[0] - df_2015.drop_duplicates('OSEBuildingID', keep='first').shape[0]} doublons.")
# + id="1KGgsCZbLMzg" outputId="83b61efc-187f-4854-a879-7f08af25e748"
print(f"Il y a {df_2016.shape[0] - df_2016.drop_duplicates('OSEBuildingID', keep='first').shape[0]} doublons.")
# + [markdown] id="-W81C0rULMzh"
# ## 6. ANALYSE UNIVARIÉE
# + id="8OqwZEn-LMzh" outputId="d1a69487-842a-4ffc-e40c-05317b8ba983"
df_2015[['SiteEnergyUse(kBtu)', 'TotalGHGEmissions']].describe()
# + id="eUzir5tOLMzh" outputId="db73c779-90f7-4e0d-d799-b51562e04889"
df_2016[['SiteEnergyUse(kBtu)', 'TotalGHGEmissions']].describe()
# + [markdown] id="GwTHmxSqLMzh"
# ### 6.a SiteEnergyUse(kBtu) initiale
# + id="JAvkHtOlLMzi" outputId="13f70400-e322-4efa-907d-f48bac88891e"
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=df_2015['SiteEnergyUse(kBtu)'])
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2015', fontsize=20);
# + id="pVA9jYpgLMzi" outputId="35871db1-4b4f-4692-c042-79a14cec3ab2"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2015, x='SiteEnergyUse(kBtu)')
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2015', fontsize=20);
# + id="FODoy1ppLMzi" outputId="6b0910f0-63eb-4a2f-a4ac-5271d3b8b172"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=df_2016['SiteEnergyUse(kBtu)'])
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2016', fontsize=20);
# + id="6_vY2mAeLMzj" outputId="436edcb3-13a5-46ca-c803-4bd8bfb97ad2"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2016, x='SiteEnergyUse(kBtu)')
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2016', fontsize=20);
# + [markdown] id="aN6TUldeLMzk"
# ### Transformation logarithmique
# + id="zcYbE0UvLMzk" outputId="24c50dac-0760-418a-d610-97ad962213a4"
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=np.log(df_2015['SiteEnergyUse(kBtu)'] + 0.5))
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2015', fontsize=20);
# + id="nCgcGj9yLMzk" outputId="99be8e7a-71a1-4b9e-bb1f-c2ea108d4039"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2015, x=np.log(df_2015['SiteEnergyUse(kBtu)'] + 0.5))
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2015', fontsize=20);
# + id="fauILOVNLMzk" outputId="3342caa0-6079-4659-e248-d8be4201fc59"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=np.log(df_2016['SiteEnergyUse(kBtu)'] + 0.5))
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2016', fontsize=20);
# + id="hvFr2CnYLMzp" outputId="197651a7-b1fc-4ecd-c7fe-10b42f97b9e5"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2016, x=np.log(df_2016['SiteEnergyUse(kBtu)'] + 0.5))
plt.title('CONSOMMATION ÉNERGÉTIQUE EN 2016', fontsize=20);
# + [markdown] id="CIPzdB-5LMzp"
# ### Test de normalité (Shapiro-Wilk)
# H0 : La distribution de SiteEnergyUse(kBtu) est normale.
# H1 : La distribution de SiteEnergyUse(kBtu) n'est pas normale.
# + id="cO6Mbr7LLMzq"
def shapiro_wilk(dataframe, feature):
'''This function proceed to the Shapiro-Wilk test (for gaussian distribution).
It takes a dataframe and the name of the feature to test.
It filters for non-null vallues of the feature and print the results.'''
# filtering non-null data for the feature
mask = dataframe[feature].notnull()
data_view = dataframe[mask][feature]
# processing the Shopiro-Wilk test on the filtered data
results = shapiro(data_view)
# Print results
print("Shapiro-Wilk test's statistic value is: W = {}".format(results[0]))
print("Shapiro-Wilk test's p-value is: p = {}".format(results[1]))
print("\nGaussian distribution hypothesis for \'{}\' can be rejected at a risk of {:.2f}%.".format(feature, results[1]*100))
# + id="L1c86YvFLMzq" outputId="2194ea8a-29fb-4383-8685-2ed14dc052db"
shapiro_wilk(df_2015, 'SiteEnergyUse(kBtu)')
# + id="pdbfiaXZLMzq" outputId="9e2e5c23-327b-4c37-f16b-2d2722de2faf"
shapiro_wilk(df_2016, 'SiteEnergyUse(kBtu)')
# + [markdown] id="Vb7aRLekLMzr"
# ### 6.b TotalGHGEmissions
# + id="X0vokteELMzr" outputId="d056fafb-81d5-4bfe-d265-55ae8842f9be"
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=df_2015['TotalGHGEmissions'])
plt.title('ÉMISSION DE CO2 EN 2015', fontsize=20);
# + id="eyfyq4CXLMzr" outputId="2c6bdd19-9a39-4126-bb76-f78f069da0ce"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2015, x='TotalGHGEmissions')
plt.title('ÉMISSION DE CO2 EN 2015', fontsize=20);
# + id="eyQx3z9pLMzs" outputId="420d4612-88c5-45d2-e636-57dc89f67973"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=df_2016['TotalGHGEmissions'])
plt.title('ÉMISSION DE CO2 EN 2016', fontsize=20);
# + id="1PyXOgsiLMzs" outputId="a82f73d0-1eb4-4bc9-a0da-d939a3570784"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2016, x='TotalGHGEmissions')
plt.title('ÉMISSION DE CO2 EN 2016', fontsize=20);
# + [markdown] id="Ana5Q9TULMzt"
# ### Transformation logarithmique
# + id="4sME9njOLMzt" outputId="710b0591-3752-496c-e0a5-40c6c84da8ef"
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=np.log(df_2015['TotalGHGEmissions'] + 1))
plt.title('ÉMISSION DE CO2 EN 2015', fontsize=20);
# + id="ldgnt3-8LMzu" outputId="8badaea0-17ae-43d0-c914-29b98766828d"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2015, x=np.log(df_2015['TotalGHGEmissions'] + 1))
plt.title('ÉMISSION DE CO2 EN 2015', fontsize=20);
# + id="Kn8uBDY6LMzu" outputId="35483316-3bc3-4753-81de-0d80f8b8ceb3"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.boxplot(x=np.log(df_2016['TotalGHGEmissions'] + 1))
plt.title('ÉMISSION DE CO2 EN 2016', fontsize=20);
# + id="xlCEiqvLLMzu" outputId="9d57b1f5-eaf6-4ad7-e816-692047ffd61a"
fig, ax = plt.subplots(figsize=(12, 4))
ax = sns.histplot(df_2016, x=np.log(df_2016['TotalGHGEmissions'] + 1))
plt.title('ÉMISSION DE CO2 EN 2016', fontsize=20);
# + [markdown] id="P7iyip1uLMzu"
# ### Test de normalité (Shapiro-Wilk)
# H0 : La distribution de TotalGHGEmissions est normale.
# H1 : La distribution de TotalGHGEmissions n'est pas normale.
# + id="YetlrypFLMzv" outputId="a2e47153-ee60-45f1-ec93-27572e4c0b1e"
shapiro_wilk(df_2015, 'TotalGHGEmissions')
# + id="8pnJnclKLMzw" outputId="b5b35fb5-d643-48dc-f753-8be1cfe9f37e"
shapiro_wilk(df_2016, 'TotalGHGEmissions')
# + [markdown] id="p3vt3f6aLMzw"
# ## 7. SÉLECTION DES FEATURES
# -
features_selected = {'TotalGHGEmissions'
,'SiteEnergyUse(kBtu)'
,'BuildingType'
,'ComplianceStatus' # état de conformité
,'CouncilDistrictCode' # Propriété District municipal de la ville de Seattle
#,'DataYear' #?
#,'DefaultData'
#,'LargestPropertyUseType' # type de batiment --> primaryPropertyType
#,'LargestPropertyUseTypeGFA' #? --> primaryPropertyType
,'ListOfAllPropertyUseTypes' # all types
,'Neighborhood' # qurtier
,'NumberofBuildings'
,'NumberofFloors'
#,'Outlier'
,'PrimaryPropertyType'
,'PropertyGFABuilding(s)' #surface brute intérieure
,'PropertyGFAParking'
,'PropertyGFATotal'
,'YearBuilt'
,'Latitude'
,'Longitude'
,'OSEBuildingID'}
# ### 7.a Comparaison 2015 - 2016
# +
fig, ax = plt.subplots(figsize=(8, 4.5))
sns.scatterplot(df_2015.loc[df_2015['OSEBuildingID'].map(lambda x: x in ID_inter), 'TotalGHGEmissions']
,df_2016.loc[df_2016['OSEBuildingID'].map(lambda x: x in ID_inter), 'TotalGHGEmissions']
,alpha=0.8)
ax.set_xlabel('Emission de CO2 en 2015', fontsize=15)
ax.set_ylabel('Emission de CO2 en 2016', fontsize=15)
plt.title('EMISSION DE CO2 EN 2015 ET 2016', fontsize=20)
# +
fig, ax = plt.subplots(figsize=(8, 4.5))
sns.scatterplot(df_2015.loc[df_2015['OSEBuildingID'].map(lambda x: x in ID_inter), 'SiteEnergyUse(kBtu)']
,df_2016.loc[df_2016['OSEBuildingID'].map(lambda x: x in ID_inter), 'SiteEnergyUse(kBtu)']
,alpha=0.8)
ax.set_xlabel('Consomation d\'énergie en 2015', fontsize=15)
ax.set_ylabel('Consomation d\'énergie en 2016', fontsize=15)
plt.title('CONSOMATION D\'ÉNERGIE EN 2015 ET 2016', fontsize=20)
# -
df_2015.loc[df_2015['Outlier'].notnull() & df_2015['OSEBuildingID'].map(lambda x: x in ID_inter), 'SiteEnergyUse(kBtu)']
# +
fig, ax = plt.subplots(figsize=(8, 4.5))
sns.scatterplot(df_2015.loc[df_2015['Outlier'].notnull() & df_2015['OSEBuildingID'].map(lambda x: x in ID_inter), 'SiteEnergyUse(kBtu)']
,df_2016.loc[df_2016['Outlier'].notnull() & df_2016['OSEBuildingID'].map(lambda x: x in ID_inter), 'SiteEnergyUse(kBtu)'])
ax.set_xlabel('Consomation d\'énergie en 2015', fontsize=15)
ax.set_ylabel('Consomation d\'énergie en 2016', fontsize=15)
plt.title('CONSOMATION D\'ÉNERGIE EN 2015 ET 2016', fontsize=20)
# +
fig, ax = plt.subplots(figsize=(8, 4.5))
sns.scatterplot(df_2015.loc[df_2015['Outlier'].notnull() & df_2015['OSEBuildingID'].map(lambda x: x in ID_inter), 'TotalGHGEmissions']
,df_2016.loc[df_2016['Outlier'].notnull() & df_2016['OSEBuildingID'].map(lambda x: x in ID_inter), 'TotalGHGEmissions'])
ax.set_xlabel('Consomation d\'énergie en 2015', fontsize=15)
ax.set_ylabel('Consomation d\'énergie en 2016', fontsize=15)
plt.title('CONSOMATION D\'ÉNERGIE EN 2015 ET 2016', fontsize=20)
# -
# ### 7.b Nettoyage des features utiles
# Drop the rows flagged as outliers: keep only rows whose 'Outlier' field is null.
df_2015_clean = df_2015.loc[df_2015['Outlier'].isnull()]
df_2016_clean = df_2016.loc[df_2016['Outlier'].isnull()]
# Report removed/remaining row counts per year (typo fix: "supprimées").
print(f"Lignes de départ 2015 : {df_2015.shape[0]}\n\
Lignes supprimées 2015 : {df_2015.shape[0] - df_2015_clean.shape[0]}\n\
Lignes actuelles 2015 : {df_2015_clean.shape[0]}\n\n\
Lignes de départ 2016 : {df_2016.shape[0]}\n\
Lignes supprimées 2016 : {df_2016.shape[0] - df_2016_clean.shape[0]}\n\
Lignes actuelles 2016 : {df_2016_clean.shape[0]}\n")
#On concatène nos 2 df.
df = pd.concat([df_2015_clean, df_2016_clean], axis=0, join='outer',ignore_index = True)
df.shape
#Récupération des commentaires pour extraire de l'information sur les récentes rénovations
pd.options.display.max_colwidth = 600
Commentaire = df[['OSEBuildingID','DataYear','Comments']][df['Comments'].notna()]
Commentaire
# Drop duplicate : On garde uniquement les données sur une année quand elles sont disponibles sur les deux années.
df.drop_duplicates(subset = ['OSEBuildingID'], keep = 'last', inplace = True)
df.shape[0]
df_selected = df[features_selected]
# On crée une variable où l'on notifie les bâtiments qui ont eu des travaux récents d'amélioration de l'efficacité
df_selected['RecentUpgrade'] = 0
#On sélectionne les index des batiments avec des travaux récents d'amélioration et on leur attribue la valeur Yes
Reno = df_selected[(df_selected['OSEBuildingID'] == 595) | (df_selected['OSEBuildingID'] == 792) | (df_selected['OSEBuildingID'] == 26240)].index.values
df_selected.loc[Reno, 'RecentUpgrade'] = 1
df_selected['Neighborhood'].unique()
# On uniformise l'écriture pour ne pas avoir de doublons.
df_selected['Neighborhood'] = df_selected['Neighborhood'].str.capitalize()
Delridge = df_selected.index[df_selected['Neighborhood']== 'Delridge neighborhoods'].tolist()
df_selected.loc[Delridge, 'Neighborhood'] = 'Delridge'
df_selected['Neighborhood'].nunique()
df_selected = df_selected.join(pd.get_dummies(df_selected.Neighborhood, drop_first=True))
df_selected.groupby('PrimaryPropertyType')['OSEBuildingID'].nunique()
# +
# On réduit le nombre de primary property type pour augmenter l'efficacité des algo de prédiction.
print(" nombre avant transformation : " , df_selected['PrimaryPropertyType'].nunique() )
Restau = df_selected.index[df_selected['PrimaryPropertyType']== 'Restaurant\n'].tolist()
df_selected.loc[Restau, 'PrimaryPropertyType'] = 'Restaurant'
Warehouse = df_selected.index[df_selected['PrimaryPropertyType']== 'Non-Refrigerated Warehouse'].tolist()
df_selected.loc[Warehouse, 'PrimaryPropertyType'] = 'Warehouse'
Office = df_selected.index[(df_selected['PrimaryPropertyType']== 'Large Office') | \
(df_selected['PrimaryPropertyType']== 'Small- and Mid-Sized Office') ].tolist()
df_selected.loc[Office, 'PrimaryPropertyType'] = 'Office'
print(" nombre après transformation : " ,df_selected['PrimaryPropertyType'].nunique())
# -
df_selected = df_selected.join(pd.get_dummies(df_selected.PrimaryPropertyType, drop_first=True))
df_selected['BuildingType'].unique()
df_selected = df_selected.join(pd.get_dummies(df_selected.BuildingType, drop_first=True))
df_selected.shape
df_selected['ComplianceStatus'].unique()
df_selected = df_selected[df_selected['ComplianceStatus'] == 1]
df_selected.shape
df_selected[(df_selected['ComplianceStatus'] != 'Error - Correct Default Data') & (df_selected['ComplianceStatus'] != 'Missing Data')].shape
df_selected = df_selected[(df_selected['ComplianceStatus'] != 'Error - Correct Default Data') & (df_selected['ComplianceStatus'] != 'Missing Data')]
df_selected.loc[df_selected['ComplianceStatus'] == "Compliant", 'ComplianceStatus'] = 1
df_selected.loc[df_selected['ComplianceStatus'] == "Non-Compliant", 'ComplianceStatus'] = 0
df_selected.info()
df_selected['CouncilDistrictCode'].unique()
# -> caté
df_selected['CouncilDistrictCode'] = df_selected['CouncilDistrictCode'].map(lambda x: "CSC " + str(x))
df_selected = df_selected.join(pd.get_dummies(df_selected.CouncilDistrictCode, drop_first=True))
# +
#df_selected['ListOfAllPropertyUseTypes'].unique() #no
# -
df_selected['NumberofBuildings'].isnull().sum()
df_selected.dropna(subset=['NumberofBuildings'], axis=0, inplace=True)
df_selected['NumberofFloors'].isnull().sum()
df_selected.dropna(subset=['NumberofFloors'], axis=0, inplace=True)
df_selected['PropertyGFAParking'].describe()
df_selected['PropertyGFATotal'].describe()
df_selected['YearBuilt']
df_selected['Age'] = 2016 - df_selected['YearBuilt']
df_selected.corr()['SiteEnergyUse(kBtu)'].sort_values(ascending=False)
# skewed_value => squared, log, 1/x, boxcox
# koalas, spark-sklearn, mlflow
df_selected.corr()['TotalGHGEmissions'].sort_values(ascending=False)
df_selected.columns
df_selected.drop(columns='ListOfAllPropertyUseTypes', inplace=True)
df_selected.drop(columns='YearBuilt', inplace=True)
df_selected.info()
num_var = ['Longitude',
'Latitude',
'PropertyGFABuilding(s)',
'NumberofBuildings',
'NumberofFloors',
'PropertyGFATotal',
'PropertyGFAParking',
'SiteEnergyUse(kBtu)',
'TotalGHGEmissions']
# Grid of density curves ("grille des courbes de densité")
def densite(df, lines=3, cols=3):
    """
    Plot a grid of density (KDE) curves for every numeric column of *df*.

    Input : dataframe, number of grid lines, number of grid columns
    Output : a lines x cols grid of density curves; each subplot is titled
    with the column's skewness (rounded to 1 decimal) and the unused grid
    cells are hidden.
    """
    # Keep only the numeric columns; copy so the caller's frame is untouched.
    df = df.select_dtypes(include='number').copy()
    fig, ax = plt.subplots(lines, cols, figsize=(min(15,cols*3),lines*2))
    for i,val in enumerate(df.columns.tolist()):
        # NOTE(review): sns.distplot is deprecated in recent seaborn
        # (kdeplot/bw_adjust is the modern equivalent) — confirm the
        # pinned seaborn version still ships it.
        bp = sns.distplot(df[val], hist=False, ax=ax[i//cols, i%cols], kde_kws={'bw':0.1})
        bp.set_title("skewness : "+str(round(df[val].skew(),1)), fontsize=12)
        bp.set_yticks([])
        imax = i  # index of the last populated subplot
    # Hide the axes of the remaining, empty grid cells.
    # NOTE(review): imax is unbound if df has no numeric column — assumed
    # non-empty by all current callers.
    for i in range(imax+1,lines*cols):
        ax[i//cols, i%cols].axis('off')
    plt.tight_layout()
    plt.show()
densite(df_selected[num_var])
# #### NumberofBuildings
df_selected['NumberofBuildings'].map(lambda x: np.log1p(x)).skew()
df_selected['NumberofBuildings'].map(lambda x: np.sqrt(x)).skew()
df_selected['NumberofBuildings'].map(lambda x: 1 / (x+1)).skew()
from scipy.stats import boxcox
pd.Series(boxcox(df_selected['NumberofBuildings'] + 1, lmbda=None)[0]).skew()
sns.distplot(df_selected['NumberofBuildings'].map(lambda x: 1 / (x+1))
,hist=False, kde_kws={'bw':0.1});
sns.distplot(boxcox(df_selected['NumberofBuildings'] + 1, lmbda=None)[0]
,hist=False, kde_kws={'bw':0.1});
# #### PropertyGFABuilding
df_selected['PropertyGFABuilding(s)'].map(lambda x: np.log(x)).skew()
df_selected['PropertyGFABuilding(s)'].map(lambda x: np.sqrt(x)).skew()
df_selected['PropertyGFABuilding(s)'].map(lambda x: 1 / (x)).skew()
pd.Series(boxcox(df_selected['NumberofFloors'] + 1, lmbda=None)[0]).skew()
len(pd.Series(boxcox(df_selected['NumberofFloors'] + 1, lmbda=None)[0]))
# ####
def skew_selector(df, var, zero=False):
if zero == False:
print(f"log : {df[var].map(lambda x: np.log1p(x)).skew()}\n\
racine : {df[var].map(lambda x: np.sqrt(x)).skew()}\n\
inverse : {df[var].map(lambda x: 1 / (x + 1)).skew()}\n\
boxcoc : {pd.Series(boxcox(df[var] + 1, lmbda=None)[0]).skew()}\n\
initiale : {df[var].skew()}")
else:
print(f"log : {df[var].map(lambda x: np.log(x)).skew()}\n\
racine : {df[var].map(lambda x: np.sqrt(x)).skew()}\n\
inverse : {df[var].map(lambda x: 1 / x).skew()}\n\
boxcoc : {pd.Series(boxcox(df[var], lmbda=None)[0]).skew()}\n\
initiale : {df[var].skew()}")
skew_selector(df_selected, 'NumberofBuildings')
skew_selector(df_selected, 'PropertyGFABuilding(s)')
skew_selector(df_selected, 'NumberofFloors')
skew_selector(df_selected, 'PropertyGFATotal')
skew_selector(df_selected, 'PropertyGFAParking')
skew_selector(df_selected, 'SiteEnergyUse(kBtu)')
skew_selector(df_selected, 'TotalGHGEmissions')
df_final = df_selected.copy()
df_selected.to_csv('df.csv', index=False)
df_final.shape
# +
# df_final['NumberofBuildings'] = np.log1p(df_final['NumberofBuildings'])
# df_final['PropertyGFABuilding(s)'] = np.log1p(df_final['PropertyGFABuilding(s)'])
# df_final['NumberofFloors'] = np.log1p(df_final['NumberofFloors'])
# df_final['PropertyGFATotal'] = np.log1p(df_final['PropertyGFATotal'])
# df_final['PropertyGFAParking'] = np.log1p(df_final['PropertyGFAParking'])
# df_final['TotalGHGEmissions'] = np.log1p(df_final['TotalGHGEmissions'])
# df_final['SiteEnergyUse(kBtu)'] = np.log1p(df_final['SiteEnergyUse(kBtu)'])
# +
# df_final['TotalGHGEmissions'] = df_final['TotalGHGEmissions'] + 0.81
# -
df_final['NumberofBuildings'] = boxcox(df_final['NumberofBuildings'] + 1, lmbda=None)[0]
df_final['PropertyGFABuilding(s)'] = boxcox(df_final['PropertyGFABuilding(s)'] + 1, lmbda=None)[0]
df_final['NumberofFloors'] = boxcox(df_final['NumberofFloors'] + 1, lmbda=None)[0]
df_final['PropertyGFATotal'] = boxcox(df_final['PropertyGFATotal'] + 1, lmbda=None)[0]
df_final['PropertyGFAParking'] = boxcox(df_final['PropertyGFAParking'] + 1, lmbda=None)[0]
# df_final['TotalGHGEmissions'] = boxcox(df_final['TotalGHGEmissions'] + 1, lmbda=None)[0]
# df_final['SiteEnergyUse(kBtu)'] = boxcox(df_final['SiteEnergyUse(kBtu)'] + 1, lmbda=None)[0]
df_final['TotalGHGEmissions'] = np.log1p(df_final['TotalGHGEmissions'])
df_final['SiteEnergyUse(kBtu)'] = np.log1p(df_final['SiteEnergyUse(kBtu)'])
df_final.info()
densite(df_final[num_var])
shapiro(df.loc[df['NumberofFloors'].notnull(), 'NumberofFloors'])
shapiro(df_final.loc[df_final['NumberofFloors'].notnull(), 'NumberofFloors'])
shapiro(df.loc[df['NumberofBuildings'].notnull(), 'NumberofBuildings'])
shapiro(df_final.loc[df_final['NumberofBuildings'].notnull(), 'NumberofBuildings'])
df_selected['SiteEnergyUse(kBtu)'].min()
df_selected['TotalGHGEmissions'].sort_values()
df_final['TotalGHGEmissions'].sort_values()
df_final.to_csv('df_clean.csv', index=False)
import dill
dill.dump_session('kernels/p4_explor_kernel.db')
| .ipynb_checkpoints/p4 - Copie-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 12 - contours
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x,y):
    """Height function for the contour demo.

    A polynomial bump (1 - x/2 + x^5 + y^3) damped by a Gaussian envelope
    exp(-x^2 - y^2); works element-wise on NumPy arrays.
    """
    polynomial = 1 - x / 2 + x**5 + y**3
    envelope = np.exp(-x**2 - y**2)
    return polynomial * envelope
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X,Y = np.meshgrid(x, y)
# use plt.contourf to filling contours
# X, Y and value for (X,Y) point
plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
# use plt.contour to add contour lines
C = plt.contour(X, Y, f(X, Y), 8, colors='black', linewidth=.5)
# adding label
plt.clabel(C, inline=True, fontsize=10)
plt.xticks(())
plt.yticks(())
plt.show()
# + pycharm={"name": "#%%\n"}
| matplotlibTUT/plt12_contours.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Frida Kahlo Exhibition
#
# You've been hired to work on a retrospective of <NAME>'s work at a major museum. Your job is to put together the audio tour, but in order to do that you need to create a list of each painting featured in the exhibit, the date it was painted, and its spot in the tour.
#
# Use your knowledge of Python lists to create a master list of each painting, its date, and its audio tour ID.
#
# ## Task 1
# First, create a list called `paintings` and add the following titles to it:
#
# `The Two Fridas, My Dress Hangs Here, Tree of Hope, Self Portrait With Monkeys`
#
paintings = ["The Two Fridas", "My Dress Hangs Here", "Tree of Hope", "Self Portrait With Monkeys"]
paintings
# ## Task 2
#
# Next, create a second list called `dates` and give it the following values:
# `1939, 1933, 1946, 1940`
dates = [1939, 1933, 1946, 1940]
dates
# ## Task 3
# It doesn't do much good to have the paintings without their dates, and vice versa.
# Zip together the two lists so that each painting is paired with its date and resave it to the `paintings` variable. Make sure to convert the zipped object into a list using the `list()` function. Print the results to the terminal to check your work.
paintings = list(zip(paintings, dates))
print (paintings)
# ## Task 4
# There were some last minute additions to the show that we need to add to our list. Append the following paintings to our `paintings` list then re-print to check they were added correctly:
# - 'The Broken Column', 1944
# - 'The Wounded Deer', 1946
# - 'Me and My Doll', 1937
#
# Hint: Make sure to append each painting individually and that you're appending them as tuples, not lists.
# +
# Task 4 asks for three additions; 'The Broken Column' was missing.
paintings.append(('The Broken Column', 1944))
paintings.append(('The Wounded Deer', 1946))
paintings.append(('Me and My Doll', 1937))
print(paintings)
# -
# ## Task 5
# Since each of these paintings is going to be in the audio tour, they each need a unique identification number.
# But before we assign them a number, we first need to check how many paintings there are in total.
#
# Find the length of the `paintings` list.
print(len(paintings))
# ## Task 6
# Use the `range` method to generate a list of identification numbers that starts at 1 and is equal in length to our list of items.
# Save the list to the variable `audio_tour_number` and check your work by printing the list.
# +
# Task 6: one ID per painting — derive the length instead of hard-coding 8,
# so the list stays correct if paintings grows or shrinks.
audio_tour_number = list(range(1, len(paintings) + 1))
print(audio_tour_number)
# -
# ## Task 7
#
# We're finally ready to create our master list.
# Zip the `audio_tour_number` list to the `paintings` list and save it as `master_list`.
#
# Hint: Make sure to convert the zipped object into a list using the `list()` function.
master_list = list(zip(audio_tour_number, paintings))
# ## Task 8
# Print the `master_list` to the terminal.
print(master_list)
| fridakahlo/frida_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: pythonjvsc74a57bd0f91a37acb8b7ec9cf4226bd2c6031b734835a38911f51489b8db8d46edbaac5e
# ---
# # bonlime/keras-deeplab-v3-plus
# +
from tensorflow.keras.models import load_model
from utils.losses import *
# Load the trained DeepLabV3+ (MobileNetV2 backbone) checkpoint.
# `focal_loss` comes from utils.losses via the star import and must be
# registered as a custom object so Keras can deserialize the compiled loss.
net = load_model('/home/paula_wilhelm/Amazing-Semantic-Segmentation/weights/DeepLabV3Plus_based_on_MobileNetV2.h5', custom_objects={'loss': focal_loss})
#net.layers[0].input_shape
# Alternative checkpoints tried previously:
# /home/paula_wilhelm/Amazing-Semantic-Segmentation/checkpoints/DeepLabV3Plus_based_on_MobileNetV2_miou_0.499798_ep_02.h5
# /home/paula_wilhelm/Amazing-Semantic-Segmentation/weights/DeepLabV3Plus_based_on_MobileNetV2.h5
# -
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
# +
# Generates labels using most basic setup. Supports various image sizes. Returns image labels in same format
# as original image. Normalization matches MobileNetV2
# Max training image dimension; inputs are scaled so their longest side fits it.
trained_image_width=512
# MobileNetV2 normalization constant: pixel/127.5 - 1 maps [0, 255] -> [-1, 1].
mean_subtraction_value=127.5
image = np.array(Image.open('/home/paula_wilhelm/Amazing-Semantic-Segmentation/imgs/image1.jpg'))
# Alternative inputs tried previously:
# /home/paula_wilhelm/dataset/test/images/resized_0123.jpg
# /home/paula_wilhelm/Amazing-Semantic-Segmentation/imgs/image1.jpg
# -
# resize to max dimension of images from training dataset
# Scale the image so its longest side matches the training resolution.
# Note: w, h, pad_x, pad_y are reused by the un-padding cell further down.
w, h, _ = image.shape
scale = float(trained_image_width) / max(w, h)
# PIL's resize expects (width, height), i.e. (cols, rows).
resized_image = np.array(
    Image.fromarray(image.astype('uint8')).resize((int(scale * h), int(scale * w)))
)
# Normalize to [-1, 1], matching the MobileNetV2 training preprocessing.
resized_image = (resized_image / mean_subtraction_value) - 1.
# Zero-pad bottom/right so the array is square at the training size.
pad_x = int(trained_image_width - resized_image.shape[0])
pad_y = int(trained_image_width - resized_image.shape[1])
resized_image = np.pad(resized_image, ((0, pad_x), (0, pad_y), (0, 0)), mode='constant')
# make prediction
from model import Deeplabv3
# Instantiate the DeepLabV3+ model with its default arguments
# (default weights per the bonlime repo -- presumably pascal_voc;
# confirm against model.py).
deeplab_model = Deeplabv3()
# Add a batch dimension, predict per-pixel class scores, then argmax over
# the channel axis to obtain a 2-D label map.
res = deeplab_model.predict(np.expand_dims(resized_image, 0))
labels = np.argmax(res.squeeze(), -1)
# +
# remove padding and resize back to original image
# Crop away the bottom/right zero padding added before prediction.
# pad_x/pad_y are >= 0 by construction, so slicing to (size - pad) is
# equivalent to the guarded negative-index crop.
n_rows, n_cols = labels.shape
labels = labels[: n_rows - pad_x, : n_cols - pad_y]
# Resize back to the original (h, w) -- PIL takes (width, height).
labels = np.array(Image.fromarray(labels.astype('uint8')).resize((h, w)))
plt.imshow(labels)
#plt.waitforbuttonpress()
# -
import cv2
# Sanity check: inspect the shape of a previously saved prediction image.
img = cv2.imread('/home/paula_wilhelm/Amazing-Semantic-Segmentation/predictions/resized_0001.jpg')
img.shape
| .ipynb_checkpoints/predict-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regression and Other Stories: Newcomb
#
# Posterior predictive checking of Normal model for Newcomb’s speed of light data. See Chapter 11 in Regression and Other Stories.
import arviz as az
from bambi import Model
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pymc3 as pm
from scipy import stats
import statsmodels.formula.api as sm
# Load Newcomb's speed-of-light measurements (single column `y`).
newcomb = pd.read_csv("https://raw.githubusercontent.com/avehtari/ROS-Examples/master/Newcomb/data/newcomb.txt")
newcomb.head()
# ### Histogram of the data
plt.hist(newcomb.values,bins=30);
# ### Histogram of data with arviz
# Currently raising exception. Issue logged https://github.com/arviz-devs/arviz/issues/1306
# +
# az.plot_dist(newcomb.values, hist_kwargs=dict(bins=30))
# -
# ### Fit a regression model with just the intercept term
# Intercept-only regression: under a normal model the posterior describes
# the data's center (intercept) and spread (residual sd).
model = Model(newcomb)
fit_1 = model.fit('y ~ 1', samples=3000, chains=4)
# ### Simulate from the predictive distribution
# TODO: Add in manual simulation. See below for r code
"""
sims <- as.matrix(fit)
n_sims <- nrow(sims)
n <- length(newcomb$y)
y_rep <- array(NA, c(n_sims, n))
for (s in 1:n_sims)
y_rep[s,] <- rnorm(n, sims[s,1], sims[s,2])
"""
# ### Simulate using built-in function
# Draw replicated datasets from the posterior predictive using the
# underlying PyMC3 model/trace that bambi built.
with model.backend.model:
    posterior_predictive = pm.sample_posterior_predictive(model.backend.trace)
# +
# Get 20 random row numbers
# Choose 20 replicated datasets uniformly at random and histogram each one
# in a 5x4 grid (5 * 4 == n_samples).
n_samples = 20
fig, axes = plt.subplots(5, 4, figsize=(10, 10))
# BUG FIX: scipy's randint support is [low, high), so the upper bound must
# be shape[0] (not shape[0] - 1) for the last replicated row to be
# selectable; the original excluded it (off-by-one).
row_nums = stats.randint(0, posterior_predictive["y"].shape[0]).rvs(n_samples)
for i, row_num in enumerate(row_nums):
    ax = axes.ravel()[i]
    ax.hist(posterior_predictive["y"][row_num], fill=False)
# -
# ### Plot kernel density estimate of data and 100 replications using built-in function
#
# Plot PPC does this in ArviZ although something is failing unfortunately
# https://arviz-devs.github.io/arviz/generated/arviz.plot_ppc.html#arviz.plot_ppc
# Add Posterior Predictive object to Inference Data
# Attach the posterior predictive samples to the InferenceData object so
# that az.plot_ppc can find them alongside the posterior.
az.concat(fit_1, az.from_dict(posterior_predictive=posterior_predictive), inplace=True)
az.plot_ppc(fit_1, num_pp_samples=100)
# ### Plot test statistic for data and replicates
# Something looks wrong here
# +
fig, ax = plt.subplots()
# Test statistic: the minimum of each replicated dataset, compared with the
# observed minimum (Newcomb's famous outliers make the observed min extreme).
# FIX for the "something looks wrong" note above: use *all* replications
# rather than the 20 rows sampled for the earlier plot, so the reference
# distribution is not undersampled.
min_replicated = posterior_predictive["y"].min(axis=1)
min_observed = newcomb["y"].min()
ax.hist(min_replicated, bins=20)
ax.axvline(min_observed)
| ROS/Newcomb/newcomb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# <a id='top'></a>
# ## Power calculations concept
#
# 1. Read in the original pilot study data to determine % qualifiers passed.
# 2. Read in a cleaned, reshaped set of data. This data has removed the failed qualifiers.
# 3. Establish a power calculator function. Test with data from FE pg 93.
# 4. Conduct analysis of control vs nonsense names
# 5. Conduct analysis of control vs terrible names
# 6. Average Treatment Effect is set at .5
# 6. $\alpha$ will be set at .05
# 7. Power will be $\ge$ .8
#
# #### Methods used to increase power in simulations
# 1. Manipulated sample size.
#
# ### Results summary:
#
# __Bottom line: Minimum of 130 surveys needed__
#
# This particular data set had no incorrect qualifying answers. Since none were wrong, I used the qualifier pass rate from the previous data - 77%
#
# While we hit 80% power at 400 actual surveys taken, need to take into account the number of subjects who do not pass the qualifiers. Assuming that is still approximately 77%, we get the following results:
#
# 1. Control v. Nonsense: $\frac{400}{.77} = 519$ (round to 520)
#
# 2. Control v. Terrible: $\frac{400}{.77} = 519$ (round to 520)
#
# This would be 780 observations (260 * 3). Given there are 6 questions per survey, we can calculate that to be $\frac{780}{6} = 130$ subjects needed.
#
# __For us to detect an ATE of .5 at a 95% confidence level 80% of the time we ran this experiment, we would need at least 130 surveys__.
#
#
#
# [Control and Nonsense](#nonsense)
#
# [Control and Terrible](#terrible)
#
# +
library(data.table)
# read in original data to calculate % qualifying questions correct.
pilot.original <- data.table(read.csv("final_test_data.csv"))
# read in the cleaned, reshaped data set (failed qualifiers already removed)
pilot.data <- data.table(read.csv("final_test_data_rs.csv"))
# power calculator function. Given ATE, sigma, alpha, and N,
# calculates the power.
# Power of a two-sided difference-in-means test (formula from Field
# Experiments, p. 93): probability of rejecting H0 at level `alpha` given
# true average treatment effect `ate`, outcome sd `sigma`, and total N
# split evenly between the two arms.
power_calculator <- function(ate=.5, sigma, alpha=0.05, N) {
    lowertail <- (abs(ate)*sqrt(N))/(2*sigma)
    uppertail <- -1*lowertail
    # Two rejection regions: estimate far above or far below zero.
    beta <- pnorm(lowertail- qnorm(1-alpha/2), lower.tail=TRUE) +
        1- pnorm(uppertail- qnorm(1-alpha/2), lower.tail=FALSE)
    return(beta)
}
# function check - inputs are from FE, pg 93. Result = .80 as per book.
print(paste('Power should be .8. Result = ',
            round(power_calculator(5, 20, .05, 500), digits=2)))
# Flag the wrong answers to the three qualifying questions.
pilot.original <-pilot.original[, QID2.wrong:= .(QID2 != "3")]
pilot.original <-pilot.original[, QID3.wrong:= .(QID3 != "Wednesday")]
pilot.original <-pilot.original[, QID4.wrong:= .(QID4 != "Obama")]
# Share of respondents who missed at least one qualifier.
bad.quals <- nrow(pilot.original[(QID2.wrong | QID3.wrong | QID4.wrong)]) / nrow(pilot.original)
print(paste('Percent of wrong qualifiers: ', round(bad.quals, digits=2) * 100))
# Share of respondents who passed all qualifiers.
good.quals <- 1 - bad.quals
print(paste('Percent of correct qualifiers: ', round(good.quals, digits=2) * 100))
# Override with the pass rate observed in the earlier pilot, since this
# data set happened to contain no incorrect responses.
good.quals <- .77 # keeping previous assumption given there were no incorrect responses in this data.
print(paste('Adjusted percent of correct qualifiers: ', round(good.quals, digits=2) * 100))
# -
# <a id='nonsense'></a>
# ## Control and Nonsense Names
#
# [Return to Top](#top)
# +
# Split observations by treatment arm: 0 = control name, 1 = nonsense name.
control.data <- pilot.data[treatment == 0]
treat1.data <- pilot.data[treatment == 1]
# Candidate bootstrap sample sizes to evaluate for power.
sample_sizes <- seq(from=375, to=425, by=25)
# Estimate power for one bootstrap draw of size `sampl`, comparing the
# control group against treatment 1 (nonsense name).
betas <- function(sampl){
    # sample with replacement from the control subset
    x <- control.data[sample(1:nrow(control.data), size=sampl,
                             replace=TRUE)]
    # pooled sd of rating across funding categories.
    # NOTE(review): funding_id == 3 is skipped here while the treatment
    # pool below skips funding_id == 5 -- confirm both omissions are
    # intentional (e.g. categories absent from each subset).
    sigma_x <- sqrt((var(x[funding_id == 0]$rating) +
                     var(x[funding_id == 1]$rating) +
                     var(x[funding_id == 2]$rating) +
                     var(x[funding_id == 4]$rating) +
                     var(x[funding_id == 5]$rating) +
                     var(x[funding_id == 6]$rating) +
                     var(x[funding_id == 7]$rating)) / 7)
    # number of rows in the control sample
    n_x <- nrow(x)
    # sample the treatment 1 (nonsense name) dataset
    y <- treat1.data[sample(1:nrow(treat1.data), size=sampl,
                            replace=TRUE)]
    # pooled sd of rating across funding categories for the treatment group
    sigma_y <- sqrt((var(y[funding_id == 0]$rating) +
                     var(y[funding_id == 1]$rating) +
                     var(y[funding_id == 2]$rating) +
                     var(y[funding_id == 3]$rating) +
                     var(y[funding_id == 4]$rating) +
                     var(y[funding_id == 6]$rating) +
                     var(y[funding_id == 7]$rating)) / 7)
    # Average the two pooled standard deviations.
    # BUG FIX: mean(sigma_x, sigma_y) passed sigma_y as mean()'s `trim`
    # argument and effectively returned sigma_x alone; wrapping the values
    # in c() makes both groups contribute to sigma.
    sigma <- mean(c(sigma_x, sigma_y))
    # number of rows in the nonsense-name sample
    n_y <- nrow(y)
    # total rows across both samples
    n <- n_x + n_y
    # power for this draw
    ctl_t1 <- power_calculator(sigma = sigma, N=n)
    # return the beta (power) value
    ctl_t1
}
# For each candidate sample size: average power over 1000 bootstrap
# replicates, then inflate by the qualifier pass rate (only ~77% of
# recruited subjects clear the qualifying questions).
for (sampl in sample_sizes){
    print(paste('Sample size: ', sampl))
    beta_sims <- replicate(1000, betas(sampl))
    print(paste('Avg Power Calc: ', mean(beta_sims)))
    sample_size_needed = sampl / good.quals
    print(paste('Sample size needed factoring in % bad qualifying answers: ',
                round(sample_size_needed, digits = 0)))
    cat('\n')
}
# -
# <a id='terrible'></a>
# ## Control and Terrible Names
#
# [Return to Top](#top)
# +
# Treatment 2 = terrible name.
treat2.data <- pilot.data[treatment == 2]
# Candidate bootstrap sample sizes to evaluate for power.
sample_sizes <- seq(from=375, to=425, by=25)
# Estimate power for one bootstrap draw of size `sampl`, comparing the
# control group against treatment 2 (terrible name).
betas <- function(sampl){
    # sample with replacement from the control subset
    x <- control.data[sample(1:nrow(control.data), size=sampl,
                             replace=TRUE)]
    # pooled sd of rating across funding categories.
    # NOTE(review): funding_id == 3 is skipped here (7 terms / 7) while
    # the treatment pool below uses all 8 categories (/ 8) -- confirm the
    # asymmetry is intentional.
    sigma_x <- sqrt((var(x[funding_id == 0]$rating) +
                     var(x[funding_id == 1]$rating) +
                     var(x[funding_id == 2]$rating) +
                     var(x[funding_id == 4]$rating) +
                     var(x[funding_id == 5]$rating) +
                     var(x[funding_id == 6]$rating) +
                     var(x[funding_id == 7]$rating)) / 7)
    # number of rows in the control sample
    n_x <- nrow(x)
    # sample the treatment 2 (terrible name) dataset
    y <- treat2.data[sample(1:nrow(treat2.data), size=sampl,
                            replace=TRUE)]
    # pooled sd of rating across all eight funding categories
    sigma_y <- sqrt((var(y[funding_id == 0]$rating) +
                     var(y[funding_id == 1]$rating) +
                     var(y[funding_id == 2]$rating) +
                     var(y[funding_id == 3]$rating) +
                     var(y[funding_id == 4]$rating) +
                     var(y[funding_id == 5]$rating) +
                     var(y[funding_id == 6]$rating) +
                     var(y[funding_id == 7]$rating)) / 8)
    # Average the two pooled standard deviations.
    # BUG FIX: mean(sigma_x, sigma_y) passed sigma_y as mean()'s `trim`
    # argument and effectively returned sigma_x alone; wrapping the values
    # in c() makes both groups contribute to sigma.
    sigma <- mean(c(sigma_x, sigma_y))
    # number of rows in the terrible-name sample
    n_y <- nrow(y)
    # total rows across both samples
    n <- n_x + n_y
    # power for this draw
    ctl_t2 <- power_calculator(sigma = sigma, N=n)
    # return the beta (power) value
    ctl_t2
}
# Same sweep as for the nonsense-name comparison: average power over 1000
# bootstrap replicates per sample size, inflated by the qualifier pass rate.
for (sampl in sample_sizes){
    print(paste('Sample size: ', sampl))
    beta_sims <- replicate(1000, betas(sampl))
    print(paste('Avg Power Calc: ', mean(beta_sims)))
    sample_size_needed = sampl / good.quals
    print(paste('Sample size needed factoring in % bad qualifying answers: ', round(sample_size_needed, digits = 0)))
    cat('\n')
}
| Power Calcs 07.24.16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/partha1189/ml/blob/master/tokenizerImdb_subwords.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="7lOqtMsJxKxI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="87f6f2c1-7151-489c-b6b1-4cb3e0f24f3b"
import tensorflow as tf
print(tf.__version__)
# + id="dL6AYRdrxfpp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350, "referenced_widgets": ["7537c00f6f9942f9ac9e35c1dff3e7fa", "fdd4b1d180a6445eb5623a29350f48d9", "fa8f56a26e3943ba89d4918b38ab4dad", "522379d6ecf44f6fa9d2129900bb134a", "4bbdae57861842fcac3b176a24712d75", "b53fcf15f5f24f8aa55032af110bdd02", "779221c24de142bbbfbde953bcf34297", "<KEY>", "<KEY>", "a66da23dc7ec4c2bafd286da666f0df3", "e4ade67ef0604fda899b040a85923ce9", "03b751aa0fd343c0aed9a477968c8c38", "<KEY>", "27a65ce61c4a4a8d8d85ead68d700467", "<KEY>", "<KEY>", "1c8ee3590b614b02b4ae0458234daee7", "<KEY>", "956ef00942594a4baae265fa2708504f", "32eef88db59e4dbabf00f51fabf74566", "40e6c09cc69a453b9b9db51c98a7c795", "45331ff48be943ea8de4c0396ed121e0", "689dcd240d224e8e8de3c4dd780d181f", "<KEY>", "661e5fa1b13c429fa524135c04f964ed", "fa48c93e79ef4c87bcab5efdfd3a7c90", "<KEY>", "<KEY>", "49664b6e50324240bd9e92c1630d03e4", "0bcae490797d4f33a17fec542e38c4c5", "<KEY>", "ab150d7806624482ad43425670a5a93b", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "af4afd0ca93e4802ba7db07e81917ff4", "<KEY>", "3e9a2d9b346445ffba112b7f0e0d6382", "<KEY>", "<KEY>", "fe32e4618da741bb9940a2d73ae430a9", "<KEY>", "<KEY>", "<KEY>", "201addacac8740089f339d8b87e02704", "<KEY>", "c1ad4cac34604d3e84bfc689172e0d22", "<KEY>", "248d0a9f40ae4960b92d9c4e7dae3b80", "89771388bf2f46129fd40f00307fe400", "<KEY>", "5f36f4ec6f914089ab92a013d3af0e3e", "<KEY>", "0a0702dcf17f48d0bd1f0be48692de0a", "<KEY>", "<KEY>", "38c0fe9f919044f7a7f4735d73f68224", "<KEY>", "<KEY>", "3f495628ac4d45ff94edf9818b928e25", "bdae9dd60b0c44f388b3972c48baaa16"]} outputId="7964d45b-ae19-4150-dc8f-d66dc1b3ecd5"
import tensorflow_datasets as tfds
imdb, info = tfds.load("imdb_reviews/subwords8k", with_info = True, as_supervised = True)
# + id="M1U9E24RyGbr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ddb80780-229f-4d96-a238-91e51d60cd45"
imdb
# + id="mRg7HUTeyqzN" colab_type="code" colab={}
train_data, test_data = imdb['train'], imdb['test']
# + id="kC-r4AsSy0uV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="be9e2e2a-7214-433c-be1d-1169fd652b15"
info
# + id="Gi9Jhihzy79W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="f368d581-7cce-49c4-fd85-92c882dc43b5"
info.features
# + id="yJvkN_UEzFVd" colab_type="code" colab={}
tokenizer = info.features['text'].encoder
# + id="A5VUMsPZ0oQK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b5daf1bd-0c63-480f-bee1-e5edea13a25a"
print(tokenizer.vocab_size)
# + id="SzAsC5iZ0uWF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="99b6355b-b1b0-433c-c5f9-b0fa8f511283"
print(tokenizer.subwords)
# + id="Rh9ze2KC0xJ9" colab_type="code" colab={}
sample_string = 'TensorFlow, from basics to mastery'
# + id="yHR3vdB-02P1" colab_type="code" colab={}
tokenized_string = tokenizer.encode(sample_string)
# + id="ixEt37170_f3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8ee13c49-a2dc-4d58-fbaa-759ecb93e07d"
print ('Tokenized string is {}'.format(tokenized_string))
# + id="-YpDbzsL1BzU" colab_type="code" colab={}
original_string = tokenizer.decode(tokenized_string)
# + id="Ky9Ta6h01N1s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2d419be2-32e7-43eb-f3fd-df1fe25d6759"
print ('The original string: {}'.format(original_string))
# + id="CIIghDb-1PWd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="95209476-5980-48a5-e5da-a8f04a8671d8"
tokenizer.decode([4043])
# + id="h8itwsU81bck" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="d91e6ecd-6dc3-473c-a647-598bcecd9030"
for ts in tokenized_string:
print('{} ----> {}'.format(ts, tokenizer.decode([ts])))
# + id="GZDwbfWT1tB0" colab_type="code" colab={}
# Shuffle the training reviews and bucket the variable-length sequences
# into padded batches.
BUFFER_SIZE = 10000
BATCH_SIZE = 64
train_dataset = train_data.shuffle(BUFFER_SIZE)
# padded_batch pads each batch up to its longest sequence; the explicit
# output-shapes argument uses the TF1-compat API for older TF versions.
train_dataset = train_dataset.padded_batch(BATCH_SIZE, tf.compat.v1.data.get_output_shapes(train_dataset))
test_dataset = test_data.padded_batch(BATCH_SIZE, tf.compat.v1.data.get_output_shapes(test_data))
# + id="EMdG-CEq2SGh" colab_type="code" colab={}
embedding_dim = 64
# + id="Xuj-hIK45-MK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="233035f4-8054-4150-c3e2-a101827fe00e"
# Simple bag-of-subwords classifier for binary sentiment.
model = tf.keras.Sequential([
    # One 64-dim embedding per subword token.
    tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),
    # Average embeddings over the (variable-length) sequence dimension.
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    # Single sigmoid unit -> probability of positive sentiment.
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
# + id="gCj0WvN36vWV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="51c1f959-57a5-4466-e0a2-217c47381fbf"
# Train for 10 epochs; binary cross-entropy matches the sigmoid output
# and the 0/1 sentiment labels.
num_epochs = 10
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
history = model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset)
# + id="iOTS4Hzx6yML" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="fd4a03a6-5c14-4dba-89c6-3baad12c520d"
import matplotlib.pyplot as plt
def plot_graphs(history, string):
    """Plot the training and validation curves for one logged metric."""
    val_key = 'val_' + string
    plt.plot(history.history[string])
    plt.plot(history.history[val_key])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, val_key])
    plt.show()


plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# + id="yBZIUQKh7PUl" colab_type="code" colab={}
| tokenizerImdb_subwords.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# ## Introduction
# + jupyter={"outputs_hidden": true} tags=[]
from IPython.display import YouTubeVideo
YouTubeVideo(id="uTHihJiRELc", width="100%")
# -
# In this chapter, we will look at the relationship between graphs and linear algebra.
#
# The deep connection between these two topics is super interesting,
# and I'd like to show it to you through an exploration of three topics:
#
# 1. Path finding
# 1. Message passing
# 1. Bipartite projections
#
# ## Preliminaries
#
# Before we go deep into the linear algebra piece though,
# we have to first make sure some ideas are clear.
#
# The most important thing that we need
# when treating graphs in linear algebra form
# is the **adjacency matrix**.
# For example, for four nodes joined in a chain:
import networkx as nx

# Four nodes joined in a simple chain: 0-1-2-3.
nodes = list(range(4))
G1 = nx.Graph()
G1.add_nodes_from(nodes)
chain_edges = [(left, right) for left, right in zip(nodes, nodes[1:])]
G1.add_edges_from(chain_edges)
# we can visualize the graph:
nx.draw(G1, with_labels=True)
# and we can visualize its adjacency matrix:
# + tags=[]
import nxviz as nv
m = nv.MatrixPlot(G1)
m.draw()
# -
# and we can obtain the adjacency matrix as a NumPy array:
# + tags=[]
A1 = nx.to_numpy_array(G1, nodelist=sorted(G1.nodes()))
A1
# -
# ### Symmetry
#
# Remember that for an undirected graph,
# the adjacency matrix will be symmetric about the diagonal,
# while for a directed graph,
# the adjacency matrix will be _asymmetric_.
# ## Path finding
#
# In the Paths chapter, we can use the breadth-first search algorithm
# _to find a shortest path between any two nodes_.
#
# As it turns out, using adjacency matrices, we can answer a related question,
# which is _how many paths exist of length K between two nodes_.
#
# To see how, we need to see the relationship between matrix powers and graph path lengths.
#
# Let's take the adjacency matrix above,
# raise it to the second power,
# and see what it tells us.
# + tags=[]
import numpy as np
np.linalg.matrix_power(A1, 2)
# -
# > The diagonal element is the number of path you can take from any node and come back to such node with travelling of 2 (notice that outer node (0, and 3) has 1 while while those inner nodes (1,2) has 2 (i.e., from 1 u can either travel toward 0 or 2 to come back to 1 with 2 traversal))
#
# > This corresponds to `degree` of that node !!
# ### Exercise: adjacency matrix power?
#
# > What do you think the values in the adjacency matrix are related to?
# > If studying in a group, discuss with your neighbors;
# > if working on this alone, write down your thoughts.
# + tags=[]
import sys
if not (r'C:\Users\pui_s\Documents\concordia-bootcamps\Network-Analysis-Made-Simple' in sys.path):
sys.path.insert(0, r'C:\Users\pui_s\Documents\concordia-bootcamps\Network-Analysis-Made-Simple')
from nams.solutions.linalg import adjacency_matrix_power
from nams.functions import render_html
render_html(adjacency_matrix_power())
# -
# ### Higher matrix powers
#
# The semantic meaning of adjacency matrix powers
# is preserved even if we go to higher powers.
# For example, if we go to the 3rd matrix power:
# + tags=[]
np.linalg.matrix_power(A1, 3)
# -
# You should be able to convince yourself that:
#
# 1. There's no way to go from a node back to itself in 3 steps, thus explaining the diagonals, and
# 1. The off-diagonals take on the correct values when you think about them in terms of "ways to go from one node to another".
# ### With directed graphs?
#
# Does the "number of steps" interpretation hold
# with directed graphs?
# Yes it does!
# Let's see it in action.
# + tags=[]
# Directed chain 0 -> 1 -> 2 -> 3.
G2 = nx.DiGraph()
G2.add_nodes_from(nodes)
for tail, head in zip(nodes, nodes[1:]):
    G2.add_edge(tail, head)
nx.draw(G2, with_labels=True)
# -
# ### Exercise: directed graph matrix power
#
# > Convince yourself that the resulting adjacency matrix power
# > contains the same semantic meaning
# > as that for an undirected graph,
# > that is,
# > _the number of ways to go from "row" node to "column" node
# > in K steps_.
# > (I have provided three different matrix powers for you.)
# + tags=[]
A2 = nx.to_numpy_array(G2)
np.linalg.matrix_power(A2, 2)
# -
# > This tells us that there is only 1 path traveling 2 edges from node 0 to node 1
#
# > 0 path from 2 -> 0
# + tags=[]
np.linalg.matrix_power(A2, 3)
# + tags=[]
np.linalg.matrix_power(A2, 4)
# -
# ---
# ## Message Passing
#
# Let's now dive into the second topic here,
# that of message passing.
#
# To show how message passing works on a graph,
# let's start with the directed linear chain,
# as this will make things easier to understand.
#
# ### "Message" representation in matrix form
#
# Our graph adjacency matrix contains nodes ordered in a particular fashion
# along the rows and columns.
# We can also create a "message" matrix $M$,
# using the same ordering of nodes along the rows,
# with columns instead representing a "message"
# that is intended to be "passed" from one node to another:
# Rebuild the directed chain so this section is self-contained.
G2 = nx.DiGraph()
G2.add_nodes_from(nodes)
G2.add_edges_from(zip(nodes, nodes[1:]))
nx.draw(G2, with_labels=True)
# + tags=[]
# Message vector ordered like the adjacency-matrix rows: the message
# starts at node 0.
M = np.array([1, 0, 0, 0])
M
# -
# Notice where the position of the value `1` is - at the first node.
#
# If we take M and matrix multiply it against A2, let's see what we get:
# + tags=[]
msg2 = M @ A2
msg2
# -
# The message has been passed onto the next node!
# And if we pass the message one more time:
# + tags=[]
msg3 = M @ A2 @ A2
msg3
# -
# Now, the message lies on the 3rd node!
#
# We can make an animation to visualize this more clearly.
# _There are comments in the code to explain what's going on!_
# + tags=[]
def propagate(G, msg, n_frames):
    """
    Simulate message passing on graph G for ``n_frames`` steps.

    Intended to be used before or when being passed into the
    anim() function (defined below).

    :param G: A NetworkX Graph.
    :param msg: The initial state of the message.
    :param n_frames: Number of propagation steps to record.
    :returns: A list of message-state vectors, one per time step.
    """
    # One right-multiplication by the adjacency matrix moves the message
    # along every edge; record the state before each step.
    A = nx.to_numpy_array(G)
    msg_states = []
    current_state = msg
    for _ in range(n_frames):
        msg_states.append(current_state)
        current_state = current_state @ A
    return msg_states
# + tags=[]
from IPython.display import HTML
import matplotlib.pyplot as plt
from matplotlib import animation
def update_func(step, nodes, colors):
    """
    The update function for each animation time step.

    :param step: Passed in from matplotlib's FuncAnimation. Must
        be present in the function signature.
    :param nodes: Returned from nx.draw_networkx_nodes(). Is a
        matplotlib collection whose per-node colors can be set.
    :param colors: A list of pre-computed message-state vectors,
        one per frame.
    :returns: The updated node collection (for FuncAnimation blitting).
    """
    # Recolor every node with the message state for this frame.
    nodes.set_array(colors[step].ravel())
    return nodes
def anim(G, initial_state, n_frames=4):
    """
    Animate message passing over graph G.

    :param G: A NetworkX Graph.
    :param initial_state: Initial message vector (one entry per node).
    :param n_frames: Number of animation frames to render.
    :returns: A matplotlib FuncAnimation of the propagation.
    """
    # First, pre-compute the message passing states over all frames.
    colors = propagate(G, initial_state, n_frames)
    # Instantiate a figure
    fig = plt.figure()
    # Precompute node positions so that they stay fixed over the entire animation
    pos = nx.kamada_kawai_layout(G)
    # Draw nodes to screen
    nodes = nx.draw_networkx_nodes(G, pos=pos, node_color=colors[0].ravel(), node_size=20)
    # Draw edges to screen
    ax = nx.draw_networkx_edges(G, pos)
    # Finally, return the animation through matplotlib.
    return animation.FuncAnimation(fig, update_func, frames=range(n_frames), fargs=(nodes, colors))


# Start the message at node 0 and animate four propagation steps.
msg = np.zeros(len(G2))
msg[0] = 1
# Animate the graph with message propagation.
HTML(anim(G2, msg, n_frames=4).to_html5_video())
# -
# ## Bipartite Graphs & Matrices
#
# The section on message passing above assumed unipartite graphs, or at least graphs for which messages can be meaningfully passed between nodes.
#
# In this section, we will look at bipartite graphs.
#
# Recall from before the definition of a bipartite graph:
#
# - Nodes are separated into two partitions (hence 'bi'-'partite').
# - Edges can only occur between nodes of different partitions.
#
# Bipartite graphs have a natural matrix representation, known as the **biadjacency matrix**. Nodes on one partition are the rows, and nodes on the other partition are the columns.
#
# NetworkX's `bipartite` module provides a function for computing the biadjacency matrix of a bipartite graph.
# Let's start by looking at a toy bipartite graph, a "customer-product" purchase record graph, with 4 products and 3 customers. The matrix representation might be as follows:
# + tags=[]
# Biadjacency matrix of the toy purchase graph: rows are the 3 customers,
# columns the 4 products; entry (i, j) is 1 iff customer i bought product j.
purchase_rows = [
    [0, 1, 0, 0],
    [1, 0, 1, 0],
    [1, 1, 1, 1],
]
cp_mat = np.array(purchase_rows)
# -
# From this "bi-adjacency" matrix, one can compute the projection onto the customers, matrix multiplying the matrix with its transpose.
# + tags=[]
c_mat = cp_mat @ cp_mat.T # c_mat means "customer matrix"
c_mat
# -
# What we get is the connectivity matrix of the customers, based on shared purchases.
# The diagonals are the degree of the customers in the original graph,
# i.e. the number of purchases they originally made,
# and the off-diagonals are the connectivity matrix, based on shared products.
# To get the products matrix, we make the transposed matrix the left side of the matrix multiplication.
# + tags=[]
p_mat = cp_mat.T @ cp_mat # p_mat means "product matrix"
p_mat
# -
# You may now try to convince yourself that the diagonals are the number of times a customer purchased that product, and the off-diagonals are the connectivity matrix of the products, weighted by how similar two customers are.
# ### Exercises
#
# In the following exercises, you will now play with a customer-product graph from Amazon. This dataset was downloaded from [UCSD's Julian McAuley's website](http://jmcauley.ucsd.edu/data/amazon/), and corresponds to the digital music dataset.
#
# This is a bipartite graph. The two partitions are:
#
# - `customers`: The customers that were doing the reviews.
# - `products`: The music that was being reviewed.
#
# In the original dataset (see the original JSON in the `datasets/` directory), they are referred to as:
#
# - `customers`: `reviewerID`
# - `products`: `asin`
# + tags=[]
from nams import load_data as cf
G_amzn = cf.load_amazon_reviews()
# -
# Remember that with bipartite graphs, it is useful to obtain nodes from one of the partitions.
# + tags=[]
from nams.solutions.bipartite import extract_partition_nodes
# + tags=[]
customer_nodes = extract_partition_nodes(G_amzn, "customer")
mat = nx.bipartite.biadjacency_matrix(G_amzn, row_order=customer_nodes)
# -
# You'll notice that this matrix is extremely large! There are 5541 customers and 3568 products,
# for a total matrix size of $5541 \times 3568 = 19770288$, but it is stored in a sparse format because only 64706 elements are filled in.
# + tags=[]
mat
# -
# ### Example: finding customers who reviewed the most number of music items.
#
# Let's find out which customers reviewed the most number of music items.
#
# To do so, you can break the problem into a few steps.
#
# First off, we compute the customer projection using matrix operations.
# + tags=[]
customer_mat = mat @ mat.T
# -
# Next, get the diagonals of the customer-customer matrix. Recall here that in `customer_mat`, the diagonals correspond to the degree of the customer nodes in the bipartite matrix.
#
# SciPy sparse matrices provide a `.diagonal()` method that returns the diagonal elements.
# + tags=[]
# Get the diagonal.
degrees = customer_mat.diagonal()
# -
# Finally, find the index of the customer that has the highest degree.
# + tags=[]
cust_idx = np.argmax(degrees)
cust_idx
# -
# We can verify this independently by sorting the customer nodes by degree.
# + tags=[]
import pandas as pd
import janitor
# There's some pandas-fu we need to use to get this correct.
# Build a (customer, num_reviews) table from the bipartite degrees and sort
# descending; rename_column is a pyjanitor extension method.
deg = (
    pd.Series(dict(nx.degree(G_amzn, customer_nodes)))
    .to_frame()
    .reset_index()
    .rename_column("index", "customer")
    .rename_column(0, "num_reviews")
    .sort_values('num_reviews', ascending=False)
)
deg.head()
# -
# Indeed, customer 294 was the one who had the most number of reviews!
# ### Example: finding similar customers
#
# Let's now also compute which two customers are similar, based on shared reviews. To do so involves the following steps:
#
# 1. We construct a sparse matrix consisting of only the diagonals. `scipy.sparse.diags(elements)` will construct a sparse diagonal matrix based on the elements inside `elements`.
# 1. Subtract the diagonals from the customer matrix projection. This yields the customer-customer similarity matrix, which should only consist of the off-diagonal elements of the customer matrix projection.
# 1. Finally, get the indices where the weight (shared number of between the customers is highest. (*This code is provided for you.*)
# + tags=[]
import scipy.sparse as sp
# + tags=[]
# Build a sparse diagonal matrix from the customer degrees.
customer_diags = sp.diags(degrees)
# Remove the diagonal (self-similarity), leaving only the off-diagonal
# shared-purchase weights between distinct customers.
off_diagonals = customer_mat - customer_diags
# Row/column indices of the largest off-diagonal entry = most similar pair.
np.unravel_index(np.argmax(off_diagonals), customer_mat.shape)
# -
# ## Performance: Object vs. Matrices
#
# Finally, to motivate why you might want to use matrices rather than graph objects to compute some of these statistics, let's time the two ways of getting to the same answer.
# ### Objects
#
# Let's first use NetworkX's built-in machinery to find customers that are most similar.
# + tags=[]
from time import time
# Wall-clock timing of the object-based (NetworkX graph) approach.
start = time()
# Compute the projection onto the customer nodes.
G_cust = nx.bipartite.weighted_projected_graph(G_amzn, customer_nodes)
# Identify the most similar customers: the edge with the largest weight.
most_similar_customers = sorted(G_cust.edges(data=True), key=lambda x: x[2]['weight'], reverse=True)[0]
end = time()
print(f'{end - start:.3f} seconds')
print(f'Most similar customers: {most_similar_customers}')
# -
# ### Matrices
#
# Now, let's implement the same thing in matrix form.
# + tags=[]
# Wall-clock timing of the matrix-based approach.
start = time()
# Compute the projection using matrices.
mat = nx.bipartite.matrix.biadjacency_matrix(G_amzn, customer_nodes)
cust_mat = mat @ mat.T
# Identify the most similar customers.
# Fix: this cell previously read the stale `customer_mat` computed in an
# earlier cell instead of the `cust_mat` it just built, so the timed code
# never used its own projection. All references now use `cust_mat`.
degrees = cust_mat.diagonal()
customer_diags = sp.diags(degrees)
off_diagonals = cust_mat - customer_diags
c1, c2 = np.unravel_index(np.argmax(off_diagonals), cust_mat.shape)
end = time()
print(f'{end - start:.3f} seconds')
print(f'Most similar customers: {customer_nodes[c1]}, {customer_nodes[c2]}, {cust_mat[c1, c2]}')
# -
# On a modern PC, the matrix computation should be about 10-50X faster
# using the matrix form compared to the object-oriented form.
# (The web server that is used to build the book
# might not necessarily have the software stack to do this though,
# so the time you see reported might not reflect the expected speedups.)
# I'd encourage you to fire up a Binder session or clone the book locally
# to test out the code yourself.
#
# You may notice that it's much easier to read the "objects" code,
# but the matrix code way outperforms the object code.
# This tradeoff is common in computing, and shouldn't surprise you.
# That said, the speed gain alone is a great reason to use matrices!
# ## Acceleration on a GPU
#
# If your appetite has been whipped up for even more acceleration
# and you have a GPU on your daily compute,
# then you're very much in luck!
#
# The [RAPIDS.AI](https://rapids.ai) project has a package called [cuGraph](https://github.com/rapidsai/cugraph),
# which provides GPU-accelerated graph algorithms.
# As of release 0.16.0, all cuGraph algorithms can accept NetworkX graph objects!
# This came about through online conversations on GitHub and Twitter,
# which for us, personally, speaks volumes to the power of open source projects!
#
# Because cuGraph does presume that you have access to a GPU,
# and because we assume most readers of this book might not have access to one easily,
# we'll delegate teaching how to install and use cuGraph to the cuGraph devs and [their documentation][docs].
# Nonetheless, if you do have the ability to install and use the RAPIDS stack,
# definitely check it out!
#
# [docs]: https://docs.rapids.ai/api/cugraph/stable/api.html
| notebooks/04-advanced/02-linalg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import loadmat
import pyabf
import os
# %matplotlib inline
# # What do we want to do?
# We’ll need to create an operational definition of a "bout":
#
# 1.) We’ll need to set a minimum threshold over baseline in which the bout begins.
#
# - Let’s start with 0.008V.
# - Note: This may need to be actively adjusted per animal given variance in noise each day.
#
# 2.) Once the threshold goes back to baseline (under threshold) we’ll have to set a minimum duration for how long it must stay under threshold for the bout to end.
#
# - Let’s start with 1 second.
#
#
#
# For analysis of each bout, we’ll want metrics generated for:
#
# 1) Duration of bout
#
# 2) Peak velocity
#
# 3) AVG velocity
#
# 4) Peak acceleration
#
# 5) AVG acceleration
#
#
# # Libraries
#
# We'll be using pyabf to import our data directly from abf files, numpy to do any kind of data manipulations, and matplotlib for any visualizations.
# Our modifiable parameters are below. These are `voltage_cutoff`, which is the minimum voltage we want to flag as movement, the `minimum_duration`, which is the minimum duration the signal has to stay under the cutoff in order to end a bout, and `ts_step`, which is the time step in seconds (usually .0001, but different for the non-training data).
# Minimum voltage over baseline that counts as movement (volts).
voltage_cutoff = 0.008
# Minimum time (seconds) the signal must stay below the cutoff for a bout to end.
# NOTE(review): the name is misspelled ("minumum") but is used consistently below.
minumum_duration = 1
ts_step = 0.001 # Time step in seconds
# Samples per second, derived from the time step.
sampling_rate_hz = int(1/ts_step)
sampling_rate_hz
# # Import Data
# First, we're going to use the pyabf library to read the abf file directly. Let's explore the output file a bit.
# Absolute path to the ABF recording analyzed in this notebook.
filepath = 'C:\\Users\\ameet\\Box Sync\\BevanLab\\Projects\\treadmill_analysis\\ABF FILES\\2013_06_03_0005.abf'
# +
abf = pyabf.ABF(filepath)
# Channel 1 of sweep 0 holds the velocity-encoder signal.
abf.setSweep(sweepNumber = 0, channel = 1)
print(abf.sweepY) # displays sweep data (ADC)
print(abf.sweepX) # displays sweep times (seconds)
print(abf.sweepC) # displays command waveform (DAC)
# Negate the trace so movement deflections are positive.
# NOTE(review): the sign convention depends on the encoder wiring -- confirm.
sweepY = -(np.array(abf.sweepY))
sweepX = np.array(abf.sweepX)
# -
# %matplotlib inline
plt.plot(sweepX, sweepY)
# In the plot above, we plot Voltage against Seconds, where we see a single movement bout. Let's see what happens if we just start and end a bout whenever Voltage is above the `voltage_cutoff`.
# +
# Boolean mask: True wherever the encoder voltage exceeds the cutoff.
above_cutoff = sweepY > voltage_cutoff
# Draw an orange vertical line on every above-cutoff sample in the first 30 s.
plt.vlines(np.arange(sampling_rate_hz * 30)[above_cutoff[:sampling_rate_hz * 30].reshape(-1)]/sampling_rate_hz, ymin = 0, ymax= 1.2, color = 'orange')
plt.plot(np.arange(sampling_rate_hz* 30)/sampling_rate_hz, np.abs(sweepY[:sampling_rate_hz * 30]))
plt.title('First 30 Seconds of Velocity Encoder Data')
plt.xlabel('Seconds')
plt.ylabel('Voltage')
plt.show()
# -
# That actually works pretty well, but we have to account for those small gaps - how do we do that? We need to implement the minimum length of time our velocity encoder has to fall below the threshold. This is encoded by the `minimum_duration`. To get the number of timesteps that we need to look out for, we need to multiply `minimum_duration` by `sampling_rate_hz`. Then, for every `TRUE` value in `above_cutoff` (this is a boolean array with `TRUE` if it's above the cutoff and `FALSE` if it's below), we check if the next `minimum_duration` * `sampling_rate_hz` time steps are `FALSE`. If they are, we can stop the movement epoch. If they aren't, we continue until the next `TRUE`, where we start checking again.
# NOTE(review): this cell repeats the plot above unchanged; `above_cutoff`
# is recomputed here so the cell can be re-run independently before the
# gap-bridging loop below mutates it.
above_cutoff = sweepY > voltage_cutoff
plt.vlines(np.arange(sampling_rate_hz * 30)[above_cutoff[:sampling_rate_hz * 30].reshape(-1)]/sampling_rate_hz, ymin = 0, ymax= 1.2, color = 'orange')
plt.plot(np.arange(sampling_rate_hz* 30)/sampling_rate_hz, np.abs(sweepY[:sampling_rate_hz * 30]))
plt.title('First 30 Seconds of Velocity Encoder Data')
plt.xlabel('Seconds')
plt.ylabel('Voltage')
plt.show()
# +
# Bridge short sub-threshold gaps inside a bout: a bout only ends if the
# signal stays below the cutoff for `minumum_duration` seconds.
True_followed_by_false = above_cutoff[:-1] * (1 - above_cutoff[1:]) # this gives us an array of boolean values whenever there is a True followed by a False
indices = np.where(True_followed_by_false == 1)[0] + 1 # this converts the array of boolean values to a list of indices
# For each candidate bout end, look ahead one minimum-duration window.
for i, idx in enumerate(indices):
    """
    We want to check if the next minumum_duration of seconds are
    all False
    """
    end_of_array = above_cutoff.shape[0] - 1
    # Window of samples following the candidate end (clamped to the array).
    curr_slice = above_cutoff[idx: min(idx + int(minumum_duration * sampling_rate_hz), end_of_array)]
    if np.sum(curr_slice) > 0: # the signal crosses the cutoff again within the window, so bridge the gap
        # Fill with True up to the next candidate end (or the window/array
        # end, whichever comes first), merging the two segments into one bout.
        if i + 1 != len(indices):
            end = min((minumum_duration * sampling_rate_hz + 1), (indices[i + 1] - idx))
        else:
            end = min((minumum_duration * sampling_rate_hz + 1), len(above_cutoff) - idx - 1)
        above_cutoff[idx: idx + int(end)] = np.array([True] * int(end))
# -
# # Let's write some plotting code.
def plot_trace(start, end, sampling_rate_hz, above_cutoff, sweepY, save_name = ''):
    """Plot the velocity trace between `start` and `end` (in seconds),
    overlaying an orange vertical line on every sample flagged as in-bout.

    Parameters
    ----------
    start, end : int
        Time window to plot, in seconds.
    sampling_rate_hz : int
        Samples per second; converts seconds to sample indices.
    above_cutoff : array of bool
        Per-sample in-bout mask (after gap bridging).
    sweepY : array of float
        Velocity trace in volts.
    save_name : str
        If non-empty, the figure is also saved to this path.
    """
    # %matplotlib inline
    start_idx = sampling_rate_hz * start
    end_idx = sampling_rate_hz * end
    xaxis = np.arange(sampling_rate_hz * (end - start))
    # Orange lines mark every sample inside a detected bout.
    plt.vlines(start + xaxis[above_cutoff[start_idx:end_idx].reshape(-1)]/sampling_rate_hz, ymin = 0, ymax= .1, color = 'orange')
    plt.plot(start + xaxis/sampling_rate_hz, sweepY[start_idx:end_idx], label = 'Signal')
    plt.title('Velocity Encoder Data from {} to {} seconds'.format(start, end))
    plt.xlabel('Seconds')
    #plt.ion()
    plt.ylabel('Voltage')
    plt.legend()
    if save_name != '':
        plt.savefig(save_name)
    plt.show()
# The function `plot_trace` takes in two arguments in seconds: start and end - these are currently set to 0 and 240 in the block below. The rest of the arguments won't need to be changed.
# %matplotlib inline
plot_trace(0, 240, sampling_rate_hz, above_cutoff, sweepY, save_name = '')
# # Some preliminary before metrics
#
# Before we start calculating metrics, let's make life a bit easier for us. We want to convert our boolean arrays to sets of indices. Effectively, currently, we have an array that is `True` for every time point that is in the epoch and `False` otherwise. Instead, let's get a list of start and end indices - `[start, end, start, end]`
# +
from itertools import accumulate, groupby
# Run-length encode the boolean mask: accumulating the run lengths yields
# the sample index where each True/False run ends, i.e. alternating bout
# boundaries. Prepend 0 when the trace starts inside a bout so the list
# always reads [start, end, start, end, ...].
if above_cutoff[0]:
    indices = [0] + list(accumulate(sum(1 for _ in g) for _,g in groupby(above_cutoff)))
else:
    indices = list(accumulate(sum(1 for _ in g) for _,g in groupby(above_cutoff)))
to_remove = []
for idx in range(len(indices) - 1):
    # NOTE(review): accumulate over positive run lengths is strictly
    # increasing, so this difference can never be negative and the branch
    # is dead. It looks like a minimum-bout-length filter was intended
    # (e.g. `< some_min_samples`) -- confirm before relying on it.
    if idx % 2 == 1 and indices[idx + 1] - indices[idx] < 0:
        to_remove.append(idx)
        to_remove.append(idx + 1)
# Always drop the final boundary (the end of the trace itself).
to_remove.append(len(indices) - 1)
# Pop by index, compensating for the leftward shift caused by earlier pops.
counter = 0
for idx in to_remove:
    indices.pop(idx - counter)
    counter += 1
# -
# Here's an interactive plot - this might error on other computers, let me know if you want to run this and it doesn't work
# +
# %matplotlib widget
# Bout boundaries converted from sample indices to seconds, drawn over the
# full velocity trace.
indices_seconds = [i/sampling_rate_hz for i in indices]
plt.vlines(indices_seconds, color = 'orange', ymin = 0, ymax = .1)
plt.plot(sweepX, sweepY)
# -
# Let's convert our continuous velocity trace to a continuous acceleration trace. This should just be the first difference, i.e. every element subtracted from the subsequent element (over a time step of one sample).
# First difference of the velocity trace, scaled by the sampling rate to
# recover units of volts/second.
acceleration = np.diff(sweepY) * sampling_rate_hz
# %matplotlib inline
plt.plot(sweepX[:-1], acceleration)
def duration_of_all_bouts(indices, rate=None):
    """Return the duration of each bout, in seconds.

    Parameters
    ----------
    indices : sequence of int
        Flat [start, end, start, end, ...] list of sample indices.
    rate : int or float, optional
        Sampling rate in Hz. Defaults to the notebook-global
        `sampling_rate_hz` for backward compatibility; passing it
        explicitly makes the function reusable outside this notebook.
    """
    if rate is None:
        rate = sampling_rate_hz  # fall back to the notebook-global rate
    durations = []
    # Even positions are bout starts, odd positions the matching ends.
    for i in range(0, len(indices) - 1, 2):
        start, end = indices[i], indices[i + 1]
        durations.append((end - start) / rate)
    return durations
def max_velocity_bouts(indices, velocity=None):
    """Return the peak velocity (cm/s) within each bout.

    Parameters
    ----------
    indices : sequence of int
        Flat [start, end, start, end, ...] list of sample indices.
    velocity : sequence of float, optional
        Velocity trace in encoder volts. Defaults to the notebook-global
        `sweepY` for backward compatibility.
    """
    if velocity is None:
        velocity = sweepY  # notebook-global trace
    max_vels = []
    # Even positions are bout starts, odd positions the matching ends.
    for i in range(0, len(indices) - 1, 2):
        start, end = int(indices[i]), int(indices[i + 1])
        # * 100 converts encoder volts to cm/s.
        max_vels.append(max(velocity[start:end]) * 100)
    return max_vels
def avg_velocity_bouts(indices, velocity=None):
    """Return the mean velocity (cm/s) within each bout.

    Parameters
    ----------
    indices : sequence of int
        Flat [start, end, start, end, ...] list of sample indices.
    velocity : sequence of float, optional
        Velocity trace in encoder volts. Defaults to the notebook-global
        `sweepY` for backward compatibility.
    """
    if velocity is None:
        velocity = sweepY  # notebook-global trace
    avg_vels = []
    # Even positions are bout starts, odd positions the matching ends.
    for i in range(0, len(indices) - 1, 2):
        start, end = int(indices[i]), int(indices[i + 1])
        # * 100 converts encoder volts to cm/s.
        avg_vels.append(np.average(velocity[start:end]) * 100)
    return avg_vels
def avg_velocity(sweepY):
    """Mean of the whole velocity trace in cm/s, ignoring NaN samples."""
    volts_to_cm_per_s = 100
    return volts_to_cm_per_s * np.nanmean(sweepY)
def max_velocity(sweepY):
    """Peak of the whole velocity trace, converted from encoder volts to cm/s."""
    peak_volts = max(sweepY)
    return peak_volts * 100
# ### For acceleration, it looks like our data is a bit oversampled, so I've added a quick downsampler. The actual data is 10x less sampled so maybe it'll be fine without this, so I'll add examples with and without
# +
def average_downsampler(data, step = 10):
    """Block-average `data` in windows of `step` samples (pure-Python loop).

    A `step` of 0 returns the input unchanged. The final window may be
    shorter than `step` when the length does not divide evenly.
    """
    if step == 0:
        return data
    window_means = [np.average(data[pos:pos + step])
                    for pos in range(0, len(data), step)]
    return np.array(window_means)
def average_downsampler_numpy(data, step = 10):
    """Block-average `data` in windows of `step` samples via reshape + nanmean.

    The tail is padded with NaN (ignored by `nanmean`) so the length becomes
    a multiple of `step`.

    Bug fix: the original appended `step - len(data) % step` NaNs
    unconditionally, which is a FULL extra window of NaNs whenever the
    length is an exact multiple of `step` -- including the default
    `get_acceleration_vector` call with a factor of 1 -- so the output
    carried a spurious trailing NaN. Padding now happens only when there
    is a remainder.
    """
    data = np.asarray(data, dtype=float)
    pad = (-len(data)) % step  # 0 when the length divides evenly
    if pad:
        data = np.append(data, [np.nan] * pad)
    return np.nanmean(data.reshape(-1, step), axis = 1)
# -
def get_acceleration_vector(velocity, sampling_rate_hz, downsampling_factor = 1):
    """Differentiate a velocity trace into an acceleration trace.

    The trace is first block-averaged by `downsampling_factor` (a factor
    of 1 leaves the sample count unchanged), then first-differenced and
    scaled by the effective sampling rate. The result is always one sample
    shorter than the downsampled input, because it is a first difference.

    `sampling_rate_hz` is the sampling rate of `velocity` in Hz.
    """
    effective_rate = sampling_rate_hz / downsampling_factor
    smoothed = average_downsampler_numpy(velocity, step = downsampling_factor)
    return np.diff(smoothed) * effective_rate
# +
def avg_acceleration(sweepY, downsampling_factor = 10, conversion_factor = 100):
    """Mean acceleration of the full trace in cm/s^2 (NaNs ignored).

    Uses the notebook-global `sampling_rate_hz`.

    Fix: `downsampling_factor` was accepted but ignored (a factor of 1 was
    hard-coded), so the argument had no effect. It is now forwarded, which
    also matches the redefinition of this function later in the notebook.
    """
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor = downsampling_factor)
    return np.nanmean(acceleration) * conversion_factor
def max_acceleration(sweepY, downsampling_factor = 10, conversion_factor = 100):
    """Peak acceleration of the full trace in cm/s^2.

    Uses the notebook-global `sampling_rate_hz`.

    Fix: `downsampling_factor` was accepted but ignored (a factor of 1 was
    hard-coded), so the argument had no effect. It is now forwarded, which
    also matches the redefinition of this function later in the notebook.
    """
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor = downsampling_factor)
    return max(acceleration) * conversion_factor
def max_acc_bouts(indices, downsampling_factor = 10, conversion_factor = 100):
    """Peak acceleration within each bout, in cm/s^2.

    Uses the notebook-global `sweepY` and `sampling_rate_hz`. `indices` is
    the flat [start, end, start, end, ...] list of sample indices; bout
    boundaries are floor-divided by `downsampling_factor` to map them onto
    the downsampled acceleration vector.
    """
    max_acc = []
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor)
    for i in range(len(indices) - 1):
        if i % 2 == 0:  # even positions are bout starts
            start, end = indices[i], indices[i + 1]
            # Rescale sample indices into the downsampled time base.
            start, end = int(start//downsampling_factor), int(end//downsampling_factor)
            if start == end:
                # Bout shorter than one downsampled step: nothing to scan.
                continue
            max_acc.append(max(acceleration[start: end]) * conversion_factor)
    return max_acc
def avg_acc_bouts(indices, downsampling_factor = 10, conversion_factor = 100):
    """Mean acceleration within each bout, in cm/s^2.

    Uses the notebook-global `sweepY` and `sampling_rate_hz`. `indices` is
    the flat [start, end, start, end, ...] list of sample indices; bout
    boundaries are floor-divided by `downsampling_factor` to map them onto
    the downsampled acceleration vector.
    """
    avg_acc = []
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor)
    for i in range(len(indices) - 1):
        if i % 2 == 0:  # even positions are bout starts
            # NOTE(review): the `- 1` on the bout end is asymmetric with
            # max_acc_bouts above -- confirm which endpoint is intended.
            start, end = indices[i], indices[i + 1] - 1
            # Rescale sample indices into the downsampled time base.
            start, end = int(start//downsampling_factor), int(end//downsampling_factor)
            if start == end:
                # Bout shorter than one downsampled step: nothing to average.
                continue
            avg_acc.append(np.average(acceleration[int(start): int(end)]) * conversion_factor)
    return avg_acc
def avg_acceleration(sweepY, downsampling_factor = 10, conversion_factor = 100):
    """Mean acceleration of the full trace in cm/s^2 (NaNs ignored).

    This redefinition in the same cell shadows the earlier
    `avg_acceleration`; it is the version in effect for the calls below.
    """
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor)
    return np.nanmean(acceleration) * conversion_factor
def max_acceleration(sweepY, downsampling_factor = 10, conversion_factor = 100):
    """Peak acceleration of the full trace in cm/s^2.

    This redefinition in the same cell shadows the earlier
    `max_acceleration`; it is the version in effect for the calls below.
    """
    acceleration = get_acceleration_vector(sweepY, sampling_rate_hz, downsampling_factor)
    return max(acceleration) * conversion_factor
# -
# # Velocity
# First, let's look at max velocity and average velocity for both bouts and overall, everything is in $cm/sec$
max_velocity(sweepY)  # overall peak velocity, cm/s
avg_velocity(sweepY)  # overall mean velocity, cm/s
max_velocity_bouts(indices)  # per-bout peak velocities, cm/s
avg_velocity_bouts(indices)  # per-bout mean velocities, cm/s
# # Acceleration
# Now let's move on to acceleration. All acceleration metrics have a variable called `downsampling_factor`, which will change how downsampled the acceleration vector is. `downsampling_factor = 1` keeps the original vector. Everything is in cm/sec^2.
max_acceleration(sweepY, downsampling_factor = 100)  # overall peak acceleration, cm/s^2
avg_acceleration(sweepY, downsampling_factor = 100)  # overall mean acceleration, cm/s^2
max_acc_bouts(indices, downsampling_factor = 10)  # per-bout peak accelerations, cm/s^2
avg_acc_bouts(indices, downsampling_factor = 10)  # per-bout mean accelerations, cm/s^2
# # Duration
#
# Lastly, let's look at the duration of each of the bouts
duration_of_all_bouts(indices)  # per-bout durations, seconds
# # Misc Plots
# %matplotlib inline
# Distribution of per-bout peak velocities.
plt.boxplot(max_velocity_bouts(indices))
plt.title('Max Velocity Bouts')
plt.ylabel('cm/sec')
plt.show()
# +
# %matplotlib inline
# Distribution of per-bout mean velocities.
plt.boxplot(avg_velocity_bouts(indices))
plt.title('Avg Velocity Bouts')
plt.ylabel('cm/sec')
#avg_velocity_bouts(indices)
# -
# Distribution of bout durations.
plt.boxplot(duration_of_all_bouts(indices))
plt.title('Bout Durations')
plt.ylabel('Seconds')
| Velocity Epochs Treadmill.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # データサイエンス100本ノック(構造化データ加工編) - Python
# ## はじめに
# - 初めに以下のセルを実行してください
# - 必要なライブラリのインポートとデータベース(PostgreSQL)からのデータ読み込みを行います
# - pandas等、利用が想定されるライブラリは以下セルでインポートしています
# - その他利用したいライブラリがあれば適宜インストールしてください("!pip install ライブラリ名"でインストールも可能)
# - 処理は複数回に分けても構いません
# - 名前、住所等はダミーデータであり、実在するものではありません
# +
import os
import pandas as pd
import numpy as np
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
import math
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import RandomUnderSampler
# Customer master: code columns are read as strings rather than numbers,
# and the two date columns are parsed up front.
df_customer = pd.read_csv(
    "data/customer.csv",
    dtype={
        "gender_cd": str,
        "postal_cd": str,
    },
    parse_dates=["birth_day", "application_date"]
)
# Category master: every column is a code, so the whole file is read as str.
df_category = pd.read_csv("data/category.csv", dtype=str)
# Product master: the category code columns must stay strings.
df_product = pd.read_csv(
    "data/product.csv",
    dtype={
        "category_major_cd": str,
        "category_medium_cd": str,
        "category_small_cd": str,
    }
)
# Receipt detail (sales transactions).
df_receipt = pd.read_csv("data/receipt.csv")
# Store master; the prefecture code is kept as a string.
df_store = pd.read_csv("data/store.csv", dtype={"prefecture_cd": str})
# Geocode (latitude/longitude) reference data.
df_geocode = pd.read_csv("data/geocode.csv", dtype={"street": str})
# -
# # 演習問題
# ---
# > P-001: レシート明細のデータフレーム(df_receipt)から全項目の先頭10件を表示し、どのようなデータを保有しているか目視で確認せよ。
# ---
# > P-002: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、10件表示させよ。
# ---
# > P-003: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、10件表示させよ。ただし、sales_ymdはsales_dateに項目名を変更しながら抽出すること。
# ---
# > P-004: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# ---
# > P-005: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# > - 売上金額(amount)が1,000以上
# ---
# > P-006: レシート明細データフレーム「df_receipt」から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上数量(quantity)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# > - 売上金額(amount)が1,000以上または売上数量(quantity)が5以上
# ---
# > P-007: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# > - 売上金額(amount)が1,000以上2,000以下
# ---
# > P-008: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# > - 商品コード(product_cd)が"P071401019"以外
# ---
# > P-009: 以下の処理において、出力結果を変えずにORをANDに書き換えよ。
#
# `df_store.query('not(prefecture_cd == "13" | floor_area > 900)')`
# ---
# > P-010: 店舗データフレーム(df_store)から、店舗コード(store_cd)が"S14"で始まるものだけ全項目抽出し、10件だけ表示せよ。
# ---
# > P-011: 顧客データフレーム(df_customer)から顧客ID(customer_id)の末尾が1のものだけ全項目抽出し、10件だけ表示せよ。
# ---
# > P-012: 店舗データフレーム(df_store)から横浜市の店舗だけ全項目表示せよ。
# ---
# > P-013: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の先頭がアルファベットのA〜Fで始まるデータを全項目抽出し、10件だけ表示せよ。
# ---
# > P-014: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の末尾が数字の1〜9で終わるデータを全項目抽出し、10件だけ表示せよ。
# ---
# > P-015: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の先頭がアルファベットのA〜Fで始まり、末尾が数字の1〜9で終わるデータを全項目抽出し、10件だけ表示せよ。
# ---
# > P-016: 店舗データフレーム(df_store)から、電話番号(tel_no)が3桁-3桁-4桁のデータを全項目表示せよ。
# ---
# > P-17: 顧客データフレーム(df_customer)を生年月日(birth_day)で高齢順にソートし、先頭10件を全項目表示せよ。
# ---
# > P-18: 顧客データフレーム(df_customer)を生年月日(birth_day)で若い順にソートし、先頭10件を全項目表示せよ。
# ---
# > P-19: レシート明細データフレーム(df_receipt)に対し、1件あたりの売上金額(amount)が高い順にランクを付与し、先頭10件を抽出せよ。項目は顧客ID(customer_id)、売上金額(amount)、付与したランクを表示させること。なお、売上金額(amount)が等しい場合は同一順位を付与するものとする。
# ---
# > P-020: レシート明細データフレーム(df_receipt)に対し、1件あたりの売上金額(amount)が高い順にランクを付与し、先頭10件を抽出せよ。項目は顧客ID(customer_id)、売上金額(amount)、付与したランクを表示させること。なお、売上金額(amount)が等しい場合でも別順位を付与すること。
# ---
# > P-021: レシート明細データフレーム(df_receipt)に対し、件数をカウントせよ。
# ---
# > P-022: レシート明細データフレーム(df_receipt)の顧客ID(customer_id)に対し、ユニーク件数をカウントせよ。
# ---
# > P-023: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)と売上数量(quantity)を合計せよ。
# ---
# > P-024: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も新しい売上日(sales_ymd)を求め、10件表示せよ。
# ---
# > P-025: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も古い売上日(sales_ymd)を求め、10件表示せよ。
# ---
# > P-026: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も新しい売上日(sales_ymd)と古い売上日を求め、両者が異なるデータを10件表示せよ。
# ---
# > P-027: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の平均を計算し、降順でTOP5を表示せよ。
# ---
# > P-028: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の中央値を計算し、降順でTOP5を表示せよ。
# ---
# > P-029: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに商品コード(product_cd)の最頻値を求めよ。
# ---
# > P-030: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の標本分散を計算し、降順でTOP5を表示せよ。
# ---
# > P-031: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の標本標準偏差を計算し、降順でTOP5を表示せよ。
# ---
# > P-032: レシート明細データフレーム(df_receipt)の売上金額(amount)について、25%刻みでパーセンタイル値を求めよ。
# ---
# > P-033: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の平均を計算し、330以上のものを抽出せよ。
# ---
# > P-034: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに売上金額(amount)を合計して全顧客の平均を求めよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。
#
# ---
# > P-035: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに売上金額(amount)を合計して全顧客の平均を求め、平均以上に買い物をしている顧客を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、データは10件だけ表示させれば良い。
# ---
# > P-036: レシート明細データフレーム(df_receipt)と店舗データフレーム(df_store)を内部結合し、レシート明細データフレームの全項目と店舗データフレームの店舗名(store_name)を10件表示させよ。
# ---
# > P-037: 商品データフレーム(df_product)とカテゴリデータフレーム(df_category)を内部結合し、商品データフレームの全項目とカテゴリデータフレームの小区分名(category_small_name)を10件表示させよ。
# ---
# > P-038: 顧客データフレーム(df_customer)とレシート明細データフレーム(df_receipt)から、各顧客ごとの売上金額合計を求めよ。ただし、買い物の実績がない顧客については売上金額を0として表示させること。また、顧客は性別コード(gender_cd)が女性(1)であるものを対象とし、非会員(顧客IDが'Z'から始まるもの)は除外すること。なお、結果は10件だけ表示させれば良い。
# ---
# > P-039: レシート明細データフレーム(df_receipt)から売上日数の多い顧客の上位20件と、売上金額合計の多い顧客の上位20件を抽出し、完全外部結合せよ。ただし、非会員(顧客IDが'Z'から始まるもの)は除外すること。
# ---
# > P-040: 全ての店舗と全ての商品を組み合わせると何件のデータとなるか調査したい。店舗(df_store)と商品(df_product)を直積した件数を計算せよ。
# ---
# > P-041: レシート明細データフレーム(df_receipt)の売上金額(amount)を日付(sales_ymd)ごとに集計し、前日からの売上金額増減を計算せよ。なお、計算結果は10件表示すればよい。
# ---
# > P-042: レシート明細データフレーム(df_receipt)の売上金額(amount)を日付(sales_ymd)ごとに集計し、各日付のデータに対し、1日前、2日前、3日前のデータを結合せよ。結果は10件表示すればよい。
# ---
# > P-043: レシート明細データフレーム(df_receipt)と顧客データフレーム(df_customer)を結合し、性別(gender)と年代(ageから計算)ごとに売上金額(amount)を合計した売上サマリデータフレーム(df_sales_summary)を作成せよ。性別は0が男性、1が女性、9が不明を表すものとする。
# >
# > ただし、項目構成は年代、女性の売上金額、男性の売上金額、性別不明の売上金額の4項目とすること(縦に年代、横に性別のクロス集計)。また、年代は10歳ごとの階級とすること。
# ---
# > P-044: 前設問で作成した売上サマリデータフレーム(df_sales_summary)は性別の売上を横持ちさせたものであった。このデータフレームから性別を縦持ちさせ、年代、性別コード、売上金額の3項目に変換せよ。ただし、性別コードは男性を'00'、女性を'01'、不明を'99'とする。
# ---
# > P-045: 顧客データフレーム(df_customer)の生年月日(birth_day)は日付型(Date)でデータを保有している。これをYYYYMMDD形式の文字列に変換し、顧客ID(customer_id)とともに抽出せよ。データは10件を抽出すれば良い。
# ---
# > P-046: 顧客データフレーム(df_customer)の申し込み日(application_date)はYYYYMMDD形式の文字列型でデータを保有している。これを日付型(dateやdatetime)に変換し、顧客ID(customer_id)とともに抽出せよ。データは10件を抽出すれば良い。
# ---
# > P-047: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)はYYYYMMDD形式の数値型でデータを保有している。これを日付型(dateやdatetime)に変換し、レシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。
# ---
# > P-048: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)は数値型のUNIX秒でデータを保有している。これを日付型(dateやdatetime)に変換し、レシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。
# ---
# > P-049: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)を日付型(timestamp型)に変換し、"年"だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。
# ---
# > P-050: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)を日付型(timestamp型)に変換し、"月"だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。なお、"月"は0埋め2桁で取り出すこと。データは10件を抽出すれば良い。
# ---
# > P-051: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)を日付型(timestamp型)に変換し、"日"だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。なお、"日"は0埋め2桁で取り出すこと。データは10件を抽出すれば良い。
# ---
# > P-052: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計の上、売上金額合計に対して2000円以下を0、2000円超を1に2値化し、顧客ID、売上金額合計とともに10件表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。
# ---
# > P-053: 顧客データフレーム(df_customer)の郵便番号(postal_cd)に対し、東京(先頭3桁が100〜209のもの)を1、それ以外のものを0に2値化せよ。さらにレシート明細データフレーム(df_receipt)と結合し、全期間において買い物実績のある顧客数を、作成した2値ごとにカウントせよ。
# ---
# > P-054: 顧客データデータフレーム(df_customer)の住所(address)は、埼玉県、千葉県、東京都、神奈川県のいずれかとなっている。都道府県毎にコード値を作成し、顧客ID、住所とともに抽出せよ。値は埼玉県を11、千葉県を12、東京都を13、神奈川県を14とすること。結果は10件表示させれば良い。
# ---
# > P-055: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、その合計金額の四分位点を求めよ。その上で、顧客ごとの売上金額合計に対して以下の基準でカテゴリ値を作成し、顧客ID、売上金額と合計ともに表示せよ。カテゴリ値は上から順に1〜4とする。結果は10件表示させれば良い。
# >
# > - 最小値以上第一四分位未満
# > - 第一四分位以上第二四分位未満
# > - 第二四分位以上第三四分位未満
# > - 第三四分位以上
# ---
# > P-056: 顧客データフレーム(df_customer)の年齢(age)をもとに10歳刻みで年代を算出し、顧客ID(customer_id)、生年月日(birth_day)とともに抽出せよ。ただし、60歳以上は全て60歳代とすること。年代を表すカテゴリ名は任意とする。先頭10件を表示させればよい。
# ---
# > P-057: 前問題の抽出結果と性別(gender)を組み合わせ、新たに性別×年代の組み合わせを表すカテゴリデータを作成せよ。組み合わせを表すカテゴリの値は任意とする。先頭10件を表示させればよい。
# ---
# > P-058: 顧客データフレーム(df_customer)の性別コード(gender_cd)をダミー変数化し、顧客ID(customer_id)とともに抽出せよ。結果は10件表示させれば良い。
# ---
# > P-059: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、合計した売上金額を平均0、標準偏差1に標準化して顧客ID、売上金額合計とともに表示せよ。標準化に使用する標準偏差は、不偏標準偏差と標本標準偏差のどちらでも良いものとする。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。
# ---
# > P-060: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、合計した売上金額を最小値0、最大値1に正規化して顧客ID、売上金額合計とともに表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。
# ---
# > P-061: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、合計した売上金額を常用対数化(底=10)して顧客ID、売上金額合計とともに表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。
# ---
# > P-062: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、合計した売上金額を自然対数化(底=e)して顧客ID、売上金額合計とともに表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。
# ---
# > P-063: 商品データフレーム(df_product)の単価(unit_price)と原価(unit_cost)から、各商品の利益額を算出せよ。結果は10件表示させれば良い。
# ---
# > P-064: 商品データフレーム(df_product)の単価(unit_price)と原価(unit_cost)から、各商品の利益率の全体平均を算出せよ。
# ただし、単価と原価にはNULLが存在することに注意せよ。
# ---
# > P-065: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。ただし、1円未満は切り捨てること。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。
# ---
# > P-066: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。今回は、1円未満を四捨五入すること(0.5については偶数方向の丸めで良い)。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。
# ---
# > P-067: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。今回は、1円未満を切り上げること。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。
# ---
# > P-068: 商品データフレーム(df_product)の各商品について、消費税率10%の税込み金額を求めよ。 1円未満の端数は切り捨てとし、結果は10件表示すれば良い。ただし、単価(unit_price)にはNULLが存在することに注意せよ。
# ---
# > P-069: レシート明細データフレーム(df_receipt)と商品データフレーム(df_product)を結合し、顧客毎に全商品の売上金額合計と、カテゴリ大区分(category_major_cd)が"07"(瓶詰缶詰)の売上金額合計を計算の上、両者の比率を求めよ。抽出対象はカテゴリ大区分"07"(瓶詰缶詰)の購入実績がある顧客のみとし、結果は10件表示させればよい。
# ---
# > P-070: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過日数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。
# ---
# > P-071: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過月数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。1ヶ月未満は切り捨てること。
# ---
# > P-072: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過年数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い。(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。1年未満は切り捨てること。
# ---
# > P-073: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からのエポック秒による経過時間を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。なお、時間情報は保有していないため各日付は0時0分0秒を表すものとする。
# ---
# > P-074: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、当該週の月曜日からの経過日数を計算し、売上日、当該週の月曜日付とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値でデータを保持している点に注意)。
# ---
# > P-075: 顧客データフレーム(df_customer)からランダムに1%のデータを抽出し、先頭から10件データを抽出せよ。
# ---
# > P-076: 顧客データフレーム(df_customer)から性別(gender_cd)の割合に基づきランダムに10%のデータを層化抽出データし、性別ごとに件数を集計せよ。
# ---
# > P-077: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客単位に合計し、合計した売上金額の外れ値を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、ここでは外れ値を平均から3σ以上離れたものとする。結果は10件表示させれば良い。
# ---
# > P-078: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客単位に合計し、合計した売上金額の外れ値を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、ここでは外れ値を第一四分位と第三四分位の差であるIQRを用いて、「第一四分位数-1.5×IQR」よりも下回るもの、または「第三四分位数+1.5×IQR」を超えるものとする。結果は10件表示させれば良い。
# ---
# > P-079: 商品データフレーム(df_product)の各項目に対し、欠損数を確認せよ。
# ---
# > P-080: 商品データフレーム(df_product)のいずれかの項目に欠損が発生しているレコードを全て削除した新たなdf_product_1を作成せよ。なお、削除前後の件数を表示させ、前設問で確認した件数だけ減少していることも確認すること。
# ---
# > P-081: 単価(unit_price)と原価(unit_cost)の欠損値について、それぞれの平均値で補完した新たなdf_product_2を作成せよ。なお、平均値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。
# ---
# > P-082: 単価(unit_price)と原価(unit_cost)の欠損値について、それぞれの中央値で補完した新たなdf_product_3を作成せよ。なお、中央値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。
# ---
# > P-083: 単価(unit_price)と原価(unit_cost)の欠損値について、各商品の小区分(category_small_cd)ごとに算出した中央値で補完した新たなdf_product_4を作成せよ。なお、中央値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。
# ---
# > P-084: 顧客データフレーム(df_customer)の全顧客に対し、全期間の売上金額に占める2019年売上金額の割合を計算せよ。ただし、販売実績のない場合は0として扱うこと。そして計算した割合が0超のものを抽出せよ。 結果は10件表示させれば良い。また、作成したデータにNAやNANが存在しないことを確認せよ。
# ---
# > P-085: 顧客データフレーム(df_customer)の全顧客に対し、郵便番号(postal_cd)を用いて経度緯度変換用データフレーム(df_geocode)を紐付け、新たなdf_customer_1を作成せよ。ただし、複数紐づく場合は経度(longitude)、緯度(latitude)それぞれ平均を算出すること。
#
# ---
# > P-086: 前設問で作成した緯度経度つき顧客データフレーム(df_customer_1)に対し、申込み店舗コード(application_store_cd)をキーに店舗データフレーム(df_store)と結合せよ。そして申込み店舗の緯度(latitude)・経度情報(longitude)と顧客の緯度・経度を用いて距離(km)を求め、顧客ID(customer_id)、顧客住所(address)、店舗住所(address)とともに表示せよ。計算式は簡易式で良いものとするが、その他精度の高い方式を利用したライブラリを利用してもかまわない。結果は10件表示すれば良い。
# $$
# 緯度(ラジアン):\phi \\
# 経度(ラジアン):\lambda \\
# 距離L = 6371 * arccos(sin \phi_1 * sin \phi_2
# + cos \phi_1 * cos \phi_2 * cos(\lambda_1 − \lambda_2))
# $$
# ---
# > P-087: 顧客データフレーム(df_customer)では、異なる店舗での申込みなどにより同一顧客が複数登録されている。名前(customer_name)と郵便番号(postal_cd)が同じ顧客は同一顧客とみなし、1顧客1レコードとなるように名寄せした名寄顧客データフレーム(df_customer_u)を作成せよ。ただし、同一顧客に対しては売上金額合計が最も高いものを残すものとし、売上金額合計が同一もしくは売上実績の無い顧客については顧客ID(customer_id)の番号が小さいものを残すこととする。
# ---
# > P-088: 前設問で作成したデータを元に、顧客データフレームに統合名寄IDを付与したデータフレーム(df_customer_n)を作成せよ。ただし、統合名寄IDは以下の仕様で付与するものとする。
# >
# > - 重複していない顧客:顧客ID(customer_id)を設定
# > - 重複している顧客:前設問で抽出したレコードの顧客IDを設定
# ---
# > P-閑話: df_customer_1, df_customer_nは使わないので削除する。
# ---
# > P-089: 売上実績のある顧客に対し、予測モデル構築のため学習用データとテスト用データに分割したい。それぞれ8:2の割合でランダムにデータを分割せよ。
# ---
# > P-090: レシート明細データフレーム(df_receipt)は2017年1月1日〜2019年10月31日までのデータを有している。売上金額(amount)を月次で集計し、学習用に12ヶ月、テスト用に6ヶ月のモデル構築用データを3セット作成せよ。
# ---
# > P-091: 顧客データフレーム(df_customer)の各顧客に対し、売上実績のある顧客数と売上実績のない顧客数が1:1となるようにアンダーサンプリングで抽出せよ。
# ---
# > P-092: 顧客データフレーム(df_customer)では、性別に関する情報が非正規化の状態で保持されている。これを第三正規化せよ。
# ---
# > P-093: 商品データフレーム(df_product)では各カテゴリのコード値だけを保有し、カテゴリ名は保有していない。カテゴリデータフレーム(df_category)と組み合わせて非正規化し、カテゴリ名を保有した新たな商品データフレームを作成せよ。
# ---
# > P-094: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。
# >
# > - ファイル形式はCSV(カンマ区切り)
# > - ヘッダ有り
# > - 文字コードはUTF-8
# ---
# > P-095: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。
# >
# > - ファイル形式はCSV(カンマ区切り)
# > - ヘッダ有り
# > - 文字コードはCP932
# ---
# > P-096: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。
# >
# > - ファイル形式はCSV(カンマ区切り)
# > - ヘッダ無し
# > - 文字コードはUTF-8
# ---
# > P-097: 先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。
# >
# > - ファイル形式はCSV(カンマ区切り)
# > - ヘッダ有り
# > - 文字コードはUTF-8
# ---
# > P-098: 先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。
# >
# > - ファイル形式はCSV(カンマ区切り)
# > - ヘッダ無し
# > - 文字コードはUTF-8
# ---
# > P-099: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。
# >
# > - ファイル形式はTSV(タブ区切り)
# > - ヘッダ有り
# > - 文字コードはUTF-8
# ---
# > P-100: 先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。
# >
# > - ファイル形式はTSV(タブ区切り)
# > - ヘッダ有り
# > - 文字コードはUTF-8
# # これで100本終わりです。おつかれさまでした!
| docker/work/preprocess_knock_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 12</font>
#
# ## Download: http://github.com/dsacademybr
# Versão da Linguagem Python
# Record the interpreter version for reproducibility.
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# ## Detecção de Emoções em Imagens com Inteligência Artificial
# ## Teste
# !pip install -q tensorflow==1.15.2
from scipy import misc
import numpy as np
import matplotlib.cm as cm
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
from matplotlib import pyplot as plt
import matplotlib as mat
import matplotlib.image as mpimg
from modulos import utils
from modulos.utils import testResult
from tensorflow.python.framework import ops
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
import sklearn as sk
# %matplotlib inline
np.__version__
tf.__version__
mat.__version__
sk.__version__
import warnings
# Silence library warnings and verbose TensorFlow C++ logging for a cleaner demo output.
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Clear any previously built default graph before importing the saved model below.
ops.reset_default_graph()
# Mapping from the model's output class index to a human-readable emotion label.
emotion = {0:'anger',
           1:'disgust',
           2:'fear',
           3:'happy',
           4:'sad',
           5:'surprise',
           6:'neutral'}
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale using ITU-R 601 luma weights."""
    red, green, blue = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    return 0.299 * red + 0.587 * green + 0.114 * blue
# Restore the trained CNN from its checkpoint (TF1 graph-mode API).
sess = tf.InteractiveSession()
new_saver = tf.train.import_meta_graph('modelo/model.ckpt-900.meta')
new_saver.restore(sess, 'modelo/model.ckpt-900')
tf.get_default_graph().as_graph_def()
# Look up the graph's input placeholder and output tensor by name.
x = sess.graph.get_tensor_by_name("input:0")
y_conv = sess.graph.get_tensor_by_name("output:0")
# Load a test image, convert to grayscale and display it.
img = mpimg.imread('images_teste/image05.jpg')
gray = rgb2gray(img)
plt.imshow(gray, cmap = plt.get_cmap('gray'))
plt.show()
# Reshape to the (batch, height, width, channels) layout the network expects.
image_0 = np.resize(gray,(1,48,48,1))
tResult = testResult()
num_evaluations = 50
# Run the forward pass repeatedly and accumulate predicted labels in tResult.
for i in range(0, num_evaluations):
    result = sess.run(y_conv, feed_dict={x:image_0})
    # argmax over the class axis picks the most likely emotion index.
    label = sess.run(tf.argmax(result, 1))
    label = label[0]
    label = int(label)
    tResult.evaluate(label)
tResult.display_result(num_evaluations)
# Para adquirir conhecimento técnico sólido e especializado em Deep Learning, Visão Computacional, Processamento de Linguagem Natural e outros temas relacionados à Inteligência Artificial, confira nosso programa completo: <a href="https://www.datascienceacademy.com.br/pages/formacao-inteligencia-artificial">Formação Inteligência Artificial</a>.
# # Fim
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| pyfund/Cap12/DSA-Python-Cap12-02-Deep-Learning-Teste.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Training and Quality control for nuclear segmentation
#
# StarDist uses a shape representation based on star-convex polygons for nuclei in an image to predict the presence and the shape of these nuclei. This network is based on an adapted U-Net network architecture.
#
# To train and test the network, we use a dataset from the 2018 DSB nuclear segmentation challenge as well as few more nuclei that we have labelled from widefield images of DAPI stained tissue sections.
# +
# import libraries
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import imageio as imio
import os
from glob import glob
from tqdm import tqdm
from tifffile import imread
from csbdeep.utils import Path, normalize
from csbdeep.utils.tf import limit_gpu_memory
from stardist import fill_label_holes, gputools_available
from stardist.matching import matching, matching_dataset
from stardist.models import Config2D, StarDist2D
from src.utlis.data_augmentation import augmenter
from src.utlis.segmentation_stardist_model import segment_objects_stardist2d
from stardist.matching import matching_dataset
# -
os.path.join(os.path.dirname(os.getcwd()),'example_data/nuc_labels.tif')
os.getcwd()
# +
# setup the paths
# Input directories: raw images and ground-truth (GT) label masks,
# split into training and test sets.
path_to_raw_image='/home/pathy_s/Documents/ground_truths/raw_train/'
path_to_GT_labels='/home/pathy_s/Documents/ground_truths/GT_train/'
path_to_raw_image_test='/home/pathy_s/Documents/ground_truths/raw_test/'
path_to_GT_labels_test='/home/pathy_s/Documents/ground_truths/GT_test/'
#temp='/home/pathy_s/Documents/ground_truths/temp'
# Output directories: trained model, QC results, and predicted label images.
path_to_model='/home/pathy_s/Documents/Tissue_Analysis/models/'
output_dir= "/home/pathy_s/Documents/ground_truths/v2/"
path_to_output_segmented_images_train="/home/pathy_s/Documents/ground_truths/Segmented_labels_train/"
path_to_output_segmented_images_test="/home/pathy_s/Documents/ground_truths/Segmented_labels_test/"
#Path(temp).mkdir(parents=True, exist_ok=True)
# Create the output directories if they do not already exist.
Path(output_dir).mkdir(parents=True, exist_ok=True)
Path(path_to_output_segmented_images_train).mkdir(parents=True, exist_ok=True)
Path(path_to_output_segmented_images_test).mkdir(parents=True, exist_ok=True)
# -
# Read in the training data: pairs of raw input images and label-instance masks.
X = sorted(glob(path_to_raw_image +'*.tif'))
Y = sorted(glob(path_to_GT_labels +'*.tif'))
# Sorted globs + identical filenames guarantee image/label pairs stay aligned.
assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))
X = list(map(imread,X))
Y = list(map(imread,Y))
# Percentile-normalize images (1st-99.8th) and fill small holes in the labels.
X = [normalize(x,1,99.8,axis=(0,1)) for x in tqdm(X)]
Y = [fill_label_holes(y) for y in tqdm(Y)]
# Split into train and validation sets (fixed seed for reproducibility;
# ~15% of the images, at least one, go to validation).
rng = np.random.RandomState(42)
ind = rng.permutation(len(X))
n_val = max(1, int(round(0.15 * len(ind))))
ind_train, ind_val = ind[:-n_val], ind[-n_val:]
X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]
X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]
print('number of images: %3d' % len(X))
print('- training: %3d' % len(X_trn))
print('- validation: %3d' % len(X_val))
# +
# Set up the StarDist 2D configuration: 32 rays per star-convex polygon,
# 2x2 prediction grid, single input channel; use the GPU when available.
conf = Config2D (
    n_rays = 32,
    grid = (2,2),
    use_gpu = True and gputools_available(),
    n_channel_in = 1)
# Let TensorFlow grow GPU memory on demand instead of pre-allocating it all.
limit_gpu_memory(None, allow_growth=True)
# Set up and train the model.
model = StarDist2D(conf, name='Nuclear_segmentation_v1', basedir=path_to_model)
history=model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter)
# Optimize the NMS/probability thresholds on the validation set.
model.optimize_thresholds(X_val, Y_val)
# Convert the history.history dict to a pandas DataFrame for plotting/saving.
lossData = pd.DataFrame(history.history)
# -
# ### Quality checks
#
# We check the visualise the loss curves for the training and validation sets to make sure that the training is complete.
# +
# Plot training/validation loss curves on linear and log scales.
# Fix: derive the epoch axis from the recorded history instead of the
# hard-coded range(400), which raised a length-mismatch error whenever
# training ran for any other number of epochs.
epochs = range(len(lossData))
plt.figure(figsize=(15,10))
plt.subplot(2,1,1)
plt.plot(epochs, lossData['loss'], label='Training loss')
plt.plot(epochs, lossData['val_loss'], label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (linear scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.subplot(2,1,2)
plt.semilogy(epochs, lossData['loss'], label='Training loss')
plt.semilogy(epochs, lossData['val_loss'], label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (log scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.show()
# Persist the loss history for later inspection.
lossData.to_csv(output_dir+"training_losses.csv")
# -
# Further, we also obtain object level errors using IoU based metrics
# +
# Perform the segmentation task on the training images with the newly
# trained model.
segment_objects_stardist2d(image_dir = path_to_raw_image,
                           output_dir_labels = path_to_output_segmented_images_train,
                           use_pretrained = False,
                           output_dir_ijroi = False,
                           model_name='Nuclear_segmentation_v1',
                           model_dir = path_to_model)
# Read in the raw images, ground-truth labels, and predicted labels.
X = sorted(glob(path_to_raw_image+"*.tif"))
Y = sorted(glob(path_to_GT_labels+"*.tif"))
Y_pred = sorted(glob(path_to_output_segmented_images_train +"*.tif"))
X = list(map(imread,X))
Y = list(map(imread,Y))
Y_pred = list(map(imread,Y_pred))
# Compute the segmentation efficiency metrics for the whole dataset
# at a range of IoU thresholds.
taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
stats = [matching_dataset(Y, Y_pred, thresh=t, show_progress=False) for t in tqdm(taus)]
# Left panel: object-level quality metrics vs. IoU threshold.
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(15,5))
for m in ('precision', 'recall', 'accuracy', 'f1', 'mean_true_score', 'mean_matched_score', 'panoptic_quality'):
    ax1.plot(taus, [s._asdict()[m] for s in stats], '.-', lw=2, label=m)
ax1.set_xlabel(r'IoU threshold $\tau$')
ax1.set_ylabel('Metric value')
ax1.grid()
ax1.legend()
# Right panel: raw true/false positive and false negative counts.
for m in ('fp', 'tp', 'fn'):
    ax2.plot(taus, [s._asdict()[m] for s in stats], '.-', lw=2, label=m)
ax2.set_xlabel(r'IoU threshold $\tau$')
ax2.set_ylabel('Number #')
ax2.grid()
ax2.legend();
# Save the per-threshold metrics for the training set.
seg_efficiency_tot=pd.DataFrame(stats)
seg_efficiency_tot.to_csv(output_dir+"train_segmentation_efficiencies_whole_dataset.csv")
# -
# Finally, we use a test dataset of images that the model has not seen to evaluate the model
# +
# Perform the segmentation task on the held-out test images.
segment_objects_stardist2d(image_dir = path_to_raw_image_test,
                           output_dir_labels = path_to_output_segmented_images_test,
                           use_pretrained = False,
                           output_dir_ijroi = False,
                           model_name='Nuclear_segmentation_v1',
                           model_dir = path_to_model)
# Read in the raw images, ground-truth labels, and predicted labels.
X = sorted(glob(path_to_raw_image_test+"*.tif"))
Y = sorted(glob(path_to_GT_labels_test+"*.tif"))
Y_pred = sorted(glob(path_to_output_segmented_images_test +"*.tif"))
X = list(map(imread,X))
Y = list(map(imread,Y))
Y_pred = list(map(imread,Y_pred))
# Compute the segmentation efficiency metrics for the whole test dataset
# at a range of IoU thresholds.
taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
stats = [matching_dataset(Y, Y_pred, thresh=t, show_progress=False) for t in tqdm(taus)]
# Left panel: object-level quality metrics vs. IoU threshold.
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(15,5))
for m in ('precision', 'recall', 'accuracy', 'f1', 'mean_true_score', 'mean_matched_score', 'panoptic_quality'):
    ax1.plot(taus, [s._asdict()[m] for s in stats], '.-', lw=2, label=m)
ax1.set_xlabel(r'IoU threshold $\tau$')
ax1.set_ylabel('Metric value')
ax1.grid()
ax1.legend()
# Right panel: raw true/false positive and false negative counts.
for m in ('fp', 'tp', 'fn'):
    ax2.plot(taus, [s._asdict()[m] for s in stats], '.-', lw=2, label=m)
ax2.set_xlabel(r'IoU threshold $\tau$')
ax2.set_ylabel('Number #')
ax2.grid()
ax2.legend();
# Save the results.
# Bug fix: previously the *training* metrics DataFrame (left over from the
# earlier cell) was written to the test CSV; rebuild it from the test-set
# stats before saving.
seg_efficiency_tot=pd.DataFrame(stats)
seg_efficiency_tot.to_csv(output_dir+"test_segmentation_efficiencies_whole_dataset.csv")
# -
# Summary table of key object-level metrics for the whole dataset.
# Bug fix: the IoU threshold list defined above is named `taus`, not
# `thresholds`, so `thresholds.index(0.7)` raised a NameError.
pd.DataFrame( {"Level" : ['whole_dataset'],
               # Recall at the 0.7 IoU threshold.
               "Recall_0.7_IoU" : [seg_efficiency_tot['recall'][taus.index(0.7)]],
               # F1 score averaged over all IoU thresholds.
               "Average_F1" : [np.mean(seg_efficiency_tot['f1'])],
               # Accuracy at the 0.7 IoU threshold.
               "Accuracy" : [seg_efficiency_tot['accuracy'][taus.index(0.7)]]})
| python_notebooks/training_stardist_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## _*H2 ground state energy computation using Iterative QPE*_
#
# This notebook demonstrates using Qiskit Aqua Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using IQPE (Iterative Quantum Phase Estimation) algorithm. It is compared to the same energies as computed by the ExactEigensolver
#
# This notebook populates a dictionary, that is a progammatic representation of an input file, in order to drive the qiskit_aqua_chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop.
#
# This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires.
# +
import numpy as np
import pylab
from qiskit import LegacySimulators
from qiskit_aqua_chemistry import AquaChemistry
import time
# Input dictionary to configure Qiskit Aqua Chemistry for the chemistry problem.
# The 'atom' and 'algorithm' entries are filled in per run inside the loop below.
aqua_chemistry_dict = {
    'driver': {'name': 'PYSCF'},
    'PYSCF': {'atom': '', 'basis': 'sto3g'},
    'operator': {'name': 'hamiltonian', 'transformation': 'full', 'qubit_mapping': 'parity'},
    'algorithm': {'name': ''},
    'initial_state': {'name': 'HartreeFock'},
}
# Template for two H atoms placed symmetrically about the origin; {0} is
# half the inter-atomic distance.
molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
# The two algorithms to compare: IQPE on a simulator vs. the classical
# ExactEigensolver reference.
algorithms = [
    {
        'name': 'IQPE',
        'num_iterations': 16,
        'num_time_slices': 3000,
        'expansion_mode': 'trotter',
        'expansion_order': 1,
    },
    {
        'name': 'ExactEigensolver'
    }
]
# Backend per algorithm (ExactEigensolver needs none).
backends = [
    LegacySimulators.get_backend('qasm_simulator'),
    None
]
start = 0.5  # Start distance
by    = 0.5  # How much to increase distance by
steps = 20   # Number of steps to increase by
# Result arrays: one energy row per algorithm, plus shared HF energies
# and distances, one column per distance step.
energies = np.empty([len(algorithms), steps+1])
hf_energies = np.empty(steps+1)
distances = np.empty(steps+1)
# +
import concurrent.futures
import multiprocessing as mp
import copy
def subrountine(i, aqua_chemistry_dict, d, backend, algorithm):
    """Run one chemistry computation at inter-atomic distance d.

    NOTE(review): the misspelled name "subrountine" is kept as-is because the
    executor-submission cell references it by this name.

    Parameters: i is the distance-step index (returned unchanged so results
    can be matched up after parallel completion), aqua_chemistry_dict is a
    per-call copy of the configuration (mutated here, which is why callers
    pass a deepcopy), d is the full inter-atomic distance, backend is the
    simulator backend (or None), and algorithm is the algorithm sub-dict.

    Returns (i, d, computed energy, Hartree-Fock energy).
    """
    solver = AquaChemistry()
    # d/2 because the molecule template places each atom at +/- half the distance.
    aqua_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2)
    aqua_chemistry_dict['algorithm'] = algorithm
    result = solver.run(aqua_chemistry_dict, backend=backend)
    return i, d, result['energy'], result['hf_energy']
# +
start_time = time.time()
max_workers = max(4, mp.cpu_count())
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
    # Map each future back to its algorithm index. Bug fix: the original
    # collection loop read the loop variable `j` inside as_completed(),
    # but by then `j` is stuck at its final value, so every energy was
    # written into the last algorithm's row of `energies`.
    future_to_alg = {}
    for j in range(len(algorithms)):
        algorithm = algorithms[j]
        backend = backends[j]
        for i in range(steps+1):
            d = start + i*by/steps
            future = executor.submit(
                subrountine,
                i,
                copy.deepcopy(aqua_chemistry_dict),
                d,
                backend,
                algorithm
            )
            future_to_alg[future] = j
    # Collect results as they finish; i and d come back with each result,
    # and the algorithm row is recovered from the future itself.
    for future in concurrent.futures.as_completed(future_to_alg):
        i, d, energy, hf_energy = future.result()
        j = future_to_alg[future]
        energies[j][i] = energy
        hf_energies[i] = hf_energy
        distances[i] = d
print(' --- complete')
print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
print("--- %s seconds ---" % (time.time() - start_time))
# -
# Plot the computed ground-state energy of each algorithm against the
# Hartree-Fock reference, over the range of inter-atomic distances.
pylab.plot(distances, hf_energies, label='Hartree-Fock')
for j in range(len(algorithms)):
    pylab.plot(distances, energies[j], label=algorithms[j]['name'])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground State Energy')
pylab.legend(loc='upper right')
pylab.show()
# Plot the deviation of HF and IQPE from the exact (ExactEigensolver,
# row 1 of `energies`) result.
pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock')
pylab.plot(distances, np.subtract(energies[0], energies[1]), label='IQPE')
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('Energy difference from ExactEigensolver')
pylab.legend(loc='upper right')
pylab.show()
| community/aqua/chemistry/h2_iqpe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Python Crash Course - Part 1 - Fundamentals
# 
#
# [-blueviolet?style=plastic)](https://www.youtube.com/c/teachjing)
#
# **Requires** 
#
# # Introduction
#
# This 📔 notebook takes you through 👨‍🎓 learning the basic fundamentals of Python. It's an 👩‍💻 interactive, self-paced tutorial within a notebook. This course will have you learning variables, data types, and loops in no time. 😋
#
# ## Table of Contents
# - Getting Started
# - Variables
# - Data Types
# - Collections
# - Block Indentation
#
# 
# Be sure to click the table of contents under Menu -> View -> TOC to easily navigate the notebook.
#
# 
# - [Microsoft Learn - Python learning modules] (https://docs.microsoft.com/learn/browse/?terms=python)
# - [Python.org - The Python Standard Library] (https://docs.python.org/3/library/index.html)
# - [Jupyter Notebooks Crash Course] (https://tacc.github.io/CSC2017Institute/docs/day1/jupyter.html)
#
#
# + [markdown] nteract={"transient": {"deleting": false}}
# # Getting Started
#
# If you haven't already, you may want to understand how to use Jupyter notebooks and more specifically Azure Notebooks by using this guide "Getting Started with Azure ML Notebooks and Microsoft Sentinel"
#
# [](https://docs.microsoft.com/azure/sentinel/notebook-get-started)
# <br><br>
# 
# The order on when you run the each cells are important. Some cells will have a dependency on another cell in order to work.<br>
# Make sure you identify if the cell requires other cells to run, otherwise you may run into errors.
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Navigation
#
# <center><b>Keyboard Shortcuts</b></center>
#
# | key | description
# |---|---|
# |🔼🔽|Up/Down keys will navigate cell blocks|
# | `a` | Add a cell above highlighted cell |
# | `b` | Add a cell below highlighted cell |
# | `d` `d` | Delete a cell |
# | `c` | Copy a cell into clipboard |
# | `x` | Cut a cell into clipboard |
# | `v` | Paste a cell below selected cell |
# | `z` | Undo your boo boo|
# | `Enter`| Edit a cell |
# | `Escape` | Escape current cell in order to navigate|
# | `Ctrl` + `Enter` | Runs current cell and stays on the current cell. |
# | `Shift` + `Enter` | Runs current cell and moves once cell down after running |
# | `m` | Convert current cell to `markdown` cell format |
# | `y` | Convert current cell to `code` cell format |
# + [markdown] nteract={"transient": {"deleting": false}}
# Use your table contents to quickly browse to certain topics or areas of interest.
#
# Let's get 😎fancy by showing you how to quickly navigate a notebook. You will notice that whatever cell you're working in is `highlighted`, indicating it's your current or selected cell.
#
# 
# - 🗺 Navigating - So lets press the `escape` key and navigate up and down by using our arrows 🔼 🔽
# - ➕ Adding Cells - Now, lets hit the `a` or `b` key. This will either add a cell above/below your currently selected cell when navigating.
# - ❌ Deleting - Highlight any cell when you are navigating and then press `d` `d`. (Make sure you aren't editing a cell `escape`)
# - 📝 Editing - To edit a cell, just navigate to the cell and press `enter` which puts you in edit mode.
# - ✂ Cutting/Pasting - Highlight a cell and press `c` or `x` to copy/cut a cell. Then navigate to where you want to paste it and press `v`, which will paste the cell below your selected/current cell.
# - Undo - Oops, you made a mistake? 😓 Press z to undo that boo boo 💩.
#
# ☝ Above is a helpful reference guide. Try to get familiar with the ⌨ keyboard shortcuts to help you navigate, add, copy, paste, delete, and run cells.
#
# 
# Think you can try navigating the rest of the notebook without using your mouse 🐭 at all. <br>
# Remember to navigate using your arrows 🔼 🔽 and the `enter`/`escape` key to enter/exit a cell.
# + [markdown] nteract={"transient": {"deleting": false}}
# ## 👋🏾 Hello World
# In the code cell below, enter `"Hello World"` enclosed in some quotation marks.
#
# ```python
# "Hello World"
# ```
#
# This simply just prints/output the `input` you provided which was enclosed double quotes `"`.
# + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
"<NAME>"
# + [markdown] nteract={"transient": {"deleting": false}}
# But sometimes, you may want to print multiple outputs and control/modify the output. So more often you will utilize the `print` function. <code>print("Hello World")</code>
#
# ```python
# print("Hello World")
# ```
#
# 
# Can you modify `Hello World` and print something else out?
# + gather={"logged": 1634574547862} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
print("Hello World")
print("Hello Jing")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Cell types
#
# 📓 Notebook cells can be in two different formats, markdown or code cells.
# - 📃 Markdown - allows you to comment and document your notebook
# - [Markdown Guide](https://www.markdownguide.org/) - The Markdown Guide is a free and open-source reference guide that explains how to use Markdown to format virtually any document.
# - 👩🏾💻 Code Cells - code cells are the actual runnable code.
#
#
# 
# - Press the `escape` key to ensure you are not editing a code cell and then press either `m` or `y` to convert a cell to either format.
# - How would you add a code cell `below` this cell and convert it to a markdown cell using what shortcuts you have learned, ?
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Comments
# You will want to make it a habit to comment your code so you and others can understand what its purpose is.
#
# Single line comments is using a `#` character and any following string after will be ignored
# ```python
# # This is a single line comment.
# print ("Hello")
# ```
# + gather={"logged": 1633410144162} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# This is a single line comment and will be ignored
print ("Hello")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Using Help
# The python `help`🆘 function is used to display the documentation of modules, functions, classes, keywords, etc.
#
# 
# ```python
# help([object])
# ```
#
# 
# ```python
# help(print)
# ```
# + gather={"logged": 1634574759177} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
help("print")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Importing modules
# A module is a file containing Python definitions and statements. There will be times you need to do something specific and most likely someone has already created a module you can use.
# In most cases you can save alot of time if there is a module that already exists you can leverage to help you get to your desired outcome faster 🚀. And who doesn't like to save time ⌚.
#
# -
# ### 👍 Make sure package is installed
# If you ever want to use a module, you first must install it. The most common way is to use a native package manager like `pip` to install the module so you can `import` it. Remember you can't use what you don't have!
#
# Most of the time, to install any module, you can reference the name you're trying to import.
# ```python
# # !pip install <package_name>
# # Example: !pip install numpy
# ```
#
#
# + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# !pip install numpy
# + [markdown] nteract={"transient": {"deleting": false}}
# 
# There is also a magic `%pip` command that will ensure you install the module in the right kernel (rather than the instance of Python that launched the notebook)
#
# Be sure its the **first** line you have on your cell block.
# + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# %pip install numpy
# + [markdown] nteract={"transient": {"deleting": false}}
#
# <div style="background-color:#ffc107">
# <b><p style="color:#000000">
# Sometimes the above command might not work 😫. It could be because of a mismatch in versions, OS, or where the notebook is hosted. In that case, use the following command to ensure pip installs the module on the Python kernel you're running Jupyter Notebooks with.
# </p></b>
# </div>
#
# ```python
# # Install a pip package in the current Jupyter kernel
# import sys
# # # !{sys.executable} -m pip install <package_name>
# ```
#
# Or open a terminal 🖥 session and run this command so pip will load the module in the proper python kernel.
# ```python
# python -m pip install <package_name>
# # Example: python -m pip install numpy
# ```
# + [markdown] nteract={"transient": {"deleting": false}}
# ### ⏬ Import the modules you need
#
# Once you confirm pip has installed the module ✔, those modules can now be used in the current notebook 📒.
#
# You will import any module by using the `import` command along with the `name` of the module you want to import
#
# Sometimes packages have different names than their importable name. `E.g. pip install scikit-learn -> import sklearn`
#
# ```python
# import datetime
# print(datetime.datetime.now())
# # output: 2021-10-15 17:15:45.247946
# ```
#
# If you don't require the whole module library, its always best practice to specify exactly what you want to import
#
# ```python
# from datetime import datetime
# print(datetime.now())
# # output: 2021-10-15 17:15:45.247946
# ```
#
# Notice also the line is shortened because you can call directly what you imported `datetime` instead of the full module path `datetime.datetime`
# + gather={"logged": 1634649336218}
from datetime import datetime
print(datetime.now())
# -
# **Importing module using `as`**
#
# 
# To make it easier to call modules, shorten the module name by using `as` to define an `alias`.
#
# ```python
# from datetime import datetime as d
# print(d.now())
# ```
#
#
# + gather={"logged": 1634575052281}
from datetime import datetime as dt
print(dt.now())
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Restart the Kernel
#
# If you are not able to use the module, it may require restarting the kernel. You can do that by importing `IPython` and restarting it or click on the restart icon 🔃
#
# 
# You will lose all cached variables when you restart the kernel.
# + gather={"logged": 1634333674289} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
import IPython
IPython.Application.instance().kernel.do_shutdown(True) #automatically restarts kernel from cell.
# + [markdown] nteract={"transient": {"deleting": false}}
# # Variables
#
# A variable is a value that can change, depending on conditions or on information passed to the program
#
# 
# - The Python interpreter automatically picks the most suitable built-in data-type for the variable if no type is provided
# - You cannot use python keywords as a variable name. You can see a list of keywords by typing `import keyword; print(keyword.kwlist)`
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Creating variables and assigning values
#
# We first will assign ```Hello World``` to the variables ```s1,s2```. We will then print the variables by referencing it in print.
# Try it below:
# ```python
# s1 = "Hello"
# s2 = "World"
# print(s1,s2)
# ```
# 
# How would you print `Microsoft Sentinel` using what you know?
# + gather={"logged": 1634575257676} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
s1 = "Hello"
s2 = "World"
s3 = "!"
print(s1,s2,s3)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Assigning multiple variables
# - You can assign multiple values to multiple variables in one line.
# - **Note** that there must be the same number of arguments on the right and left sides of the = operator:
#
# 
# ```python
# a, b, c = 1, 2, 3
# print(a, b, c)
# ```
# Exercise: Try assigning an additional variable with value to this list
# + gather={"logged": 1634575444941} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
a, b, c = 1, 2, 3
print(a, b, c)
# + [markdown] nteract={"transient": {"deleting": false}}
#  Remember pretend the `mouse 🖱` has feelings and doesn't want to be touched, Practice using your `keyboard ⌨` **only**.
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Temporary variables
#
# It is common to have temporary or dummy variable assignments for a cell. It is conventional to use the underscore for these type of unwanted values.
#
# 
# ```python
# a, b = 1, 2 ## assigns a,b the value 1,2
# _ = a+b ## _ is temporary assignment that adds a and b together
# _temp = b-a
# print(a,b, "equals", _) ## prints the output
# ```
#
# 
# Exercise: change the unwanted variable `_` to the result of `b-a`
# + gather={"logged": 1634326294129} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
a, b = 1, 2
_ = a+b
_temp = "1" + "1"
print(a,b, "equals", _)
print("_temp equals", _temp, "since 1 has quotes around it indicating a string data type")
# + [markdown] nteract={"transient": {"deleting": false}}
# # Data Types
#
# |Data Type|Description|Example|
# |------|------|------|
# |int|Integers, floating-point, complex numbers|1,2,3,4,5|
# |bool|an expression that results in a value of either true/false.|True/False|
# |str|strings of characters| "John"|
# |list| mutable sequence that group various objects together | [1,True,"John"]
# |dict| Dictionary | {'name':'red','age':10}|
# |tuple|similar to list but they are immutable.|(123,'hello')|
# |set|unordered and mutable collections of arbitrary but hashable Python objects|{"apple","banana","orange"}|
#
# 
# - [Documentation - python.org - Data Types] (https://docs.python.org/3/library/datatypes.html)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## String
# Strings can be declared a number of ways.
#
# Single quotes
# ```python
# print('This string is in single quotes')
# ```
#
# Double quotes
# ```python
# print("This string is in double quotes")
# ```
#
# Triple double quotes
# ```python
# print(
# """
# This multi-line string
# is in triple double quotes
# """)
# ```
#
# 
# Why would you want different ways of declaring a string?
# + gather={"logged": 1634324553119} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
print('This string is in single quotes')
print("This string is in double quotes")
print("""
This multi-line string
is in triple quotes""")
# -
# **Answer**: Because this allows you to embed quotes inside a string by declaring it with another way. You cannot use the same quotes you used to declare the string because it would end the string and the output would not be what you wanted.
# <br><br>
#
# ```python
# ## This is wrong
# print(" I am a string with "quotes" ")
#
# ## This is right
# print(' I am a string with "quotes" ')
# print(" I am a string with 'quotes' ")
# print(""" I am a string with 'two' types of "quotes" """)
# Output
# ```
#
# 
# Press `escape` if you need to and navigate ⬇. Press `enter` to edit the cell and try running the current cell. Can you fix it using what we learned?
print(" I am getting pretty good with "Azure Notebooks" ")
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Escaping special characters
# If your string has special characters or punctuation, you may have problems using it. In order to do that, you will need to use the backslash `\` key. This tells python to ignore the character that follows the backslash.
# ```python
# print ('It\'s getting \'hot\' in here')
# ```
#
# 
# Try removing the backslash and run the cell. What happens?
# + gather={"logged": 1633361887581} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
print ('It\'s getting \'hot\' in here')
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Inserting values into a string and evaluating expressions
#
# ```python
# age = 30
# print(f"You are {age} years old. {1 + 1}")
# ```
#
# Notice how we prefix `f` in front the string `"You are {age} years old."`. This allows us to evaluate any expression within curly braces `{ }`. In this case, we grab the value for variable `age` and insert it into the string when evaluated.
# + gather={"logged": 1634329664549} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
age = 30
print(f"You are {age} years old. {1 + 1}")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Numeric data
#
# There are three types of numbers used in Python: integers, floating, and complex numbers.
#
# ```python
# x = 5 # int
# y = 10.024 # float
# z = 5j # complex
# ```
#
# 
# Run the cell below `(shift-enter)`. Make a guess what the `type()` command does?
# + gather={"logged": 1633446430017} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Demonstrate Python's built-in numeric types alongside a string.
s = "I am a string"
x = 5 # int
y = 10.024 # float
z = 5j # complex
# type() returns the class of the object passed to it; the tabs line up
# the class names and values into two columns.
print ("Class", "\t\t\t", "Value")
print (type(s), "\t\t", s)
print (type(x), "\t\t", x)
print (type(y), "\t", y)
print (type(z), "\t", z)
# + [markdown] nteract={"transient": {"deleting": false}}
# Here are some arithmetic operations you can perform
#
# | Operator | Name | Description |
# |--------------|----------------|--------------------------------------------------------|
# | ``a + b`` | Addition | Sum of ``a`` and ``b`` |
# | ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |
# | ``a * b`` | Multiplication | Product of ``a`` and ``b`` |
# | ``a / b`` | True division | Quotient of ``a`` and ``b`` |
# | ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |
# | ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |
# | ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |
# | ``-a`` | Negation | The negative of ``a`` |
# + gather={"logged": 1633099458326} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## Arithmetic Operations Examples
# Note: `/` is true division, so 10 / 2 prints 5.0 (a float), not 5.
print(True)
print(5 + 5)
print(5 - 3)
print(10 / 2)
print(5.0 / 2)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Boolean
#
# <center>Comparison Operations</center>
#
# |Operation|Description|Operation|Description|
# |---|---|---|---|
# |a == b|a equal to b|a != b|a not equal to b|
# |a < b|a less than b|a > b|a greater than b|
# |a <= b|a less than or equal to b|a >= b|a greater than or equal to b|
#
# ```python
# 5.0 == 5
# '3' == 3
# True == True
# True == False
# True == 1
# True == 0
# '🍎' == '🍏'
# ```
#
# 
# ▶ Run the following 👇🏾 cell block and identify why each expression either resolves true or false.
# + gather={"logged": 1634328816465} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Comparison examples. '5' == 5 is False (str vs int are never equal);
# True == 1 is True because bool is a subtype of int (True has value 1).
print("5.0 == 5,", 5.0 == 5)
print("'5' == 5,", '5' == 5)
print("True == True,", True == True)
print("True == False,", True == False)
print("not True == False,", not True == False)
print("True == 1,", True == 1)
print("True == 0,", True == 0)
print("'🍎' == '🍏',", '🍎' == '🍏')
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Combining Boolean values
#
# You can combine boolean values by using `and`,`or`, and `not`
#
# ```python
# 5 > 1 and "🍎" == "🍌"
# ```
#
# Here `5 > 1` evaluates to true, but `"🍎" == "🍌"` evaluates to false. Combined with `and`, the whole expression therefore evaluates to false.
#
# 
# Run the following cell 👇🏾, and then see if you can make the result of both conditions evaluate to `True`.
# + gather={"logged": 1634328112352} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
5 > 1 and "🍎" == "🍌"
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Converting Data Types
#
# There may be situations you will need to convert a data type to another in order for the script to work.
# ```python
#
# int_to_float = float(10) ## Convert integer to floating number
# str_to_float = float("200.1243") ## Convert string to floating number
#
# print (int_to_float)
# print (str_to_float)
# ```
#
# 
# Can you try converting a number/string with a decimal to an integer? What happens?
# + gather={"logged": 1634328930669} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# float() parses a numeric string into a floating-point number.
str_to_float = float("200.1243")
print (str_to_float)
# + [markdown] nteract={"transient": {"deleting": false}}
# Here are some more data types to practice with
# + gather={"logged": 1634327770558} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## Converting Data Types
# int() truncates toward zero (2.5 -> 2); bool(0) is False (0 is falsy).
x = (5/2)
print("Type:", type(x), ", Output", (x))
## Convert to string
x = str(5/2)
print("Type:", type(x), ", Output", (x))
## Convert to int
x = int(5/2)
print("Type:", type(x), ", Output", (x))
## Convert from int to bool
x = bool(0)
print("Type:", type(x), ", Output", (x))
# + [markdown] nteract={"transient": {"deleting": false}}
# # Collections
#
# <center><b>There are four collection data types in Python</b></center><br>
#
# |Name|sort|indexed|mutable|duplicates|
# |---|---|---|---|---|
# |List|✅ ordered|✅ indexed|✅changeable|✅Allows duplicate members
# |Tuple|✅ ordered|✅ indexed|❌unchangeable|✅Allows duplicate members
# |Set|❌ unordered|❌Unindexed|✅changeable|❌No duplicate members
# |Dictionary|✅ ordered|✅ indexed|✅changeable|✅No duplicate keys, but duplicate values are ok
#
# There will be situations where a certain collection type makes more sense than another. Example is you want a set of distinct items with no duplicates `set`. Or you want a collection that you can grab the `age` or `name` of a person in a collection `dictionary`.
# + [markdown] nteract={"transient": {"deleting": false}}
# ## List
# |Name|description|sort|indexed|mutable|duplicates|
# |---|---|---|---|---|---|
# |List|ordered sequence of values|✅ ordered|✅ indexed|✅changeable|✅Allows duplicate members
#
# 
# - A list is a sequence, where each element is assigned a position (index)
# - First position is 0. You can access each position using
#
# Allright, first lets create a list
#
# 
# ```python
# list_name = ['value1','value2','value3','value4','value5']
# ```
#
# Notice how a list is enclosed in square brackets `[ ]`.
#
# 
# Run the cell below to create a list, which also will be referenced in the following cells. Any variables created in the session, could be referenced later as long as the kernel was not restarted.
# + gather={"logged": 1634328996269} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Create the list reused by the following cells; the bare name on the last
# line makes the notebook display its value.
my_list = [1,2,3,4,5,6,7,8,9,10]
my_list
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Access items from list
# You can access items in a list with the same `[ ]` square brackets, but inside you put the index location of what items you want returned.
#
#
# Lets print the third item in the list
# - Note: The very first position in a list is `0`, so the third item in the list is pulled by referencing `2`
#
# ```python
# print(my_list[2])
# ```
#
# 
# 🏃🏾♀️ Run the following cell, but how would you print the last item on the list?
# + gather={"logged": 1634329009048} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
print(my_list[2])
# + [markdown] nteract={"transient": {"deleting": false}}
# You are ✔ correct if you answered:
# ```python
# print(my_list[9])
# # 10
# ```
#
# But you can also print the last item on the list by using a `-1`
# ```python
# print(my_list[-1])
# # 10
# ```
#
# 
# Using what you know, how would you get the 2nd from the last item on the list
# + gather={"logged": 1634329028401}
print(my_list[-1])
# + [markdown] nteract={"transient": {"deleting": false}}
# 
# You would reference the index `-2` since any negative value starts from the right. `print(my_list[-2])`
# -
# ### Adding item to the list
#
# You can add item to the list using the `append()` method.
# + gather={"logged": 1634329190852}
# append() adds a single item to the end of the list, in place.
# (Removed the stray trailing semicolon — not needed in Python.)
my_list.append(1000)
print(my_list)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Modify a list
#
# You may want to modify values in a list
# ```python
# my_list[2] = 5000 # changes the 3rd item in the list from 3 to 5000
# ```
# + gather={"logged": 1634329201769} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Assigning to an index replaces that element in place (3rd item becomes 5000).
my_list[2] = 5000
print(my_list)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Removing item from list
#
# ```python
# del(my_list[1]) ## deletes the 2nd item in the list (index 1, since indexing starts at 0)
# ```
#
# 
# Remove the first two items on the list using a range `[<start>:<finish>]`
# + gather={"logged": 1633447341129} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# del removes the element at index 1 (the 2nd item); later items shift left.
del(my_list[1])
print(my_list)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Merging Lists together
#
# 
# - When you want to merge two lists together, you can simply use `+`
# - Note: There are still duplicates when lists are merged.
#
# Lets create list x,y and another variable `myList` that is a combination of both.
# ```python
# new_list = [20, 21, 22, 23, 24];
# my_list = my_list + new_list
# ```
#
# 
# Can you add more items to the list or even make another list and merge all 3 together using what we learned?
# + gather={"logged": 1634329208702} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Concatenation: `+` builds a new list from both operands (duplicates kept).
# (Removed the stray trailing semicolon — not needed in Python.)
new_list = [20, 21, 22, 23, 24]
my_list = my_list + new_list
print(my_list)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Slicing and Dicing a list
# There may be situations where you don't want the first item in a list or only want the first 5. You will need to reference the `range` you want after the list variable.
#
# 
# ```python
# my_list[<start_here>:<end_here>]
# ```
#
# Lets slice (extract) the first and second item in the list
# ```python
# print(my_list[0:2]) ## 0 identifies where to start (value is grabbed). The 2 indicates the location you want to stop. (the value at the end location isn't pulled).
# ```
# + gather={"logged": 1634329273002} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Slice [0:2] takes indices 0 and 1; the stop index (2) is excluded.
print("Full list:", my_list)
print("Spliced List:", my_list[0:2]) ## Slice the first and second item in the list
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Checking if list contains a value
#
# You can check if a list contains a item by using `in`
#
# 
# ```python
# 'search_word' in list
# ```
# <br>
#
# 
#
# 1. Modify the line and search for the number `10` in the list.
# 2. How would you use an `and` conditional to check for two values?
# + gather={"logged": 1634329308659}
1 in my_list
# -
# ### Counting
#
# Count how many items in a list by using `len(<list>)`
# ```python
# len(my_list)
# ```
#
# Count how many times an item is in the list `list_name.count("<what_to_search_for>")`
# ```python
# my_list.count(5)
# ```
#
# 
# Count how many times the number 5 shows up in the list.
# + gather={"logged": 1634329710277} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# len() gives the number of items; list.count(x) counts occurrences of x.
how_many_items = len(my_list) ## Gets item count and assigns it to `how_many_items` variable
how_many_times = my_list.count(1) ## Counts how many times `1` shows up in `myList`
print(my_list)
print("The list has", how_many_items, "items.")
print("The number 1 shows up", how_many_times, "times.")
# -
# ### Getting the highest value in a list
# To get the highest number in the list you will use the `max()` function. You cannot calculate max if the values are not all numeric.
# + gather={"logged": 1634329715569} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Get the highest numeric value in the list (max() requires comparable items).
print(my_list)
print( max(my_list) )
# -
# ### Lists can have various data types
# Elements in the lists can be different data types, but be careful of performing any calculation with mixed data types.
# ```python
# mylist = ['string1', 1, "string2", True]
# ```
# Lists may mix data types (str, int, bool); arithmetic across them can fail.
multitype_list = ['string1', 1, "string2", True]
print(multitype_list)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Sets
# |Name|sort|indexed|mutable|duplicates|
# |---|---|---|---|---|
# |Set|❌ unordered|❌Unindexed|✅changeable|❌No duplicate members
#
# Sets are similar to list but is an unordered collection data type that is iterable, mutable and has no duplicate elements.
# - Unordered, so items have no index. Items can be added to or removed from a set, but the elements themselves must be immutable.
# + [markdown] nteract={"transient": {"deleting": false}}
# Lets create a set from the list and notice duplicates are removed
# ```python
# basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
# #> {'orange', 'banana', 'pear', 'apple'}
# ```
#
# 
# Modify the line and add another fruit to the basket.
# + gather={"logged": 1634329825416} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# A set literal drops duplicates; print order is arbitrary (sets are unordered).
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
print(basket)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Create set from string
# ```python
# a = set('abracadabra') ## Notice duplicates are removed.
# #> {'a', 'r', 'b', 'c', 'd'}
# ```
#
# Run the following cell and do you understand whats happening ? Why is the list shorter 🤔?
# + gather={"logged": 1634329981497} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# set() on a string builds a set of its distinct characters.
a = set('abracadabra')
print(a)
# Duplicate emoji collapse into single members the same way.
fruits = set( {'🍎','🍏','🍏','🍊','🍌','🍏','🌮','🍉','🍌'} )
# Fixed: this previously printed `b`, which is never defined in this cell
# (NameError); the set created above is named `fruits`.
print(fruits)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Add item to set
# ```python
# a.add('z')
# #> {'a', 'c', 'r', 'b', 'z', 'd'}
# ```
# + gather={"logged": 1634330089711} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# add() inserts one element; adding an element already present is a no-op.
a.add('z')
print(a)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Making a set immutable (Cant be modified)
# If you ever want to declare a list and ensure the list is not modified, you can freeze the list using `frozenset(<set_name>)`
#
# ```python
# b = frozenset('asdfagsa')
# #> frozenset({'f', 'g', 'd', 'a', 's'})
# ```
#
# 
# Notice we add `Dallas` to the set `cities`. Uncomment `#` the commented line `#frozen_cities.add("Tampa")` and run the cell again. What happens?
# +
## Create a cities set and adds Dallas to the set
cities = {"Houston", "New York","San Diego"}
cities.add("Dallas")
print(cities)
frozen_cities = frozenset(cities)
#frozen_cities.add("Tampa")
print(frozen_cities)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Dictionary
# |Name|sort|indexed|mutable|duplicates|
# |---|---|---|---|---|
# |Dictionary|✅ ordered|✅ indexed|✅changeable|✅No duplicate keys, but duplicate values are ok
#
# Dictionary consists of key-value pairs `{<key> : <value>}`. It is enclosed by curly braces {} and values can be assigned and accessed
# using square brackets `[]`.
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Create a dictionary
#
# Lets create a dictionary object with a name and age key
# ```python
# person={
# 'name' : 'bob',
# 'gender' : 'male',
# 'age' : 30
# }
# ```
# + gather={"logged": 1634330143091} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# A dictionary of key/value pairs describing a person.
person={
    'name' : 'bob',
    'gender' : 'male',
    'age' : 30
}
print(person)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Getting a property
#
# Lets grab the `age` property from this dictionary object by reference the right `key` which in this case its the string `"age"` enclosed in double quotes .
# ```python
# print(person['age'])
# ```
#
# + gather={"logged": 1633447842051} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
print(person['age'])
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Get list of keys/values
#
# To get the list of keys or values only, you would specify the method
#
# Get list of keys
# ```python
# print( person.keys() )
# # output: dict_keys(['name', 'gender', 'age'])
# ```
#
# Get list of values
# ```python
# print( person.values() )
# # output: dict_values(['bob', 'male', 30])
# ```
# + gather={"logged": 1633448011300} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# keys() and values() return view objects over the dictionary's contents.
print(person.keys())
print(person.values())
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Adding to dictionary
#
# To add to the list, you simply just reference an unused `key name` in the dictionary object
# ```python
# person['location'] = 'US'
# # {'name': 'bob', 'gender': 'male', 'age': 30, 'location': 'US'}
# ```
#
# 
# Try adding another item to the dictionary object.
# + gather={"logged": 1633448116142} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Assigning to a new key inserts it; assigning to an existing key overwrites.
person['location'] = 'US' ## This adds a location with the value US to the dictionary object
print(person)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Tuple
# |Name|sort|indexed|mutable|duplicates|
# |---|---|---|---|---|
# |Tuple|✅ ordered|✅ indexed|❌unchangeable|✅Allows duplicate members
#
# Tuples are like `lists` except cannot be changed
#
# ```python
# tuple = (123,'hello')
# print(tuple) #will output whole tuple. (123,'hello')
# print(tuple[0]) #will output first value. (123)
# ```
# + gather={"logged": 1634330171271} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# NOTE(review): this name shadows the built-in `tuple` type for the rest of
# the session — fine for a demo, but avoid in real code.
tuple = (123,'hello')
print(tuple)
print(tuple[0])
print(tuple[1])
# + [markdown] nteract={"transient": {"deleting": false}}
# <br>After running the previous cell, try updating the tuple by running the cell below
# ```python
# tuple[1] = "update"
# ```
#
# 
# What happens when you run the cell and do you know why it happened based on how `tuples` work?
# + jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
tuple[1] = "update"
# + [markdown] nteract={"transient": {"deleting": false}}
# # Block Indentation
# Python uses indentation to define and control function, loops, and conditions.
# This makes Python easy to read, but the user must pay close attention to the whitespace (space/tabs) usage. Always use 4 spaces for indentation and most code editors are able to convert tabs to spaces to reduce any error.
#
# <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyYAAAFQCAIAAADFsf2QAACi0klEQVR4Aez9B5hVRbaHD/vk/OXv80tzvckbnBlnrne8M3Ov1xlHZBBFFBUDoyJGGMIoDkkk02RoMk3OOeccBIEmSmxyzmlQGQxjVf3fYzXrOb3PPofTex86sX5PPTx96tSuvXft3dTba61adc/nP8jdMalUKpVKpVKpYiKXSqVSqVQqlUqRS6VSqVQqlUqRS6VSqVQqlUqRS6VSqVQqlUqlyKVSqVQqlUqlyKU<KEY>//<KEY>ZLI<KEY>" width="300"/>
#
#
# 
# Do not mix tab and spaces. Stay consistent. Indentation Errors will occur or cause the program to do something unexpected
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Functions
# A function is a block of code which only runs when it is called. This is helpful to run repeatable blocks of code you plan on using often.
#
# 
# - You can pass data, known as parameters, into a function.
# - A function can return data as a result.
#
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Creating a functions
# Declare the function name with
# Syntax
# ```python
# def <function_name>()
# ```
# Then any lines you want to belong to the function is associated by using 4 empty spaces
#
# 
# ```python
# def ping(): # Declare the function with `def <name>():`
# print("pong")
# ```
# + gather={"logged": 1634330313560} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
def ping():
    """Print a pong reply — the simplest possible function body."""
    response = "pong 🏐"
    print(response)
# -
# ### Call a function
# Call your custom function you declared by referencing the name of the function `ping()`
# + gather={"logged": 1634330316599}
ping()
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Passing information to functions
# Information can be passed into functions as arguments.
# ```python
# def greeting(name): ## we declare the parameter `name` as a variable that will be used in the nested code
# print("Hello",name) ## we print Hello with the value of the name provided
#
# greeting("John") ## we call the function greeting and pass the argument `John` as the name
# ```
#
# 
# How would you pass another value to the greeting function and print it out on the response?
# - Example: Print a response like, "Hello `<name>`, You are `<age>` years old"
# + gather={"logged": 1634330363368} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
def greeting(name):
    """Print a hello message for the given `name`."""
    print(f"Hello {name}")


greeting("John")
# + [markdown] nteract={"transient": {"deleting": false}}
# You can also pass multiple arguments by separating each value with a comma
# ```python
# def greeting(name, age):
# print("Hello", name, "You are", age, "years old.")
#
# greeting("John", 30)
# ```
#
# 
# Can you pass third argument and print it in the output when the function is called?
# + gather={"logged": 1633380946549} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
def greeting(name, age):
    """Greet `name` and state their `age`."""
    print(f"Hello {name} You are {age} years old.")


greeting("John", 30)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Commenting in functions
# Docstrings are not like regular comments. They are stored in the function documentation. You can then use `help()` to get some helpful guidance on the particular function
#
# Comment your function
# ```python
# def ping():
# """ This function replies pong. This is custom documentation you can build to describe the function """
# return "pong"
#
# ping()
# ```
#
# Get help details on function
# ```python
# help(ping)
# ```
#
# + gather={"logged": 1633363079900} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# The triple-quoted string is a docstring; help() displays it at runtime.
def ping():
    """ This function replies pong. This is custom documentation you can build to describe the function """
    return "pong"
# Call the function, then show its generated documentation.
ping()
help(ping)
# -
# ### Multi-level indentation and nesting
#
# You can nest conditions, loops, and functions within each other.
# If a line is not indented to the same level it will not be considered part of the enclosing function
#
# ```python
# def separateFunction(provided_list):
# #Loops are also indented and nested conditions start a new indentation
#
# for i in provided_list: ## loop through each item in the list
#
# if i == 1: ## We will check each `item` in the list for the value of `1`.
# return True
# return False
#
# separateFunction([2,3,5,6,1])
# ```
#
# This will call the function `separateFunction()` and provide the list ``` [2,3,5,6,1]``` as an input argument for the function `separateFunction()`
#
# 
# Try searching for another `item` in the array, or add an additional argument in `separateFunction()` to search against that argument instead of the static value `1`.
# + gather={"logged": 1631911040290} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
def separateFunction(provided_list, target=1):
    """Return True if `target` appears in `provided_list`, else False.

    `target` defaults to 1, preserving the original behavior; passing a
    different value generalizes the search (the exercise suggested above).
    """
    ## loop through each item in the list
    for item in provided_list:
        ## Compare each `item` against the value being searched for.
        if item == target:
            return True
    return False
separateFunction([2,3,5,6,1])
# + [markdown] nteract={"transient": {"deleting": false}}
# Passing values into a function
# -
# ### Default values in a function
#
# You can provide default values if you want to assume certain values. Then if input `IS` provided, it overrides the static default value.
#
# First lets create another function that has a `default value` for the greeting. We also will call `help()` function to get the documentation of the `greet()` function.
# + gather={"logged": 1634330462945} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
def greet(name="anonymous", greeting="Hello"):
    """
    Print a greeting to the user `name` Optional parameter `greeting` can change what they're greeted with.
    Example: greet("John","Sup")
    """
    # f-string form of the original .format() call — identical output.
    print(f"{greeting} {name}")
help(greet)
# -
# Lets call the function first by providing no input or arguments
#
# ```python
# greet()
# ```
# + gather={"logged": 1634330467905}
greet()
# -
# This time lets call the function `greet()` but also provide the arguments `name='john'` and `greeting='sup'`
#
# We can pass arguments based on the position in the list
# ```python
# greet("john", "sup")
# # output: sup john
# ```
#
# Or we can directly specify what value is for what argument (which now doesn't need to be in the proper list position)
# ```python
# greet(greeting="sup", name="bob")
# # output: sup bob
# ```
#
# 
# Try passing different arguments in different positions. Or modify the function and add another argument that will be used in the response
# + gather={"logged": 1633374319230} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## Call the greet function and pass a name
greet(greeting="sup", name="bob")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Conditional statements
# + [markdown] nteract={"transient": {"deleting": false}}
# if/else/elif Example
# ```python
# if conditionA:
# statementA
# elif conditionB:
# statementB
# else:
# statementD
#
# this line will always be executed (after the if/else)
# ```
# + [markdown] nteract={"transient": {"deleting": false}}
# Exercise: Run the cell below `shift-enter`, How would you modify the if statement to evaluate to false?
# -
# ### Creating a conditional statement
# You will nest lines that you want included in each condition evaluated
#
# 
# Run the following command and see if you can make the expression evaluate false?
# + gather={"logged": 1634330597569} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
a = 5  # Initializes variables a, b with values 5, 2 (comment previously claimed 2, 4)
b = 2
# If block starts here
if a > b: # If the value of a > b, nested indented lines will run
    print("✅", a)
else: # if above if statement evaluates to false, the following nested indented lines will run.
    print("❌", b)
# (Removed a redundant trailing `b = 2;` reassignment and stray semicolons.)
# + [markdown] nteract={"transient": {"deleting": false}}
# ### One-liner if statement (no indentation)
#
# 
# Most style guides and linters (error checking) discourage using this as its not the best practice 👎. Try using lines and spaces as its easier to read for everyone else 💓
#
# You can write the if statement to one line if there isn't a need for nesting multiple lines within the statement.
#
# ```python
# a = 5; ## initializes variables a,b with values 2,4
# b = 2;
#
# if a > b : print(True, ",", a) ## Evaluate if a > b, If its true run this line, else run the else statement.
# else: print(False, ",", b)
# ```
# + gather={"logged": 1633380445511} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# One-line if/else form — discouraged by style guides, shown for completeness.
a = 5;
b = 2;
if a > b : print(True, ",", a)
else: print(False, ",", b)
# -
# ### 👍 Even shorter if statement for default values.
#
# You don't have to write a whole `conditional statement` if all you want is to check if a value exist or use a `default value` if the condition evaluates to `false`
#
# ```python
# a = 5
# b = 3
# result = "A is bigger than B" if (a>b) else "A is less than B"
#
# print(result)
# ```
#
# The value of `result` is `"A is bigger than B"` only if the expression in the following if statement evaluates to `True`, otherwise the value following `else` is used.
#
# 
# Try writing another statement that maybe evaluates a string or collection.
# + gather={"logged": 1634330860589}
# Conditional (ternary) expression: the value is chosen by the a > b test.
a = 5
b = 3
result = "A is bigger than B" if (a>b) else "A is less than B"
print(result)
# -
# ### Using conditional statement to validate an input provided
#
# Example of searching a `list` based on the `input` provided
# 
# ```python
# mylist = [1,2,3,4,5]
#
# userInput = int(input("Enter a number from 1-10:"))
# if userInput in mylist:
# print(userInput, "is in the list `mylist`")
# ```
#
# When this cell is run, the `userInput` variable will call the `input()` function and request input from the user. After the input is provided, the cell will take the input value and check whether it matches any item in the list `my_list`.
# + gather={"logged": 1634330888235} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Prompt the user for a number; int() raises ValueError on non-numeric input.
my_list = [1,2,3,4,5]
userInput = int(input("Enter a number from 1-10:"))
# Membership test against the list defined above.
if userInput in my_list:
    print(userInput, "is in the list `mylist`")
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Loops
# Loops are simply computer instruction that will keep repeating until a specified condition is reached.
# + [markdown] nteract={"transient": {"deleting": false}}
# ### For Loops
# - Used for iterating over a sequence (list, tuple, dictionary, set) and runs a set of statements, once for each item in the list. The loop will end when it reaches the last item on the list its iterating.
#
# #### Simple For Loop
# ```python
# print("== For Loops ==")
# for x in range(0, 3): ## Iterate from 0 to 3
# print("Let's go %d" % (x)) ## print `Let's go` and the populates the value of the variable `x`
# ```
#
# We will loop starting at `0` and end at `3`. On each iteration, we will print the current value of `x`
#
# 
# How would you make the for loop iterate 10 times instead of 3?
# + gather={"logged": 1634331086566} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## For Loops
print("== For Loops ==")
# range(0, 3) yields 0, 1, 2 — the stop value is excluded.
for x in range(0, 3):
    print(f"Let's go {x}")
# + [markdown] nteract={"transient": {"deleting": false}}
# You can process any iterable (string, list, dict, etc) with a for loop
# ```python
# my_list = [5,10,15,20,25]
# for item in my_list:
# print(f"{item} * 10 = {item * 10}")
# ```
#
# 
#
# 1. First we create the list of 5 values `[5,10,15,20,25]`
# 2. then we will use the `for` loop to iterate through each item in the list and print the string for each item.
# + gather={"logged": 1634701337837} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Iterate the list directly; `item` is bound to each element in turn.
my_list = [5,10,15,20,25]
for item in my_list:
    print(f"{item} * 10 = {item * 10}")
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Nested for Loop
# ```python
# print("\n== Nested For Loops ==")
# for x in range(0, 3):
# for y in range(0,2):
# print("Let's go %d %d" % (x,y))
# ```
#
# 
# Can you nest another `condition or loop` nested inside this `for loop` and output the values?
# + gather={"logged": 1634331190170} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## Nested for loop
print("\n== Nested For Loops ==")
# The inner loop runs to completion for each iteration of the outer loop.
for x in range(0, 3):
    for y in range(0,2):
        print(f"Let's go {x} {y}")
# -
# #### Using For loop to iterate through array of dictionary objects
#
# ```python
# states = [
# {"name" : "Alabama", "capital" : "Montgomery", "country": "United States"},
# {"name" : "Colorado", "capital" : "Denver", "country": "United States"},
# {"name" : "Texas", "capital" : "Austin", "country": "United States"},
# {"name" : "New Mexico", "capital" : "Sante Fe", "country": "United States"},
# ]
#
# for state in states:
# print(f"{state['name']} is located in the {state['country']} and it's capital is {state['capital']}")
# ```
#
# 
#
# 1. First we initialize a `list(array)` of dictionary objects with key value pairs `name, capital, country`
# 2. Then we use a `for loop` to iterate through each state and reference the `keys` within each dictionary item being iterated to retrieve the values.
#
# 
# Can you add another `key` to each dictionary object and print that key value when its been iterated in the `for` loop?
# + gather={"logged": 1634702009335}
## Sort list by key name
states = [
{"name" : "Alabama", "capital" : "Montgomery", "country": "United States"},
{"name" : "Colorado", "capital" : "Denver", "country": "United States"},
{"name" : "Texas", "capital" : "Austin", "country": "United States"},
{"name" : "New Mexico", "capital" : "Sante Fe", "country": "United States"},
]
for state in states:
print(f"{state['name']} is located in the {state['country']} and it's capital is {state['capital']}")
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Iterating through dictionary key/values
#
# Let's use the same list, but iterate it a different way. Lets iterate each state and print the `key/value pair` for each item.
# ```python
# for state in states:
# for key, value in state.items():
# print(f"State:{state['name']}, Key:{key}, value:{value}")
# ```
#
# 
#
# 1. We first will iterate through each `state` in the `states` list.
# 2. We then grab each `key and value` from `state.items()`
# 3. We reference directly the `key and value` variables instead of calling `state[<key>]`
# + gather={"logged": 1634703058805} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
# .items() yields (key, value) pairs for each state dictionary.
for state in states:
    for key, value in state.items():
        print(f"State:{state['name']}, Key:{key}, value:{value}")
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Using For loop to enumerate through dictionary keys
#
# Or maybe you want the index location of each key in the list. You can enumerate the dictionary
# ```python
# ## enumerate through states
# for state in states:
# for index, key in enumerate(state): ## use the enummerate function which will convert the dictionary object a list with an numeric index location you can reference.
# print(f"State:{state['name']}, index:{index}, key:{key}, value:{state[key]}")
# ```
#
# 
#
# 1. We first will iterate through each `state` in the `states` list.
# 2. We iterate through each state then grab the `index and key` from `enumerate(state)`
# 3. We now can reference the index location of the `keys` and use the `key` to reference properties in the `state` variable.
# + gather={"logged": 1634703554340} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## enumerate through states
for state in states:
    for index, key in enumerate(state): ## enumerate() pairs each key with its numeric position index
        print(f"State:{state['name']}, index:{index}, key:{key}, value:{state[key]}")
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Using Loop to modify list values
#
# You can use a loop to iterate through a list and modify values or execute lines for each item being looped.
#
# ```python
# from datetime import datetime ## Imports the datetime module needed to get current date.
# years = [1955,1987,1978,2019,1967,1955] ## defines the array of years to loop through
#
# for year in years:
# currentDate = datetime.today().year ## grabs the current date
# age = currentDate - year ## subtracts the value of x(year) from the current date
# print(f"date:{currentDate}, year:{year}, You are {age} years old." ) ## replaces all placeholders in curly braces with variable
# ```
#
# 
# Import the datetime module and create the `years` array
#
# 1. Import the datetime module to be used in the years array.
# 2. `years` array is created that will used as the input to the `for` loop
# 2. This example will iterate through each `year` in the years list `[1955,1987,1978,2019,1967,1955] `.
# 1. Inside this loop we first will calculate the `currentDate` variable by calling the imported `datetime` function and extracting the `year`.
# 2. We can then calculate the age by subtracting the `currentDate` for each `year` being looped.
# 3. Then we will print a line that outputs these variables being iterated.
# 3. The last command to run is `printing` out the loop has completed.
#
# + gather={"logged": 1634331446201} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## This type of string formatting will require Python >= 3.6
from datetime import datetime ## Imports the datetime module needed to get current date.
years = [1955,1987,1978,2019,1967,1955] ## defines the array of years to loop through
## This is the loop that will iterate through each year in the years array.
print ("--For loop start--")
for year in years:
    currentDate = datetime.today().year ## grabs the current date
    age = currentDate - year ## subtracts the value of x(year) from the current date
    print(f"date:{currentDate}, year:{year}, You are {age} years old." ) ## replaces all placeholders in curly braces with variable
print(years) ## This statement is printed outside of the for loop
# + [markdown] nteract={"transient": {"deleting": false}}
# ### While Loop
# Most of the time you will be using for loops. This loop will continue to execute the nested statements (indented) in the while loop until a specified condition is true
#
# Example
# ```python
# i = 0 # Initialization
# print("While Loop starting")
# while (i < 10): # Condition
# print(f"{i} is less than 10. Looping") # do_something
# i = i + 1 # Why do we need this?
#
# print("While Loop has ended") ## Last line to run. notice its not nested in the loop so it runs last.
# ```
#
# 
# This `while loop` example will run the lines nested in the while loop until `i` reaches a value that is greater than `10`
# + gather={"logged": 1633114658670} jupyter={"outputs_hidden": false} nteract={"transient": {"deleting": false}}
## While Loop: repeats the indented body until the condition becomes false
i = 0 # Initialization
print("While Loop starting")
while (i < 10): # Condition checked before every iteration
    print(f"{i} is less than 10. Looping") # do_something
    i = i + 1 # Increment i; without this the condition never becomes false and the loop runs forever
print("While Loop has ended") ## Last line to run. Notice it is not nested in the loop, so it runs once, after the loop.
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Finished
#
# You are done 🎈🏁🐱👓 with part 1 of this Python Crash Course and you are well on your way to knowing Python.
#
# Have some cake to celebrate 🍰. If you are still thirsty🧉 for more, you should now be able to step into the other Notebooks 📓 and it may actually make sense!
#
# Remember to check back as the 2nd part of the series will focus on building on top of this knowledge by learning how to manipulate data using:
#
# 
#
# We will use this knowledge to manipulate and restructure data in order to render charts and visualize output for investigation or sending it.
#
# Keep learning! ❤
| A Python Crash Course - Part 1 - Fundamentals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Chapter 19 – Training and Deploying TensorFlow Models at Scale**
# _This notebook contains all the sample code in chapter 19._
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# </td>
# <td>
# <a target="_blank" href="https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" /></a>
# </td>
# </table>
# # Setup
# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
#
# +
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Is this notebook running on Colab or Kaggle?
IS_COLAB = "google.colab" in sys.modules
IS_KAGGLE = "kaggle_secrets" in sys.modules
if IS_COLAB or IS_KAGGLE:
# !echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" > /etc/apt/sources.list.d/tensorflow-serving.list
# !curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
# !apt update && apt-get install -y tensorflow-model-server
# %pip install -q -U tensorflow-serving-api
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
if IS_KAGGLE:
print("Go to Settings > Accelerator and select GPU.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deploy"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under the module-level IMAGES_PATH.

    Parameters
    ----------
    fig_id : str
        Base name of the output file (extension is appended automatically).
    tight_layout : bool, optional
        When True, call ``plt.tight_layout()`` before saving to trim padding.
    fig_extension : str, optional
        Image format/extension handed to ``plt.savefig``.
    resolution : int, optional
        DPI used when rasterizing the figure.
    """
    target = os.path.join(IMAGES_PATH, f"{fig_id}.{fig_extension}")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# -
# # Deploying TensorFlow models to TensorFlow Serving (TFS)
# We will use the REST API or the gRPC API.
# ## Save/Load a `SavedModel`
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.
X_test = X_test[..., np.newaxis].astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
# +
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
# -
np.round(model.predict(X_new), 2)
model_version = "0001"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
# +
import shutil
shutil.rmtree(model_name)
# -
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
# !saved_model_cli show --dir {model_path}
# !saved_model_cli show --dir {model_path} --tag_set serve
# !saved_model_cli show --dir {model_path} --tag_set serve \
# --signature_def serving_default
# !saved_model_cli show --dir {model_path} --all
# Let's write the new instances to a `npy` file so we can pass them easily to our model:
np.save("my_mnist_tests.npy", X_new)
input_name = model.input_names[0]
input_name
# And now let's use `saved_model_cli` to make predictions for the instances we just saved:
# !saved_model_cli run --dir {model_path} --tag_set serve \
# --signature_def serving_default \
# --inputs {input_name}=my_mnist_tests.npy
np.round([[1.1347984e-04, 1.5187356e-07, 9.7032893e-04, 2.7640699e-03, 3.7826971e-06,
7.6876910e-05, 3.9140293e-08, 9.9559116e-01, 5.3502394e-05, 4.2665208e-04],
[8.2443521e-04, 3.5493889e-05, 9.8826385e-01, 7.0466995e-03, 1.2957400e-07,
2.3389691e-04, 2.5639210e-03, 9.5886099e-10, 1.0314899e-03, 8.7952529e-08],
[4.4693781e-05, 9.7028232e-01, 9.0526715e-03, 2.2641101e-03, 4.8766597e-04,
2.8800720e-03, 2.2714981e-03, 8.3753867e-03, 4.0439744e-03, 2.9759688e-04]], 2)
# ## TensorFlow Serving
# Install [Docker](https://docs.docker.com/install/) if you don't have it already. Then run:
#
# ```bash
# docker pull tensorflow/serving
#
# export ML_PATH=$HOME/ml # or wherever this project is
# docker run -it --rm -p 8500:8500 -p 8501:8501 \
# -v "$ML_PATH/my_mnist_model:/models/my_mnist_model" \
# -e MODEL_NAME=my_mnist_model \
# tensorflow/serving
# ```
# Once you are finished using it, press Ctrl-C to shut down the server.
# Alternatively, if `tensorflow_model_server` is installed (e.g., if you are running this notebook in Colab), then the following 3 cells will start the server:
os.environ["MODEL_DIR"] = os.path.split(os.path.abspath(model_path))[0]
# + magic_args="--bg" language="bash"
# nohup tensorflow_model_server \
# --rest_api_port=8501 \
# --model_name=my_mnist_model \
# --model_base_path="${MODEL_DIR}" >server.log 2>&1
# -
# !tail server.log
# +
import json
input_data_json = json.dumps({
"signature_name": "serving_default",
"instances": X_new.tolist(),
})
# -
repr(input_data_json)[:1500] + "..."
# Now let's use TensorFlow Serving's REST API to make predictions:
# +
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status() # raise an exception in case of error
response = response.json()
# -
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
# ### Using the gRPC API
# +
from tensorflow_serving.apis.predict_pb2 import PredictRequest
request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))
# +
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)
# -
response
# Convert the response to a tensor:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)
# Or to a NumPy array if your client does not include the TensorFlow library:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
y_proba.round(2)
# ## Deploying a new model version
# +
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
# -
model_version = "0002"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
# **Warning**: You may need to wait a minute before the new model is loaded by TensorFlow Serving.
# +
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status()
response = response.json()
# -
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
# # Deploy the model to Google Cloud AI Platform
# Follow the instructions in the book to deploy the model to Google Cloud AI Platform, download the service account's private key and save it to the `my_service_account_private_key.json` in the project directory. Also, update the `project_id`:
project_id = "onyx-smoke-242003"
# +
import googleapiclient.discovery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "my_service_account_private_key.json"
model_id = "my_mnist_model"
model_path = "projects/{}/models/{}".format(project_id, model_id)
model_path += "/versions/v0001/" # if you want to run a specific version
ml_resource = googleapiclient.discovery.build("ml", "v1").projects()
# -
def predict(X):
    """Query the deployed AI Platform model with the instances in X.

    Builds a ``serving_default`` request body from X (converted to plain
    Python lists), executes it, and raises RuntimeError when the service
    reports an error. Returns one prediction row per instance as a NumPy
    array.

    NOTE(review): relies on the module-level ``ml_resource``, ``model_path``
    and ``output_name`` variables defined earlier in the notebook.
    """
    body = {
        "signature_name": "serving_default",
        "instances": X.tolist(),
    }
    response = ml_resource.predict(name=model_path, body=body).execute()
    if "error" in response:
        raise RuntimeError(response["error"])
    predictions = response["predictions"]
    return np.array([row[output_name] for row in predictions])
Y_probas = predict(X_new)
np.round(Y_probas, 2)
# # Using GPUs
# **Note**: `tf.test.is_gpu_available()` is deprecated. Instead, please use `tf.config.list_physical_devices('GPU')`.
#tf.test.is_gpu_available() # deprecated
tf.config.list_physical_devices('GPU')
tf.test.gpu_device_name()
tf.test.is_built_with_cuda()
# +
from tensorflow.python.client.device_lib import list_local_devices
devices = list_local_devices()
devices
# -
# # Distributed Training
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
def create_model():
    """Return a fresh, uncompiled Keras CNN for 28x28x1 MNIST images.

    Architecture: a 64-filter conv stage and a double 128-filter conv stage,
    each followed by max-pooling, then a dense head with dropout and a
    10-way softmax output.
    """
    architecture = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation="relu"),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation="softmax"),
    ]
    return keras.models.Sequential(architecture)
batch_size = 100
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
distribution = tf.distribute.MirroredStrategy()
# Change the default all-reduce algorithm:
#distribution = tf.distribute.MirroredStrategy(
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# Specify the list of GPUs to use:
#distribution = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
# Use the central storage strategy instead:
#distribution = tf.distribute.experimental.CentralStorageStrategy()
#if IS_COLAB and "COLAB_TPU_ADDR" in os.environ:
# tpu_address = "grpc://" + os.environ["COLAB_TPU_ADDR"]
#else:
# tpu_address = ""
#resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_address)
#tf.config.experimental_connect_to_cluster(resolver)
#tf.tpu.experimental.initialize_tpu_system(resolver)
#distribution = tf.distribute.experimental.TPUStrategy(resolver)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
metrics=["accuracy"])
# -
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
model.predict(X_new)
# Custom training loop:
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
distribution = tf.distribute.MirroredStrategy()
with distribution.scope():
model = create_model()
optimizer = keras.optimizers.SGD()
with distribution.scope():
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)
input_iterator = distribution.make_dataset_iterator(dataset)
@tf.function
def train_step():
    """Run one distributed training step and return the global mean loss.

    Pulls one batch per replica from the module-level ``input_iterator``,
    runs ``step_fn`` on every replica under the mirrored strategy, and sums
    the per-replica losses into a single scalar.
    """
    def step_fn(inputs):
        # Per-replica work: forward pass, loss, and one optimizer update.
        X, y = inputs
        with tf.GradientTape() as tape:
            Y_proba = model(X)
            # Divide by the GLOBAL batch size so that summing the
            # per-replica losses below yields the mean over the full batch.
            loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss
    # Execute step_fn on every replica, then reduce the per-replica losses.
    per_replica_losses = distribution.experimental_run(step_fn, input_iterator)
    mean_loss = distribution.reduce(tf.distribute.ReduceOp.SUM,
                                    per_replica_losses, axis=None)
    return mean_loss
n_epochs = 10
with distribution.scope():
input_iterator.initialize()
for epoch in range(n_epochs):
print("Epoch {}/{}".format(epoch + 1, n_epochs))
for iteration in range(len(X_train) // batch_size):
print("\rLoss: {:.3f}".format(train_step().numpy()), end="")
print()
# -
# ## Training across multiple servers
# A TensorFlow cluster is a group of TensorFlow processes running in parallel, usually on different machines, and talking to each other to complete some work, for example training or executing a neural network. Each TF process in the cluster is called a "task" (or a "TF server"). It has an IP address, a port, and a type (also called its role or its job). The type can be `"worker"`, `"chief"`, `"ps"` (parameter server) or `"evaluator"`:
# * Each **worker** performs computations, usually on a machine with one or more GPUs.
# * The **chief** performs computations as well, but it also handles extra work such as writing TensorBoard logs or saving checkpoints. There is a single chief in a cluster, typically the first worker (i.e., worker #0).
# * A **parameter server** (ps) only keeps track of variable values, it is usually on a CPU-only machine.
# * The **evaluator** obviously takes care of evaluation. There is usually a single evaluator in a cluster.
#
# The set of tasks that share the same type is often called a "job". For example, the "worker" job is the set of all workers.
#
# To start a TensorFlow cluster, you must first define it. This means specifying all the tasks (IP address, TCP port, and type). For example, the following cluster specification defines a cluster with 3 tasks (2 workers and 1 parameter server). It's a dictionary with one key per job, and the values are lists of task addresses:
cluster_spec = {
"worker": [
"machine-a.example.com:2222", # /job:worker/task:0
"machine-b.example.com:2222" # /job:worker/task:1
],
"ps": ["machine-c.example.com:2222"] # /job:ps/task:0
}
# Every task in the cluster may communicate with every other task in the server, so make sure to configure your firewall to authorize all communications between these machines on these ports (it's usually simpler if you use the same port on every machine).
#
# When a task is started, it needs to be told which one it is: its type and index (the task index is also called the task id). A common way to specify everything at once (both the cluster spec and the current task's type and id) is to set the `TF_CONFIG` environment variable before starting the program. It must be a JSON-encoded dictionary containing a cluster specification (under the `"cluster"` key), and the type and index of the task to start (under the `"task"` key). For example, the following `TF_CONFIG` environment variable defines the same cluster as above, with 2 workers and 1 parameter server, and specifies that the task to start is worker #1:
# +
import os
import json
os.environ["TF_CONFIG"] = json.dumps({
"cluster": cluster_spec,
"task": {"type": "worker", "index": 1}
})
os.environ["TF_CONFIG"]
# -
# Some platforms (e.g., Google Cloud ML Engine) automatically set this environment variable for you.
# TensorFlow's `TFConfigClusterResolver` class reads the cluster configuration from this environment variable:
# +
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
resolver.cluster_spec()
# -
resolver.task_type
resolver.task_id
# Now let's run a simpler cluster with just two worker tasks, both running on the local machine. We will use the `MultiWorkerMirroredStrategy` to train a model across these two tasks.
#
# The first step is to write the training code. As this code will be used to run both workers, each in its own process, we write this code to a separate Python file, `my_mnist_multiworker_task.py`. The code is relatively straightforward, but there are a couple important things to note:
# * We create the `MultiWorkerMirroredStrategy` before doing anything else with TensorFlow.
# * Only one of the workers will take care of logging to TensorBoard and saving checkpoints. As mentioned earlier, this worker is called the *chief*, and by convention it is usually worker #0.
# +
# %%writefile my_mnist_multiworker_task.py
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import time
# At the beginning of the program
distribution = tf.distribute.MultiWorkerMirroredStrategy()
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
print("Starting task {}{}".format(resolver.task_type, resolver.task_id))
# Only worker #0 will write checkpoints and log to TensorBoard
if resolver.task_id == 0:
root_logdir = os.path.join(os.curdir, "my_mnist_multiworker_logs")
run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
run_dir = os.path.join(root_logdir, run_id)
callbacks = [
keras.callbacks.TensorBoard(run_dir),
keras.callbacks.ModelCheckpoint("my_mnist_multiworker_model.h5",
save_best_only=True),
]
else:
callbacks = []
# Load and prepare the MNIST dataset
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis] / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
with distribution.scope():
model = keras.models.Sequential([
keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
padding="same", input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, validation_data=(X_valid, y_valid),
epochs=10, callbacks=callbacks)
# -
# In a real world application, there would typically be a single worker per machine, but in this example we're running both workers on the same machine, so they will both try to use all the available GPU RAM (if this machine has a GPU), and this will likely lead to an Out-Of-Memory (OOM) error. To avoid this, we could use the `CUDA_VISIBLE_DEVICES` environment variable to assign a different GPU to each worker. Alternatively, we can simply disable GPU support, like this:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# We are now ready to start both workers, each in its own process, using Python's `subprocess` module. Before we start each process, we need to set the `TF_CONFIG` environment variable appropriately, changing only the task index:
# +
import subprocess
cluster_spec = {"worker": ["127.0.0.1:9901", "127.0.0.1:9902"]}
for index, worker_address in enumerate(cluster_spec["worker"]):
os.environ["TF_CONFIG"] = json.dumps({
"cluster": cluster_spec,
"task": {"type": "worker", "index": index}
})
subprocess.Popen("python my_mnist_multiworker_task.py", shell=True)
# -
# That's it! Our TensorFlow cluster is now running, but we can't see it in this notebook because it's running in separate processes (but if you are running this notebook in Jupyter, you can see the worker logs in Jupyter's server logs).
#
# Since the chief (worker #0) is writing to TensorBoard, we use TensorBoard to view the training progress. Run the following cell, then click on the settings button (i.e., the gear icon) in the TensorBoard interface and check the "Reload data" box to make TensorBoard automatically refresh every 30s. Once the first epoch of training is finished (which may take a few minutes), and once TensorBoard refreshes, the SCALARS tab will appear. Click on this tab to view the progress of the model's training and validation accuracy.
# %load_ext tensorboard
# %tensorboard --logdir=./my_mnist_multiworker_logs --port=6006
# That's it! Once training is over, the best checkpoint of the model will be available in the `my_mnist_multiworker_model.h5` file. You can load it using `keras.models.load_model()` and use it for predictions, as usual:
# +
from tensorflow import keras
model = keras.models.load_model("my_mnist_multiworker_model.h5")
Y_pred = model.predict(X_new)
np.argmax(Y_pred, axis=-1)
# -
# And that's all for today! Hope you found this useful. 😊
# # Exercise Solutions
# ## 1. to 8.
#
# See Appendix A.
# ## 9.
# _Exercise: Train a model (any model you like) and deploy it to TF Serving or Google Cloud AI Platform. Write the client code to query it using the REST API or the gRPC API. Update the model and deploy the new version. Your client code will now query the new version. Roll back to the first version._
# Please follow the steps in the <a href="#Deploying-TensorFlow-models-to-TensorFlow-Serving-(TFS)">Deploying TensorFlow models to TensorFlow Serving</a> section above.
# # 10.
# _Exercise: Train any model across multiple GPUs on the same machine using the `MirroredStrategy` (if you do not have access to GPUs, you can use Colaboratory with a GPU Runtime and create two virtual GPUs). Train the model again using the `CentralStorageStrategy `and compare the training time._
# Please follow the steps in the [Distributed Training](#Distributed-Training) section above.
# # 11.
# _Exercise: Train a small model on Google Cloud AI Platform, using black box hyperparameter tuning._
# Please follow the instructions on pages 716-717 of the book. You can also read [this documentation page](https://cloud.google.com/ai-platform/training/docs/hyperparameter-tuning-overview) and go through the example in this nice [blog post](https://towardsdatascience.com/how-to-do-bayesian-hyper-parameter-tuning-on-a-blackbox-model-882009552c6d) by <NAME>.
| 19_training_and_deploying_at_scale.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Ruby 3.0.0
# language: ruby
# name: ruby
# ---
# ## Inspect the Portfolio
# +
require_relative "../lib/iruby_init"
# -
# Inspect the paper trading accounts
puts G.advisor.to_human
puts "-----------------"
puts G.clients.to_human
# There are 5 active accounts.
#
# Most interesting: What is the Net-Liquidation-Value
#
# * List all possible AccountValue-keys
# +
G.get_account_data
G.clients.last.account_data_scan( /NetLiq/ ).key # => account_data_scan( /NetLiquidation$/ ).value }
# -
# We are interested in :NetLiquidation only
net_liquidation = G.clients.last.account_data_scan( /NetLiquidation$/ ).first
puts net_liquidation.value.to_f
# Next: calculate the margin utilisation
# List every account-value key mentioning "Margin" (helps pick the right key).
possible_margin_keys = G.clients.last.account_data_scan( /Margin/ ).key
# +
# NOTE(review): `.value.to_f.max` looks suspicious -- `to_f` yields a single
# Float, which does not respond to `max`; presumably the scan returns a
# collection here. Confirm against the library's account_data_scan API.
margin = G.clients.last.account_data_scan( /FullInitMargin/ ).value.to_f.max
puts " Margin: #{margin} \n Net Liquidation: #{net_liquidation.value.to_f} #{net_liquidation.currency}"
# NOTE(review): this prints net-liquidation / margin and labels it "%".
# Margin utilisation is conventionally margin / net-liquidation * 100 --
# verify the intended formula before trusting this number.
puts " Margin Utilisation = #{(net_liquidation.value.to_f / margin).round(2) } %"
# -
# Next: List all Positions
puts G.clients.map{|c| [c.account, c.portfolio_values.as_table]}
| account/inspect_portfolio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Protik Real Estate ~ Price Predictor
import pandas as pd
housing= pd.read_csv("data.csv")
housing.head()
housing.info()
housing['CHAS'].value_counts()
housing.describe()
# %matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50,figsize=(20,15))
# # Train-Test splitting
# for learning purposes only
import numpy as np
def split_train_test(data, test_ratio, random_state=None):
    """Randomly split a DataFrame into train and test subsets.

    Parameters
    ----------
    data : pandas.DataFrame
        The full dataset to split.
    test_ratio : float
        Fraction of the rows (between 0 and 1) to place in the test set.
    random_state : int or None, optional
        Seed for the shuffle. Pass an int to make the split reproducible
        across runs; the default (None) keeps the original behaviour of
        producing a different split on every call.

    Returns
    -------
    tuple of pandas.DataFrame
        ``(train_set, test_set)`` with disjoint rows covering all of data.
    """
    rng = np.random.RandomState(random_state)
    shuffled = rng.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled[:test_set_size]
    train_indices = shuffled[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set= split_train_test(housing, 0.2)
print(f"Rows in train set : {len(train_set)} \nRows in test set : {len(test_set)}")
from sklearn.model_selection import train_test_split
train_set,test_set= train_test_split(housing,test_size=0.2, random_state=42)
print(f"Rows in train set : {len(train_set)} \nRows in test set : {len(test_set)}")
from sklearn.model_selection import StratifiedShuffleSplit
split=StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing,housing['CHAS']):
strat_train_set= housing.loc[train_index]
strat_test_set= housing.loc[test_index]
strat_test_set['CHAS'].value_counts()
strat_train_set['CHAS'].value_counts()
# # looking for correlation
corr_matrix= housing.corr()
corr_matrix['MEDV'].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes=["MEDV", "RM", "ZN", "LSTAT"]
scatter_matrix(housing[attributes], figsize=(12,8))
housing.plot(kind="scatter", x="RM", y="MEDV", alpha=0.8)
# # Trying out attribute combinations
housing["TAXRM"]= housing['TAX']/housing['RM']
housing.head()
corr_matrix= housing.corr()
corr_matrix['MEDV'].sort_values(ascending=False)
housing.plot(kind="scatter", x="TAXRM", y="MEDV", alpha=0.8)
housing = strat_train_set.drop("MEDV", axis=1)
housing_lebels= strat_train_set["MEDV"].copy()
# # Creating pipeline
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
my_pipeline= Pipeline([
('imputer', SimpleImputer(strategy="median")),
('std_scaler', StandardScaler())
])
# Work on the TRAINING features only. The original `strat_train_set.copy()`
# here silently put the target column MEDV back into the feature matrix
# (undoing the drop done earlier), leaking the label into the model's
# inputs -- visible later as a 14-column feature vector for a 13-feature
# dataset.
housing = strat_train_set.drop("MEDV", axis=1)
# should be done after the train/test split, so statistics come from the
# training portion only
housing.describe()
housing_tr = my_pipeline.fit_transform(housing)
housing_tr  # it's a numpy array
# # Selecting a desired model for Protik Real Estate
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
model= RandomForestRegressor()
# model=LinearRegression()
# model= DecisionTreeRegressor()
model.fit(housing_tr, housing_lebels)
some_data= housing.iloc[:5]
some_lebels =housing_lebels.iloc[:5]
prepared_data= my_pipeline.transform(some_data)
model.predict(prepared_data)
some_lebels
# # Evaluating the model
from sklearn.metrics import mean_squared_error
housing_predictions= model.predict(housing_tr)
mse= mean_squared_error(housing_lebels, housing_predictions)
rmse= np.sqrt(mse)
rmse
# # Using better evaluation technique - Cross validation
from sklearn.model_selection import cross_val_score
scores= cross_val_score(model, housing_tr, housing_lebels, scoring="neg_mean_squared_error", cv=10)
rmse_scores= np.sqrt(-scores)
rmse_scores
housing_tr.shape
def print_scores(scores):
    """Print cross-validation scores plus their mean and standard deviation.

    Parameters
    ----------
    scores : numpy.ndarray
        Array of (RMSE) scores, one per cross-validation fold.
    """
    for label, value in (("Scores", scores),
                         ("Mean", scores.mean()),
                         ("Standard deviation", scores.std())):
        print(label, ": ", value)
print_scores(rmse_scores)
# # Saving the model
from joblib import dump,load
dump(model,'Protik.joblib')
# ## Testing the model on test data
x_test = strat_test_set.drop("MEDV", axis=1)
y_test = strat_test_set["MEDV"].copy()
# Use transform, NOT fit_transform: the pipeline must keep the imputation
# medians and scaling statistics learned from the training data. Re-fitting
# on the test set leaks test-set statistics into preprocessing.
x_test_prepared = my_pipeline.transform(x_test)
# Do NOT re-fit the model on the test set (the original called
# model.fit(x_test_prepared, y_test) here) -- that trains on the data we are
# about to evaluate on and makes the RMSE meaningless. Only predict.
final_predictions = model.predict(x_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_rmse
print(final_predictions, list(y_test))
# # Model Using for prediction
#first have to load the file of model
import numpy as np
from joblib import dump,load
# reload the model persisted earlier with dump()
model=load('Protik.joblib')
#generating a data entries for testing or checking model
prepared_data[0]
# one hand-crafted, already-scaled feature row (same 14-feature layout that the
# preprocessing pipeline produces; compare with prepared_data[0] above)
features= np.array([[-0.43942006, 3.12628155, -1.12165014, -0.27288841, -1.42262747,
                     -0.24141041, -1.31238772, 2.61111401, -1.0016859 , -0.5778192 ,
                     -0.97491834, 0.41164221, -0.86091034, -0.06501087]])
model.predict(features)
| Protik Real Estate Co. Price prediction project/Protik Real Estate Co. Price prediction project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/01_leafmap_intro.ipynb)
# [](https://gishub.org/leafmap-binder)
#
# Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
# +
# # !pip install leafmap
# -
import os
from leafmap import leafmap
# **Using local files**
#
# Download the sample png from https://i.imgur.com/06Q1fSz.png to your `Downloads` folder.
# fetch the sample image once and cache it next to the notebook
filepath = '06Q1fSz.png'
if not os.path.exists(filepath):
    leafmap.download_from_url("https://i.imgur.com/06Q1fSz.png", filepath)
# +
# overlay the local image on the map within (south-west, north-east) lat/lon bounds
m = leafmap.Map(center=(25, -115), zoom=4)
image = leafmap.ImageOverlay(
    url=filepath,
    bounds=((13, -130), (32, -100))
)
m.add_layer(image);
m
# -
# **Using remote files**
# +
# same overlay, but loading the image straight from the remote URL
m = leafmap.Map(center=(25, -115), zoom=4)
image = leafmap.ImageOverlay(
    url="https://i.imgur.com/06Q1fSz.png",
    bounds=((13, -130), (32, -100))
)
m.add_layer(image);
m
# -
# Update image url
# assigning a new url swaps the displayed overlay in place
image.url = "https://i.imgur.com/J9qCf4E.png"
| examples/notebooks/33_image_overlay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.4 ('BU_work')
# language: python
# name: python3
# ---
# # <span style="color:#bce35b;font-size:35px"><b>Week 9 & 10</b></span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <span style="color:#8c8c8c"><NAME></span>
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
# ***
# ***
# Importing libraries
# +
from datetime import date, datetime as dttm, timedelta
from pprint import pprint
# from contextlib import closing
from dask import dataframe as dd, bag as db, array as da
# import sqlite3
# from fuzzywuzzy import fuzz, process
# import lxml
from bs4 import BeautifulSoup
# import sys
# import scipy
# import io
# import math
import re
# from itertools import permutations, dropwhile, zip_longest
# from collections import deque as deq
import requests
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
import os
import keyring
from http import HTTPStatus
import API_Keys
from IPython.display import Image
import tweepy
# import seaborn as sns
# Kaggle credentials: the username is public, the API key is fetched from the
# OS keyring so it never appears in the notebook
os.environ['KAGGLE_USERNAME'] = 'hakunapatata'
os.environ['KAGGLE_KEY'] = keyring.get_password(
    service_name='kaggle_api', username=os.environ['KAGGLE_USERNAME'])
# %matplotlib inline
# matplotlib default settings
plt.style.use('dark_background')
mpl.rcParams.update({'lines.linewidth': 3})
mpl.rcParams.update({'axes.labelsize': 14})
mpl.rcParams.update({'axes.titlesize': 16})
mpl.rcParams.update({'axes.titleweight': 'bold'})
mpl.rcParams.update({'figure.autolayout': True})
mpl.rcParams.update(
    {'axes.grid': True, 'grid.color': '#424242', 'grid.linestyle': '--'})
# creation random number generator object
rng_seed = 777
rng = np.random.default_rng(rng_seed)
# pandas dataframe options
# show every column when displaying dataframes
pd.set_option('display.max_columns', None)
# -
# see list of current settings
# plt.rcParams.keys()
# <br>
#
# Import custom module <br><br>
# <span style="font-size:13px">~/Bellevue_University/Python/HakunaPatata.py</span>
# +
# Locate the "Bellevue_University" directory in the cwd path and build paths
# relative to it. Join with os.sep (the original hard-coded "\\" only worked
# on Windows even though the split used os.sep).
rel_path_parts = os.getcwd().split(os.sep)
if "Bellevue_University" in rel_path_parts:
    # keep everything up to and including the project root
    rel_path_parts = rel_path_parts[:rel_path_parts.index("Bellevue_University") + 1]
rel_path = os.sep.join(rel_path_parts)
custom_mod_path = rel_path + os.sep + "Python"
class_path = rel_path + os.sep + "DSC 540 - Data Preparation"
os.chdir(custom_mod_path) # change cwd to directory with custom Python module (~/Bellevue_University/Python/HakunaPatata.py)
import HakunaPatata as HP
os.chdir(class_path) # once imported, change back to class working directory.
# -
# <br>
#
# ***
#
# # <span style="color:#bce35b">Week Activities</span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <!-- <span style="color:#8c8c8c"><NAME></span> -->
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
# <br>
#
# ***
#
# ## <span style="color:#ae8bd5">Activity 9</span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <!-- <span style="color:#8c8c8c"><NAME></span> -->
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
#
# Top 100 eBooks from Gutenberg (previous day)
# +
url = r'https://www.gutenberg.org/browse/scores/top'
# use a Session so the TCP connection can be reused for follow-up requests
s = requests.Session()
r = s.get(url, timeout=10)
print(f"Connection status: {HTTPStatus(r.status_code).phrase}")
# -
# <br>
#
# Looks like I need to pull all of the list elements under the h2 (header#2) tag where the id for the h2 tag is 'books-last1'. Then get the child \<li> tags for the parent \<ol> tag which is a sibling to the \<h2> tag.
# +
soup = BeautifulSoup(r.text, 'lxml')
# "top 100 yesterday" list: the <ol> that is the next sibling of <h2 id="books-last1">
top100_yesterday_li = soup.find('h2', id="books-last1").find_next_sibling('ol').find_all('li')
top100_yesterday = []
for rank, li in enumerate(top100_yesterday_li, start=1):
    # each <li> anchor looks like "Title (12345)" with href ending in the book number
    book_num = re.search(r'\d+', li.a.get('href')).group()
    book_name = re.sub(r'\(\d+\)', '', li.a.text)
    book_link = f"https://www.gutenberg.org/ebooks/{book_num}"
    top100_yesterday.append( (rank, book_num, book_name, book_link) )
top_100_yesterday_df = pd.DataFrame(top100_yesterday, columns=['RANK','BOOK_NUM','BOOK_NAME','BOOK_LINK'], ).set_index('RANK')
top_100_yesterday_df
# -
# <br>
#
# ***
#
# ## <span style="color:#ae8bd5">Activity 10</span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <!-- <span style="color:#8c8c8c"><NAME></span> -->
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
#
# Building Movie Database API
# +
def search_movie(api_key, movie_title, media_type='movie', plot_output='short', year=None):
    """Query the OMDb API for a title, pretty-print every field of the
    response, and return the poster as an IPython Image.

    Parameters:
        api_key: OMDb API key.
        movie_title: title to search for (OMDb 't' parameter).
        media_type: 'movie', 'series' or 'episode'.
        plot_output: 'short' or 'full' plot text.
        year: optional release year filter.
    """
    url = f"http://www.omdbapi.com/"
    payload = {
        'apikey':api_key
        ,'t':f"{movie_title}"
        ,'type':media_type
        ,'plot':plot_output
        ,'y':year
    }
    s = requests.Session()
    r = s.get(url, timeout=10, params=payload)
    print(f"Connection status: {HTTPStatus(r.status_code).phrase}")
    json_dict = r.json()
    for k in json_dict.keys():
        if k.upper() == 'RATINGS':
            # Ratings is a list of {'Source': ..., 'Value': ...} dicts
            print(f"{k.upper()}")
            for rating in json_dict[k]:
                print(f"\t- {rating['Source']}: {rating['Value']}")
            print(f"")
        else:
            print(f"{k.upper()}: {json_dict[k]}")
    # reuse the already-parsed payload instead of re-parsing the response body
    return Image(json_dict['Poster'])
# -
search_movie(api_key=API_Keys.OMDb_api_key, movie_title='Napoleon Dynamite')
# <br>
#
# ***
#
# ## <span style="color:#ae8bd5">Connect to the Twitter API</span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <!-- <span style="color:#8c8c8c"><NAME></span> -->
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
def get_username_tweets(bearer_token, username, num_tweets=10, exclude=None, tweet_fields=None):
    """Look up a Twitter user by handle and print their recent tweets.

    Parameters:
        bearer_token: Twitter API v2 bearer token.
        username: handle to look up (without the '@').
        num_tweets: number of tweets to request.
        exclude: optional list such as ['retweets', 'replies'].
        tweet_fields: optional extra tweet fields to request.
    """
    client = tweepy.Client(bearer_token=bearer_token)
    user_search = client.get_user(username=username).data
    user_id = user_search.id
    user_name = user_search.name
    user_username = user_search.username
    # make sure 'created_at' is always requested so it can be printed below
    if tweet_fields is None:
        tweet_fields = ['created_at']
    elif 'created_at' not in tweet_fields:
        # BUG FIX: list.append returns None, so the original
        # "tweet_fields = tweet_fields.append('created_at')" wiped the list.
        tweet_fields.append('created_at')
    tweets = client.get_users_tweets(
        id=user_id, max_results=num_tweets, exclude=exclude, tweet_fields=tweet_fields)
    print(f"""
{'='*80}
USER: {user_name}
USER_NAME: {user_username}
USER_ID: {user_id}
{'='*80}""")
    for tweet in tweets.data:
        print(f"""
Tweet ID: {tweet.id}
Created: {tweet.created_at}
{tweet.text}
{'_'*80}""")
# +
bearer_token = API_Keys.Twitter_bearer_token
search = 'Eagles'
# pull the 15 most recent original tweets (no retweets/replies) for the handle
get_username_tweets(bearer_token=bearer_token, username=search, num_tweets=15, exclude=['retweets','replies'])
# -
# <br>
#
# ***
#
# ## <span style="color:#ae8bd5">Metropolitan Museum Visualizations</span>
# <!-- # <span style="color:#bce35b;font-size:35px">Title<b>Title</b></span> -->
# <!-- # <span style="color:#bce35b">H1</span> -->
# <!-- ## <span style="color:#ae8bd5">H2</span> -->
# <!-- ### <span style="color:#9c4957">H3</span> -->
# <!-- #### <span style="color:white">H4</span> -->
#
# <!-- <span style="color:#8c8c8c"><NAME></span> -->
#
# [Green]: <> (#bce35b)
# [Purple]: <> (#ae8bd5)
# [Coral]: <> (#9c4957)
# [Grey]: <> (#8c8c8c)
# +
# SQLite database built in earlier weeks; list its tables first
db_path = 'HakunaPatata.db'
HP.sqlite_tables(db_path=db_path)
# +
# Pull Met Museum objects with a usable accession year and a begin date that is
# a real year (>= 0 and before the current year). The nested CASEs normalise the
# free-text date columns into integer year columns.
sql_txt = r"""
SELECT * FROM (
SELECT
MM."Object Number" AS OBJECT_NUMBER
, MM."AccessionYear" AS ACCESSION_YEAR
, MM."Object Name" AS OBJECT_NAME
, MM."Title" AS TITLE
, MM."Object Date" AS OBJECT_DATE
, MM."Object Begin Date" AS OBJECT_BEGIN_DATE
, CASE
WHEN SUBSTR('0000' || RTRIM("MM"."Object Begin Date"), LENGTH('0000' || RTRIM("MM"."Object Begin Date"))-3, 4) = '0000' THEN 0
ELSE CASE
WHEN CAST(RTRIM("MM"."Object Begin Date") AS INT) = 0 THEN NULL
ELSE CAST(RTRIM("MM"."Object Begin Date") AS INT)
END
END AS "OBJECT_BEGIN_DATE_NUM"
, MM."Object End Date" AS OBJECT_END_DATE
, CASE
WHEN SUBSTR('0000' || RTRIM("MM"."Object End Date"), LENGTH('0000' || RTRIM("MM"."Object End Date"))-3, 4) = '0000' THEN 0
ELSE CASE
WHEN CAST(RTRIM("MM"."Object End Date") AS INT) = 0 THEN NULL
ELSE CAST(RTRIM("MM"."Object End Date") AS INT)
END
END AS "OBJECT_END_DATE_NUM"
, MM."Artist Alpha Sort" AS ARTIST_NAME
, MM."Artist Gender" AS ARTIST_GENDER
, MM."Artist Nationality" AS ARTIST_NATIONALITY
, MM."Credit Line" AS CREDIT_LINE
, MM."Dimensions" AS DIMENSIONS
, MM."Medium" AS MEDIUM
, MM."Classification" AS CLASSIFICATION
, MM."Department" AS DEPARTMENT
, MM."Culture" AS CULTURE
FROM MET_MUSEUM AS MM
WHERE
MM."AccessionYear" <> 'None' AND REPLACE(MM."AccessionYear", ' ', '')<>'' -- should exclude Null values too
) "MAIN"
WHERE
"MAIN".OBJECT_BEGIN_DATE >= 0 AND "MAIN".OBJECT_BEGIN_DATE < CAST(STRFTIME('%Y', (DATE('now'))) AS INT)
"""
mm_df = HP.sql_to_df(db_path, sql_txt=sql_txt)
# -
# <br>
#
# Line plot of art pieces:
# totals per object begin year as a line plot
mm_df.groupby('OBJECT_BEGIN_DATE_NUM')['OBJECT_BEGIN_DATE_NUM'].count().plot(xlabel='Object Begin Year', ylabel='Total', title='Total Objects by Begin Year')
plt.show()
# <br>
#
# Bar chart of totals by classification
# top-15 classifications, re-sorted ascending so barh draws the largest on top
mm_df.groupby('CLASSIFICATION')['CLASSIFICATION'].count().sort_values(ascending=False).head(15).sort_values(
    ).plot.barh(title='Totals by Classification (top 15)', xlabel='Classification', ylabel='Total')
plt.show()
# <br>
#
# Histogram of frequency of totals by object begin year.
mm_df.groupby('OBJECT_BEGIN_DATE_NUM')['OBJECT_BEGIN_DATE_NUM'].count().hist()
plt.show()
# <br>
#
# Density Plot of frequency of totals by object begin year.
mm_df.groupby('OBJECT_BEGIN_DATE_NUM')['OBJECT_BEGIN_DATE_NUM'].count().plot(kind='kde')
plt.show()
| DSC 540 - Data Preparation/Week 9 & 10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/aviadr1/nbdev_learn/blob/master/nbdev_learn_00_setup.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# #NBDev
# following the tutorial on https://nbdev.fast.ai/tutorial
# NOTE(review): a bare "pip install" only works inside IPython/Jupyter via
# automagic; as a plain .py script this line is a SyntaxError. Consider
# "# !pip install nbdev --quiet" (as used in the other notebooks) or "%pip install".
pip install nbdev --quiet
# !git clone https://github.com/aviadr1/nbdev_learn.git
# %cd nbdev_learn/
# !nbdev_install_git_hooks
| nbdev_learn_00_setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from sklearn import datasets
import sklearn,sklearn2pmml
# load the iris dataset into feature / target dataframes
iris = datasets.load_iris()
iris_X = pd.DataFrame(iris.data,columns=iris.feature_names)
iris_y = pd.DataFrame(iris.target,columns=["species",])
# -
from sklearn.linear_model import LogisticRegression
from sklearn2pmml.pipeline import PMMLPipeline
# simple one-step PMML pipeline: a logistic-regression classifier on iris
classifier = LogisticRegression(random_state=123)
pipeline = PMMLPipeline([
    ("classifier", classifier)
])
pipeline.fit(iris_X, iris_y)
# FIX: "from sklearn.externals import joblib" was deprecated in sklearn 0.21
# and removed in 0.23; joblib is its own package (and a sklearn dependency),
# so import it directly. The dump format is unchanged.
import joblib
joblib.dump(pipeline, "../models/iris-pipeline.pkl.z")
| notebooks/train-iris-classifier-pmml-ksql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
import pandas as pd
# scrape report produced elsewhere: one row per site, one column per Alexa section
df = pd.read_csv('alexa_sections_report.csv')
df.head()
df.shape
df.describe()
df['similar_sites_by_audience_overlap'].value_counts(dropna=False)
# value distribution (including NaN) for every column
for col in df.columns:
    print(df[col].value_counts(dropna=False))
# site_metrics_* sub-columns: dump value distributions as dicts
for col in ['daily_pageviews_per_visitor', 'daily_pageviews_per_visitor_for_the_last_90_days', 'daily_time_on_site', 'daily_time_on_site_for_the_last_90_days', 'bounce_rate', 'bounce_rate_for_the_last_90_days', 'traffic_source_search', 'visited_just_before', 'visited_right_after', 'total_sites_linking_in']:
    print(col, df[f'site_metrics_{col}'].value_counts(dropna=False, sort=False).to_dict())
df[df['site_metrics_visited_right_after'].isna()].head()
# +
# raw per-site JSON blobs are cached in a local Redis instance
import redis
r = redis.Redis()
# +
import json
from pprint import pprint
# inspect one cached site record
pprint(json.loads(r.get('newspointread.com')))
# -
# every Alexa report section captured by the scraper
alexa_sections = [
    'comparison_metrics',
    'similar_sites_by_audience_overlap',
    'top_industry_topics_by_social_engagement',
    'top_keywords_by_traffic',
    'alexa_rank_90_days_trends',
    'keyword_gaps',
    'easy_to_rank_keywords',
    'buyer_keywords',
    'optimization_opportunities',
    'top_social_topics',
    'social_engagement',
    'popular_articles',
    'traffic_sources',
    'referral_sites',
    'top_keywords',
    'audience_overlap',
    'alexa_rank',
    'audience_geography_in_past_30_days',
    'site_metrics']
# presence/value distribution per top-level section
for section in alexa_sections:
    print(section, df[section].value_counts(dropna=False, sort=False).to_dict())
# FIX: a dangling fragment of a copy/pasted column list sat on its own line
# here and made this cell a SyntaxError; it has been removed.
# site_metrics_* sub-columns: dump value distributions as dicts
for col in ['daily_pageviews_per_visitor', 'daily_pageviews_per_visitor_for_the_last_90_days', 'daily_time_on_site', 'daily_time_on_site_for_the_last_90_days', 'bounce_rate', 'bounce_rate_for_the_last_90_days', 'traffic_source_search', 'visited_just_before', 'visited_right_after', 'total_sites_linking_in']:
    print(col, df[f'site_metrics_{col}'].value_counts(dropna=False, sort=False).to_dict())
# alexa_rank_* sub-columns
for col in ['site_rank', 'site_rank_over_past_90_days', 'three_month_rank_data', 'country_alexa_ranks']:
    print(col, df[f'alexa_rank_{col}'].value_counts(dropna=False, sort=False).to_dict())
# alexa_rank_90_days_trends_* sub-columns
for col in ['alexa_rank', 'time_on_site']:
    print(col, df[f'alexa_rank_90_days_trends_{col}'].value_counts(dropna=False, sort=False).to_dict())
| notebooks/alexa_section_distribution_report/alexa_section_report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Load libraries
import numpy as np;
import pandas as pd;
import seaborn as sns;
import matplotlib.pyplot as plt;
from sklearn.preprocessing import StandardScaler, LabelEncoder;
from sklearn.linear_model import LinearRegression, Lasso, Ridge;
from sklearn.model_selection import train_test_split, cross_val_score, KFold;
from sklearn.metrics import mean_squared_error;
import warnings;
# NOTE(review): 'max_column' relies on pandas option-name prefix matching;
# the canonical name is 'display.max_columns' — confirm on newer pandas.
pd.set_option('max_column', None);
warnings.filterwarnings('ignore');
# -
# ## 1. Load data
# Load dataset
data = pd.read_csv('Fish/fish.csv', sep=',');
# Shape the dataset
print(data.shape);
# Peek at the dataset
data.head()
# Informations
data.info()
# ## 2. EDA
# Histogram
data.hist(layout=(2, 3), figsize=(14, 8));
plt.show()
# Density
data.plot(kind='density', subplots=True, layout=(2,3), sharex=False, legend=True, fontsize=1, figsize=(12,8));
plt.show()
# Box and whisker plots
data.plot(kind='box', subplots=True, layout=(2, 3), sharex=False, fontsize=8, figsize=(12,8));
plt.show()
# Correlation matrix
# heatmap of pairwise Pearson correlations over the numeric columns
cols = data.columns.difference(['Species']);
fig = plt.figure(figsize=(10, 7));
ax = fig.add_subplot(111);
cax = ax.matshow(data[cols].corr(method='pearson'), vmin=-1, vmax=1);
fig.colorbar(cax);
ticks = np.arange(0, 6, 1);
ax.set_xticks(ticks);
ax.set_yticks(ticks);
ax.set_xticklabels(cols);
ax.set_yticklabels(cols);
plt.show()
# ## 3. Processing
# +
# Some processing
# -
# ## 4. Transformation
# Numerical
# NOTE(review): 'Date' and 'C6H6(GT)' are air-quality dataset columns, not
# fish.csv columns — looks copied from another notebook; verify against the CSV.
cols = data.columns.difference(['Date', 'C6H6(GT)'])
data[cols] = StandardScaler().fit_transform(data[cols])
data.head()
# ## 5. Split data & Fitting models
# Select main columns to be used in training
main_cols = data.columns.difference(['C6H6(GT)', 'Date']);
X = data[main_cols];
y = data['C6H6(GT)']
# +
# Split out test and validation dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42); # test
# X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.3, random_state=42); # validation
# Cross validation
kfold = KFold(n_splits=3, shuffle=True, random_state=21);
# +
# Fitting
# compare three linear models with 3-fold CV on the training split
linears = [];
linears.append(('LR', LinearRegression()));
linears.append(('RIDGE', Ridge()));
linears.append(('LASSO', Lasso()));
# Evaluate
for name, model in linears:
    # scoring is negative MSE, hence the sign flip when reporting
    scores = cross_val_score(model, X_train, y_train, scoring='neg_mean_squared_error', cv=kfold);
    print('%s : %.4f(%.4f)' % (name, -1 * scores.mean(), scores.std()));
# -
# ## 6. Evaluate on test data
# Fit the best model
mod = LinearRegression();
mod = mod.fit(X_train, y_train);
y_pred = mod.predict(X_test);
# Evaluate each model in turn with kfold
print('Score :', mean_squared_error(y_test, y_pred));
# Plotting predicted and true values
plt.figure(figsize=(17, 5))
plt.plot(np.arange(0, len(y_pred)), y_pred, 'o', linestyle='dashed', linewidth=1, markersize=5, label='Prediction')
plt.plot(np.arange(0, len(y_test)), y_test, 'o', linestyle='dashed', linewidth=1, markersize=5, label='True')
plt.legend()
plt.show()
# ## 7. Make persitent preprocessing data
data.to_csv('pre_air.csv', index=False);
| regression_problem/.ipynb_checkpoints/fish-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp models.RNNPlus
# -
# # RNNPlus
#
# > These are RNN, LSTM and GRU PyTorch implementations created by <NAME> - <EMAIL> based on:
# The idea of including a feature extractor to the RNN network comes from the solution developed by the UPSTAGE team (https://www.kaggle.com/songwonho,
# https://www.kaggle.com/limerobot and https://www.kaggle.com/jungikhyo).
# They finished in 3rd position in Kaggle's Google Brain - Ventilator Pressure Prediction competition. They used a Conv1d + Stacked LSTM architecture.
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.core import *
from tsai.models.layers import *
#export
class _RNN_Backbone(Module):
    """Backbone shared by RNNPlus/LSTMPlus/GRUPlus.

    Optionally embeds categorical channels and applies a feature extractor,
    then runs either one multi-layer RNN (uniform hidden sizes) or a manually
    stacked sequence of single-layer RNNs (varying hidden sizes).

    Shapes: [bs x c_in x seq_len] -> [bs x hidden_size * (1 + bidirectional) x seq_len]
    """
    def __init__(self, cell, c_in, c_out, seq_len=None, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0, bidirectional=False,
                 n_embeds=None, embed_dims=None, cat_pos=None, feature_extractor=None, init_weights=True):
        # NOTE(review): hidden_size is used as a sequence below (set()/indexing);
        # callers are expected to pass a list (see listify in _RNNPlus_Base).
        # No super().__init__() call — presumably tsai/fastai's Module handles
        # initialization via its metaclass; confirm.
        # Categorical embeddings
        if n_embeds is not None:
            self.to_cat_embed = MultiEmbeddding(c_in, n_embeds, embed_dims=embed_dims, cat_pos=cat_pos)
            if embed_dims is None:
                embed_dims = [emb_sz_rule(s) for s in n_embeds]
            # every embedded categorical channel is replaced by embed_dim channels
            c_in = c_in + sum(embed_dims) - len(n_embeds)
        else:
            self.to_cat_embed = nn.Identity()
        # Feature extractor
        if feature_extractor:
            if isinstance(feature_extractor, nn.Module): self.feature_extractor = feature_extractor
            else: self.feature_extractor = feature_extractor(c_in, seq_len)
            # probe with a dummy batch to discover the extractor's output shape
            c_in, seq_len = self._calculate_output_size(self.feature_extractor, c_in, seq_len)
        else:
            self.feature_extractor = nn.Identity()
        # RNN layers
        rnn_layers = []
        if len(set(hidden_size)) == 1:
            # uniform width: a single multi-layer cell is enough
            hidden_size = hidden_size[0]
            if n_layers == 1: rnn_dropout = 0  # torch warns if dropout is set with a single layer
            rnn_layers.append(cell(c_in, hidden_size, num_layers=n_layers, bias=bias, batch_first=True, dropout=rnn_dropout, bidirectional=bidirectional))
            rnn_layers.append(LSTMOutput()) # this selects just the output, and discards h_n, and c_n
        else:
            # varying widths: stack single-layer cells by hand
            for i in range(len(hidden_size)):
                input_size = c_in if i == 0 else hs * (1 + bidirectional)
                hs = hidden_size[i]
                rnn_layers.append(cell(input_size, hs, num_layers=1, bias=bias, batch_first=True, bidirectional=bidirectional))
                rnn_layers.append(LSTMOutput()) # this selects just the output, and discards h_n, and c_n
                if rnn_dropout and i < len(hidden_size) - 1: rnn_layers.append(nn.Dropout(rnn_dropout)) # add dropout to all layers except last
        self.rnn = nn.Sequential(*rnn_layers)
        self.transpose = Transpose(-1, -2, contiguous=True)
        if init_weights: self.apply(self._weights_init)

    def forward(self, x):
        x = self.to_cat_embed(x)
        x = self.feature_extractor(x)
        x = self.transpose(x) # [batch_size x n_vars x seq_len] --> [batch_size x seq_len x n_vars]
        x = self.rnn(x) # [batch_size x seq_len x hidden_size * (1 + bidirectional)]
        x = self.transpose(x) # [batch_size x hidden_size * (1 + bidirectional) x seq_len]
        return x

    def _weights_init(self, m):
        # same initialization as keras. Adapted from the initialization developed
        # by <NAME> (https://www.kaggle.com/junkoda) in this notebook
        # https://www.kaggle.com/junkoda/pytorch-lstm-with-tensorflow-like-initialization
        for name, params in m.named_parameters():
            if "weight_ih" in name:
                nn.init.xavier_normal_(params)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(params)
            elif 'bias_ih' in name:
                params.data.fill_(0)
                # Set forget-gate bias to 1
                n = params.size(0)
                params.data[(n // 4):(n // 2)].fill_(1)
            elif 'bias_hh' in name:
                params.data.fill_(0)

    @torch.no_grad()
    def _calculate_output_size(self, m, c_in, seq_len):
        # run a dummy batch through m to learn its (channels, seq_len) output
        xb = torch.randn(1, c_in, seq_len)
        c_in, seq_len = m(xb).shape[1:]
        return c_in, seq_len
# +
#export
class _RNNPlus_Base(nn.Sequential):
    """Sequential(backbone, head) wrapper; subclasses select the cell via _cell."""
    def __init__(self, c_in, c_out, seq_len=None, hidden_size=[100], n_layers=1, bias=True, rnn_dropout=0, bidirectional=False,
                 n_embeds=None, embed_dims=None, cat_pos=None, feature_extractor=None, fc_dropout=0., last_step=True, bn=False,
                 custom_head=None, y_range=None, init_weights=True):
        # flattening the full sequence (last_step=False) needs a known seq_len
        if not last_step: assert seq_len, 'you need to pass a seq_len value'

        # Backbone
        hidden_size = listify(hidden_size)
        backbone = _RNN_Backbone(self._cell, c_in, c_out, seq_len=seq_len, hidden_size=hidden_size, n_layers=n_layers,
                                 n_embeds=n_embeds, embed_dims=embed_dims, cat_pos=cat_pos, feature_extractor=feature_extractor,
                                 bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional, init_weights=init_weights)

        # Head
        # features entering the head = width of the last RNN layer (x2 if bidirectional)
        self.head_nf = hidden_size * (1 + bidirectional) if isinstance(hidden_size, Integral) else hidden_size[-1] * (1 + bidirectional)
        if custom_head:
            if isinstance(custom_head, nn.Module): head = custom_head
            else: head = custom_head(self.head_nf, c_out, seq_len)
        else: head = self.create_head(self.head_nf, c_out, seq_len, last_step=last_step, fc_dropout=fc_dropout, bn=bn, y_range=y_range)
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))

    def create_head(self, nf, c_out, seq_len, last_step=True, fc_dropout=0., bn=False, y_range=None):
        """Default head: last step (or flattened sequence) -> [BN] -> [dropout] -> linear -> [sigmoid range]."""
        if last_step:
            layers = [LastStep()]
        else:
            layers = [Flatten()]
            nf *= seq_len  # flattening multiplies the feature count by seq_len
        if bn: layers += [nn.BatchNorm1d(nf)]
        if fc_dropout: layers += [nn.Dropout(fc_dropout)]
        layers += [nn.Linear(nf, c_out)]
        if y_range: layers += [SigmoidRange(*y_range)]
        return nn.Sequential(*layers)


class RNNPlus(_RNNPlus_Base):
    _cell = nn.RNN   # vanilla RNN cell

class LSTMPlus(_RNNPlus_Base):
    _cell = nn.LSTM  # LSTM cell

class GRUPlus(_RNNPlus_Base):
    _cell = nn.GRU   # GRU cell
# -
bs = 16
c_in = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, c_in, seq_len)
# shape smoke tests: every variant must map [bs x c_in x seq_len] -> [bs x c_out]
test_eq(RNNPlus(c_in, c_out)(xb).shape, [bs, c_out])
test_eq(RNNPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
test_eq(RNNPlus(c_in, c_out, hidden_size=[100, 50, 10], bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
test_eq(RNNPlus(c_in, c_out, hidden_size=[100], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape,
        [bs, c_out])
test_eq(LSTMPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
test_eq(GRUPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
# last_step=False heads flatten the whole sequence, so seq_len is required
test_eq(RNNPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
test_eq(RNNPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
test_eq(RNNPlus(c_in, c_out, seq_len, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5, last_step=False)(xb).shape,
        [bs, c_out])
test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
test_eq(GRUPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
# with a MultiConv1d feature extractor and a custom per-step head
feature_extractor = MultiConv1d(c_in, kss=[1,3,5,7])
custom_head = nn.Sequential(Transpose(1,2), nn.Linear(8,8), nn.SELU(), nn.Linear(8, 1), Squeeze())
test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[32,16,8,4], bidirectional=True,
                 feature_extractor=feature_extractor, custom_head=custom_head)(xb).shape, [bs, seq_len])
feature_extractor = MultiConv1d(c_in, kss=[1,3,5,7], keep_original=True)
custom_head = nn.Sequential(Transpose(1,2), nn.Linear(8,8), nn.SELU(), nn.Linear(8, 1), Squeeze())
test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[32,16,8,4], bidirectional=True,
                 feature_extractor=feature_extractor, custom_head=custom_head)(xb).shape, [bs, seq_len])
# +
bs = 16
c_in = 3
seq_len = 12
c_out = 2
# channel 0 continuous; channels 1-2 categorical (3 and 5 levels respectively)
x1 = torch.rand(bs,1,seq_len)
x2 = torch.randint(0,3,(bs,1,seq_len))
x3 = torch.randint(0,5,(bs,1,seq_len))
xb = torch.cat([x1,x2,x3],1)
# exercise the alternative head factories
custom_head = partial(create_mlp_head, fc_dropout=0.5)
test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
custom_head = partial(create_pool_head, concat_pool=True, fc_dropout=0.5)
test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
custom_head = partial(create_pool_plus_head, fc_dropout=0.5)
test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
custom_head = partial(create_conv_head)
test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[100, 50], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True)(xb).shape, [bs, c_out])
# categorical embeddings for channels 1 and 2
n_embeds = [3, 5]
cat_pos = [1, 2]
custom_head = partial(create_conv_head)
m = LSTMPlus(c_in, c_out, seq_len, hidden_size=[100, 50], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, n_embeds=n_embeds, cat_pos=cat_pos)
test_eq(m(xb).shape, [bs, c_out])
# -
from tsai.data.all import *
from tsai.models.utils import *
# end-to-end sanity training runs on the NATOPS UCR dataset (1 epoch each)
dsid = 'NATOPS'
bs = 16
X, y, splits = get_UCR_data(dsid, return_split=False)
tfms = [None, [Categorize()]]
dls = get_ts_dls(X, y, tfms=tfms, splits=splits, bs=bs)
model = build_ts_model(LSTMPlus, dls=dls)
print(model[-1])
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, 3e-3)
model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False)
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, 3e-3)
custom_head = partial(create_pool_head, concat_pool=True)
model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False, custom_head=custom_head)
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, 3e-3)
custom_head = partial(create_pool_plus_head, concat_pool=True)
model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False, custom_head=custom_head)
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, 3e-3)
# print architecture + parameter count for each of the three variants
m = RNNPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
print(m)
print(total_params(m))
m(xb).shape
m = LSTMPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
print(m)
print(total_params(m))
m(xb).shape
m = GRUPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
print(m)
print(total_params(m))
m(xb).shape
#hide
# export this notebook to the tsai package sources (nbdev)
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
| nbs/105_models.RNNPlus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="BbwgR5UdNkkm"
# # Base Model Seq2Seq
#
# + colab={"base_uri": "https://localhost:8080/"} id="Q-iHU02C7fAj" outputId="118cc9b3-228a-4aee-cd32-3692a636d570"
import torch
import torch.nn as nn
import torch.optim as optim
import string
# !pip install unidecode
import unidecode
import numpy as np
import itertools
from collections import Counter
from tqdm import tqdm
# + [markdown] id="CTfRgwxmjv1B"
# # Building blocks
#
# First, we will define the encode and decoder
#
# <img src="" width="25%">
# + id="B7mLFyUG7kJH"
# index reserved for padding/blank; the dims below size the model components
NULL_INDEX = 0
encoder_dim = 1024
predictor_dim = 1024
joiner_dim = 1024
# + id="KE7j2T5EY33-"
from torch import nn
class Encoder(nn.Module):
    """LSTM encoder: embeds a source token sequence and returns the final
    hidden and cell states of a (possibly multi-layer) LSTM.
    """

    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        # src: [src_len, batch]
        emb = self.embedding(src)
        emb = self.dropout(emb)              # [src_len, batch, emb_dim]
        # only the final states are needed; per-step outputs are discarded
        _, (hidden, cell) = self.rnn(emb)
        # hidden/cell: [n_layers * n_directions, batch, hid_dim]
        return hidden, cell
# + id="Vlzca1orZDLa"
class Decoder(nn.Module):
    """Single-step LSTM decoder: consumes one token per call together with the
    previous hidden/cell state and emits vocabulary logits.
    """

    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.output_dim = output_dim
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, cell):
        # input: [batch] -> add a length-1 time dimension: [1, batch]
        step = input.unsqueeze(0)
        emb = self.dropout(self.embedding(step))      # [1, batch, emb_dim]
        output, (hidden, cell) = self.rnn(emb, (hidden, cell))
        # output: [1, batch, hid_dim] -> logits: [batch, output_dim]
        prediction = self.fc_out(output.squeeze(0))
        return prediction, hidden, cell
# + id="sYSagKi-gHM4"
class Seq2Seq(nn.Module):
    """Wires an Encoder and a Decoder into a teacher-forced seq2seq model.

    Relies on the module-level `random` (imported elsewhere in this
    notebook) for the teacher-forcing coin flip.
    """

    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        # Encoder state is used verbatim as decoder initial state, so the
        # recurrent geometries must match.
        assert encoder.hid_dim == decoder.hid_dim, \
            "Hidden dimensions of encoder and decoder must be equal!"
        assert encoder.n_layers == decoder.n_layers, \
            "Encoder and decoder must have equal number of layers!"

    def forward(self, src, trg, teacher_forcing_ratio = 0.90):
        # src: [src_len, batch], trg: [trg_len, batch].
        trg_len, batch_size = trg.shape[0], trg.shape[1]
        vocab = self.decoder.output_dim
        # Slot 0 stays all-zero: trg position 0 is the <start> token and is
        # never predicted.
        outputs = torch.zeros(trg_len, batch_size, vocab).to(self.device)
        # Encoder's final state seeds the decoder.
        hidden, cell = self.encoder(src)
        token = trg[0, :]  # first decoder input is the <sos> symbol
        for t in range(1, trg_len):
            logits, hidden, cell = self.decoder(token, hidden, cell)
            outputs[t] = logits
            # With probability teacher_forcing_ratio feed the ground-truth
            # token next; otherwise feed the model's own greedy prediction.
            if random.random() < teacher_forcing_ratio:
                token = trg[t]
            else:
                token = logits.argmax(1)
        return outputs
# + id="RWtkoXH6U8Pm"
# Use the GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Vocabulary/embedding sizes: 32 symbols = 26 letters + space + apostrophe
# + the four special tokens (see char_dict defined below).
INPUT_DIM = 32
OUTPUT_DIM = 32
ENC_EMB_DIM = 32
DEC_EMB_DIM = 32
HID_DIM = 1024
N_LAYERS = 2
ENC_DROPOUT = 0.2
DEC_DROPOUT = 0.2
# Special-token label indices — must stay in sync with char_dict below.
START_INDEX = 0
BLANK_INDEX = 27
PAD_INDEX = 30
END_INDEX = 31

enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device).to(device)
# + [markdown] id="Ff9raB0jVGzN"
# # Some utilities
#
# Here we will add a bit of boilerplate code for training and loading data.
# + colab={"base_uri": "https://localhost:8080/"} id="5b17OQm4WdVy" outputId="0a7d7b17-0b23-4ee8-e690-e70564917f04"
import random

# 32 symbols; list position == label id. Specials: <start>=0, <blank>=27,
# <pad>=30, <end>=31.
char_labels = ['<start>', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
               't', 'u', 'v', 'w', 'x', 'y', 'z', '<blank>', ' ', "'", '<pad>','<end>']
# Character -> label id.
char_dict = {'<start>':0, 'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6, 'g':7, 'h':8, 'i':9, 'j':10, 'k':11, 'l':12, 'm':13,
             'n':14, 'o':15, 'p':16, 'q':17, 'r':18, 's':19, 't':20, 'u':21, 'v':22, 'w':23, 'x':24, 'y':25, 'z':26,
             '<blank>':27, ' ':28, "'":29, '<pad>':30,'<end>':31}
# Label id -> text; special tokens decode to '' so they vanish from output.
char_inv_dict = {0:'', 1:'a', 2:'b', 3:'c', 4:'d', 5:'e', 6:'f', 7:'g', 8:'h', 9:'i', 10:'j', 11:'k', 12:'l',
                 13:'m', 14:'n', 15:'o', 16:'p', 17:'q', 18:'r', 19:'s', 20:'t', 21:'u', 22:'v', 23:'w', 24:'x', 25:'y',
                 26:'z', 27:'', 28:' ', 29:"'", 30:'', 31:''}
class TextDataset(torch.utils.data.Dataset):
    """Text-denoising dataset yielding (corrupted, clean) string pairs.

    Each line is lowercased and cleaned, then a noisy copy is generated by
    randomly dropping 'g'/'j'/'p' and randomly doubling every other
    character; the seq2seq model learns to recover the clean line.
    """

    def __init__(self, lines, batch_size):
        # Drop lines that are only a newline.
        lines = list(filter(("\n").__ne__, lines))
        self.lines = lines # list of strings
        collate = Collate()
        # The dataset carries its own DataLoader for convenience.
        self.loader = torch.utils.data.DataLoader(self, batch_size=batch_size, num_workers=0, shuffle=True, collate_fn=collate)

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        line = self.lines[idx].replace("\n", "")
        line = line.lower()
        # BUGFIX: transliterate accented characters *before* filtering to
        # the known alphabet. The original filtered first, so any accented
        # character was silently deleted and unidecode was a no-op.
        line = unidecode.unidecode(line) # remove special characters
        line = "".join(c for c in line if c in char_labels)
        # Corrupt: 'g'/'j'/'p' survive with probability 0.5; every other
        # character appears once or twice at random. Collapse whitespace.
        x = "".join(c*random.randint(0,1) if c in ("gjp") else c*random.randint(1,2) for c in line)
        x = ' '.join(x.split())
        y = line
        return (x, y)
def encode_string(s):
    """Map a string to its list of integer labels via char_dict."""
    return [char_dict[ch] for ch in s]
def decode_labels(l):
    """Map integer labels back to text; special tokens decode to ''."""
    return "".join(char_inv_dict[idx] for idx in l)
class Collate:
    def __call__(self, batch):
        """
        batch: list of tuples (input string, output string)

        Returns a minibatch (x, y) of label tensors. Every row is
        <start> + encoded text + padding to the batch maximum + <end>.
        """
        x = []; y = []
        batch_size = len(batch)
        for index in range(batch_size):
            x_, y_ = batch[index]
            # Prepend the <start> label (index 0) before encoding.
            x.append([0] + encode_string(x_))
            y.append([0] + encode_string(y_))
        # Pad all sequences to the same length, then append the end token.
        # NOTE(review): <end> is appended *after* the padding, so short rows
        # look like "... <pad> <pad> <end>"; the loss ignores PAD_INDEX, so
        # this only guarantees one terminator per row. Confirm intended.
        T = [len(x_) for x_ in x]
        U = [len(y_) for y_ in y]
        T_max = max(T)
        U_max = max(U)
        for index in range(batch_size):
            # CONSISTENCY: use the named END_INDEX constant (== 31) instead
            # of the magic number used originally.
            x[index] += [PAD_INDEX] * (T_max - len(x[index])) + [END_INDEX]
            x[index] = torch.tensor(x[index])
            y[index] += [PAD_INDEX] * (U_max - len(y[index])) + [END_INDEX]
            y[index] = torch.tensor(y[index])
        # Stack into single [batch, seq] tensors.
        x = torch.stack(x)
        y = torch.stack(y)
        #T = torch.tensor(T)
        #U = torch.tensor(U)
        return (x,y) #,T,U)
def gen_features(filename, max_len):
    """Read a LibriSpeech transcript TSV and return its transcripts split
    into chunks of at most max_len words each.

    The header line is skipped; the transcript is taken from the third
    tab-separated column of every remaining line.
    """
    with open(filename, "r") as f:
        rows = f.readlines()
    transcripts = [row.split('\t')[2] for row in rows[1:]]
    lines = []
    for transcript in transcripts:
        words = transcript.split(' ')
        # Emit consecutive max_len-word windows; the final, possibly
        # shorter, remainder window is emitted too.
        for start in range(0, len(words), max_len):
            lines.append(' '.join(words[start:start + max_len]))
    return lines
# Transcript sources: train on train-clean-100, validate on dev-clean,
# report final numbers on test-clean.
train_filename = "../LibriSpeech/train-clean-100/transcripts_460.tsv"
eval_filename = "../LibriSpeech/dev-clean/transcripts.tsv"
test_filename = "../LibriSpeech/test-clean/transcripts.tsv"

# Chunk every transcript into at most 10-word lines.
train_lines = gen_features(train_filename, 10)
eval_lines = gen_features(eval_filename, 10)
test_lines = gen_features(test_filename, 10)

train_set = TextDataset(train_lines, batch_size=32)
# BUGFIX: the validation set was built from test_lines, so "validation"
# loss was actually computed on the test split (and eval_lines was never
# used). Build it from the dev-clean transcripts instead.
eval_set = TextDataset(eval_lines, batch_size=8)
test_set = TextDataset(test_lines, batch_size=8)

# Quick smoke check of the augmentation pipeline.
train_set.__getitem__(0)
# +
def init_weights(m):
    """Re-initialize every parameter of `m` uniformly in [-0.08, 0.08]."""
    for param in m.parameters():
        nn.init.uniform_(param.data, -0.08, 0.08)
# Apply the uniform initialization to every submodule of the seq2seq model.
model.apply(init_weights)
# +
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
print(f'The model has {count_parameters(model):,} trainable parameters')

# Adam with a small LR; the loss skips padded positions so padding does not
# contribute to the gradient. LR decays by 2% per epoch.
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss(ignore_index = PAD_INDEX)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
# +
def train(model, iterator, optimizer, criterion, clip):
    """Run one teacher-forced training epoch; return the mean batch loss.

    Uses the notebook-level `device`. The DataLoader yields batch-first
    tensors, which are transposed to the [seq_len, batch] layout the model
    expects.
    """
    model.train()
    epoch_loss = 0
    for src, trg in iterator:
        src = src.transpose(1, 0).to(device)
        trg = trg.transpose(1, 0).to(device)
        optimizer.zero_grad()
        output = model(src, trg)          # [trg_len, batch, output_dim]
        vocab = output.shape[-1]
        # Position 0 is the <start> token and is never predicted: drop it
        # and flatten the remaining steps for CrossEntropyLoss.
        loss = criterion(output[1:].view(-1, vocab), trg[1:].reshape(-1))
        loss.backward()
        # Clip gradients to stabilize the LSTM training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
    """Compute the mean batch loss over `iterator` without updating weights.

    Teacher forcing is disabled (ratio 0), so the model decodes from its own
    predictions — as it would at inference time. Uses the notebook-level
    `device`.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for src, trg in iterator:
            src = src.transpose(1, 0).to(device)
            trg = trg.transpose(1, 0).to(device)
            output = model(src, trg, 0)  # turn off teacher forcing
            vocab = output.shape[-1]
            # Same flattening as in train(): skip the <start> position.
            loss = criterion(output[1:].view(-1, vocab), trg[1:].reshape(-1))
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# -
N_EPOCHS = 8
CLIP = 1
# Only checkpoint when validation loss improves on this baseline.
best_valid_loss = 6.0 #float('inf')
train_loss = []
valid_loss = []

# NOTE(review): the run resumes from './base_model_2.pt' but saves
# improvements to './base_model.pt' — confirm the asymmetry is intentional
# (it prevents overwriting the resumed checkpoint).
load_file = './base_model_2.pt'
model.load_state_dict(torch.load(load_file))

for epoch in range(N_EPOCHS):
    tr_loss = train(model, train_set.loader, optimizer, criterion, CLIP)
    train_loss.append(tr_loss)
    v_loss = evaluate(model, eval_set.loader, criterion)
    valid_loss.append(v_loss)
    lr_scheduler.step()
    print(tr_loss, v_loss)
    # Keep the best model seen so far (by validation loss).
    if(best_valid_loss > v_loss):
        best_valid_loss = v_loss
        save_file = './base_model.pt'
        torch.save(model.state_dict(), save_file)

print(train_loss, valid_loss)
def inference(model, iterator):
    """Greedy-decode every batch in `iterator`.

    Returns (predicted_strings, reference_strings); labels are mapped back
    to text with decode_labels, so special tokens vanish. Uses the
    notebook-level `device`.
    """
    model.eval()
    y_pred, y_test = [], []
    with torch.no_grad():
        for src, trg in iterator:
            # To [seq, batch] for the model ...
            src = src.transpose(1, 0).to(device)
            trg = trg.transpose(1, 0).to(device)
            # Greedy decoding: no teacher forcing, take the argmax token.
            best = model(src, trg, 0).argmax(-1)
            # ... and back to [batch, seq] for per-row decoding.
            best = best.transpose(1, 0).detach().cpu().numpy()
            refs = trg.transpose(1, 0).detach().cpu().numpy()
            y_pred.extend(decode_labels(row) for row in best)
            y_test.extend(decode_labels(row) for row in refs)
    return y_pred, y_test
# Decode the held-out test split and score it with word error rate.
y_pred, y_test = inference(model, test_set.loader)
import jiwer
from jiwer import wer
# wer(reference, hypothesis): fraction of word-level edits needed.
error = wer(y_test, y_pred)
print(error)
| se2seq_base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="2Y2c9H6Z9N5Z" outputId="8b89ff9d-6009-4411-eebb-8e4446ddb830"
# !gdown --id 1hgfcQHIlfnnDTSUAA_3--m-YA7BelKl3
# + id="9VJTuILh9UKM"
import pandas as pd
import torch
#handling text data
from torchtext.legacy import data
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="S419D4DN9gSO" outputId="b049ffe9-86d3-4360-88d5-835ad66d186b"
# Latin-1 because the CSV contains bytes that are not valid UTF-8.
dataset=pd.read_csv('/content/Dataset 3.csv',encoding='ISO-8859-1')
dataset
# + id="29mWz69Y9UTl"
# Drop one specific row — presumably a known bad record; TODO confirm.
dataset.drop(10313,inplace=True)
# + id="lDDW7XdT9UZA"
# Shuffle the rows and persist the shuffled copy.
dataset = dataset.sample(frac=1).reset_index(drop=True)
dataset.to_csv('data.csv',index=False)
# + id="4RUbkgvr99an"
import tensorflow as tf
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
# + id="sAYuA2Cq-DUF"
# Hash-vocabulary size used by one_hot below.
voc_size=5000
import nltk
import re
from nltk.corpus import stopwords
# + colab={"base_uri": "https://localhost:8080/"} id="7vP3Ud_a-Gyt" outputId="97c29ae6-79d4-4776-f263-9e6defee9416"
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# Materialize the stop-word list as a set for O(1) membership tests.
STOPWORDS = set(stopwords.words('english'))
# + id="hJG131LD-Hh9"
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
corpus = []
# Clean each tweet: strip non-letters, lowercase, drop stop words, stem.
for i in range(0, len(dataset)):
    review = re.sub('[^a-zA-Z]', ' ', dataset['tweets'][i])
    review = review.lower()
    review = review.split()
    # PERF/BUGFIX: the original called stopwords.words('english') for every
    # word of every tweet (rebuilding and linearly scanning the list each
    # time) and never used the STOPWORDS set created above. Same words kept,
    # far fewer lookups.
    review = [ps.stem(word) for word in review if word not in STOPWORDS]
    review = ' '.join(review)
    corpus.append(review)
# + id="yfnVVx7d-Hk2"
# one_hot hashes each word into [1, voc_size); collisions are possible but
# acceptable at this vocabulary size.
onehot_repr=[one_hot(words,voc_size)for words in corpus]
# + colab={"base_uri": "https://localhost:8080/"} id="w-JWDDBR-HrP" outputId="99a4df03-d5ae-4b88-c772-9dcd34782346"
# Pad/truncate every tweet to 40 tokens (padding appended at the end).
sent_length=40
embedded_docs=pad_sequences(onehot_repr,padding='post',maxlen=sent_length)
print(embedded_docs)
# + colab={"base_uri": "https://localhost:8080/"} id="KCtlwNq5LnU-" outputId="c43e40e0-b2a9-4059-d7e7-de2781f8cbcd"
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
# define model: embedding + three Conv1D/MaxPool stages + dense head for
# binary sentiment classification.
model2 = Sequential()
model2.add(Embedding(voc_size,40,input_length=sent_length))
model2.add(Conv1D(filters=32, kernel_size=8, activation='relu'))
model2.add(MaxPooling1D(pool_size=2))
model2.add(Conv1D(filters=32, kernel_size=4, activation='relu'))
model2.add(MaxPooling1D(pool_size=2))
model2.add(Conv1D(filters=32, kernel_size=1, activation='relu'))
# NOTE(review): pool_size=1 makes this pooling layer a no-op — confirm intended.
model2.add(MaxPooling1D(pool_size=1))
model2.add(Flatten())
model2.add(Dense(10, activation='relu'))
model2.add(Dense(1, activation='sigmoid'))
print(model2.summary())
# compile network
model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# + id="sESIzH9F-QDI"
import numpy as np
X_final=np.array(embedded_docs)
y_final=np.array(dataset.sentiment)
# + colab={"base_uri": "https://localhost:8080/"} id="o-OnhCDnCypp" outputId="872a6122-58af-44de-c1ff-f500faa24edc"
y_final
# + id="sAN7Kafv-QGP"
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X_final, y_final, test_size=0.2,random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="Usm7W1Xt-UYx" outputId="28989e60-01b7-4a9b-e272-f822a58f6a36"
# NOTE(review): the test split doubles as the validation set here, so the
# "val" metrics are not an unbiased estimate of test performance.
history=model2.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="M6eGlZaF-YVJ" outputId="4fec7e50-b642-456a-ea9a-fd73fa12cb16"
import matplotlib.pyplot as plt
# Learning curves: accuracy then loss, train vs. test.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="isa8fQyAF1aL" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="5469e301-456e-4d2e-fb7f-7a71facb3457"
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Binarize the sigmoid outputs at 0.5 before scoring.
y_pred=model2.predict(X_test)
y_pred = (y_pred > 0.5)
conf=confusion_matrix(y_test,y_pred)
print(conf)
import seaborn as sn
sn.heatmap(conf, cmap="Blues", annot=True)# font size
conf
evaluation=classification_report(y_test,y_pred)
print(evaluation)
| 170104017.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datetime import datetime
import numpy as np
import pandas as pd
import sklearn
from sklearn.linear_model import LinearRegression
#parse data
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
#label encoding on categorical data
#FAMA 49CRSP Common Stocks
# Dates are kept as strings (YYYYMMDD) for later parsing in get_dates().
df = pd.read_csv('FAMA_49CRSP.csv', dtype={'public_date' : str})
# +
import sklearn.preprocessing
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
#preprocessing here
#sort by date
df = df.sort_values(by = 'public_date', ascending = True)
#encode integer categories into numbers
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(df.FFI49_desc)
df.FFI49_desc = integer_encoded
#df = df.dropna()
# Targets: equal- and value-weighted industry returns (removed from the
# feature frame below).
ewlabels = df.indret_ew
vwlabels = df.indret_vw
df = df.drop(labels=['indret_ew', 'indret_vw'], axis=1)
#3year on year change as a prediction feature, raw pct change
yoythree = ewlabels.diff(periods = 3)
#3 years rolling percent change, averaged ie. (y1-y2 + (y3-y2)change)/2
# NOTE(review): rolling(3).mean() is a 3-period rolling *mean* of the raw
# series, not a percent change as the comment above says — confirm intent.
rollavgpct = ewlabels.rolling(3).mean()
#drop first 3 years
df = df.iloc[3:]
ewlabels = ewlabels.iloc[3:]
yoythree = yoythree.iloc[3:]
#yoypctthree = yoypctthree.iloc[3:]
rollavgpct = rollavgpct.iloc[3:]
#add -1 and 1 so the bins will take on bins to be equal and set to max -1 and 1
#extrema = pd.Series([-1,1])
#ewnlabels = ewlabels.append(extrema)
#make a new output (bucket by percentage?)
# enc = KBinsDiscretizer(n_bins=8, encode='ordinal',strategy = 'uniform')
# ewnlabels = np.asarray(ewnlabels)
# ewnlabels = ewnlabels.reshape((-1,1))
# labels_binned = enc.fit_transform(ewnlabels)
# labels_binned = labels_binned[:-2]
#1 Split-Timer series data, 0.64 Train, 0.16 dev, 0.2 Test
#x_train, x_test, y_train, y_test = train_test_split(df, labels_binned, test_size = 0.2, shuffle = False)
# shuffle=False preserves chronological order for the time-series split.
x_train, x_test, y_train, y_test = train_test_split(df, ewlabels, test_size = 0.2, shuffle = False)
x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train, test_size = 0.2, shuffle = False)
# +
def get_dates(x_train, x_dev, x_test):
    """Parse the YYYYMMDD 'public_date' column of each split into datetimes.

    Returns (train_dates, dev_dates, test_dates, x_train, x_dev, x_test)
    where the returned frames no longer contain the 'public_date' column.
    """
    def to_datetimes(frame):
        # 'public_date' holds strings like '20200115'.
        return [datetime(int(s[0:4]), int(s[4:6]), int(s[6:8]))
                for s in frame['public_date']]

    train_dates = to_datetimes(x_train)
    dev_dates = to_datetimes(x_dev)
    test_dates = to_datetimes(x_test)
    x_train = x_train.drop('public_date', axis=1)
    x_dev = x_dev.drop('public_date', axis=1)
    x_test = x_test.drop('public_date', axis=1)
    return train_dates, dev_dates, test_dates, x_train, x_dev, x_test
train_dates, dev_dates, test_dates, x_train, x_dev, x_test = get_dates(x_train, x_dev, x_test)
# +
from matplotlib import pyplot

# Visualize the chronological train/dev/test partition of the
# equal-weighted industry returns.
fig1 = pyplot.figure(1, figsize = (6,6))
pyplot.plot(train_dates, y_train, color = 'green', label = 'industry_ew_train')
pyplot.plot(dev_dates, y_dev, color = 'yellow', label = 'industry_ew_dev')
pyplot.plot(test_dates, y_test, color = 'red', label = 'industry_ew_test')
pyplot.xlabel('Date')
pyplot.ylabel('Industry Return (Equally Weighted)')
pyplot.legend()
pyplot.show()
# +
#tutorial keras practice
#https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/
#####IGNORE THIS!!!!!!!!
from keras.models import Sequential
from keras.layers import Dense, Activation,Softmax
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
import numpy

model = Sequential()
#parameters = number of neurons, initialization method, activation function
# NOTE(review): `init=` is the legacy Keras kwarg; recent Keras expects
# `kernel_initializer=` (as used in the next cell). Confirm the installed
# Keras version accepts it.
model.add(Dense(32, input_dim=76, init = 'uniform', activation = 'relu'))
model.add(Dense(16, init = 'uniform', activation = 'relu'))
model.add(Dense(1, init = 'uniform', activation = 'sigmoid'))
# For a binary classification problem
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# BUGFIX: `x_tra`/`y_tra` were never defined anywhere in this notebook
# (NameError at runtime); the training split is named x_train/y_train.
model.fit(x_train, y_train, epochs=25, batch_size=32)
print("----------------------------------------------------------")
scores = model.evaluate(x_train,y_train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
# +
#Regression Model:
#1 Layer: 76 -> 12.78% and 15.28%
#2 Layer: 76,1 -> 0.40% and 0.48%
#3 Layers: 76, 32, 1 -> 0% and 65%
#4 layers: 76,48,32,1 + adam + -> 60.52% and 56.70%
#4 Layers: 76,32,16,1 -> 61.33% and 57.18%
#4 Layers: 76,32,8,1 -> 0%
#4 layers: 76,48,8,1 -> 0%
#6 layers: 76,48,32,16,8,1 -> 20% and 0%
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from matplotlib import pyplot
import seaborn as sns

# Regression head: 72 -> 32 -> 1 linear output trained with MSE.
model = Sequential()
#parameters = number of neurons, initialization method, activation function
model.add(Dense(72, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu'))
#model.add(Dense(48, kernel_initializer='normal',activation = 'relu'))
model.add(Dense(32, kernel_initializer='normal',activation = 'relu'))
#model.add(Dense(16, kernel_initializer='normal',activation = 'relu'))
#model.add(Dense(8, kernel_initializer='normal',activation = 'relu'))
model.add(Dense(1, kernel_initializer='normal', activation = 'linear'))
# Compile model
#opt = Adam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
#model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
# NOTE(review): the 'cosine' metric and the history keys used below
# ('mean_squared_error', 'cosine_proximity', ...) match older Keras
# releases; newer Keras renamed them — pin the Keras version accordingly.
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape', 'cosine'])
history = model.fit(np.asarray(x_train), y_train, epochs=50)

# Training-metric curves.
fig2 = pyplot.figure(2,figsize = (10,10))
pyplot.plot(history.history['mean_squared_error'], color = 'blue')
pyplot.plot(history.history['mean_absolute_error'], color = 'green')
pyplot.plot(history.history['mean_absolute_percentage_error'], color = 'orange')
pyplot.plot(history.history['cosine_proximity'], color = 'red')
pyplot.ylim(-10,10)
pyplot.show()

#dev set
print("----------------------------------------------------------")
dev_predictions = model.predict(x_dev)
scores = model.evaluate(np.asarray(x_dev),y_dev)
for i in range(len(scores)):
    print("\n%s: %.2f%%" % (model.metrics_names[i], scores[i]))
fig3 = pyplot.figure(3, figsize = (10,10))
pyplot.plot(dev_dates, y_dev, color = 'green')
pyplot.plot(dev_dates, dev_predictions, color = 'red')
pyplot.show()

#test set
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
test_predictions = model.predict(x_test).reshape((-1))
scores = model.evaluate(np.asarray(x_test),y_test)
for i in range(len(scores)):
    print("\n%s: %.2f%%" % (model.metrics_names[i], scores[i]))
fig4 = pyplot.figure(4, figsize = (10,10))
pyplot.plot(test_dates, y_test, color = 'green')
pyplot.plot(test_dates, test_predictions, color = 'red')
pyplot.show()
# +
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
import numpy

# NOTE(review): this cell trains an 8-way softmax classifier with
# categorical_crossentropy against what appear to be *continuous* return
# labels — it only makes sense with the commented-out binned labels from the
# preprocessing cell; confirm before relying on its output.
model = Sequential()
model.add(Dense(32, input_dim=x_train.shape[1], init = 'uniform', activation = 'relu'))
model.add(Dense(16, init = 'uniform', activation = 'relu'))
model.add(Dense(8, init = 'uniform', activation = 'softmax'))
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=25, batch_size=32)
print("----------------------------------------------------------")
scores = model.evaluate(x_train,y_train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
# +
from keras.models import Sequential
from keras.layers import Dense, Activation,Softmax
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(32, input_shape = (x_train.shape)))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# For a binary classification problem
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# For a mean squared error regression problem
# NOTE(review): compile() is called three times; only this last (mse)
# configuration is used for training.
model.compile(optimizer='rmsprop',
              loss='mse')
model.fit(x_train, y_train, epochs=5, batch_size=32)
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
| .ipynb_checkpoints/Neural Network-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py38_pytorch
# language: python
# name: conda-env-py38_pytorch-py
# ---
# # PyTorch를 이용한 딥러닝
#
# 기존의 머신러닝은 통계 방법을 사용하여 특징(feature)과 라벨(label) 사이의 관계를 결정하는 데에 의존하면서 예측 모델을 만드는 데 매우 효과적일 수 있습니다. 하지만 엄청난 데이터 가용성의 증가는 그것을 처리하는 데 필요한 컴퓨팅 기술의 발전과 결합되어 인간의 뇌가 인공 신경망이라고 불리는 구조에서 정보를 처리하는 방식을 모방하여 새로운 머신러닝 기술의 출현으로 이어졌습니다.
#
# PyTorch는 깊은 신경망(deep neural networks, DNN)을 포함한 머신러닝 모델을 만들기 위한 프레임워크입니다. 이 예에서는 PyTorch를 사용하여 부리(culmen)의 길이와 깊이, 날개(FlipperLength) 및 체질량(BodyMass)을 기준으로 펭귄의 종으로 분류하는 간단한 신경망을 생성합니다.
#
# > **인용**: 이 연습에 사용된 펭귄 데이터셋은 [Dr. <NAME>](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) 과 [Long Term Ecological Research Network](https://lternet.edu/)의 멤버인 [Palmer Station, Antarctica LTER](https://pal.lternet.edu/)이 수집하여 사용할 수 있게 만든 데이터의 서브셋이다.
#
# ## 데이터셋 살펴보기
#
# PyTorch를 사용하기 전에 모델을 만들기 위해 Palmer Islands 펭귄 데이터셋으로부터 필요한 데이터를 불러오겠습니다. 이 데이터셋에는 3가지 종류의 펭귄이 관찰되어 있습니다.
#
# > **참조**: 실제로는 딥러닝 모델 없이도 기존의 머신러닝 기술을 사용하여 펭귄 분류 문제를 쉽게 해결할 수 있습니다. 하지만 이 노트북에서 신경 네트워크의 원리를 설명하는 데이터셋은 유용하고 이해하기 쉽습니다.
# +
import pandas as pd

# load the training dataset (excluding rows with null values)
penguins = pd.read_csv('data/penguins.csv').dropna()

# Deep Learning models work best when features are on similar scales
# In a real solution, we'd implement some custom normalization for each feature, but to keep things simple
# we'll just rescale the FlipperLength and BodyMass so they're on a similar scale to the bill measurements
penguins['FlipperLength'] = penguins['FlipperLength']/10
penguins['BodyMass'] = penguins['BodyMass']/100

# The dataset is too small to be useful for deep learning
# So we'll oversample it to increase its size (4x after two doublings).
# BUGFIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent and produces the same frame.
for i in range(1,3):
    penguins = pd.concat([penguins, penguins])

# Display a random sample of 10 observations
sample = penguins.sample(10)
sample
# -
# **Species** 열은 모델이 예측할 레이블입니다. 각 라벨 값은 0, 1 또는 2로 인코딩된 펭귄 종의 클래스를 나타냅니다. 다음 코드는 이러한 클래스 라벨에 해당하는 실제 종을 보여줍니다.
# + tags=[]
penguin_classes = ['Adelie', 'Gentoo', 'Chinstrap']
# Show 10 random rows with the numeric species label resolved to a name.
print(sample.columns[0:5].values, 'SpeciesName')
for index, row in penguins.sample(10).iterrows():
    print('[',row[0], row[1], row[2],row[3], int(row[4]), ']',penguin_classes[int(row[-1])])
# -
# 흔히 지도학습 문제에서 볼 수 있듯이, 데이터셋을 일련의 레코드로 분할하여 모델을 학습하고, 학습된 모델을 검증할 수 있는 작은 데이터셋으로 분할합니다.
# + tags=[]
from sklearn.model_selection import train_test_split

features = ['CulmenLength','CulmenDepth','FlipperLength','BodyMass']
label = 'Species'

# Split data 70%-30% into training set and test set
x_train, x_test, y_train, y_test = train_test_split(penguins[features].values,
                                                    penguins[label].values,
                                                    test_size=0.30,
                                                    random_state=0)

print ('Training Set: %d, Test Set: %d \n' % (len(x_train), len(x_test)))
print("Sample of features and labels:")

# Take a look at the first 25 training features and corresponding labels
# NOTE(review): range(0,24) prints 24 rows, not 25 as the comment says.
for n in range(0,24):
    print(x_train[n], y_train[n], '(' + penguin_classes[y_train[n]] + ')')
# -
# *특징(features)*은 각 펭귄 관찰에 대한 측정치이며 *라벨(label)* 은 관찰이 나타내는 펭귄의 종(Adelie, Gentoo 또는 Chinstrap)을 나타내는 숫자 값입니다.
#
# ## PyTorch 라이브러리 설치 및 임포트
#
# PyTorch를 사용하여 펭귄 분류 모델을 만들 계획이기 때문에 PyTorch 라이브러리를 설치하고 임포트 위해 아래 2개의 셀을 실행해야 합니다. 특정 버전의 PyTorch 설치는 운영 체제와 *cuda*를 통한 고성능 처리에 사용할 수 있는 그래픽 처리 장치(GPU)가 컴퓨터에 있는지 여부에 따라 달라집니다. 자세한 지침은 https://pytorch.org/get-started/locally/ 에서 확인할 수 있습니다.
# +
# # !pip install torch==1.7.1+cpu torchvision==0.8.2+cpu torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
# + tags=[]
import torch
import torch.nn as nn
import torch.utils.data as td

# Set random seed for reproducability
torch.manual_seed(0)

print("Libraries imported - ready to use PyTorch", torch.__version__)
# -
# ## PyTorch에서 데이터 준비하기
#
# PyTorch는 *데이터 로더(data loaders)* 를 사용하여 학습 및 검증 데이터를 일괄 불러옵니다. 데이터를 이미 Numpy 배열(array)에 로드했지만, 데이터를 PyTorch *tensor* 개체로 변환하는 PyTorch 데이터셋으로 래핑하고 해당 데이터셋에서 배치를 읽을 로더를 생성해야 합니다.
# + tags=[]
# Create a dataset and loader for the training data and labels
train_x = torch.Tensor(x_train).float()
train_y = torch.Tensor(y_train).long()
train_ds = td.TensorDataset(train_x,train_y)
# NOTE(review): shuffle=False keeps batch order fixed, and num_workers=1
# spawns a worker process — usually unnecessary for an in-memory
# TensorDataset. Confirm both are intentional.
train_loader = td.DataLoader(train_ds, batch_size=20, shuffle=False, num_workers=1)

# Create a dataset and loader for the test data and labels
test_x = torch.Tensor(x_test).float()
test_y = torch.Tensor(y_test).long()
test_ds = td.TensorDataset(test_x,test_y)
test_loader = td.DataLoader(test_ds, batch_size=20, shuffle=False, num_workers=1)
print('Ready to load data')
# -
# ## 신경망 정의하기
#
# 이제 우리는 우리의 신경망을 정의할 준비가 되었다. 이 경우에는 3개의 완전 연결층(fully-connected layers)으로 구성된 네트워크를 구축하겠습니다:
# * 각 형상에 대해 입력 값(이 경우 4개의 펭귄 측정값)을 수신하고 *ReLU* 활성화 함수를 적용하는 입력 층입니다.
# * 10개의 입력을 받고, *ReLU* 활성화 함수를 적용하는 은닉 층입니다.
# * *Softmax* 활성화 함수를 사용하여 각 펭귄 종에 대한 출력을 생성하는 출력 층입니다(이 출력 층은 3가지 펭귄 종 각각에 대하여 분류 확률을 나타냅니다). Softmax 함수는 전체 합계가 1인 확률 값을 가진 벡터를 생성합니다.
# + tags=[]
import torch
import torch.nn as nn
# Define the neural network
class PenguinNet(nn.Module):
    """4-feature -> 3-class penguin classifier.

    Two hidden layers of 10 ReLU units and a softmax output over the three
    species.
    """

    def __init__(self):
        super(PenguinNet, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 10)
        self.fc3 = nn.Linear(10, 3)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        # Softmax here means the training loss (CrossEntropyLoss) receives
        # probabilities rather than raw logits — kept exactly as the
        # original network defined it.
        return torch.softmax(self.fc3(hidden), dim=1)
# Create a model instance from the network
# (weights are randomly initialized; training happens below)
model = PenguinNet()
print(model)
# -
# ## 모델 학습하기
#
# 모델을 교육하기 위해서는 반복적으로 네트워크에서 학습을 통해 계산된 값을 공급하고, 손실 함수를 사용하여 손실을 계산하고, Optimizer를 사용하여 가중치와 바이어스 값 조정하고, 보류한 테스트 데이터셋을 사용하여 모델을 검증해야 합니다.
#
# 이를 위해 모델 학습 및 최적화하는 함수와 모델을 테스트하는 함수를 만들 것입니다. 그 다음 각 에포크의 손실 및 정확도 통계를 기록하면서 이러한 함수를 50개 이상의 에포크동안 반복적으로 호출합니다.
# + tags=[]
def train(model, data_loader, optimizer):
    """Run one optimization pass over data_loader; return the mean batch loss.

    Uses the notebook-level `loss_criteria` as the loss function and prints
    the epoch's average training loss.
    """
    # Set the model to training mode
    model.train()
    running_loss = 0
    for batch, (data, target) in enumerate(data_loader):
        optimizer.zero_grad()
        out = model(data)                 # feedforward
        loss = loss_criteria(out, target)
        running_loss += loss.item()
        loss.backward()                   # backpropagate
        optimizer.step()
    # Mean loss over the batches seen (enumerate leaves `batch` at count-1).
    avg_loss = running_loss / (batch+1)
    print('Training set: Average loss: {:.6f}'.format(avg_loss))
    return avg_loss
def test(model, data_loader):
    """Evaluate on data_loader: print loss and accuracy, return mean batch loss.

    No gradients are tracked. Uses the notebook-level `loss_criteria`.
    """
    # Switch the model to evaluation mode (so we don't backpropagate)
    model.eval()
    loss_total = 0
    correct = 0
    with torch.no_grad():
        batch_count = 0
        for data, target in data_loader:
            batch_count += 1
            out = model(data)                            # predictions
            loss_total += loss_criteria(out, target).item()
            # Accuracy: rows whose highest-scoring class matches the target.
            _, predicted = torch.max(out.data, 1)
            correct += torch.sum(target==predicted).item()
    # Average loss and total accuracy for this epoch.
    avg_loss = loss_total/batch_count
    print('Validation set: Average loss: {:.6f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        avg_loss, correct, len(data_loader.dataset),
        100. * correct / len(data_loader.dataset)))
    return avg_loss
# Specify the loss criteria (CrossEntropyLoss for multi-class classification)
# NOTE(review): PenguinNet already applies softmax, while CrossEntropyLoss
# applies log-softmax internally — the double softmax still trains but
# dampens gradients; confirm whether the output softmax is intentional.
loss_criteria = nn.CrossEntropyLoss()

# Use an "Adam" optimizer to adjust weights
# (see https://pytorch.org/docs/stable/optim.html#algorithms for details of supported algorithms)
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
optimizer.zero_grad()

# We'll track metrics for each epoch in these arrays
epoch_nums = []
training_loss = []
validation_loss = []

# Train over 50 epochs
epochs = 50
for epoch in range(1, epochs + 1):

    # print the epoch number
    print('Epoch: {}'.format(epoch))

    # Feed training data into the model to optimize the weights
    train_loss = train(model, train_loader, optimizer)

    # Feed the test data into the model to check its performance
    test_loss = test(model, test_loader)

    # Log the metrics for this epoch
    epoch_nums.append(epoch)
    training_loss.append(train_loss)
    validation_loss.append(test_loss)
# -
# 학습 프로세스가 실행되는 동안 어떤 일이 일어나고 있는지 알아보겠습니다:
#
# 1. 각 *epoch*에서 전체 학습 데이터셋이 네트워크를 통해 전달됩니다. 각 관측치에 대해 4개의 피쳐와 입력 층에 4개의 노드가 있으므로 각 관측치에 대한 피쳐는 4개의 값의 벡터로 해당 계층에 전달됩니다. 하지만 효율성을 위해 피쳐 벡터는 배치로 그룹화되므로 여러 기능 벡터의 매트릭스가 매번 제공됩니다.
# 2. 피쳐 값의 매트릭스는 초기 가중치와 바이어스 값을 사용하여 가중 합계를 수행하는 함수로 처리됩니다. 그 다음 이 함수의 결과는 다음 레이어의 노드에 전달되는 값을 제한하기 위해 입력 층에 대한 활성화 함수에 의해 처리됩니다.
# 3. 가중 합과 활성화 함수는 각 레이어에서 반복됩니다. 함수는 개별 스칼라 값이 아닌 벡터와 매트릭스에서 작동합니다. 다시 말해, 정방향 계산은 본질적으로 중첩된 선형 대수 함수의 연속입니다. 이것이 바로 데이터사이언티스트들이 그래픽 처리 장치(GPU)가 있는 컴퓨터를 선호하는 이유입니다. GPU는 매트릭스와 벡터 계산에 최적화되어 있기 때문입니다.
# 4. 네트워크의 최종 레이어에서 출력 벡터에는 각 클래스에 대한 확률 값이 포함됩니다(이 경우 클래스 0, 1, 2). 이 벡터는 네트워크에서 계산된 값이 실제 값과 얼마나 떨어져 있는지를 결정하기 위해 *손실 함수* 에 의해 처리됩니다. 예를 들어, Gentoo 펭귄(클래스 1) 관측치에 대한 출력이 \[0.3, 0.4, 0.3\]라고 가정합니다. 올바른 예측은 \[0.0, 1.0, 0.0\]이므로 예측 값과 실제 값 사이의 차이는 \[0.3, 0.6, 0.3\]입니다. 이 차이는 각 배치에 대해 집계되고 실행 애그리게이트로 유지되어 해당 에포크의 학습 데이터에 의해 발생한 전체 오류 수준(*손실*)을 계산합니다.
# 5. 각 에포크가 끝날 때마다 검증 데이터가 네트워크를 통과하며, 데이터의 손실과 정확도가 계산됩니다. 이렇게 하면 학습하지 않은 데이터를 사용하여 모델의 성능을 비교하고, 새로운 데이터에 맞게 일반화된 것인지, 아니면 학습 데이터에 *과적합* 된 것인지 판단할 수 있기 때문에 매우 중요합니다.
# 6. 모든 데이터가 네트워크를 통해 전달되면 *학습* 데이터에 대한 손실 함수의 출력이 opimizer로 전달됩니다. 최적화 도구에서 손실 처리 방법에 대한 정확한 세부 정보는 사용 중인 특정 최적화 알고리즘에 따라 다르지만 기본적으로 입력 층에서 손실 함수에 이르는 전체 네트워크를 하나의 큰 중첩(*복합(composite)*) 기능으로 생각할 수 있습니다. optimizer는 네트워크에서 사용된 각 가중치와 바이어스 값과 관련하여 *부분 도함수*를 계산하기 위해 일부 미적분을 적용합니다. 내부 함수와 외부 함수의 미분에서 합성 함수(composite function)의 미분을 결정할 수 있도록 *체인 룰(chain rule)* 이라는 기능으로 인해 중첩 함수에 대해이를 효율적으로 수행 할 수 있습니다. 여러분은 여기서 수학의 세부사항에 대해 걱정할 필요가 없지만, 최종 결과는 편미분값(partial derivatives)이 우리에게 각 가중치와 바이어스 값에 대한 손실 함수의 기울기(또는 *gradient*)에 대해 말해준다는 것입니다. 다시 말해서, 우리는 손실을 줄이기 위해서 가중치와 바이어스 값을 증가시킬지 감소시킬지 결정할 수 있습니다.
# 7. 가중치와 바이어스를 어느 방향으로 조정할 지를 결정한 optimizer는 *learning rate* 를 사용하여 어느 정도까지 조정할 지를 결정하고, *역전파(backpropagation)* 라는 프로세스를 통해 네트워크를 통해 역방향으로 연산하여 각 레이어의 가중치와 바이어스에 새로운 값을 할당한다.
# 8. 이제 다음 에포크는 이전 에포크에서 수정된 가중치와 바이어스로 시작하는 전체 학습, 검증 및 역전파 과정을 반복한다. 이는 희망컨대 더 적은 손실값을 유도할 것이다.
# 9. 이러한 에포크는 50번 반복된다.
#
# ## 학습 및 검증 손실 리뷰
#
# 학습이 완료되면 학습 및 모델 검증 과정에서 기록한 손실 메트릭스를 검토할 수 있습니다. 저희는 두 가지를 정말 찾고 있습니다:
# * 손실은 각 에포크에 따라 감소해야 하며, 모델이 올바른 라벨을 예측하기 위한 올바른 가중치와 편향을 학습하고 있음을 보여준다.
# * 훈련 손실과 검증 손실도 유사한 추세를 따라야 하며, 이는 모델이 훈련 데이터에 과적합되지 않음을 보여준다.
#
# 손실 메트릭을 플롯팅하고 다음을 살펴보겠습니다:
# +
# %matplotlib inline
from matplotlib import pyplot as plt

# Plot training vs. validation loss per epoch to check convergence and
# spot overfitting (the two curves should follow a similar trend).
plt.plot(epoch_nums, training_loss)
plt.plot(epoch_nums, validation_loss)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training', 'validation'], loc='upper right')
plt.show()
# -
# ## 학습된 가중치와 편향 확인하기
#
# 학습된 모델은 훈련 과정 중 optimizer에 의해 결정된 최종 가중치와 편향으로 구성된다. 네트워크 모델에 따라 각 레이어에 대해 다음 값을 예상해야 합니다.
# * 1번 레이어 : 10개의 출력 노드로 가는 4개의 입력 값이 있으므로 10 x 4 가중치와 10개의 바이어스 값이 있어야 합니다.
# * 2번 레이어 : 10개의 출력 노드로 가는 10개의 입력 값이 있으므로 10 x 10개의 가중치와 10개의 바이어스 값이 있어야 합니다.
# * 3번 레이어 : 3개의 출력 노드로 가는 10개의 입력 값이 있으므로 3 x 10개의 가중치와 3개의 바이어스 값이 있어야 합니다.
# + tags=[]
# Print every learned parameter tensor (weights and biases), keyed by the
# layer-qualified name PyTorch stores in the state dict.
for param_tensor in model.state_dict():
    print(param_tensor, "\n", model.state_dict()[param_tensor].numpy())
# -
# ## 모델 성능 평가하기
#
# 그렇다면, 이 모델은 좋은 것일까요? 검증 데이터에서 보고된 정확도는 상당히 예측이 잘 된 것처럼 보이지만, 일반적으로 가능한 각 클래스의 예측을 좀 더 깊이 파고 비교하는 것이 유용합니다. 분류 모델의 성능을 시각화하는 일반적인 방법은 각 클래스에 대한 올바른 예측과 잘못된 예측의 crosstab을 보여 주는 *혼동 행렬(confusion matrix)* 을 만드는 것입니다.
# +
#Pytorch doesn't have a built-in confusion matrix metric, so we'll use SciKit-Learn
from sklearn.metrics import confusion_matrix
import numpy as np

# Set the model to evaluate mode
model.eval()

# Get predictions for the test data
x = torch.Tensor(x_test).float()
_, predicted = torch.max(model(x).data, 1)

# Plot the confusion matrix
cm = confusion_matrix(y_test, predicted.numpy())
plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
# BUG FIX: sklearn's confusion_matrix puts TRUE labels on rows (y-axis of
# imshow) and PREDICTED labels on columns (x-axis); the labels were swapped.
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()
# -
# 혼동 행렬은 각 클래스에 대해 부정확한 예측보다 더 많은 정확도가 있음을 강한 대각선 색상으로 표시해서 나타내야 한다.
#
# ## 훈련된 모델 저장하기
# 이제 상당히 정확하다고 생각되는 모델이 생겼으므로 나중에 사용할 수 있도록 교육된 가중치를 저장할 수 있습니다.
# Save the model weights.
import os
# Ensure the target directory exists; torch.save fails if it does not.
os.makedirs('models', exist_ok=True)
model_file = 'models/penguin_classifier.pt'
torch.save(model.state_dict(), model_file)
del model
print('model saved as', model_file)
# ## 학습된 모델 사용하기
#
# 우리가 새로운 펭귄을 관찰하게 되면, 우리는 학습된 모델을 사용하여 그 펭귄의 종을 예측할 수 있다.
# + tags=[]
# Feature vector for a previously unseen penguin.
x_new = [[50.4, 15.3, 20, 50]]
print('New sample: {}'.format(x_new))

# Rebuild the network and restore the trained weights from disk.
model = PenguinNet()
model.load_state_dict(torch.load(model_file))

# Inference mode (disables training-only behaviour such as dropout).
model.eval()

# The class with the highest output score is the predicted species.
features = torch.Tensor(x_new).float()
_, predicted = torch.max(model(features).data, 1)
print('Prediction:', penguin_classes[predicted.item()])
# -
# ## 더 배우기
#
# 이 노트북은 간단한 PyTorch 예를 사용하여 심층 신경망에 관련된 기본 개념과 원리를 이해하도록 설계되었습니다. PyTorch에 대해 자세히 알아보려면 [tutorials on the PyTorch web site](https://pytorch.org/tutorials/)를 참조하십시오.
| 05a - Deep Neural Networks (PyTorch)_KR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IEU ACM Python'a Giriş - Veri Yapıları ve Objeler
# ## String ve Integer
# +
# Basic string and integer variables, printed space-separated.
name = "Alican"
surname = "Akca"
age = 19
print(name, surname, age)
# -
# # f String
# +
name = "Alican"
surname = "Akca"
age = 19

# Two equivalent ways of building the same sentence: an f-string and
# positional str.format placeholders.
message = f"İsmim {name} {surname} , {str(age)} yaşındayım."
message2 = "İsmim {0} {1} , {2} yaşındayım.".format(name, surname, age)
print(message)
print(message2)
# -
# # % String
# +
name = "Alican"
surname = "Akca"
age = 19
# Old-style %-formatting: each %s is substituted in order.
message = "İsmim %s %s , %s yaşındayım." % (name, surname, age)
# +
name = "Alican"
surname = "Akca"
age = 19
# name, surname, age = "alican","akca",19
message = f"İsmim {name} {surname} , {str(age)} yaşındayım."

# Case-conversion helpers; each returns a new string, the original is unchanged.
print(message.lower())
print(message.upper())
print(message.title())
print(message.capitalize())

# split -> list of words; replace -> new string; index -> first match position.
result = message.split(" ")
result = message.replace(".", "...")
result = message.index(".")
# -
# # Boolean
# +
# isalpha() is False here: the message contains spaces, digits and a period.
message = f"İsmim {name} {surname} , {str(age)} yaşındayım."
result = message.isalpha()  # isdigit would test for all-digits instead
print(type(result))
print(result)
# -
# # Float
# +
# 22/7 is the classic rational approximation of pi; true division gives a float.
fakePi = 22 / 7
print(fakePi)
print(type(fakePi))
print("The fakePi is {f:1.3}".format(f=fakePi))
# -
# # Complex
# +
# complex(re, im) builds the complex number 1+1j.
number = complex(1, 1)
print(number)
print(type(number))
# -
# # Lists
# +
# split() turns "Hello World!" into the list ['Hello', 'World!'].
epicMessage = "Hello World!".split()
#print(epicMessage)
# append() adds one item to the end of the list.
epicMessage.append("Merhaba")
epicMessage.append("Dünyaaaa!")
#print(epicMessage)
# Index assignment replaces an existing element in place.
epicMessage[3] = "Dünya!"
#print(epicMessage)
# remove() deletes by value; pop(0) deletes (and returns) by index.
epicMessage.remove("Hello")
#print(epicMessage)
epicMessage.pop(0)
#print(epicMessage)
#print(len(epicMessage))
# clear() empties the list, so the membership test below yields False.
epicMessage.clear()
#print(len(epicMessage))
result = "Mars" in epicMessage
#print(result)
# Lists may freely mix types (int, str, bool, float, complex).
epicList = [1,"bir",True,3.14, 1+1j]
#print(type(epicList[2]))
epicList2 = ["50", 40,False, 2.71, 1-1j]
#print(epicList + epicList2)
# sort() then reverse() leaves the numbers in descending order.
numbers = [1,3,5,9,40,8,2,7,40]
numbers.sort()
#print(numbers)
numbers.reverse()
#print(numbers)
#print(numbers.count(7)) # 1 -> present once, 0 -> absent
print(max(numbers))
# -
# # Tuple
#
# +
# NOTE: 'list' and 'tuple' shadow the built-in names; kept because this
# lesson is about the types themselves, but avoid this in real code.
# BUG FIX: 'araba' was a bare (undefined) name and raised NameError; it
# must be the string "araba".
list = ["araba", 3.14, "yüz"]
tuple = (3, 'iki', 2)
print(type(list))
print(type(tuple))

# Tuples are immutable, but '+' builds a brand-new tuple.
tuple = ('araba', 'gemi', '10') + tuple
print(tuple)
#print(tuple.count('araba'))
#print(tuple.index('10'))
# Tuples have no append/remove; the lines below would raise AttributeError.
#tuple.append("mehmet")
#tuple.remove("ayşe")
# -
# BUG FIX: max() over the mixed tuple (12, "a") raises TypeError in Python 3
# because int and str are not comparable; use comparable elements.
tuple1 = (12, 34)
print(max(tuple1))
# # Dictionary
# +
# Turkish license-plate codes by city; Ankara starts out deliberately wrong.
plaka = {"Izmir": 35, "İstanbul": 34, "Ankara": 666, "Antalya": 7}
print(plaka["Ankara"])

# Assigning to an existing key replaces its value in place.
plaka["Ankara"] = 6
#plaka.pop("Ankara")
print(plaka["Ankara"])
# +
# input() reads one line from the user as a string (prompts are in Turkish:
# name, surname, postal code).
name = input("Adınız: ")
surname = input("Soyadınız: ")
postCode = input("Posta Kodunuz: ")
# Collect the answers in a dict keyed by field name.
users = {"ad": name,
"soyad" : surname,
"posta kodu" : postCode
}
# -
# # Set ve Frozenset
# +
# Sets keep unique elements only: 'muz' appears once even though update()
# lists it again; remove() then deletes it.
meyveler = {"armut", "elma"}
meyveler.add("muz")
meyveler.update(["çilek", 'muz'])
meyveler.remove('muz')
print(meyveler)

# frozenset is the immutable variant: add/remove are not available on it.
meyveler = frozenset(meyveler)
#meyveler.add('nar')
#meyveler.remove('elma')
# -
# # Bytes
# +
# Parse a binary digit string: identical to the literal 0b11111111 == 255.
epicByte = int("11111111", 2)
print(epicByte)
print(f"{epicByte:#b} , {epicByte} 'ın karşılığıdır.'")
# -
| variablesAndObjects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MiniFlow implementation for Self-driving Car Nanodegree (Udacity)
# First of all we will proceed to implement a graph structure in MiniFlow. For this purpose we will create a Python class in order to represent a generic node.
# ## Node class
#
# The node class will have the follwing properties:
# * each node might receive input from multiple other nodes *self.inbound_nodes)*
# * each node creates a single output which will likely be passed to other nodes *(self.outbound_nodes)*
# * each node will eventually calculate a value that represents its output *(value)*
#
# For the implementation we will use two lists: one to store references to the inbound nodes, and the other to store references to the outbound nodes. An inbound node is a node from which the current node receives values. Meanwhile, outbound nodes are the nodes to which the current node passes values.
class Node(object):
    """Base class for every node in the MiniFlow computation graph.

    Each node may receive input from multiple other nodes (inbound_nodes)
    and produces a single output value that is passed on to the nodes in
    outbound_nodes during forward propagation.
    """

    def __init__(self, inbound_nodes=None):
        # BUG FIX: a mutable default argument ([]) would be shared by every
        # Node created without explicit inbound nodes; use None as sentinel.
        self.inbound_nodes = [] if inbound_nodes is None else inbound_nodes
        self.outbound_nodes = []
        # Calculated output of this node; set during forward propagation.
        self.value = None
        # Register this node as an outbound node of each of its inputs.
        for innode in self.inbound_nodes:
            innode.outbound_nodes.append(self)

    def forward(self):
        """
        Forward propagation, implemented in a subclass.
        Compute the output value based on the 'inbound_nodes' and store the
        result in self.value.
        """
        # BUG FIX: 'raise NotImplemented' raised a TypeError because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
# ## Input subclass of Node
# We will build subclasses of Node in order to perform calculations and hold values. For example, the **Input** subclass of Node. An input node has no inbound nodes, so there is no need to pass anything to the Node instantiator.
#
# **NOTE** *Input node is the only node where the value may be passed as an argument to forward(). All other node implementations should get the value of the previous node from self.inbound_nodes. Example: val0 = self.inbound_nodes[0].value*
#
# ***The Input subclass does not actually calculate anything. The input subclass just holds a value, such as a data feature or a model parameter (weight/bias)***
#
# You can set value either explicitly or with the forward() method. This value is then fed through the rest of the neural network.
class Input(Node):
    """Input node: simply holds a value (a data feature or a weight/bias).

    It has no inbound nodes, so nothing is passed to the Node constructor.
    """

    def __init__(self):
        Node.__init__(self)

    def forward(self, value=None):
        """Overwrite the stored value when one is explicitly passed in."""
        if value is None:
            return
        self.value = value
# ## Add subclass of Node
# Will perform a calculation (addition). Unlike the **Input** class, which has no inbound nodes, the **Add** class takes 2 inbound nodes, *x* and *y*, and adds the values of those nodes.
class Add(Node):
    """Node that adds the values of its two inbound nodes."""

    def __init__(self, x, y):
        # 'x' becomes self.inbound_nodes[0] and 'y' self.inbound_nodes[1].
        Node.__init__(self, [x, y])

    def forward(self):
        """Set self.value to the sum of the inbound nodes' values."""
        left, right = self.inbound_nodes
        self.value = left.value + right.value
# ## Topological sorting
# For defining the network we will need to order the operations for the nodes. Given that the input to some node depends on the outputs of others, we will need to flatten the graph in such a way where all the input dependencies for each node are resolved before trying to run its calculation. This is a technique called a **topological sort**.
#
# For this purpose, we will implement **Kahn's Algorithm**. The method *topological_sort()* will return a sorted list of nodes in which all of the calculations can run in series. The input is a feed_dict represented by the Python dictionary data structure.
# ### Implementation of the Kahn's Algorithm
def topological_sort(feed_dict):
    '''
    Sort the graph's nodes in topological order using Kahn's Algorithm.

    'feed_dict': A dictionary where the key is a 'Input' node and the value
    is the respective value fed to that node.

    Returns a list of sorted nodes.
    '''
    input_nodes = [n for n in feed_dict.keys()]
    # Build G, an explicit edge map discovered by breadth-first traversal
    # from the input nodes: G[node] = {'in': inbound set, 'out': outbound set}.
    G = {}
    nodes = [n for n in input_nodes]
    while len(nodes) > 0:
        n = nodes.pop(0)
        if n not in G:
            G[n] = {'in': set(), 'out': set()}
        for m in n.outbound_nodes:
            if m not in G:
                G[m] = {'in': set(), 'out': set()}
            G[n]['out'].add(m)
            G[m]['in'].add(n)
            nodes.append(m)
    # Kahn's algorithm: repeatedly emit a node with no remaining inbound
    # edges, removing its outgoing edges as we go.
    L = []
    S = set(input_nodes)
    while len(S) > 0:
        n = S.pop()
        # Input nodes take their value directly from the feed dictionary.
        if isinstance(n, Input):
            n.value = feed_dict[n]
        L.append(n)
        for m in n.outbound_nodes:
            G[n]['out'].remove(m)
            G[m]['in'].remove(n)
            # if no other incoming edges add to S
            if len(G[m]['in']) == 0:
                S.add(m)
    return L
# Implementation for performing a forward pass through a list of sorted nodes
def forward_pass(output_node, sorted_nodes):
    """
    Run the forward() method of every node in topological order.

    Arguments:
        'output_node': the graph's output node (it has no outgoing edges).
        'sorted_nodes': a topologically sorted list of nodes.

    Returns the output node's value after the pass.
    """
    for node in sorted_nodes:
        node.forward()
    return output_node.value
# ## Script for building and run a graph with miniflow
# +
# Build the graph: two input nodes feeding a single Add node.
x, y = Input(), Input()
f = Add(x, y)
# Feed concrete values, flatten the graph, and run one forward pass.
feed_dict = {x: 10, y: 5}
sorted_nodes = topological_sort(feed_dict)
output = forward_pass(f, sorted_nodes)
# Expected output: 10 + 5 = 15
print("{} + {} = {} (according to miniflow)".format(feed_dict[x], feed_dict[y], output))
# -
| MiniFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# ==================================
# Fonts demo (object-oriented style)
# ==================================
#
# Set font properties using setters.
#
# See :doc:`fonts_demo_kw` to achieve the same effect using kwargs.
#
# +
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt

# Demonstrate FontProperties setters: one column of samples per property.
font0 = FontProperties()
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}

# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
font1 = font0.copy()
font1.set_size('large')
t = plt.figtext(0.1, 0.9, 'family', fontproperties=font1, **alignment)
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
    font = font0.copy()
    font.set_family(family)
    t = plt.figtext(0.1, yp[k], family, fontproperties=font, **alignment)

# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.figtext(0.3, 0.9, 'style', fontproperties=font1, **alignment)
for k, style in enumerate(styles):
    font = font0.copy()
    font.set_family('sans-serif')
    font.set_style(style)
    t = plt.figtext(0.3, yp[k], style, fontproperties=font, **alignment)

# Show variant options
variants = ['normal', 'small-caps']
t = plt.figtext(0.5, 0.9, 'variant', fontproperties=font1, **alignment)
for k, variant in enumerate(variants):
    font = font0.copy()
    font.set_family('serif')
    font.set_variant(variant)
    t = plt.figtext(0.5, yp[k], variant, fontproperties=font, **alignment)

# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.figtext(0.7, 0.9, 'weight', fontproperties=font1, **alignment)
for k, weight in enumerate(weights):
    font = font0.copy()
    font.set_weight(weight)
    t = plt.figtext(0.7, yp[k], weight, fontproperties=font, **alignment)

# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
         'x-large', 'xx-large']
t = plt.figtext(0.9, 0.9, 'size', fontproperties=font1, **alignment)
for k, size in enumerate(sizes):
    font = font0.copy()
    font.set_size(size)
    t = plt.figtext(0.9, yp[k], size, fontproperties=font, **alignment)

# Show bold italic at three increasing sizes, stacked in the x=0.3 column
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = plt.figtext(0.3, 0.1, 'bold italic', fontproperties=font, **alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = plt.figtext(0.3, 0.2, 'bold italic', fontproperties=font, **alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
# BUG FIX: the x coordinate was -0.4, which places the text outside the
# figure; the three 'bold italic' samples all belong at x=0.3.
t = plt.figtext(0.3, 0.3, 'bold italic', fontproperties=font, **alignment)
plt.show()
| matplotlib/gallery_jupyter/text_labels_and_annotations/fonts_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solving some system of equations
# + outputHidden=false inputHidden=false
import numpy as np
# + outputHidden=false inputHidden=false
# Solving following system of linear equations:
# 3x + 2y = 7
# 2x + 3y = 9
x = np.array([[3, 2], [2, 3]])
# BUG FIX: the constant vector was [7, 8], which does not match the stated
# system; the second equation's right-hand side is 9.
y = np.array([7, 9])
print(np.linalg.solve(x, y))
# + outputHidden=false inputHidden=false
# Solving following system of linear equations
# 9x - 17y = -20
# -13x + 7y = -94
# (solution: x = 11, y = 7)
x = np.array([[9, -17],[-13,7]])
y = np.array([-20, -94])
print(np.linalg.solve(x,y))
# + outputHidden=false inputHidden=false
# 5x - 2y = -13
# 4x + 5y = -6
x = np.array([[5, -2],[4,5]])
y = np.array([-13, -6])
print(np.linalg.solve(x,y))
# + outputHidden=false inputHidden=false
# 5x + 7y = 11
# 20x - 18y = 39
x = np.array([[5, 7],[20,-18]])
y = np.array([11, 39])
print(np.linalg.solve(x,y))
# + outputHidden=false inputHidden=false
# A 3x3 system: each row of x holds the coefficients of one equation.
# 3x - 2y + z = 7
# x + y + z = 2
# 3x - 2y - z = 3
x = np.array([[3, -2, 1],[1, 1, 1],[3, -2, -1]])
y = np.array([7, 2, 3])
print(np.linalg.solve(x,y))
# + outputHidden=false inputHidden=false
# 5x - 2y = -7
# 4x + 5y = -8
# (comments corrected: the code below solves the system with constants
# -7 and -8, not the -13/-6 system from the NumPy cell above)
from sympy import *
x, y = symbols(['x', 'y'])
system = [Eq(5*x - 2*y, -7),
          Eq(4*x + 5*y, -8)]
solutions = solve(system, [x, y])
print(solutions)
# + outputHidden=false inputHidden=false
# Same 3x3 system as above, solved symbolically with SymPy (exact rationals).
# 3x - 2y + z = 7
# x + y + z = 2
# 3x - 2y - z = 3
from sympy import *
x, y, z = symbols(['x', 'y', 'z'])
system = [Eq(3*x - 2*y + z, 7),
          Eq(x + y + z, 2),
          Eq(3*x - 2*y - z, 3)]
solutions = solve(system, [x, y, z])
print(solutions)
# + outputHidden=false inputHidden=false
# Direct NumPy solve of the same 3x3 system (solution: x=1, y=-1, z=2).
x = np.array([[3, -2, 1],
              [1, 1, 1],
              [3, -2, -1]])
y = np.array([7, 2, 3])
# linalg.solve is the NumPy function for systems of linear scalar equations
print("Solutions:\n", np.linalg.solve(x, y))
# -
# ## LU decomposition with SciPy
# + outputHidden=false inputHidden=false
# LU decomposition with SciPy
import scipy.linalg as linalg  # Package for LU decomposition
# Factor the matrix once (PLU), then solve the two triangular systems.
x = np.array([[3, -2, 1],
              [1, 1, 1],
              [3, -2, -1]])
y = np.array([7, 2, 3])
LU = linalg.lu_factor(x)
x = linalg.lu_solve(LU, y)
print("Solutions:\n", x)
# + outputHidden=false inputHidden=false
import scipy.linalg
# BUG FIX: 'scipy.array' was a deprecated alias of numpy.array and has been
# removed from modern SciPy; also, 'import scipy' alone does not guarantee
# the scipy.linalg submodule is loaded, so import it explicitly.
x = np.array([[3, -2, 1], [1, 1, 1], [3, -2, -1]])
P, L, U = scipy.linalg.lu(x)
# + outputHidden=false inputHidden=false
# Print the original matrix and its permutation / lower / upper factors,
# with a dashed separator between (but not after) the blocks.
factors = [("x", x), ("P", P), ("L", L), ("U", U)]
for idx, (label, mat) in enumerate(factors):
    print(label + ":\n", mat)
    if idx < len(factors) - 1:
        print("-" * 50)
# -
# ## Euclidean
# + outputHidden=false inputHidden=false
from scipy.spatial import distance

# Euclidean distance between two 3-D points: sqrt(3^2 + 3^2 + 3^2).
a = (1, 2, 3)
b = (4, 5, 6)
euc_dist = distance.euclidean(a, b)
print("Euclidean Distance:", euc_dist)
# -
# ## Hadamard Product
# +
# Hadamard (element-wise) product of two 2x2 matrices.
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
hp = a * b
print("Hadamard Product:\n", hp)
# + outputHidden=false inputHidden=false
# The '*' operator on ndarrays is the element-wise product...
a * b
# + outputHidden=false inputHidden=false
# ...and so is calling np.multiply directly.
np.multiply(a, b)
# -
# ## Dot Product
# + outputHidden=false inputHidden=false
# Matrix (dot) product; np.matmul is what the '@' operator calls.
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
dp = np.matmul(x, y)
print('Dot Product:\n', dp)
# + outputHidden=false inputHidden=false
# np.dot is equivalent for 2-D arrays
np.dot(x, y)
# -
# ## Dot product of vectors
#
# Finding the product of the summation of two vectors and the output will be a single vector.
# + outputHidden=false inputHidden=false
# With 2-D arrays, zip pairs ROWS, so this sums element-wise row products
# and the "dot product" comes out as an array, not a scalar.
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
dotproduct = sum(i*j for i,j in zip(x,y))
print('Dot product is : ' , dotproduct)
# + outputHidden=false inputHidden=false
# With flat lists the same expression is the classic scalar dot product (70).
x = [1,2,3,4]
y = [5,6,7,8]
dotproduct = sum(i*j for i,j in zip(x,y))
print('Dot product is : ' , dotproduct)
# -
# ## Inner Product
# + outputHidden=false inputHidden=false
# np.inner computes row_i(x) . row_j(y) for every pair of rows.
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
ip = np.inner(x, y)
print('Inner Product:\n', ip)
| Mathematics_for_Machine_Learning_Linear_Algebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 8</font>
#
# ## Download: http://github.com/dsacademybr
# Python language version used by this notebook
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# ### NumPy
# Para importar numpy, utilize:
# import numpy as np
#
# Você também pode utilizar:
# from numpy import * . Isso evitará a utilização de np., mas este comando importará todos os módulos do NumPy.
# Para atualizar o NumPy, abra o prompt de comando e digite: pip install numpy -U
# Importando o NumPy
import numpy as np
np.__version__
# ## Criando Arrays
# Help
help(np.array)

# Array created from a list:
vetor1 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
print(vetor1)

# An ndarray is a multidimensional container of items of the same type and size.
type(vetor1)

# Using NumPy array methods
vetor1.cumsum()

# Creating a list. Note how lists and arrays are different objects with
# different properties.
lst = [0, 1, 2, 3, 4, 5, 6, 7, 8]
lst
type(lst)

# Printing a specific element of the array
vetor1[0]

# Changing an element of the array
vetor1[0] = 100
print(vetor1)

# BUG FIX: assigning a string into an int array raises ValueError and, in a
# plain script, aborts execution here; keep the demonstration but catch it.
try:
    vetor1[0] = 'Novo elemento'
except ValueError as err:
    print('Cannot store an element of another type:', err)

# Checking the array shape
print(vetor1.shape)
# ## Funções NumPy
# The arange function creates a vector containing an arithmetic progression over an interval - start, stop, step
vetor2 = np.arange(0., 4.5, .5)
print(vetor2)
# Checking the object's type
type(vetor2)
# Array shape
np.shape(vetor2)
print (vetor2.dtype)
x = np.arange(1, 10, 0.25)
print(x)
print(np.zeros(10))
# Returns 1 on the diagonal positions and 0 everywhere else
z = np.eye(3)
z
# The values passed as a parameter form a diagonal
d = np.diag(np.array([1, 2, 3, 4]))
d
# Array of complex numbers
c = np.array([1+2j, 3+4j, 5+6*1j])
c
# Array of boolean values
b = np.array([True, False, False, True])
b
# Array of strings
s = np.array(['Python', 'R', 'Julia'])
s
# The linspace (linearly spaced vector) method returns a number of
# values evenly distributed over the specified interval
np.linspace(0, 10)
print(np.linspace(0, 10, 15))
print(np.logspace(0, 5, 10))
# ## Creating Matrices
# Creating a matrix
matriz = np.array([[1,2,3],[4,5,6]])
print(matriz)
print(matriz.shape)
# Creating a 2x3 matrix filled with the number "1"
matriz1 = np.ones((2,3))
print(matriz1)
# Creating a matrix from a list of lists
lista = [[13,81,22], [0, 34, 59], [21, 48, 94]]
# The matrix function creates a matrix from a sequence
matriz2 = np.matrix(lista)
matriz2
type(matriz2)
# Matrix shape
np.shape(matriz2)
matriz2.size
print(matriz2.dtype)
matriz2.itemsize
matriz2.nbytes
print(matriz2[2,1])
# Changing an element of the matrix
matriz2[1,0] = 100
matriz2
# +
x = np.array([1, 2])  # NumPy decides the data type
y = np.array([1.0, 2.0])  # NumPy decides the data type
z = np.array([1, 2], dtype=np.float64)  # We force a particular data type
print (x.dtype, y.dtype, z.dtype)
# -
matriz3 = np.array([[24, 76], [35, 89]], dtype=float)
matriz3
matriz3.itemsize
matriz3.nbytes
matriz3.ndim
matriz3[1,1]
matriz3[1,1] = 100
matriz3
# ## Using NumPy's random() method
print(np.random.rand(10))
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib as mat
mat.__version__
print(np.random.rand(10))
# Histogram of uniform [0, 1) samples vs. standard-normal samples below.
plt.show((plt.hist(np.random.rand(1000))))
print(np.random.randn(5,5))
plt.show(plt.hist(np.random.randn(1000)))
# Render a 30x30 random matrix as a heat-map image.
imagem = np.random.rand(30, 30)
plt.imshow(imagem, cmap = plt.cm.hot)
plt.colorbar()
# ## Operations with datasets
import os
filename = os.path.join('iris.csv')
# On Windows use !more iris.csv. On Mac or Linux use !head iris.csv
# !head iris.csv
# #!more iris.csv
# Loading a dataset into an array (first four numeric columns, header skipped)
arquivo = np.loadtxt(filename, delimiter=',', usecols=(0,1,2,3), skiprows=1)
print (arquivo)
type(arquivo)
# Generating a plot from a file using NumPy (unpack=True splits the columns)
var1, var2 = np.loadtxt(filename, delimiter=',', usecols=(0,1), skiprows=1, unpack=True)
plt.show(plt.plot(var1, var2, 'o', markersize=8, alpha=0.75))
# ## Statistics
# Creating an array
A = np.array([15, 23, 63, 94, 75])
# In statistics, the mean is the value around which the data of a distribution concentrate.
np.mean(A)
# The standard deviation shows how much variation or "dispersion" exists
# relative to the mean (or expected value).
# A low standard deviation indicates that the data tend to be close to the mean.
# A high standard deviation indicates that the data are spread over a wide range of values.
np.std(A)
# The variance of a random variable is a measure of its statistical
# dispersion, indicating "how far" its values generally are
# from the expected value.
np.var(A)
d = np.arange(1, 10)
d
np.sum(d)
# Returns the product of the elements
np.prod(d)
# Cumulative sum of the elements
np.cumsum(d)
# 400 standard-normal 2-D points; mark their column-wise mean in red.
a = np.random.randn(400,2)
m = a.mean(0)
print (m, m.shape)
plt.plot(a[:,0], a[:,1], 'o', markersize=5, alpha=0.50)
plt.plot(m[0], m[1], 'ro', markersize=10)
plt.show()
# ## Other Array Operations
# Slicing
a = np.diag(np.arange(3))
a
a[1, 1]
a[1]
b = np.arange(10)
b
# [start:end:step]
b[2:9:3]
# Element-wise comparison
a = np.array([1, 2, 3, 4])
b = np.array([4, 2, 2, 4])
a == b
np.array_equal(a, b)
a.min()
a.max()
# Adding a scalar to every element of the array (broadcasting)
np.array([1, 2, 3]) + 1.5
# Using the around method to round each element
a = np.array([1.2, 1.5, 1.6, 2.5, 3.5, 4.5])
b = np.around(a)
b
# Creating an array
B = np.array([1, 2, 3, 4])
B
# Copying an array (flatten returns a flat copy)
C = B.flatten()
C
# Creating an array
v = np.array([1, 2, 3])
# Adding a dimension to the array
v[:, np.newaxis], v[:,np.newaxis].shape, v[np.newaxis,:].shape
# Repeating each element of an array
np.repeat(v, 3)
# Tiling the whole array
np.tile(v, 3)
# Creating an array
w = np.array([5, 6])
# Concatenating
np.concatenate((v, w), axis=0)
# Copying arrays
r = np.copy(v)
r
# Conheça a Formação Cientista de Dados, um programa completo, 100% online e 100% em português, com mais de 400 horas de carga horária, mais de 1.200 aulas em vídeos e dezenas de projetos, que vão ajudá-lo a se tornar um dos profissionais mais cobiçados do mercado de análise de dados. Clique no link abaixo, faça sua inscrição, comece hoje mesmo e aumente sua empregabilidade:
#
# https://www.datascienceacademy.com.br/bundle/formacao-cientista-de-dados
# # Fim
#
# ### Obrigado
#
# ### Visite o Blog da Data Science Academy - <a href="http://blog.dsacademy.com.br">Blog DSA</a>
#
| Data Science Academy/PythonFundamentos/Cap08/Notebooks/DSA-Python-Cap08-01-NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>, March 26th, vg-stats
# ## Video Game Sales Data Analysis
import pandas as pd
import numpy as np
from datetime import date
# +
# answer the following questions/do the following tasks. Note that the numbers quoted for sales are in the millions
# and apply only for those games with over 100,000 sales.
# -
# NOTE(review): several later cells use head() as "top N", which assumes the
# csv is pre-sorted by global sales (Rank) -- verify against the data file.
df = pd.read_csv('./vgsales.csv')
df.head()
# 1. Which company is the most common video game publisher?
# value_counts() returns publisher frequencies sorted descending, so the most
# common publisher is the first row. (The original groupby/count was sorted
# ascending, putting the answer at the bottom of the output.)
df['Publisher'].value_counts()
# Electronic Arts
# +
# 2. What’s the most common platform?
# value_counts() sorts frequencies descending, so the top row is the answer.
df['Platform'].value_counts()
# DS
# -
# 3. What about the most common genre?
df['Genre'].value_counts()
# Action
# 4. What are the top 20 highest grossing games?
# Sort by Global_Sales explicitly so the answer does not depend on the csv's
# row order (the file happens to be ordered by Rank, but don't rely on that).
df[['Name', 'Global_Sales']].sort_values('Global_Sales', ascending=False).head(20)
# +
# 5. For North American video game sales, what’s the median?
# Provide a secondary output showing 'about' ten games surrounding the median sales output
# NA_Sales
pd.DataFrame(df[['Name', 'NA_Sales']])
# 16597 / 2 = 8,298.5 round up = 8,299 is the median
# NOTE(review): df['NA_Sales'].median() computes this directly instead of
# reading the row count by hand.
# -
# 5. Rows whose NA sales fall in a narrow band around the median (~0.08M).
df[(df['NA_Sales'] < 0.085) & (df['NA_Sales'] > 0.075)]
# +
# 6. For the top-selling game of all time, how many standard deviations
# above/below the mean are its sales for North America?
# find mean average
# find top selling game of all time
# how many s.d. above mean are its sales for N.A.?
# top selling
pd.DataFrame(df[['Name', 'Global_Sales']]).head(1)
# Wii Sports Global_Sales = 82.74
# find N.A. mean
pd.DataFrame(df[['Name', 'NA_Sales']]).mean()
# NA_Sales mean = 0.264667
# find Wii Sports NA sales
pd.DataFrame(df[['Name', 'NA_Sales']])
# 41.49
# find S.D.
Na_Sales_Mean = 0.2646667
# NOTE(review): this computes the std of only the rows at or above the mean,
# not of the whole NA_Sales column (df['NA_Sales'].std()); confirm which
# spread was intended before using the 1.519 figure.
pd.DataFrame(df[((df['NA_Sales'] >= Na_Sales_Mean))]).NA_Sales.std()
# 1.51903
# +
# 7. The Nintendo Wii seems to have outdone itself with games.
# How does its average number of sales compare with all of the other platforms?
# Mean global sales per game for every platform, best first. (The original
# cell counted rows and read the single top global seller, which answers a
# different question than "average sales per platform".)
platform_avg_sales = df.groupby('Platform')['Global_Sales'].mean().sort_values(ascending=False)
platform_avg_sales
# +
# 8. Come up with 3 more questions that can be answered with this data set.

# What are the top 10 highest grossing games in Europe?
# Sort by EU_Sales explicitly -- the file is ordered by GLOBAL sales, so
# head() alone would answer the wrong question.
df[['Name', 'EU_Sales']].sort_values('EU_Sales', ascending=False).head(10)
# -
# What are the top 5 highest grossing genres in other sales?
# Sum (not count) the sales per genre to measure grossing.
df.groupby('Genre')['Other_Sales'].sum().sort_values(ascending=False).head()
# What are the top 10 years with the highest global sales?
df.groupby('Year')['Global_Sales'].sum().sort_values(ascending=False).head(10)
| vg_stats/vg_stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
import pickle
# Per-team game data produced upstream; one row per team per game.
path = ("./resources/Team_df.csv")
com_data = pd.read_csv(path)
print(com_data.shape)
com_data.head()
# +
# Keep only the model's feature columns (Opponent score / Result removed).
# .copy() gives an independent frame, avoiding pandas' SettingWithCopyWarning
# when the stats columns are scaled in place below.
rf_model_data = com_data[['Team', 'Opp', 'TmScore', 'O_1stD', 'O_Tot_yd', 'O_P_Yd', 'O_R_Yd', 'O_TO',
                          'D_1stD', 'D_Tot_Yd', 'D_P_Yd', 'D_R_Yd', 'D_TO', 'Home', 'Prediction_LR', 'Prediction_ADA']].copy()

# Scale the per-game stats up by 16 ("change to season stats").
season_stats = ['O_1stD', 'O_Tot_yd', 'O_P_Yd', 'O_R_Yd', 'O_TO',
                'D_1stD', 'D_Tot_Yd', 'D_P_Yd', 'D_R_Yd', 'D_TO']
rf_model_data[season_stats] = rf_model_data[season_stats] * 16

# Index of every team's rows (the original cell assigned this twice).
team_index = com_data['Team']

# One-hot encode the categorical columns (Team / Opp).
rf_model_data = pd.get_dummies(rf_model_data)
rf_model_data.head()
# -
# ## MODELING
# +
# Random forest
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

# Data processing
X = rf_model_data.drop('TmScore', axis=1).values
# ravel() gives the 1-D target vector sklearn expects; the original
# reshape(-1, 1) column vector triggers a DataConversionWarning in fit().
y = rf_model_data['TmScore'].values.ravel()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)

RF_model = RandomForestRegressor(n_estimators=2000, random_state=20)
RF_model.fit(X_train, y_train)

# Mean absolute prediction error on the held-out 10%.
y_pred = RF_model.predict(X_test)
print(mean_absolute_error(y_test, y_pred))
# -
# Persist the trained model; a context manager guarantees the file handle is
# closed even if pickling fails (the original left the handle open).
Pkl_RF_model = 'RF_model.sav'
with open(Pkl_RF_model, 'wb') as model_fh:
    pickle.dump(RF_model, model_fh)
# +
# Show every row so the full importance ranking is visible.
pd.options.display.max_rows = None
# Pair each feature name with its random-forest importance and rank them.
importance = RF_model.feature_importances_
feature_names = rf_model_data.drop('TmScore', axis=1).columns
d = {'Var_Name': feature_names, 'Imp': importance}
df = pd.DataFrame(data=d)
df = df.sort_values(by= ['Imp'], ascending = False).reset_index(drop=True)
df.head(15)
# function that returns the score of two teams playing
pd.options.display.max_columns = None
# Create playoff test dataset from season averages
def Score_Predictor(home_team, away_team):
    """Predict the score of home_team vs. away_team using the trained model.

    Builds one feature row per team from the mean of rows 1..15 of that
    team's data (presumably weeks 2-16 -- confirm), sets the one-hot
    opponent/home columns, swaps each team's defensive inputs for the
    opponent's offensive averages, and feeds both rows to RF_model.

    Relies on the globals rf_model_data, com_data and RF_model defined above.
    Returns (scores, winner).
    """
    team1 = home_team
    team2 = away_team
    team1_data = rf_model_data[com_data['Team'] == team1].drop('TmScore', axis=1).reset_index(drop=True)
    team2_data = rf_model_data[com_data['Team'] == team2].drop('TmScore', axis=1).reset_index(drop=True)
    # Averaging window over each team's rows.
    week_slice = slice(1,16)
    #1 Remove if no team names
    team1_test = pd.DataFrame(team1_data[week_slice].mean(axis=0)).T #select week to use as average
    team1_test
    # Zero every one-hot opponent column, then flag the actual opponent.
    opp_columns = team1_test.filter(like='Opp').columns
    team1_test[opp_columns] = 0
    team1_test['Opp_' + team2] = 1
    team1_test['Home'] = 1
    #2
    team2_test = pd.DataFrame(team2_data[week_slice].mean(axis=0)).T #select week to use as average
    opp_columns = team2_test.filter(like='Opp').columns
    team2_test[opp_columns] = 0
    team2_test['Opp_' + team1] = 1
    team2_test['Home'] = 0 #remove home field advantage
    # head to head matchup: each team's defensive inputs become the
    # opponent's offensive season averages.
    team1_test[['D_1stD','D_Tot_Yd','D_P_Yd','D_R_Yd','D_TO']] = team2_test[['O_1stD','O_Tot_yd','O_P_Yd','O_R_Yd','O_TO']]
    team2_test[['D_1stD','D_Tot_Yd','D_P_Yd','D_R_Yd','D_TO']] = team1_test[['O_1stD','O_Tot_yd','O_P_Yd','O_R_Yd','O_TO']]
    X_Playoff_test = pd.concat([team1_test, team2_test])
    X_Playoff_test.fillna(0, inplace = True) # added to address the NaNs that were causing the error
    scores = RF_model.predict(X_Playoff_test)
    print(team1, "will score", round(scores[0], 1))
    print(team2, "will score", round(scores[1], 1))
    if scores[0] > scores[1]:
        winner = team1
    else:
        winner = team2
    print(winner, "are the WINNERS!!!")
    return scores, winner
# -
# # Potentially add for loop to insert the winner into the next round (names must be full names in Score_Predictor) and add the seeding
# ### NFC Playoff Round 1
scores, winner = Score_Predictor('Philadelphia Eagles', 'Tampa Bay Buccaneers')
scores, winner = Score_Predictor('San Francisco 49ers', 'Dallas Cowboys')
scores, winner = Score_Predictor('Arizona Cardinals', 'Los Angeles Rams')
# ### AFC Playoff Round 1
scores, winner = Score_Predictor('Pittsburgh Steelers', 'Kansas City Chiefs')
scores, winner = Score_Predictor('Las Vegas Raiders', 'Cincinnati Bengals')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
# ### NFC Playoff Round 2
# TODO(review): everything below is a copy-paste placeholder — every call
# repeats 'Buffalo Bills' vs 'New England Patriots', and this "NFC Playoff
# Round 2" heading appears twice (the second was presumably meant to be AFC).
# Fill in the real round-2+ matchups from the round-1 winners.
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
# ### NFC Playoff Round 2
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
# ### NFC Championship
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
# ### AFC Championship
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
# ### Superbowl
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
| RandomForest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Create a Dataset from a JSON file
#
# This example illustrates how to create a Dataset from a JSON file using pandas
# +
import pandas
import requests

remote_file_path = 'http://opendata.paris.fr/api/records/1.0/search/?dataset=arbresremarquablesparis&rows=200'

# Fetch the records and flatten the nested JSON into columns.
# FIX: ``pandas.io.json.json_normalize`` has been deprecated since pandas 1.0
# (and removed in pandas 2.0) — use the top-level ``pandas.json_normalize``
# instead.  It already returns a DataFrame, so the extra
# ``pandas.DataFrame(...)`` wrapper was redundant as well.
data_json = requests.get(remote_file_path).json()['records']
df = pandas.json_normalize(data_json)
df.head()
# -
# Setup the geometry data
# The API returns GeoJSON-style coordinates, i.e. [longitude, latitude] —
# presumably; confirm against the dataset schema.
df[['lon','lat']] = pandas.DataFrame(df['geometry.coordinates'].values.tolist(), index=df.index)
# +
from cartoframes.data import Dataset

# Wrap the DataFrame in a CARTOframes Dataset so it can be rendered.
ds = Dataset(df)
# +
from cartoframes.viz import Map, Layer

Map(Layer(ds))
| examples/downloading_and_uploading_data/create_dataset_from_json.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CLI Record Linkage CSV Generation
# ## Boilerplate
# %load_ext autoreload
# %autoreload 2
from importlib import reload
import logging
# Re-apply basicConfig even if logging was already configured by an import.
reload(logging)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.INFO, datefmt='%H:%M:%S')
# +
import sys
# Make the repository root importable from this notebook's location.
sys.path.insert(0, '../..')
# -
# Fixed seed so the cluster splits below are reproducible.
random_seed = 42
# ## Load Dataset
# +
import urllib
import tempfile

# Download the Amazon-GoogleProducts benchmark zip into a named temp file.
dataset_url = 'https://dbs.uni-leipzig.de/file/Amazon-GoogleProducts.zip'
# delete=False so the file survives close(); it is removed explicitly at the
# end of the notebook.
tf = tempfile.NamedTemporaryFile(mode='r', delete=False)
tf.close()
urllib.request.urlretrieve(dataset_url, tf.name);
# +
import zipfile
import os

# Extract the archive into a temporary directory.
td = tempfile.TemporaryDirectory()
with zipfile.ZipFile(tf.name, "r") as zf:
    zf.extractall(td.name)
os.listdir(td.name)
# +
import csv
from entity_embed.data_utils.utils import Enumerator

# Enumerator maps the datasets' string ids onto one dense integer id space.
id_enumerator = Enumerator()
record_dict = {}
# Record-linkage bookkeeping: each record is tagged with its source dataset.
source_field = '__source'
left_source = 'amazon'
right_source = 'google'

with open(f'{td.name}/Amazon.csv', newline='', encoding="latin1") as f:
    for record in csv.DictReader(f):
        record['id'] = id_enumerator[record["id"]]
        record[source_field] = left_source
        del record['description'] # drop description, for benchmarking
        record_dict[record['id']] = record

with open(f'{td.name}/GoogleProducts.csv', newline='', encoding="latin1") as f:
    for record in csv.DictReader(f):
        record['id'] = id_enumerator[record["id"]]
        record['title'] = record.pop('name') # in Google, title is called name
        record[source_field] = right_source
        del record['description'] # drop description, for benchmarking
        record_dict[record['id']] = record
# +
# Ground-truth matches: (amazon id, google id) pairs referring to the same
# product.  "Amzon" is the actual (misspelled) filename shipped in the zip.
pos_pair_set = set()
with open(f'{td.name}/Amzon_GoogleProducts_perfectMapping.csv', newline='') as f:
    for record in csv.DictReader(f):
        id_left = id_enumerator[record['idAmazon']]
        id_right = id_enumerator[record['idGoogleBase']]
        pos_pair_set.add((id_left, id_right))
len(pos_pair_set)
# +
from entity_embed.data_utils import utils

# Transitively group matched ids into clusters.
cluster_mapping, cluster_dict = utils.id_pairs_to_cluster_mapping_and_dict(pos_pair_set, record_dict)
len(cluster_dict)
# +
cluster_field = 'cluster'
# Write each record's cluster id into the record itself.
utils.assign_clusters(record_dict=record_dict, cluster_field=cluster_field, cluster_mapping=cluster_mapping)
# +
from entity_embed.data_utils import utils

# Split by CLUSTER (not by record) so matching records never straddle the
# train/valid/test boundary: 50% train, 20% valid, 30% test.
train_record_dict, valid_record_dict, test_record_dict = utils.split_record_dict_on_clusters(
    record_dict=record_dict,
    cluster_field=cluster_field,
    train_proportion=0.5,
    valid_proportion=0.2,
    random_seed=random_seed)
# +
from entity_embed.data_utils import utils

# Split test 50/50; the second half plus the remainder becomes the
# "unlabeled" set used for final evaluation.
test_record_dict, unlabeled_record_dict, rest_dict = utils.split_record_dict_on_clusters(
    record_dict=test_record_dict,
    cluster_field=cluster_field,
    train_proportion=0.5,
    valid_proportion=0.5,
    random_seed=random_seed)
unlabeled_record_dict.update(rest_dict)
# -
# Free the combined dict; only the split dicts are used from here on.
del record_dict
# +
import os

# Remove the downloaded zip and the extracted files.
td.cleanup()
os.remove(tf.name)
# -
# ## Preprocess
all_record_dicts = [
    train_record_dict,
    valid_record_dict,
    test_record_dict,
    unlabeled_record_dict
]
field_list = ['title', 'manufacturer', 'price']
# +
import unidecode
def clean_str(s):
    """Normalize a raw CSV value: transliterate to ASCII, lowercase, trim."""
    ascii_text = unidecode.unidecode(s)
    return ascii_text.lower().strip()
# Normalize every benchmarked field in every split, in place.
for record_dict_ in all_record_dicts:
    for record in record_dict_.values():
        for field in field_list:
            record[field] = clean_str(record[field])
# -
# ## CSV Generation
# +
import random

# Seeded RNG kept for reproducibility of any sampling done below.
rnd = random.Random(random_seed)

# Column order for the generated CSVs: id, the benchmark fields, source tag.
fieldnames = ['id', *field_list, '__source']
def write_csv(filepath, record_dict_, fieldnames, include_labels):
    """Dump the records of *record_dict_* to *filepath* as CSV.

    Only the columns listed in *fieldnames* are written; when
    *include_labels* is true the module-level ``cluster_field`` column is
    prepended as the first column.
    """
    columns = ([cluster_field] if include_labels else []) + fieldnames
    with open(filepath, 'w', encoding='utf-8', newline='') as out:
        writer = csv.DictWriter(out, fieldnames=columns)
        writer.writeheader()
        for rec in record_dict_.values():
            row = {name: value for name, value in rec.items() if name in columns}
            writer.writerow(row)
# Labeled splits keep the cluster column; the unlabeled split omits it.
write_csv('../../example-data/rl-train.csv', train_record_dict, fieldnames, include_labels=True)
write_csv('../../example-data/rl-valid.csv', valid_record_dict, fieldnames, include_labels=True)
write_csv('../../example-data/rl-test.csv', test_record_dict, fieldnames, include_labels=True)
write_csv('../../example-data/rl-unlabeled.csv', unlabeled_record_dict, fieldnames, include_labels=False)
# -
# ## JSON Generation
# Ground-truth positive pairs restricted to the unlabeled split, keyed by
# which side (amazon/google) each record came from.
unlabeled_pos_pairs = utils.cluster_dict_to_id_pairs(
    cluster_dict=utils.record_dict_to_cluster_dict(unlabeled_record_dict, cluster_field),
    left_id_set={record['id'] for record in unlabeled_record_dict.values() if record[source_field] == left_source},
    right_id_set={record['id'] for record in unlabeled_record_dict.values() if record[source_field] == right_source}
)
len(unlabeled_pos_pairs)
# +
import json

# Persist the evaluation pairs for the CLI's evaluate step.
with open('../../example-data/rl-unlabeled-pos-pairs.json', 'w', encoding='utf-8') as f:
    json.dump(list(unlabeled_pos_pairs), f, indent=4)
# +
from entity_embed.data_utils.field_config_parser import DEFAULT_ALPHABET

alphabet = DEFAULT_ALPHABET

# Field-encoding config consumed by the entity_embed CLI.  'max_str_len' is
# left as None so the CLI computes it from the data.
field_config_dict = {
    'title': {
        'field_type': "MULTITOKEN",
        'tokenizer': "entity_embed.default_tokenizer",
        'alphabet': alphabet,
        'max_str_len': None, # compute
    },
    # Same source column as 'title' but embedded with pretrained word
    # vectors instead of character encodings.
    'title_semantic': {
        'key': 'title',
        'field_type': "SEMANTIC_MULTITOKEN",
        'tokenizer': "entity_embed.default_tokenizer",
        'vocab': "fasttext.en.300d",
    },
    'manufacturer': {
        'field_type': "MULTITOKEN",
        'tokenizer': "entity_embed.default_tokenizer",
        'alphabet': alphabet,
        'max_str_len': None, # compute
    },
    'price': {
        'field_type': "STRING",
        'tokenizer': "entity_embed.default_tokenizer",
        'alphabet': alphabet,
        'max_str_len': None, # compute
    }
}
# -
with open('../../example-data/rl-field-config.json', 'w', encoding='utf-8') as f:
    json.dump(field_config_dict, f, indent=4)
# ```bash
# entity_embed_train \
# --field_config_json example-data/rl-field-config.json \
# --train_csv example-data/rl-train.csv \
# --valid_csv example-data/rl-valid.csv \
# --test_csv example-data/rl-test.csv \
# --unlabeled_csv example-data/rl-unlabeled.csv \
# --csv_encoding utf-8 \
# --cluster_field cluster \
# --source_field __source \
# --left_source amazon \
# --embedding_size 300 \
# --lr 0.001 \
# --min_epochs 5 \
# --max_epochs 100 \
# --early_stop_monitor valid_recall_at_0.3 \
# --early_stop_min_delta 0 \
# --early_stop_patience 20 \
# --early_stop_mode max \
# --tb_save_dir tb_logs \
# --tb_name rl-example \
# --check_val_every_n_epoch 1 \
# --batch_size 32 \
# --eval_batch_size 64 \
# --num_workers -1 \
# --multiprocessing_context fork \
# --sim_threshold 0.3 \
# --sim_threshold 0.5 \
# --sim_threshold 0.7 \
# --ann_k 100 \
# --m 64 \
# --max_m0 64 \
# --ef_construction 150 \
# --ef_search -1 \
# --random_seed 42 \
# --model_save_dir trained-models/rl/ \
# --use_gpu 1
# ```
# ```bash
# entity_embed_predict \
# --model_save_filepath "trained-models/rl/...fill-here..." \
# --unlabeled_csv example-data/rl-unlabeled.csv \
# --csv_encoding utf-8 \
# --source_field __source \
# --left_source amazon \
# --eval_batch_size 50 \
# --num_workers -1 \
# --multiprocessing_context fork \
# --sim_threshold 0.3 \
# --ann_k 100 \
# --m 64 \
# --max_m0 64 \
# --ef_construction 150 \
# --ef_search -1 \
# --random_seed 42 \
# --output_json example-data/rl-prediction.json \
# --use_gpu 1
# ```
# +
from entity_embed.evaluation import evaluate_output_json

# Compare the CLI's predicted pairs against the ground-truth positives.
# NOTE(review): pe_ratio is presumably the pair-efficiency ratio reported by
# entity_embed — confirm against its docs.
precision, recall, f1, pe_ratio = evaluate_output_json(
    unlabeled_csv_filepath='../../example-data/rl-unlabeled.csv',
    output_json_filepath='../../example-data/rl-prediction.json',
    pos_pair_json_filepath='../../example-data/rl-unlabeled-pos-pairs.json'
)
print("precision", precision)
print("recall", recall)
print("f1", f1)
print("pe_ratio", pe_ratio)
| notebooks/cli-example-generation/CLI-Record-Linkage-CSV-Generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ItsT33dak5Ou" colab_type="text"
# # Learning disentangled representations
#
# x --> encoder --> z [-1, 1] --> sample --> z' {-1, 1} --> dropout --> z'' {-1, 0, 1} --> decoder --> x_bar
#
# z' ~ Binomial(p = |z| * 0.5 + 0.5).sample() * 2 - 1
#
# The idea is that z either recognizes excitatorily (z == 1) or inhibitorily (z == -1) or z is ambiguous/ignorable/not-applicable (z near 0).
# + [markdown] id="nAz-3TRznYB8" colab_type="text"
#
# + id="L7OsISnxX4nA" colab_type="code" outputId="09696e08-e4f2-4928-98b6-7e0383c629d4" executionInfo={"status": "ok", "timestamp": 1562209334781, "user_tz": 420, "elapsed": 13201, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 165}
# http://pytorch.org/
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
# Wheel platform tag, used by the (commented) pip install line below.
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
# cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
# NOTE(review): the shell magic defining ``cuda_output`` above is commented
# out, so this line raises NameError on a machine where /dev/nvidia0 exists.
# Benign while ``device`` is hard-coded to "cpu" below; fix before
# re-enabling GPU wheel selection.
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
# #!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision
# !pip install torch
# !pip install tqdm
# !pip install dotmap
from dotmap import DotMap
import logging
import numpy as np
import os
import random
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from tqdm import tqdm, tqdm_notebook
from scipy.stats import norm
# device = "cuda" if torch.cuda.is_available() else "cpu"
# Force CPU execution regardless of CUDA availability.
device = "cpu"
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s.%(msecs)03d %(name)s:%(funcName)s %(levelname)s:%(message)s',
    datefmt="%M:%S")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from skimage.draw import line_aa
from matplotlib.colors import LinearSegmentedColormap
# %matplotlib inline
plt.style.use('classic')
# Red -> black -> green colormap used to display signed latent codes.
plt_cm_RdYlGn_colors=['#FF8080', '#000000', '#80FF80']
plt_cm_RdYlGn_=LinearSegmentedColormap.from_list('plt_cm_RdYlGn_', plt_cm_RdYlGn_colors)
# from google.colab import drive
# drive.mount('/content/gdrive')
# save_path_prefix = '/content/gdrive/My Drive/Colab Notebooks/saved/QDL_01'
# + id="IJbCTh--ZDof" colab_type="code" colab={}
def load_mnist(train=True, batch_size=64):
    """Return a shuffled DataLoader over the MNIST train or test split.

    Downloads the data into ../data on first use.  Pinned memory and a
    worker process are enabled only when the module-level ``device`` is CUDA.
    """
    extra = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
    dataset = datasets.MNIST(
        '../data', train=train, download=True,
        transform=transforms.Compose([transforms.ToTensor()]))
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, **extra)
def show_image(image, vmin=None, vmax=None, title=None, print_values=False, figsize=(4, 4), cmap=plt.cm.plasma):
    """Render a single 2-D tensor as an image; optionally print its values."""
    pixels = image.cpu().numpy()
    fig, ax1 = plt.subplots(figsize=figsize)
    if title:
        plt.title(title)
    ax1.imshow(pixels, vmin=vmin, vmax=vmax, interpolation='none', cmap=cmap)
    plt.show()
    if print_values:
        print(pixels)
def show_image_grid(images, vmin=0, vmax=1, nrows=None, ncols=None, fig_width=30):
    """Plot a stack of images shaped (count, H, W) as an nrows x ncols grid.

    When nrows/ncols are omitted the image count must be a perfect square.
    """
    s = images.shape
    assert len(s) == 3
    if nrows is None or ncols is None:
        resolution = int(s[0] ** 0.5)
        nrows = resolution
        ncols = resolution
    assert images.shape[0] == nrows * ncols, f"{images.shape[0]} != {nrows} * {ncols}"
    # NOTE(review): with nrows == ncols == 1, plt.subplots returns a bare
    # Axes and ``axs.flat`` would fail — current callers use larger grids.
    fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(fig_width, fig_width * nrows / ncols),
                            subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(left=0.03, right=0.97, hspace=0, wspace=0)
    axs = axs.flat
    for i in np.arange(s[0]):
        axs[i].axis("off")
        axs[i].imshow(images[i].detach().cpu().numpy(), vmin=vmin, vmax=vmax, interpolation='none', cmap=plt.cm.plasma, aspect='auto')
    plt.tight_layout()
    plt.show()
# Creates pdf for each item
# (input_count, feature_count, height, width) => (input_count, feature_count * resolution, height, width)
def to_pdf(mu, var, resolution=10):
    """Expand per-pixel (mu, var) into a discretized Gaussian pdf.

    Each scalar mean/spread pair becomes a length-``resolution`` vector of
    normalized N(mu, .) densities evaluated on an even grid inside (0, 1).
    NOTE(review): despite the name, ``var`` is passed to
    ``scipy.stats.norm.pdf`` as the *scale* (standard deviation) argument —
    confirm which is intended.
    """
    mu_shape = mu.shape
    input_count, feature_count, height, width = mu_shape
    step = 1.0 / (resolution + 1)
    assert mu_shape == var.shape
    assert resolution > 0
    assert resolution < 50
    # mu and logvar: move features to the end and flatten
    print("to_pdf: mu", mu.shape)
    mu_combined = mu.detach().permute(0, 2, 3, 1).contiguous().view(-1).cpu().numpy()
    print("to_pdf: mu_combined", mu_combined.shape)
    # Clamp the spread away from 0 to keep the densities finite.
    var = torch.clamp(var, step * 0.005, 3.0)
    var_combined = var.detach().permute(0, 2, 3, 1).contiguous().view(-1).cpu().numpy()
    print("to_pdf: var_combined", var_combined.shape)
    # pdf for each item, normalized so each bin vector sums to 1
    rr = np.arange(step, 1, step)
    pdfs = []
    for i in np.arange(mu_combined.shape[0]):
        pdf = norm.pdf(rr, mu_combined[i], var_combined[i])
        pdf = pdf / pdf.sum()
        pdfs.append(pdf)
    mu__ = torch.as_tensor(pdfs)
    print("to_pdf: mu__", mu__.shape) # (*, resolution)
    mu__ = mu__.view(-1, feature_count, resolution)
    print("to_pdf: mu__", mu__.shape) # (*, feature_count, resolution)
    mu__ = mu__.view(input_count, height, width, feature_count * resolution).contiguous()
    print("to_pdf: mu__", mu__.shape) # (input_count, height, width, feature_count * resolution)
    #mu__ = mu__.permute(0, 3, 1, 2).contiguous() # (input_count, feature_count * resolution, height, width)
    #print("to_pdf: mu__", mu__.shape)
    return mu__

# plt.plot(to_pdf(torch.tensor([[[[0.49]]]]), torch.tensor([[[[0.04]]]]), resolution=40).reshape(-1, 1).numpy())
def sample_from_pdf(pdf):
    """Draw one bin index from a discrete pdf and rescale it into [0, 1).

    Depends on the module-level ``resolution``.  The returned value is
    multiplied by the pre-normalization mass ``sum_pdf``, which — as the
    inline comment notes — deliberately conflates value and confidence.
    Returns a 1-element list so results stack into an extra axis.
    """
    assert pdf.shape == (resolution, )
    sum_pdf = sum(pdf)
    if sum_pdf > 0:
        # Inverse-CDF sampling over the normalized bins.
        v = random.random()
        s = 0
        index = 0
        while s < v and index < resolution:
            s += pdf[index] / sum_pdf
            index += 1
        # apply scale (conflates value and confidence!)
        return [(index - 1) * sum_pdf / resolution]
    else:
        # Degenerate (all-zero) pdf: fall back to 0.
        return [0]
def sample_from_images__(images__):
    """Collapse (count, H, W*resolution) pixel-pdfs into sampled pixels.

    Each run of ``resolution`` values along the last axis is treated as one
    discrete pdf and reduced to a single sampled scalar, yielding a
    (count, H, W) tensor.
    """
    assert len(images__.shape) == 3
    # reshape images__ from (image count, height, width*resolution) into (image count*height*width, resolution)
    s = images__.shape
    flattened_images__ = images__.view(s[0], s[1], int(s[2] / resolution), resolution)
    s = flattened_images__.shape
    flattened_images__ = flattened_images__.view(s[0] * s[1] * s[2], s[3])
    # sample single value from each distribution into (image count*height*width, 1)
    sampled_pixels = torch.Tensor([sample_from_pdf(item.cpu().numpy()) for item in flattened_images__])
    # reshape back into (image count, height, width)
    sampled_images = sampled_pixels.view(s[0], s[1], s[2])
    return sampled_images
def averaged_sample_from_images__(images__, count=10):
    """Monte-Carlo estimate: mean of *count* independent pixel samplings."""
    draws = [sample_from_images__(images__) for _ in range(count)]
    return torch.stack(draws).mean(dim=0)
# + [markdown] id="zfAtGnO7m083" colab_type="text"
# ## Train
# Use x values in the range 0-1
# + id="zZmZ_fcJ2euC" colab_type="code" colab={}
# x -> QD(x) -> x_bar
# Quantized distribution auto encoder
class QDAE(nn.Module):
    """Autoencoder whose latent code is pushed toward {-1, +1}.

    encode() maps the input to tanh-bounded latents; reparameterize()
    stochastically binarizes them with a relaxed Bernoulli so gradients can
    still flow; decode() reconstructs the input from the latent code.
    """

    def __init__(self, input_output_size, hidden_size, latent_feature_count, resolution):
        super(QDAE, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.WARN)
        self.input_output_size = input_output_size
        self.hidden_size = hidden_size
        self.latent_feature_count = latent_feature_count
        self.resolution = resolution
        # Encoder: input -> 8h -> 4h -> h -> latent_feature_count*resolution
        self.enc1 = nn.Linear(input_output_size, hidden_size*8)
        self.enc2 = nn.Linear(hidden_size*8, hidden_size*4)
        self.enc3 = nn.Linear(hidden_size*4, hidden_size)
        self.enc4 = nn.Linear(hidden_size, latent_feature_count * resolution)
        #assert latent_feature_count == 1, "TODO: 1 qd_to_features layer per latent feature"
        #self.qd_to_features = nn.Linear(latent_feature_count * resolution, latent_feature_count)
        # Decoder: latent_feature_count*resolution -> 4h -> output
        self.dec1 = nn.Linear(latent_feature_count * resolution, hidden_size * 4)
        self.dec2 = nn.Linear(hidden_size * 4, input_output_size)
        # NOTE(review): ``monotonic`` is built but never used anywhere.
        self.monotonic = torch.sigmoid(torch.linspace(0, 1, self.resolution)).unsqueeze(dim=1)
        self.dropout = nn.Dropout(0.2)

    def encode(self, x):
        """Map input to latents in (-1, 1); input dropout acts as noise."""
        x = self.dropout(x)
        x = self.enc1(x)
        x = F.relu(x)
        x = self.enc2(x)
        x = F.relu(x)
        x = self.enc3(x)
        x = F.relu(x)
        x = self.enc4(x)
        x = torch.tanh(x)
        return x

    def decode(self, z__):
        """Reconstruct the input from a latent code (linear output)."""
        y = self.dec1(z__)
        y = F.relu(y)
        y = self.dec2(y)
        #x_bar = F.sigmoid(y)
        x_bar = y
        return x_bar

    def forward(self, x):
        """Return (z, z_, z__, x_bar): raw latents, binarized latents,
        post-dropout latents and the reconstruction."""
        self.logger.debug(f"x {x.shape}")
        z = self.encode(x)
        z_ = self.reparameterize(z)
        # NOTE(review): the dropout result is immediately overwritten on the
        # next line, so latent dropout is effectively disabled — presumably a
        # deliberate experiment toggle; confirm before cleaning up.
        z__ = self.dropout(z_)
        z__ = z_
        x_bar = self.decode(z__)
        return z, z_, z__, x_bar

    def reparameterize(self, z):
        """Stochastically push z in (-1, 1) toward {-1, +1}.

        Maps z to a Bernoulli probability p = z/2 + 0.5 and draws a relaxed
        (temperature 0.1) sample, rescaled back to (-1, 1).
        """
        z_ = torch.distributions.relaxed_bernoulli.RelaxedBernoulli(0.1, z * 0.5 + 0.5).rsample() * 2 - 1
        #z_ = torch.bernoulli(z * 0.5 + 0.5) * 2 - 1
        #z_ = z
        return z_
def train(model, device, data, optimizer, epochs, log_interval):
    """Fit the QDAE with plain MSE reconstruction loss.

    Only the first 10 batches of each epoch are used (quick iteration);
    every ``log_interval`` epochs the latents and reconstructions are
    plotted.  The commented lines are alternative regularizers kept for
    experimentation.
    """
    model.train()
    for epoch in range(epochs):
        for i, d in enumerate(data, 0):
            if i > 10:
                break
            X, labels = d
            # Flatten 28x28 images into vectors.
            X = X.view(X.shape[0], -1)
            optimizer.zero_grad()
            z, z_, z__, X_bar = model(X)
            loss = F.mse_loss(X_bar, X)
            # smoothness
            #loss += (qd_h[:, 0:-1] - qd_h[:, 1:]).pow(2).mean().pow(0.5) * 0.1
            # should sum to 1
            #loss += (qd_h.sum(dim=1) - 1).pow(2).mean().pow(0.5) * 0.1
            # use all elements of resolution across samples
            #use_count = z_.sum(dim=0)
            #avg_use = use_count.mean()
            #err = (use_count - avg_use).pow(2).sum().pow(0.5) / resolution
            #loss += err * 0.1
            # sparse use = values near 0.5 are discouraged. encourage more contrast, i.e
            # values near 0 and 1.
            #loss += (0.5 - (qd_h - 0.5).abs()).mean() * 0.1
            # ideally sum close to 0 as equal number of 1s and -1s
            #loss += z_.mean(dim=1).abs().sum() * 0.1
            #loss += (1 - z.abs().mean()) * 0.01
            loss.backward()
            optimizer.step()
        if epoch % log_interval == 0:
            print(f"Epoch: {epoch} \t Loss: {loss.item():.6f}")
            show_image(z[0:16].detach() , title="z", print_values=False, vmin=-1, vmax=1, cmap=plt_cm_RdYlGn_)
            show_image(z_[0:16].detach() , title="z_", print_values=False, vmin=-1, vmax=1, cmap=plt_cm_RdYlGn_)
            show_image(z__[0:16].detach(), title="z__", print_values=False, vmin=-1, vmax=1, cmap=plt_cm_RdYlGn_)
            show_image_grid(X[0:16,:].view(-1,28,28), nrows=4, ncols=4, fig_width=6)
            show_image_grid(X_bar[0:16,:].view(-1,28,28), nrows=4, ncols=4, fig_width=6)
        # Early stop once reconstruction is essentially perfect.
        if loss < 0.0001:
            break
# + id="fAY1nNaBZvXA" colab_type="code" outputId="c0965242-7ae6-4cec-ff4e-f9ac5edcccdd" executionInfo={"status": "ok", "timestamp": 1562215444018, "user_tz": 420, "elapsed": 345811, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Bins per latent feature; also sizes the hidden layers below.
resolution = 10
data = load_mnist()
#X = next(iter())[0].squeeze(dim=1).view(1000, -1).float()
model = QDAE(input_output_size=28*28, hidden_size=resolution * 3, latent_feature_count=1, resolution=resolution)
#for param in model.parameters():
#  print(type(param.data), param.size())
  #nn.init.constant(param.data, val=0.1)
  #param.data += 0.1
optimizer = optim.Adam(model.parameters(), lr=3e-4)
# Long run; diagnostics plotted every 100 epochs.
train(model=model, device=device, data=data, optimizer=optimizer, epochs=1000, log_interval=100)
# + id="IKxK71SwmTZ9" colab_type="code" outputId="b9edde94-4d32-4420-8230-32343d0bff0e" executionInfo={"status": "error", "timestamp": 1562209350233, "user_tz": 420, "elapsed": 28620, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 198}
# NOTE(review): ``X`` is undefined in this cell — the line that would create
# it (in the previous cell) is commented out — so this raises NameError as
# written.  Pull a batch first, e.g. X, _ = next(iter(data)).
show_image_grid(X[0:16].view(-1,28,28), nrows=4, ncols=4, fig_width=6)
z, z_, z__, X_bar = model(X[0:16])
show_image_grid(X_bar.view(-1,28,28), nrows=4, ncols=4, fig_width=6)
# + id="SD2sbc_-Cjwe" colab_type="code" colab={}
# latent vector arithmetic
# NOTE(review): ``qd_h`` is not defined anywhere in this notebook — this cell
# is a leftover from an earlier revision and raises NameError as written.
show_image(qd_h.detach(), print_values=False)
qd = (qd_h[7] + qd_h[8]) / 2
qd = qd.unsqueeze(dim=0)
show_image(qd_h[7].unsqueeze(dim=0).detach(), print_values=True)
show_image(qd_h[8].unsqueeze(dim=0).detach(), print_values=True)
show_image(qd_h[0].unsqueeze(dim=0).detach(), print_values=True)
show_image(qd.detach(), print_values=True)
y_bar = model.decode(qd_h[0])
show_image(y_bar.view(3,3).detach())
# + id="308nPnep8_bP" colab_type="code" colab={}
# Decode a sweep of latent means through the trained model.
# FIX: the grid height was assigned to ``dr`` but every use below reads
# ``r`` (which was undefined), raising NameError — name it ``r`` consistently.
r = 5
c = 5
# r*c evenly spaced means, shaped (1, 1, 1, r*c) for to_pdf.
mu = torch.tensor(np.linspace(0, 1, r * c)).unsqueeze(0).unsqueeze(0).unsqueeze(0).float()
var = torch.ones(mu.shape).float() * 0.2
y = to_pdf(mu, var, resolution=resolution).squeeze().float()
print(y.shape)
show_image(y)
y_bar = model.decode(y)
print("y_bar", y_bar.shape)
show_image_grid(y_bar.view(-1,3,3), nrows=r, ncols=c, fig_width=3)
# + id="3_7-g3bpEIuY" colab_type="code" colab={}
| Autoencoding kernel convolution/06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import os

# List every file available under the working directory (Kaggle-style).
for dirname, _, filenames in os.walk('./'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# +
# Single layer perceptron implementation
# -
def calculate_precision(arr):
    """Per-class precision from a confusion matrix.

    With sklearn's confusion_matrix convention (rows = true labels,
    columns = predictions), precision for class i is arr[i][i] divided by
    the i-th COLUMN sum (everything *predicted* as class i).
    FIX: the original divided by the ROW sum, which is recall — the
    row/column roles were swapped with calculate_recall.
    """
    precision = []
    for i in range(len(arr)):
        # Total items predicted as class i.
        predicted_as_i = sum(arr[j][i] for j in range(len(arr)))
        precision.append(arr[i][i] / predicted_as_i)
    return precision
# +
def calculate_accuracy(arr):
    """Overall accuracy from a confusion matrix: trace / total count."""
    matrix = np.array(arr)
    correct = np.trace(matrix)
    return correct / matrix.sum()
def calculate_recall(arr):
    """Per-class recall from a confusion matrix.

    With sklearn's confusion_matrix convention (rows = true labels,
    columns = predictions), recall for class i is arr[i][i] divided by the
    i-th ROW sum (everything that *truly* belongs to class i).
    FIX: the original divided by the COLUMN sum, which is precision — the
    row/column roles were swapped with calculate_precision.
    """
    recall = []
    for i in range(len(arr)):
        # Total items that actually belong to class i.
        actual_i = sum(arr[i][j] for j in range(len(arr)))
        recall.append(arr[i][i] / actual_i)
    return recall
# +
import numpy as np
class Perceptron:
    """Single-layer perceptron (unit-step activation) binary classifier."""

    def __init__(self, learning_rate=0.05, n_iters=1000):
        self.lr = learning_rate
        self.n_iters = n_iters
        self.activation_func = self._unit_step_func
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        """Train on X (n_samples, n_features) and binary labels y.

        Labels are coerced to {0, 1} (any value > 0 counts as 1).
        FIX: removed the leftover debug ``print`` calls that dumped the
        epoch counter and every single sample on every iteration.
        """
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        y_ = np.array([1 if i > 0 else 0 for i in y])
        for _ in range(self.n_iters):
            for idx, x_i in enumerate(X):
                linear_output = np.dot(x_i, self.weights) + self.bias
                y_predicted = self.activation_func(linear_output)
                # Perceptron rule: nudge the boundary toward misclassified points.
                update = self.lr * (y_[idx] - y_predicted)
                self.weights += update * x_i
                self.bias += update

    def predict(self, X):
        """Return 0/1 predictions for X (vector or matrix)."""
        linear_output = np.dot(X, self.weights) + self.bias
        return self.activation_func(linear_output)

    def _unit_step_func(self, x):
        # Step activation: 1 where x >= 0, else 0.
        return np.where(x >= 0, 1, 0)
# -
# Training the model
# Load the phishing dataset; the last column is the class label.
data = pd.read_csv("./Website Phishing.csv")
data.head(10)
x=data.values.tolist()
training_data=[i[:-1] for i in x]
label_value=[i[-1] for i in x]
# perceptron_model=Perceptron()
# perceptron_model.fit(np.array(training_data), label_value)
# +
# Prediction
import seaborn as sns
import random as r
from sklearn.metrics import confusion_matrix

# NOTE(review): the two lines constructing/fitting ``perceptron_model``
# above are commented out — this cell raises NameError unless re-enabled.
perceptron_predictions=[]
for i in range(len(training_data)):
    res=perceptron_model.predict(training_data[i])
    perceptron_predictions.append(res)
# Rows = true labels, columns = predictions (sklearn convention).
c=confusion_matrix(label_value, perceptron_predictions)
sns.heatmap(c, annot=True)
perceptron_accuracy=calculate_accuracy(c)
perceptron_precision=calculate_precision(c)
perceptron_recall=calculate_recall(c)
print("accuracy:", perceptron_accuracy)
print("precision: ", perceptron_precision)
print("recall: ", perceptron_recall)
# -
# Multilayer perceptron with backpropagation
from sklearn.neural_network import MLPClassifier

X = [i[:-1] for i in x]
y = [i[-1] for i in x]
# Two hidden layers (15, 7); lbfgs suits this small dataset.
clf = MLPClassifier(solver='lbfgs', alpha=1e-6, hidden_layer_sizes=(15,7), random_state=1, max_iter=4000)
clf.fit(X, y)
# +
# Multilayer perceptron prediction, evaluated on the training data itself
# (no hold-out split, so these metrics are optimistic).
X=[i[:-1] for i in x]
multilayer_perceptron_results=[]
for i in range(len(training_data)):
    res=clf.predict([X[i]])
    multilayer_perceptron_results.append(res[0])
c=confusion_matrix(label_value, multilayer_perceptron_results)
sns.heatmap(c, annot=True)
multilayer_perceptron_accuracy=calculate_accuracy(c)
multilayer_perceptron_precision=calculate_precision(c)
multilayer_perceptron_recall=calculate_recall(c)
print("accuracy:", multilayer_perceptron_accuracy)
print("precision: ", multilayer_perceptron_precision)
print("recall: ", multilayer_perceptron_recall)
# +
# ADALINE
# +
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# NOTE(review): SGDClassifier's default loss is 'hinge' (linear SVM), not
# the least-squares loss of a true ADALINE — confirm whether a squared loss
# was intended.  Also rebinds ``clf`` (previously the MLP).
clf = make_pipeline(StandardScaler(),SGDClassifier(max_iter=10000, tol=1e-5))
clf.fit(X, y)
# +
# ADALINE predictions evaluated on the training data.
adaline_results=[]
X=[i[:-1] for i in x]
for i in range(len(training_data)):
    res=clf.predict([X[i]])
    adaline_results.append(res[0])
c=confusion_matrix(label_value, adaline_results)
sns.heatmap(c, annot=True)
adaline_accuracy=calculate_accuracy(c)
adaline_precison=calculate_precision(c)
adaline_recall=calculate_recall(c)
print("accuracy:", adaline_accuracy)
print("precision: ", adaline_precison)
print("recall: ", adaline_recall)
# +
# MLP classifier (default architecture)
# -
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# NOTE(review): rebinds ``clf`` again (previously the ADALINE pipeline).
clf = MLPClassifier(random_state=1, max_iter=1000).fit(training_data, label_value)
# +
# Default-architecture MLP predictions evaluated on the training data.
mlp_results=[]
X=[i[:-1] for i in x]
for i in range(len(training_data)):
    res=clf.predict([X[i]])
    mlp_results.append(res[0])
c=confusion_matrix(label_value, mlp_results)
sns.heatmap(c, annot=True)
mlp_accuracy=calculate_accuracy(c)
mlp_precision=calculate_precision(c)
mlp_recall=calculate_recall(c)
print("accuracy:", mlp_accuracy)
print("precision: ", mlp_precision)
print("recall: ", mlp_recall)
# -
# !pip install sklearn-genetic
from __future__ import print_function
import numpy as np
from sklearn import datasets, linear_model
from genetic_selection import GeneticSelectionCV

# Genetic-algorithm feature selection wrapped around logistic regression,
# keeping at most 5 features, with early stop after 10 stagnant generations.
estimator = linear_model.LogisticRegression(solver="liblinear", multi_class="ovr")
selector = GeneticSelectionCV(estimator,
                              cv=5,
                              verbose=1,
                              scoring="accuracy",
                              max_features=5,
                              n_population=50,
                              crossover_proba=0.5,
                              mutation_proba=0.2,
                              n_generations=40,
                              crossover_independent_proba=0.5,
                              mutation_independent_proba=0.05,
                              tournament_size=3,
                              n_gen_no_change=10,
                              caching=True,
                              n_jobs=-1)
selector = selector.fit(training_data, label_value)
# Boolean mask of the selected features.
print(selector.support_)
# +
# Genetic-algorithm-selected model evaluated on the training data.
genetic_algorithm=[]
X=[i[:-1] for i in x]
for i in range(len(training_data)):
    res=selector.predict([X[i]])
    genetic_algorithm.append(res[0])
# Rows = true labels, columns = predictions (sklearn convention).
c=confusion_matrix(label_value, genetic_algorithm)
sns.heatmap(c, annot=True)
genetic_algorithm_accuracy=calculate_accuracy(c)
genetic_algorithm_precision=calculate_precision(c)
genetic_algorithm_recall=calculate_recall(c)
print("accuracy:", genetic_algorithm_accuracy)
# FIX: the original printed the accuracy on all three lines; report the
# precision and recall values that were actually computed above.
print("precision: ", genetic_algorithm_precision)
print("recall: ", genetic_algorithm_recall)
# +
# Accuracy histogram comparing all five models.
import matplotlib.pyplot as plt

# NOTE(review): this rebinds ``x`` (previously the raw dataset rows) — any
# later cell reusing ``x`` for data will silently get these accuracies.
x = [perceptron_accuracy, multilayer_perceptron_accuracy, adaline_accuracy, mlp_accuracy, genetic_algorithm_accuracy]
y=[1,2,3,4,5]
left = [1, 2, 3, 4, 5]
height = x
tick_label = ['Perceptron', 'Multilayer Perceptron', 'Adaline', 'MLP', 'Genetic Algorithm']
plt.bar(left, height, tick_label = tick_label, width = 0.8, color = ['red', 'blue'])
plt.xlabel('models')
plt.ylabel('accuracy')
plt.title('Accuracy comparison')
plt.show()
# -
import pickle

# Persist the final MLP classifier.
filename = 'MLP_Model.sav'
pickle.dump(clf, open(filename, 'wb'))
filename="Multilayer_Perceptron_Model.sav"
# NOTE(review): ``mlp`` is never defined in this notebook — every model was
# bound to ``clf`` and then overwritten — so this line raises NameError.
# Keep a separate reference when the multilayer perceptron is fitted, then
# dump that object here.
pickle.dump(mlp, open(filename, 'wb'))
| checkpoints/projectDL-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Handle imports
import numpy as np
import pandas as pd
import seaborn as sns
import math
from matplotlib import pyplot as plt
# # Auxiliary functions
# Turns a numpy array into a frequency dictionary
def frequency_dict(x):
    """Map each distinct value in *x* to its number of occurrences."""
    values, occurrences = np.unique(x, return_counts=True)
    return dict(zip(values, occurrences))
# from a df with probability distribution build a sample observations set
def random_dataframe(dist, samples):
    """Draw *samples* outcomes from the distribution table *dist*.

    Returns a DataFrame with the sampled 'Outcome' values and their mapped
    'Returns' payoffs.
    """
    frame = pd.DataFrame()
    frame['Outcome'] = np.random.choice(dist['Values'], p=dist['Probs'], size=samples)
    payoff_by_value = pd.Series(dist['Returns'].values, index=dist['Values']).to_dict()
    frame['Returns'] = [payoff_by_value[v] for v in frame['Outcome']]
    return frame
def distribution_plots(dist):
    """Bar-plot the distribution: probabilities, returns, weighted returns."""
    # %matplotlib inline
    fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=False, figsize = (20,6))
    ax1.title.set_text("Probabilities i.e. expected frequencies")
    sns.barplot(x='Values',y='Probs',data=dist, ax=ax1)
    ax2.title.set_text("Returns per outcome")
    sns.barplot(x='Values',y='Returns',data=dist, ax=ax2)
    ax3.title.set_text("Weighted expected returns per outcome")
    sns.barplot(x='Values',y='Expected Returns',data=dist, ax=ax3)
def results_plots(df):
    """Plot observed frequencies of outcomes and of returns side by side."""
    # %matplotlib inline
    fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize = (20,6))
    ax1.title.set_text('Table of frequences per outcome')
    sns.countplot(df['Outcome'], ax=ax1)
    ax2.title.set_text('Table of frequences per return')
    sns.countplot(df['Returns'], ax=ax2)
def gen_dist(values, probs, returns, samples=None):
    """Build the distribution DataFrame and report its expected return.

    Parameters
    ----------
    values, probs, returns : array-likes of equal length describing the
        outcomes, their probabilities and their payoffs.
    samples : int, optional
        Number of simulated trials used for the "total expected returns"
        printout. Defaults to the module-level `sample_size` global, which
        preserves the original (global-reading) behaviour.

    Returns
    -------
    (dist, expected_returns) : the assembled DataFrame and the scalar
        probability-weighted expected return.
    """
    dist = pd.DataFrame()
    dist['Values'] = values
    dist['Probs'] = probs
    dist['Returns'] = returns
    # Vectorized elementwise product instead of the original map/lambda.
    dist['Expected Returns'] = dist['Probs'] * dist['Returns']
    expected_returns = np.dot(dist['Probs'], dist['Returns'])
    if samples is None:
        samples = sample_size  # backward-compatible fallback to the global
    print("Expected_Return: {}".format(round(expected_returns, 4)))
    print("Total_expected_returns: {}".format(round(expected_returns * samples, 4)))
    return dist, expected_returns
# # Setting up our distribution
# Simulation parameters for the dice-style payoff experiment.
sample_size = 10**6   # number of simulated trials
price = 3.5           # cost per play (only used if the commented line below is enabled)
dist = pd.DataFrame({
    'Values': [1, 2, 3, 4, 5, 6],            # possible outcomes (fair die)
    'Probs': [1./6, 1./6, 1./6, 1./6, 1./6, 1./6],
    'Returns': [5, 4, 9, 8, 20, 100],        # payoff attached to each outcome
})
#dist['Returns'] = list(map(lambda x:x-price,dist['Values']))
# # Dist info
# Enrich `dist` with the weighted expected returns and print the summary.
dist, expected_returns = gen_dist(dist['Values'],dist['Probs'],dist['Returns'])
distribution_plots(dist)
dist
# # Results
# Simulate `sample_size` draws and plot observed frequencies.
df = random_dataframe(dist,sample_size)
results_plots(df)
# # Comparison of expectations
# Compare theoretical vs empirical average and total returns in one table.
total_expected_returns = np.sum(df['Returns'])
pd.DataFrame({'Expected':[expected_returns,expected_returns*len(df)],'Observed':[total_expected_returns/len(df),total_expected_returns]}, index = ['Average Returns','Total Returns'])
| Expectations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import all libraries which will be used.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
import os
import numpy as np
from sklearn.model_selection import train_test_split
# +
# Load image dataset.
def load_images(folder):
    """Read every file in `folder` with matplotlib's imread and return the
    successfully loaded images as a list of arrays."""
    paths = (os.path.join(folder, name) for name in os.listdir(folder))
    pictures = (mpimg.imread(path) for path in paths)
    return [picture for picture in pictures if picture is not None]
# -
# Load every image found in the local 'data' directory into a list of arrays.
image_data = load_images('data')
# +
# Preprocessing of the particle dataset.
"""Image should 64 x 64 dimensions"""
# Side length (pixels) of the square crop fed to the network.
img_size_cropped = 64
def pre_process_image(image_data, training):
    """Pre-process a single particle image for the CNN.

    In training mode the image is randomly cropped (augmentation) and its
    pixel values clamped to [0, 1]; otherwise it is centre-cropped/padded to
    the same target size. NOTE(review): this file targets TF 1.x
    (`tf.contrib`, `tf.random_crop`).
    """
    if training:
        # BUG FIX: the original wrote `size[img_size_cropped, ...]` — a
        # NameError, since `size` was never defined; it must be the keyword
        # argument of tf.random_crop.
        # TODO confirm whether a channel dimension is needed in `size` for
        # RGB input — can't tell the image rank from here.
        image_data = tf.random_crop(image_data, size=[img_size_cropped, img_size_cropped])
        # Rotation augmentation by 90/180/270 degrees.
        # NOTE(review): TF ops are not in-place — the original discarded
        # these results (and misspelled `roatate`), so they had no effect.
        # Spelling fixed; a real augmentation pipeline should collect the
        # rotated tensors instead of dropping them.
        tf.contrib.image.rotate(image_data, 90)
        tf.contrib.image.rotate(image_data, 180)
        tf.contrib.image.rotate(image_data, 270)
        # Limit the image pixels to [0, 1] in case of overflow.
        image_data = tf.minimum(image_data, 1.0)
        image_data = tf.maximum(image_data, 0.0)
    else:
        # Crop the input image around the centre so it is the same size as
        # images that are randomly cropped during training.
        image_data = tf.image.resize_image_with_crop_or_pad(
            image_data, target_height=img_size_cropped, target_width=img_size_cropped)
    # BUG FIX: the original returned the undefined name `images`.
    return image_data
# +
# Splitting our dataset.
train_batch_size = 64  # intended number of images per training batch

def random_batch():
    """Split the globally loaded `image_data` into two slices.

    Returns (x_batch, y_batch): the first 500 images and all remaining ones.

    NOTE(review): `idx` draws `train_batch_size` random indices but is never
    used — the original intent was probably a random mini-batch; it is kept
    (unused) to preserve the existing RNG consumption and split behaviour.
    """
    # Number of images in the dataset.
    num_images = len(image_data)
    idx = np.random.choice(num_images, size=train_batch_size, replace=False)
    # Fixed-position split of the dataset.
    x_batch = image_data[:500]
    # BUG FIX: the original sliced [501:], silently dropping image 500.
    y_batch = image_data[500:]
    return x_batch, y_batch
# -
# NOTE(review): the unpack names are misleading — random_batch() returns
# (x_batch, y_batch), so `training` below is the second image slice, not a
# boolean flag, yet pre_process_image treats it as its `training` switch.
image_data, training = random_batch()
# NOTE(review): the preprocessed result is discarded — likely should be
# assigned back to image_data; confirm intent.
pre_process_image(image_data, training)
# Building the feature extraction level: CNN model using Tensorflow framework.
"""
We have 3 convolutional layers and 3 subsampling layers
followed by fully-connected layer and a sigmoid activation layer.
"""
# Input layer.
# NOTE(review): reshapes to 28x28x1 although the crop size above is 64 —
# one of the two sizes is likely wrong; confirm the intended input shape.
input_layer = tf.reshape(image_data, [-1, 28, 28, 1])
| Single Particle Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Διακριτή και Συνεχής Μέθοδος των ελαχίστων τετραγώνων / Discrete and Continuous Method of Least Squares και χρήση Εκθετικής και Κλασματικής Προσαρμογής δεδομένων (Regression) στην Διακριτή Μέθοδο ΕΛΤ </center>
# ## <center> Σύντομη θεωρία της μεθόδου και εφαρμογή της μέσω της $MATLAB$ και του NumPy module της Python </center>
# Η προσέγγιση συναρτήσεων με ένα πολυώνυμο ειναί στενά συνδεδεμένη με την *πολυωνιμική παρεμβολή*; για παράδειγμα είχαμε δει την πολυωνυμική παρεμβολή κατα Lagrange (που δίνει το πολυώνυμο παρεμβολής σε μορφή Lagrange η σε μορφή Newton) από την οποία προκύπτει οτι το πολυώνυμο (**σε μορφή Lagrange**):
# $$
# p_n(x) = \sum^{n}_{i=0} \,l_i(x)y_i \nonumber
# $$
#
# ειναι το **μοναδικό** πολυώνυμο στο $\mathbb{R}_n[x]$ το οποίο επαληθεύει το σύνολο δεδομένων: $\{(x_0,y_0),(x_1,y_1),\dots,(x_{n},y_{n})\}$ δηλαδή για $i=0, \, 1, \dots, \, n$ έχουμε ότι:
#
# $$
# p_n(x_i)=y_i \tag{1} \label{eq:1}
# $$
#
# Αυτό γιατί οι συντελεστές $l_i(x) \, , \forall i$ ονομάζονται οι **συντελεστές παρεμβολής** και ορίζονται απο την σχέση:
#
# $$
# l_i(x)=\prod_{\substack{j=0 \\ j \neq i}}^n \frac{x-x_j}{x_i-x_j} \, \, , i = 0, 1, \dots, n \tag{2} \label{eq:2}
# $$
#
# Είναι φανερό τότε από την \eqref{eq:2} ότι όταν $x=x_i$ έχουμε ότι $l_i(x_i)=1$ λόγω της απαλοιφής του αριθμητή και του παρανόμαστή και έτσι προκύπτει η \eqref{eq:1}
#
# Σε αυτό όμως το άρθρο θα επικεντρωθούμε στην _Μέθοδο των ελαχίστων τετραγώνων (Method of least squares - LSQ Method)_ όπου αποτελεί μέθοδος *προσεγγισης / παλινδρόμησης (regression)* μιας συνάρτησης; δηλαδή που προσεγγίζουμε την συνάρτηση με ένα πολυώνυμο κάποιου βαθμού, από το οποίο όμως *δεν* ζητάμε να επαληθεύει το σύνολο των δεδομένων ακριβώς. Αυτό μας επιτρέπει να αποφύγουμε κάποιες δυσκολίες που παρουσιάζονταν στην μέθοδο της πολυωνυμικής παρεμβολής, όπως τον όγκο των πράξεων που πρέπει να γίνουν για εύρεση αυτού του πολυωνύμου (αν και στην μορφή Newton αυτές απλοποιούνται δραματικά) ή τα προβλήματα που παρουσιάζονται όταν υπαρχεί noise στα δεδομένα μας, όπως λάθη και αποκλίσεις κατά την δειγματοληψία ενός τυχαίου δείγματος για παράδειγμα. Και εδώ είναι που προκύπτει η χρήση της μεθόδου ελαχίστων τετραγώνων όπου είναι η πιο συχνή μέθοδος για δημιουργία της "best-fit" προσέγγισης επι του συνόλου δεδομένων.
#
# Στην ουσία προσπαθούμε να ελαχιστοποιήσουμε το άθροισμα των τετραγώνων των διαφορών μεταξύ των τεταγμένων των σημείων που θέλουμε να προσεγγίσουμε και της τιμής της προσεγγιστικής αυτής πολυωνυμικής συνάρτησης στην αντίστοιχη $x$-συντεταγμένη των σημείων. Αυτό λόγω του ότι η Ευκλείδια απόσταση μεταξύ δύο σημείων $(x_1,y_1) \, , (x_2,y_2)$ στο $\mathbb{R}^2$ (επικεντρωνόμαστε στην προσέγγιση συναρτήσεων *μίας πραγματικής μεταβλητής* όπου έχουν γράφημα στο $\mathbb{R}^2$) δίνεται από τον τύπο:
#
# $$
# d=\sqrt{(x_1-x_2)^2+(y_1-y_2)^2} \tag{3} \label{eq:3}
# $$
#
# Τώρα εάν έχουμε το σύνολο δεδομέων $\{(x_0,y_0),(x_1,y_1),\dots,(x_{n},y_{n})\}$ και έστω $g(x) = a_mx^m+a_{m-1}x^{m-1}+\dots+a_1x+a_0$ το προσεγγιστικό αυτό πολυώνυμο βαθμού m (όπου προφανώς $m\leq n-1$ καθώς εάν $m=n$ τότε πέρνουμε το πολυώνυμο παρεμβολής) τότε η μέθοδος των ελαχίστων τετραγώνων ουσιαστικά επιδιώκει την ελαχιστοποιήση όπως είπαμε του αθροίσματος, γνωστό και ως **συνάρτηση σφάλματος (error function)**:
#
# $$
# A=\sum_{i=0}^{n} (g(x_i)-y_i)^2 \tag{4} \label{eq:4}
# $$
#
# H \eqref{eq:4} προκύπτει απο το γεγονός ότι θέλουμε να ελαχιστοποιήσουμε την απόσταση μεταξύ των σημείων $(x_i,g(x_i))$ και $(x_i,y_i)$; έστω αυτή η $d_i$, για $\forall \, i=0 \,, 1 \,, \dots, \, n$. Aπό την \eqref{eq:3} η απόσταση αυτή θα δίνεται από τον τύπο:
#
# $$
# d_i=|g(x_i)-y_i| \tag{5} \label{eq:5}
# $$
#
# Αξιοσημείωτη παρατήρηση σε αυτό το σημείο ειναι το γεγονός ότι στην \eqref{eq:4} προσπαθούμε να ελαχιστοποιήσουμε το τετράγωνο των διαφορών όμως η \eqref{eq:5} μας παραπέμπει στην ελαχιστοποιήση του αθροίσματος των *απολύτων τιμών* των $y$-συντεταγμένων. Αυτό γιατί υπάρχουν σημαντικά προτερήματα με το να χρησιμοποιήσουμε το τετράγωνο των διαφορών σε κάθε σημείο αντι την απόλυτη τιμή της διαφοράς (ή οποιοδήποτε άλλο μέγεθος που υπολογίζει το σφάλμα(error) μεταξύ της προσέγγισης και του συνόλου των δεδομένων). Αυτά είναι:
#
# 1. Θετικές διαφορές *δέν* αναιρούν τις αρνητικές και το αντίθετο (κάτι που δεν βλέπουμε στην απόλυτη τιμή των διαφορών αλλα σε κάποιο άλλο μέγεθος που υπολογίζει το σφάλμα μεταξύ της προσέγγισης και του συνόλου των δεδομένων)
# 2. Η παραγώγιση είναι εύκολη (θα δούμε ότι θα χρειαστεί να υπολογίσουμε τις μερικές παραγώγους της \eqref{eq:4} ως προς του συντελεστές του πολυωνύμου $g(x)$); και τέλος
# 3. Μικρές διαφορές γινονταί μικρότερες και μεγάλες όλο και μεγαλύτερες λόγω του τετραγωνισμού
#
# Πίσω στην \eqref{eq:4} τώρα και στην εύρεση μιας συστηματικής μεθόδου που θα μας επιτρέπει τον προσδιορισμό των συντελεστών του πολυωνύμου $g(x)$. Παρατηρούμε ότι αυτή ειναι στην πραγματικότητα συνάρτηση $m+1$ αγνώστων, καθώς η $g(x)$ είναι η άγνωστη - ζητούσα συνάρτηση, όπου οι μεταβλητές είναι οι συντελεστές του $g(x)$. Δηλαδή: $A \, = A(a_0, \, a_1,\dots, \, a_m)$.
#
# Για την ελαχιστοποίηση της $A(a_0, \, a_1,\dots, \, a_m)$ θα χρησιμοποιήσουμε ένα σημαντικό αποτέλεσμα του Διαφορικού Λογισμού;
#
# Τα _"κρίσιμα σημεία"_ μιας συνάρτησης πολλαπλών μεταβλητών, είναι τα σημεία στα οποία μηδενίζεται η κλίση / gradient της συνάρτησης, σε αυτή την περίπτωση ειναι το σύνολο των σημείων $(\alpha_0, \, \alpha_1,\dots, \, \alpha_m)$ όπου $\nabla A(\alpha_0, \, \alpha_1,\dots, \, \alpha_m) = \vec{0}$. Τότε χρησιμοποιώντας το **"Κριτήριο του Sylvester"** μπορούμε να προσδιορίσουμε την φύση του κρίσημου αυτού σημείου στο $\mathbb{R}^{m+1}$ (χωρίς κάποιο περιορισμό, ψάχνουμε για του συντελεστές του $g(x)$ σε *όλο* το $\mathbb{R}^{m+1}$), δηλαδή εαν είναι *τοπικό μέγιστο* ή *τοπικό ελάχιστο*. Εμείς προσπαθούμε για την εύρεση των τοπικών **ελαχίστων**.
# <hr style="border:2px solid gray"> </hr>
# <span style="color:blue"> **Kριτήριο του Sylvester για Θετικά Ορισμένους Πίνακες** </span>
# ---
#
# Έστω ο πίνακας $H := (h_{ij})_{i,j=1, \dots, \, n} \in \mathbb{R}^{n \times n}$ *συμμετρικός*. Για $\forall i=1, \, 2, \dots, \, n$ συμβολίζουμε με $\Delta_i$ την **ορίζουσα** του $i \times i$ τετραγωνικού υποπίνακα $M_i$ όπου: </span>
#
# $$
# M_i := (h_{kl})_{k,l=1, \dots, \, i} , \forall i=1, \, 2, \dots, \, n \nonumber
# $$
#
# Δηλαδή $\Delta_i = \det (M_i)$ όπου ο πίνακας $M_i$ δημιουργείται πέρνωντας των $i \times i$ τετραγωνικό υποπίνακα με πρώτο στοιχειο πάντα το $h_{11}$ από την *άνω-αριστερά* γωνία του πίνακα $H$. O πίνακας $M_i$ ονομάζεται ο *__κύριος ελάσσων (principal minor)__ πίνακας τάξης $i$* υποπίνακας του H.
#
# Τότε με βάση το **Κριτήριο του Sylvester** έχουμε ότι:
#
# $$
# \text{O Η είναι θετικά ορισμένος} \Leftrightarrow \Delta_i>0 \, \, , \forall \, i=1 \,, 2 \,, \dots, \, n \nonumber $$
# <hr style="border:2px solid gray"> </hr>
# Tώρα θα χρειαστεί να ορίσουμε την **Εσσιανή** μιας συνάρτησης καθώς και το **πολυώνυμο Taylor τάξης 2** μέσω του Θεωρήματος του Taylor για συναρτήσεις πολλαπλών μεταβλητών ώστε να καταλήξουμε με μια μεθοδολογία για προσδιορισμό των Τοπικών Ελαχίστων μια συνάρτησης πολλαπλών μεταβλητών και έτσι να κατανοήσουμε πραγματικά την Μέθοδο Ελαχίστων Τετραγώνων.
# <hr style="border:2px solid gray"> </hr>
# H <span style="color:blue">**Εσσιανή**</span> μιας βαθμωτής συνάρτησης; $f(x_1, \, x_2, \dots, \, x_n): \mathbb{R}^n \longrightarrow \mathbb{R}$ συνάρτησης ορίζεται να είναι ο πίνακας μερικών παραγώγων </span>
#
# \begin{equation*}
# [Hf](\vec{x}) = \begin{pmatrix}
# \frac{\partial^2f}{\partial x_1 \partial x_1}(\vec{x}) & \frac{\partial^2f}{\partial x_1 \partial x_2}(\vec{x}) & \cdots & \frac{\partial^2f}{\partial x_1 \partial x_n}(\vec{x}) \\
# \vdots & \vdots & \ddots & \vdots \\
# \frac{\partial^2f}{\partial x_n \partial x_1}(\vec{x}) & \frac{\partial^2f}{\partial x_n \partial x_2}(\vec{x}) & \cdots & \frac{\partial^2f}{\partial x_n \partial x_n}(\vec{x}) \\
# \end{pmatrix}
# \end{equation*}
#
# * Eάν τώρα η βαθμωτή συνάρτηση $f$ είναι αρκετά ομαλή στο (ανοικτό) πεδίο ορισμού της, στην ακρίβεια; $f \in \mathcal{C}^2(\mathcal{D}_f)$ τότε από το **Θεώρημα Ισότητας Μεικτών Παραγώγων**; $\frac{\partial^2f}{\partial x_i \partial x_j} = \frac{\partial^2f}{\partial x_j \partial x_i} \, , \, \forall i, \, j \in \{1, \, 2, \dots, \, n\}$ έχουμε ότι η Εσσιανή της $f$ είναι **συμμετρικός** πίνακας. Δηλαδή:
#
# $$
# [Hf](\vec{x}) = [Hf](\vec{x})^T \, , \, \forall \vec{x}\in\mathcal{D}_f \tag{Def 1.1} \label{eq:6}
# $$
# <hr style="border:2px solid gray"> </hr>
# <span style="color:blue"> **Τύπος πολυωνύμου Taylor τάξης 2, από το Θεώρημα Taylor** </span>
# ___
#
# Εάν η $f: A\subseteq\mathbb{R}^n \longrightarrow \mathbb{R}$ είναι
# $\mathcal{C}^3(A)$ τότε $\forall \vec{x_ο}\in A$ εάν $\vec{x} = \vec{x_o} + \vec{h} , \ \vec{h} = (h_1, \cdots, \ h_n)$:
#
#
# \begin{align*}
# f(\vec{x_o}+\vec{h})=f(\vec{x}) &= f(\vec{x_o}) + \sum_{i=1}^n \frac{\partial f}{\partial x_i}(\vec{x_o})h_i + \frac{1}{2} \sum_{i, j=1}^n \frac{\partial^2f}{\partial x_i \partial x_j}h_ih_j+\mathcal{R}_2(\vec{x}, \ \vec{x_o}) \ , \forall \vec{x}\in A \\
# &\equiv f(\vec{x_o}) + \nabla f(\vec{x_o}) \cdot \vec{h} + \langle \vec{h} \cdot [Hf](\vec{x}), \ \vec{h} \rangle +\mathcal{R}_2(\vec{x}, \ \vec{x_o}) \ , \forall \vec{x}\in A \tag{Def 2.1} \label{eq:7} \\
# \end{align*}
# όπου:
#
# $$
# \lim_{\vec{h}\to\vec{0}}\dfrac{|\mathcal{R}(\vec{x_o}+\vec{h},\vec{x_o})|}{\begin{Vmatrix} \vec{h} \end{Vmatrix}^2}=0 \nonumber
# $$
#
# Ονομάζουμε το $\mathcal{T}(\vec{x_o}+\vec{h},\vec{h}) = f(\vec{x_o}) + \sum_{i=1}^n \frac{\partial f}{\partial x_i}(\vec{x_o})h_i + \frac{1}{2} \sum_{i, j=1}^n \frac{\partial^2f}{\partial x_i \partial x_j}h_ih_j$ **το πολυώνυμο Taylor τάξης 2 με κέντρο το $\vec{x_o} \in A$** και αποτελεί την *καλύτερη πολυωνυμικη προσέγγιση της $f$ στο $\vec{x_o} \in A$*
# Και εδώ είναι που καταλήγουμε στο **Κριτήριο της Εσσιανής** που είναι το πολυ-μεταβλητό ανάλογο του Κριτηρίου της 2ης παραγώγου που έχουμε στον Απειροστικό Λογισμό μίας μεταβλητής. Πρώτα μια διευκρύνηση πάνω στους *Θετικά ορισμένους πίνακες* / **Positive-Definite matrices**, που είδαμε στο Κριτήριο του Sylvester.
# <hr style="border:2px solid gray"> </hr>
# <span style="color:blue"> **Θετικά Ορισμένοι Πίνακες / Positive-Definite matrices** </span>
# ___
#
# Ένας συμμετρικός, πραγματικός πίνακας $H \in \mathbb{R}^{n \times n}$ ονομάζεται **θετικά ορισμένος (positive-definite)** αν.ν.:
#
# $$
# \vec{x}^TH\vec{x} > 0, \ \forall \vec{x} \in \mathbb{R}^n \smallsetminus \{\vec{0}\} \tag{Def 3.1} \label{eq:8}
# $$
# <hr style="border:2px solid gray"> </hr>
# Θα χρησιμοποιήσουμε τώρα τον τύπο του Taylor τάξης 2 \eqref{eq:7}. Χ.Β.Γ., έστω ότι $\vec{x_o}$ είναι κρίσημο σημείο της βαθμωτής συνάρτησης $f$ και ότι αυτή είναι $\mathcal{C}^3$ στο $\mathcal{D}_f$. Άρα έχουμε ότι $\nabla f(\vec{x_o})=\vec{0}$. Οπότε, $\forall \vec{x} \in \mathcal{D}_f$ και $||\vec{h}||$ **μικρό**:
#
#
# \begin{align}
# f(\vec{x_o}+\vec{h})-f(\vec{x_o}) &= f(\vec{x})-f(\vec{x_o}) \nonumber \\ \nonumber
# &= \mathcal{T}(\vec{x_o}+\vec{h}, \ \vec{x_o})+\mathcal{R}(\vec{x_o}+\vec{h}, \ \vec{x_o})-f(\vec{x_o}) && \because \eqref{eq:7} \\ \nonumber
# &= \frac{1}{2}\langle \vec{h} \cdot [Hf](\vec{x}), \ \vec{h} \rangle + \mathcal{R}(\vec{x_o}+\vec{h}, \ \vec{x_o}) && \because \nabla f(\vec{x_o})=\vec{0}
# \end{align}
#
#
# Άρα από το Θεώρημα του Taylor, \eqref{eq:7}, καθώς $\lim_{\vec{h}\to\vec{0}}\dfrac{|\mathcal{R}(\vec{x_o}+\vec{h},\vec{x_o})|}{\begin{Vmatrix} \vec{h} \end{Vmatrix}^2}=0$ και άρα $\vec{h} \neq \vec{0}$, εξ' ορισμού ορίου, τότε για $\vec{h}$ σε μια $\epsilon$-γειτονία του $\vec{0}$, δηλαδή $\vec{h} \to \vec{0} \Leftrightarrow ||\vec{h}|| \to 0$ τότε έχουμε ότι το μέγεθος $\langle \vec{h} \cdot [Hf](\vec{x}), \ \vec{h} \rangle$ **υπερισχυει κατα πολύ** του (*θετικού*) μεγέθους $\mathcal{R}(\vec{x_o}+\vec{h}, \ \vec{x_o})$ και άρα έχουμε ότι, εάν ο πίνακας $[Hf](\vec{x})$ είναι **θετικά οριμένος** τότε εξ' ορισμού:
#
#
# \begin{align}
# \vec{h} \cdot [Hf](\vec{x}) \cdot \vec{h}^T &\equiv \langle \vec{h} \cdot [Hf](\vec{x}), \ \vec{h} \rangle > 0, \ \vec{h} \neq \vec{0} &&\because \text{Η εσσιανή είναι θετικά ορισμένη} \nonumber \\ &\Rightarrow f(\vec{x_o}+\vec{h})-f(\vec{x_o})>0 \nonumber \\
# &\Rightarrow f(\vec{x_o}+\vec{h})>f(\vec{x_o}) \nonumber \\
# &\Rightarrow \vec{x_o} \ \text{είναι ΤΟΠΙΚΟ ΕΛΑΧΙΣΤΟ / T.Ε. / Absolute Minimum} \nonumber \\
# \end{align}
# Όποτε έχοντας το πιο πάνω Κριτήριο της Εσσιανής για τοπικά ακρότατα, τότε μπορούμε να ορίσουμε την πιο κάτω μεθολογία για εύρεση των τοπικών **ελαχιστων** (αναλόγως αναπτύσεται μέθοδος και για τα τοπικά μέγιστα) και κατ' επέκταση, μέθοδο εύρεσης της προσεγγιστικής συνάρτησης της Μεθοδου των Ελαχίστων Τετραγώνων.
# <hr style="border:2px solid gray"> </hr>
# 1. Ορίζουμε την συνάρτηση: $A(a_0, \, a_1,\dots, \, a_m)=\sum_{i=1}^{n+1} (g(x_i)-y_i)^2$ ($\because \eqref{eq:4}$) όπου $a_i$ είναι οι συντελεστές του ζητούμενου πολυωνύμου; $g(x)$ βαθμού $m\leq n-1$ που θέλουμε να **προσεγγίζει** το σύνολο σημείων: $\{(x_0,y_0),(x_1,y_1),\dots,(x_{n},y_{n})\}$
#
#
# 2. Βρίσκουμε τις μερικές παραγώγους $\frac{\partial A}{\partial a_i}, \ \forall i=0, \ 1, \dots, \ m$ και επιλύουμε το σύστημα ($m+1$)-εξισώσεων:
#
#
# $$
# \left\{
# \begin{array} \\
# \frac{\partial A}{\partial a_0}=0 \\
# \frac{\partial A}{\partial a_1}=0 \\
# \vdots \\
# \frac{\partial A}{\partial a_m}=0
# \end{array}
# \right. \nonumber \tag{Σ} \label{eq:9} %We dont close the braces by having the period syntax instead of the "\}"
# $$
#
#
# 3. Bρίσκουμε την **εσσιανή** της συνάρτησης $A(a_0, \, a_1,\dots, \, a_m)$, $[HA](\vec{x})$ και την υπολογίζουμε σε κάθε στοιχείου του συνόλου $\mathcal{S}$. Δηλαδή υπολογίζουμε τους πίνακες: $[HA](\vec{s_1}), \ \dots \ , \ [HA](\vec{s_l})$. <span style="color:red"> <em>Προφανώς καθώς το \eqref{eq:9} είναι ΤΕΤΡΑΓΩΝΙΚΟ <b>ΓΡΑΜΜΙΚΟ</b> σύστημα (ως προς τους συντελεστές του πολυωνύμου $g(x)$) τότε η Εσσιανή της συνάρτησης $A$ ΕΙΝΑΙ <b>ΠΑΝΤΑ Ο ΜΗΔΕΝΙΚΌΣ</b> ΠΊΝΑΚΑΣ. </em> </span> **Οπότε καθώς υπάρχει τουλάχιστον ένας κύριος ελάσσων πίνακας με μηδενική ορίζουσα τότε, το κριτήριο της Εσσιανής / του Sylvester δεν μπορει να χρησιμοποιηθεί!**
# ---
#
# (<span style="color:green"> **EAN Η ΕΣΣΙΑΝΗ ΔΕΝ ΗΤΑΝ Ο ΜΗΔΕΝΙΚΟΣ ΠΙΝΑΚΑΣ:**</span>
#
# <span style="color:green">* Mε το Κριτήριο του Sylvester βρίσκουμε τους πίνακες αυτούς οι οποιοί είναι θετικά ορισμένοι. Έστω, Χ.Β.Γ., ότι αυτόι είναι οι $[HA](\vec{\sigma_1}), \ \dots \ , \ [HA](\vec{\sigma_r})$ όπου $\{ \vec{\sigma_1}, \dots, \ \vec{\sigma_r} \} \subseteq \mathcal{S} = \{s_1, \dots, \ s_l\}$. Tότε με βάση το Κριτήριο της Εσσιανης το σύνολο $\{ \vec{\sigma_1}, \dots, \ \vec{\sigma_r} \}$ περιέχει τα σημεία στα οπόια η $A(a_0, \, a_1,\dots, \, a_m)$ παρουσιάζει τοπικό ελάχιστο.</span>
#
#
# <span style="color:green">* Υπολογίζουμε τις παραστάσεις $A(\vec{\sigma_1}), \dots, \ A(\vec{\sigma_l})$. Εάν $\vec{\mu} = \min \{ A(\vec{\sigma_1}), \dots, \ A(\vec{\sigma_l}) \}$ τότε η τιμή $Α(\vec{\mu})$ είναι το **ΑΠΟΛΥΤΟ** ελάχιστο της συνάρτησης $A$.) </span>
#
# ---
# **Όμως η ελάχιστη τιμή του αθροίσματος των τετραγώνων προκύπτει θέτοντας την κλίση της συνάρτησης του σφάλματος ίση με το $0$. Αυτό γίνεται εύκολα φανερό (αν και με καποιές πραξείς) στην περίπτωση της γραμμικής προσαρμογής Ελαχίστων Τετραγώνων, δηλαδή όταν, $m=1$. Τότε προκύπτει ότι:**
#
# $$
# A(a_0,a_1)=(n+1)\overline{y^2}-2a_1(n+1)\overline{xy}-2a_0(n+1)\overline{y}+a_0^2(n+1)\overline{x^2}+2a_0a_1(n+1)\overline{x}+(n+1)a_0 \nonumber \\
# \text{όπου, } \overline{y^2}=\frac{y_0^2+\cdots+y_n^2}{n+1} \ , \ \overline{x^2}=\frac{x_0^2+\cdots+x_n^2}{n+1} \ , \ \overline{xy}=\frac{x_0y_0+\cdots+x_ny_n}{n+1} \ , \ \overline{y}=\frac{y_0+\cdots+y_n}{n+1} \ , \ \overline{x}=\frac{x_0+\cdots+x_n}{n+1} \nonumber
# $$
#
# Όμως το πιο πάνω είναι γράφημα ενός **παραβολοείδους** στο σύστημα αξόνων $\mathbf{O}_{a_0a_1A}$ με θετικούς συντελεστές στους όρους $a_0^2, a_1^2$, οπότε, στρέφει τα κοίλα του προς τα πάνω, δηλαδή είναι κυρτό γράφημα και άρα στο κρίσημο σημείο της η $A$, λόγω αυτής της γεωμετρικής απόδειξης, θα παρουσιάζει, απόλυτο **ελάχιστο**. Η πιο πάνω μεθοδολογια εφαρμόζεται και γενικά για $m \in \mathbb{N}$ και έτσι η ελάχιστη τιμή του αθροίσματος των τετραγώνων προκύπτει θέτοντας την κλίση της συνάρτησης του σφάλματος ίση με το $0$ για κάθε τιμή του $m$.
#
# 3. Επιλύουμε το ανωτέρω σύστημα, \eqref{eq:9}, είτε με απαλοιφή Gauss είτε με την μέθοδο του Cramer και βρίσκουμε, εάν ο πίνακας συντελεστών του συστήματως **δεν** ειναι ιδιάζων, την μοναδική του λύση $\vec{\mu}=(\mu_0 \dots, \ \mu_m) \in \mathbb{R}^{m+1}$
#
# Έστω το σύνολο λύσεων του συστήματος \eqref{eq:9}; $\mathcal{S}=\{\vec{\alpha} \in \mathbb{R}^{m+1}: \vec{\alpha} \text{ είναι λύση του συστήματος } \eqref{eq:9} \} = \{s_1, \dots, \ s_l\}$. Εάν $|\mathcal{S}|=1$ με μοναδικό στοιχείο το $\vec{\mu}$:
#
# 4. Εάν, $\vec{\mu}=(\mu_0 \dots, \ \mu_m) \in \mathbb{R}^{m+1}$ τότε το πολυώνυμο:
# $$
# g(x) = \mu_mx^m+\mu_{m-1}x^{m-1}+\cdots+\mu_1x+\mu_0 \ , \ \text{είναι το ζητούμενο προσεγγιστικό πολυώνυμο} \nonumber
# $$
#
# <hr style="border:2px solid gray"> </hr>
# Αφότου είδαμε την γενική περίπτωση της μεθόδου των ελαχίστων τετραγώνων, εας δούμε τώρα κάποια πρακτικά παραδείγματα, με βάση την πιο πάνω γενική μεθοδολογία, μαζί με προσομοιώσεις και γεωμετρικές απεικονίσεις με την βοήθεια της $MATLAB$ και της Python. Για την ακρίβεια, παραδείγματα τα οποία επικαλόνται μιας λογικής σειράς επίλυσης και κατανόησης θα τα επιλύουμε με την χρήση της Python, καθώς αυτό το paper δημιουργήθηκε σε $Jupyter \ Notebook$ το οποίο προσφέρει interactive Python console και την δυνατότητα επεξεργασίας κώδικα καθώς και την παρουσίαση γραφημάτων της Matplotlib, inline.
# <hr style="border:3px solid black"> </hr>
# ## Παράδειγμα 1: Έστω τα δεδομένα $(1, 2.5), \ (3, 3.5), \ (5, 6.35) \text{ και } (7, 8.1), \ (n=3)$. Tα απεικονίζουμε με τον πιο κάτω κώδικα:
# +
# %matplotlib inline
# BUG FIX: the config magic misspelled 'InlineBackend', so the SVG setting was silently ignored.
# %config InlineBackend.figure_format='svg' #saving the figure in vector graphics format for better quality
import matplotlib.pyplot as plt
import numpy as np
# Example 1 data set: four (x, y) observations to be fitted further below.
x = np.array([1, 3, 5, 7], int)
y = np.array([2.5, 3.5, 6.35, 8.1], float)
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure(figsize=(10,5))
_ = plt.scatter(x,y , marker='*', c='red', s=80, label='Our Data')
_ = plt.xlabel('x', fontsize=14)
_ = plt.ylabel('y', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([-0.5,10])
axes.set_ylim([-0.5,10])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='best', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
_ = plt.axvline(0, color='black', lw=2)
# BUG FIX: `plt.show` was only referenced, never called.
_ = plt.show()
# -
# Παρατηρούμε, για καλή μας τύχη, ότι τα δεδομένα μας μπορούν να προσεγγιστούν με ικανοποιητική ακρίβεια από μια εξίσωση ευθείας, δηλαδή απο ένα πολυώνυμο βαθμού 1. Έστω αυτό είναι το $g(x)=ax+b$. Όπως είπαμε και στην θεωρία μας, οι συντελέστες αυτού του πολυωνύμου, $a, \ b$ είναι οι αριθμοί οι οποιοί ελαχιστοποιούν την ακόλουθη παράσταση:
#
# \begin{align}
# A(a,b) &= (g(x_1)-y_1)^2+(g(x_2)-y_2)^2+(g(x_3)-y_3)^2+(g(x_4)-y_4)^2 \nonumber \\
# &= (ax_1+b-y_1)^2+(ax_2+b-y_2)^2+(ax_3+b-y_3)^2+(ax_4+b-y_4)^2 \nonumber \\
# \end{align}
#
# Βρίσκουμε τωρα τα κρίσημα σημεία της συνάρτησης αυτής, υπολογίζοντας αρχικά τις μερικές παραγώγους της συνάρτησης $A$ ως προς $a$ και ως προς $b$ και έπειτα λύνωντας το σύστημα: $\frac{\partial A}{\partial a}=0 \text{ και } \frac{\partial A}{\partial b}=0$. Έχουμε:
#
#
# \begin{align}
# \frac{\partial A}{\partial a} &= 2[x_1(ax_1+b-y_1)+x_2(ax_2+b-y_2)+x_3(ax_3+b-y_3)+x_4(ax_4+b-y_4)] \nonumber \\
# &= 2[a(x_1^2+x_2^2+x_3^2+x_4^2)+b(x_1+x_2+x_3+x_4)-(x_1y_1+x_2y_2+x_3y_3+x_4y_4)] \ = \ 0 \nonumber \\ \tag{Ex. 1.1} \label{eq:10}
# \end{align}
#
# \begin{align}
# \frac{\partial A}{\partial b} &= 2[(ax_1+b-y_1)+(ax_2+b-y_2)+(ax_3+b-y_3)+(ax_4+b-y_4)] \nonumber \\
# &= 2[a(x_1+x_2+x_3+x_4)+4b-(y_1+y_2+y_3+y_4)] \ = \ 0 \nonumber \\
# \tag{Ex. 1.2} \label{eq:11}
# \end{align}
#
# Tώρα ορίζοντας τις πιο κάτω ποσότητες:
#
# $$
# \mathcal{S}_{xx}=\sum_{i=1}^4x_i^2, \ \mathcal{S}_{x}=\sum_{i=1}^4x_i, \ \mathcal{S}_{xy}=\sum_{i=1}^4x_iy_i, \ \mathcal{S}_{y}=\sum_{i=1}^4y_i \nonumber
# $$
#
# το σύστημα εξισώσεων \eqref{eq:10} και \eqref{eq:11}, γνωστες και ως οι **κανονικές εξισώσεις των δεδομένων (normal equations)** του συνόλου δεδομένων, μετατρέπεται στο **ισοδύναμο** ομοιογενές γραμμικό σύστημα:
#
# $$
# \left\{
# \begin{array} \\
# a\mathcal{S}_{xx}+b\mathcal{S}_{x}=\mathcal{S}_{xy} \\
# a\mathcal{S}_{x}+4b=\mathcal{S}_{y} \\
# \end{array}
# \right. \nonumber \\
# \Updownarrow \\
# \mathbf{S}
# \cdot
# \begin{pmatrix}
# a \\
# b \\
# \end{pmatrix}
# \ = \
# \begin{pmatrix}
# \mathcal{S}_{xy} \\
# \mathcal{S}_{y} \\
# \end{pmatrix}
# \equiv
# \begin{pmatrix}
# \mathcal{S}_{xx} & \mathcal{S}_{x} \\
# \mathcal{S}_{x} & 4 \\
# \end{pmatrix}
# \cdot
# \begin{pmatrix}
# a \\
# b \\
# \end{pmatrix}
# \ = \
# \begin{pmatrix}
# \mathcal{S}_{xy} \\
# \mathcal{S}_{y} \\
# \end{pmatrix}
# $$
#
# Τώρα εάν ο πίνακας $\mathbf{S}$ είναι **αντιστρέψιμος**, δηλαδή $4\mathcal{S}_{xx}-\mathcal{S}_{x}^2 \neq 0$, όπως γνωρίζουμε από την Γραμμική Άλγεβρα το ανωτέρο γραμμικο σύστημα έχει **μοναδική** λύση η οποία, από απαλοιφή Gauss ή απο μέθοδο του Cramer προκύπτει να είναι η:
#
# $$
# a=\frac{4\mathcal{S}_{xy}-\mathcal{S}_{x}\mathcal{S}_{y}}{4\mathcal{S}_{xx}-\mathcal{S}_{x}^2}, \ \ b=\frac{\mathcal{S}_{xx}\mathcal{S}_{y}-\mathcal{S}_{xy}\mathcal{S}_{x}}{4\mathcal{S}_{xx}-\mathcal{S}_{x}^2} \nonumber
# $$
#
# Για την *γραμμική εκδοχή της Μεθόδου Ελαχίστων Τετραγώνων* θα δημιουργήσουμε συναρτήση στην $MATLAB$ όπου επιστρέφει τους συντελεστές $a, \ b$ του πολυωνύμου $g(x)=ax+b$ όπου αποτελεί την **γραμμική προσέγγιση ελαχίστων τετραγώνων των δεδομένων (σε arrays/vectors)**, $\vec{x}, \ \vec{y}$. Στα NumPy & SciPy module της Python ήδη υπάρχει η built-in συνάρτηση **lstsq()** όπου κάνει αυτή ακριβώς την δουλειά για εμάς. Aρχικά ας γράψουμε μια συνάρτηση σε ένα m-file όπου για ένα τυχαίο δείγμα δεδομένων $\vec{x}, \ \vec{y}$ μεγέθους n θα μας επιστρέφει το γραμμικό πολυώνυμο ελαχίστων τετραγώνων (σε array μορφη στην $MATLAB$), $g(x)$.
# <hr style="border:2px solid gray"> </hr>
# ### Our $MATLAB$ function
#
# function [a,b] = lin_lsq(x,y)
#
# % This function returns the coefficients of the linear regression of the given input data, x and y, in row or column vector
#
# % form. It implements the linear version of the Method of Least Squares. It also displays a table where each row contains the
#
# % x and y coordinates of the data, the linear function of the method of LSQ evaluated at that point and the absolute error of
#
# % the best-fit linear function and the data. We also display the total squared error at the end.
#
# n = length(x); % **SOS THIS n IS THE ACTUAL SIZE OF OUR DATA SET. THAT MEANS IT IS EQUIVALENT TO n+1 IN OUR THEORY OVERVIEW ABOVE**
#
# x = x(:); y = y(:); % We make the data into column vectors, incase they were given otherwise. We dont use the transpose syntax
#
# % in case the given vector where already in column vector form
#
# s_x = sum(x); s_xx = sum(x.^2); s_y = sum(y); s_xy = sum(x.*y);
#
# a = (n*s_xy-s_x*s_y)/(n*s_xx-s_x*s_x);
#
# b = (s_xx*s_y-s_xy*s_x)/(n*s_xx-s_x*s_x);
#
# table = [x, y, (a*x+b), y-(a*x+b)];
#
# disp('| x | y | g(x_i) | y_i-(a*x_i+b) |')
#
# disp('--------------------------------------------')
#
# disp(sprintf('|%7.3f|%7.3f|%10.3f|%15.3f| \n',table')) % fprintf = disp(sprintf())
#
# # %because sprintf iterates column by column we use the transpose of the
#
# # %table matrix for the correct output of our table
#
# sq_err = sum(table(:,4).^2);
#
# disp(['The total squared error is ', num2str(sq_err)]);
#
# ### Output (we will print it through Python's IPython module, Image function)
# Display a screenshot of the MATLAB function's console output (notebook-only;
# requires IPython and the PNG to sit next to the notebook).
from IPython.display import Image
Image(filename ="lin_lsq_output.png", width=1280, height=720)
# <hr style="border:2px solid gray"> </hr>
# ### Creating an equivalent Python function and visualising the linear approximation of the LSQ Method compared to the data set
# <hr style="border:2px solid gray"> </hr>
# +
# %matplotlib inline
#saving the figure in vector graphics format for better quality
# %config InlineBachend.figure_format='svg'
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as LA
def lin_lsq(x, y):
    """
    Return ((a, b), g) where g(x) = a*x + b is the least-squares linear fit
    of the input data x and y (any array shape; flattened to column vectors).

    Also prints a table with each data point, the fitted value g(x_i) and the
    residual y_i - g(x_i), followed by the total squared error. Returns None
    if the normal-equation coefficient matrix is singular.
    """
    n = np.prod(x.shape)
    x = x.reshape(n,1)  # column vectors regardless of the given shape
    y = y.reshape(n,1)
    s_x = np.sum(x); s_xx = np.sum(x**2); s_y = np.sum(y); s_xy = np.sum(x*y)
    # BUG FIX: the original hard-coded 4 (the size of the worked example)
    # in the normal-equation matrix; for arbitrary sample sizes it must be n.
    S = np.array([[s_xx, s_x], [s_x, n]], float)
    d = np.array([s_xy, s_y], float)
    try:
        # LA.solve raises LinAlgError for a singular matrix, so the
        # original's unused explicit inverse (LA.inv) is unnecessary.
        sol = LA.solve(S,d)
    except LA.LinAlgError:
        print("""With the given data, the system of normal equations of the Method of LSQ, does not have or
has infinite solutions because the coefficient matrix, S, is singular, i.e. it doesnt have an inverse.""")
        return None
    a = sol[0]; b = sol[1]
    g = np.poly1d([a,b])       # the best-fit linear polynomial g(x) = a*x + b
    g_x = g(x)
    err = y-g_x                # residuals per data point
    print('| x | y | g(x) | y-g(x) | \n ----------------------------------------')
    table = np.concatenate((x, y, g_x, err), axis=1)
    for (x_i, y_i, g_xi, err_i) in table:
        print(f'| {x_i:5.2f} | {y_i:5.2f} | {g_xi:5.2f} | {err_i:6.2f} |')
    print(f"Also the total squared error is {sum(err**2)[0]:.2f} \n")
    return (a,b), g
# Example data set (4 points, i.e. n = 3) and the linear LSQ fit from lin_lsq.
x = np.array([1, 3, 5, 7], int)
y = np.array([2.5, 3.5, 6.35, 8.1], float)
(a,b), g = lin_lsq(x,y)
print(f"The coefficients of the linear polyonomial of the Method of LSQ are: a = {a:.2f} and b = {b:.2f}")
# Dense grid on which the best-fit line is evaluated for plotting.
t = np.linspace(0,10, num=1000)
g_t = g(t)
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure(figsize=(10,5))
_ = plt.scatter(x, y , marker='*', c='red', s=80, label='Our Data')
_ = plt.plot(t, g_t, c='blue', linewidth='2.0', label=r'$g(x)=ax+b$')
_ = plt.xlabel('x', fontsize=14)
_ = plt.ylabel('y', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([-0.5,10])
axes.set_ylim([-0.5,10])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='best', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
_ = plt.axvline(0, color='black', lw=2)
_ = plt.title("The plot of the data compared to our approximation",
              {'fontsize': 18,
              'verticalalignment': 'baseline',
              'horizontalalignment': 'center'} )
# BUG FIX: plt.show is a function; the original `_ = plt.show` only referenced
# it without calling it, so outside inline mode no figure would appear.
plt.show()
# -
# *Βλέπουμε απο τα αποτελέσματα της Python και της MATLAB ότι, με ακρίβεια 2 δεκαδικών ψηφίων, οι 2 εκδοχές της Μεθόδου LSQ επιφέρει και στα 2 προγράμματα / προσομοιώσεις το ίδιο αποτέλεσμα. Στην Python, όπως είχαμε προαναφέρει, θα μπορούσαμε να χρησιμοποιήσουμε απευθείας την συνάρτηση lstsq() του NumPy ή SciPy module αντί για την lin_lsq() που εμείς δημιουργήσαμε.*
# <hr style="border:2px solid gray"> </hr>
# ## Παράδειγμα 2: Τετραγωνικές και κυβικές προσεγγίσεις της Μεθόδου Ελαχίστων Τετραγώνων
# Με την ίδια μεθοδολογία που αναπτύξαμε πριν φτάσουμε στα παραδείγματα και που εφαρμόσαμε στο *Παράδειγμα 1*, θα δούμε την μεθοδολογία για να προσεγγίσουμε ένα σύνολο δεδομένων $\{(x_0,y_0),(x_1,y_1),\dots,(x_{n},y_{n})\}$, με πολυώνυμα βαθμού 2 (**Quadratic Least Squares Approximation**) και βαθμού 3 (**Cubic Least Squares Approximation**) (Στην περίπτωση όπου $n=3$ τότε το πολυώνυμο που προκύπτει από την Κυβική προσέγγιση ελαχίστων τετραγώνων *είναι* το πολυώνυμο παρεμβολής του Lagrange, λόγω μοναδικότητας και καθώς m=n (βλέπε **Παράδειγμα 2.1** και \eqref{eq:1}) και όταν $n=2$, το πολυώνυμο που προκύπτει απο την Τετραγωνική προσέγγιση ελαχίστων τετραγώνων *είναι* το πολυώνυμο παρεμβολής του Lagrange). Άρα θα δούμε τις περιπτώσεις όπου $m=2$ και $m=3$, αντίστοιχα. (στο *Παράδειγμα 1*, είδαμε την περίπτωση $m=1$)
# ## $m=2$ :
#
# Έχουμε ότι η συνάρτηση σφάλματος ορίζεται να είναι η: $A(a,b,c)=\sum_{i=1}^{n+1}(g(x_i)-y_i)^2$ όπου $g(x)=ax^2+bx+c$, είναι το ζητούμενο προσεγγιστικό πολυώνυμο της Μεθόδου Ελαχίστων Τετραγώνων. Εξισώνοντας τις μερικές παραγώγους της $A$ ως προς $a, \ b, \ c$ με το $0$, τότε προκύπτουν οι **κανονικές εξισώσεις** των συντελεστών, $a, \ b, \ c$:
#
# $$
# \left\{
# \begin{array} \\
# a\sum_{i=0}^n x_i^4+b\sum_{i=0}^n x_i^3+c\sum_{i=0}^n x_i^2 = \sum_{i=0}^n x_i^2y_i \nonumber \\
# a\sum_{i=0}^n x_i^3+b\sum_{i=0}^n x_i^2+c\sum_{i=0}^n x_i = \sum_{i=0}^n x_iy_i \nonumber \\
# a\sum_{i=0}^n x_i^2+b\sum_{i=0}^n x_i+ \ (n+1)c \ = \sum_{i=0}^n y_i \nonumber \\
# \end{array}
# \right.
# $$
#
# Εαν τώρα το ανωτέρω γραμμικό σύστημα έχει μη-ιδιάζων πίνακα συντελεστών; $\mathcal{S}$ τότε αυτό έχει μοναδική λύση, της οποίας οι συντεταγμένες όπως είχαμε δει, είναι οι συντελεστές του ζητούμενου πολυωνύμου, $g(x)$. Aπό την μέθοδο του Cramer προκύπτει ότι:
#
# $$
# a=\frac{\begin{vmatrix}
# \sum_{i=0}^n x_i^2y_i & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_iy_i & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n y_i & \sum_{i=0}^n x_i & (n+1)
# \end{vmatrix}}{\begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i & (n+1)
# \end{vmatrix}}, \
# b=\frac{\begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^2y_i & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_iy_i & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n y_i & (n+1)
# \end{vmatrix}}{\begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i & (n+1)
# \end{vmatrix}}, \
# c=\frac{\begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2y_i \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_iy_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i & \sum_{i=0}^n y_i \\
# \end{vmatrix}}{\begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i & (n+1)
# \end{vmatrix}} \nonumber
# $$
#
# $$
# \text{ όπου ισχύει ότι } \begin{vmatrix}
# \sum_{i=0}^n x_i^4 & \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 \\
# \sum_{i=0}^n x_i^3 & \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i \\
# \sum_{i=0}^n x_i^2 & \sum_{i=0}^n x_i & (n+1)
# \end{vmatrix} \ne 0 \nonumber
# $$
# ## m=3:
#
# Παρομοίως έχουμε ότι, με ανάλογο τρόπο των περιπτώσεων m=1 και m=2, το σύστημα γραμμικών εξισώσεων που προσδιορίζει του συντελεστές της "best fit" κυβικής συνάρτησης $g(x)=ax^3+bx^2+cx+d$ είναι το:
#
# $$
# \left\{
# \begin{array} \\
# a\sum_{i=0}^n x_i^6+b\sum_{i=0}^n x_i^5+c\sum_{i=0}^n x_i^4+d\sum_{i=0}^n x_i^3 = \sum_{i=0}^n x_i^3y_i \nonumber \\
# a\sum_{i=0}^n x_i^5+b\sum_{i=0}^n x_i^4+c\sum_{i=0}^n x_i^3 +d\sum_{i=0}^n x_i^2 = \sum_{i=0}^n x_i^2y_i \nonumber \\
# a\sum_{i=0}^n x_i^4+b\sum_{i=0}^n x_i^3+ c\sum_{i=0}^n x_i^2+d\sum_{i=0}^n x_i= \sum_{i=0}^n x_iy_i \nonumber \\
# a\sum_{i=0}^n x_i^3+b\sum_{i=0}^n x_i^2+ c\sum_{i=0}^n x_i+ \ d(n+1) \ = \sum_{i=0}^n y_i
# \end{array}
# \right.
# $$
#
# To οποίο επιλύεται με μεθόδους Γραμμικής Άλγεβρας ή αλγορίθμων επίλυσης γραμμικών συστημάτων. Για παράδειγμα, θα δούμε ότι για τα δεδομένα του *Παραδείγματος 1*, καθώς $n=3=m$ τότε η Κυβική Προσέγγιση Ελαχίστων Τετραγώνων είναι το μοναδικό πολυώνυμο Lagrange όπου επαληθεύει το σύνολο δεδομένων μας, \eqref{eq:1}.
# ## Παράδειγμα 2.1: Κυβική προσέγγιση ελαχίστων τετραγώνων επί των δεδομένων $(1, 2.5), \ (3, 3.5), \ (5, 6.35) \text{ και } (7, 8.1), \ (n=3)$
# +
# %matplotlib inline
# Saving the figure in vector graphics format for better quality.
# FIX: the IPython config class is called InlineBackend — the original
# "InlineBachend" typo made this magic a silent no-op.
# %config InlineBackend.figure_format='svg'
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as LA
def cub_lsq(x,y):
    """
    Return the coefficients (a, b, c, d) of the cubic least-squares fit AND the
    corresponding numpy.poly1d polynomial g(x) = a*x^3 + b*x^2 + c*x + d for
    the given data.  (The original docstring incorrectly said "linear".)

    The input vectors x and y may be given in row or column form; they are
    reshaped to column vectors internally.  The function builds and solves the
    normal equations of the cubic Method of Least Squares.  It also prints a
    table where each row contains the x and y coordinates of a data point, the
    best-fit cubic evaluated at that point and the residual, followed by the
    total squared error.

    Returns None (after printing a message) when the coefficient matrix of the
    normal equations is singular.
    """
    n = np.prod(x.shape)
    x = x.reshape(n,1) #if given otherwise, we turn x and y vectors to column vectors
    y = y.reshape(n,1)
    # Power sums that make up the 4x4 normal-equations system.
    s_x = np.sum(x); s_x2 = np.sum(x**2); s_x3 = np.sum(x**3); s_x4 = np.sum(x**4); s_x5 = np.sum(x**5); s_x6 = np.sum(x**6)
    s_x3y = np.sum(x**3*y); s_x2y = np.sum(x**2*y); s_xy = np.sum(x*y); s_y = np.sum(y)
    S = np.array([[s_x6, s_x5, s_x4, s_x3], [s_x5, s_x4, s_x3, s_x2], [s_x4, s_x3, s_x2, s_x], [s_x3, s_x2, s_x, n]], float)
    r = np.array([s_x3y, s_x2y, s_xy, s_y], float)
    try:
        # LA.solve raises LinAlgError for a singular S, so the previous
        # explicit (and discarded) LA.inv(S) pre-check was redundant work.
        sol = LA.solve(S,r)
    except LA.LinAlgError:
        print("""With the given data, the system of normal equations of the Method of LSQ, does not have or
        has infinite solutions because the coefficient matrix, S, is singular, i.e. it doesnt have an inverse.""")
        return None
    else:
        a = sol[0]; b = sol[1]; c = sol[2]; d = sol[3]
        g = np.poly1d([a,b,c,d])
        g_x = g(x)
        err = y-g_x
        print('| x | y | g(x) | y-g(x) | \n ----------------------------------------')
        table = np.concatenate((x, y, g_x, err), axis=1)
        for (x_i, y_i, g_xi, err_i) in table:
            print(f'| {x_i:5.2f} | {y_i:5.2f} | {g_xi:5.2f} | {err_i:6.2f} |')
        print(f"Also the total squared error is {sum(err**2)[0]:.2f} \n")
        return (a,b,c,d), g
# Same four data points as Example 1 (4 points, i.e. n = 3 = m for a cubic).
x = np.array([1, 3, 5, 7], int)
y = np.array([2.5, 3.5, 6.35, 8.1], float)
(a,b,c,d), g = cub_lsq(x,y)
print(f"The coefficients of the CUBIC polyonomial of the Method of LSQ are: a = {a:.2f}, b = {b:.2f}, c = {c:.2f} and d = {d:.2f}")
t = np.linspace(0,10, num=1000)
g_t = g(t)
# FIX in the message below: "Apprpximation" -> "Approximation", and the
# interpolating polynomial is the *Lagrange* polynomial (Legendre polynomials
# are the orthogonal family, not the interpolation construction — see the
# "Lagrange Polyonomial" label used later in this notebook).
print("""We see that our approximation interpolates our data, EXACTLY. As we see from the output table, the total squared
error is 0, as well as the individual differences of the data and the approximations at each point (with a specific
tolerance). That is attributed to the fact, like we explained at our theory overview at the beginning of this Jupyter
Notebook, that the number of data points, (in this example 4=n+1) is exactly one above the degree of the polyonomial
approximation that we try to achieve with the Method of Least Squares. That is m=3=4-1=n. So in other words, in this
instance where m=3=4-1, the Method of Cubic Approximation is equivalent to the Lagrange Method of Polyonomial
Interpolation that we discussed briefly at the beginning!""")
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure("Cubic approximation of our data", figsize=(10,5))
_ = plt.scatter(x, y , marker='*', c='red', s=80, label='Our Data')
_ = plt.plot(t, g_t, c='blue', linewidth='2.0', label=r'$g(x)=ax^3+bx^2+cx+d$')
_ = plt.xlabel('x', fontsize=14)
_ = plt.ylabel('y', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([-0.5,10])
axes.set_ylim([-0.5,10])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='best', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
_ = plt.axvline(0, color='black', lw=2)
_ = plt.title("The plot of the data compared to our cubic approximation",
              {'fontsize': 18,
              'verticalalignment': 'baseline',
              'horizontalalignment': 'center'} )
# BUG FIX: plt.show must be *called*; `_ = plt.show` only referenced the function.
plt.show()
# -
# <hr style="border:2px solid gray"> </hr>
# ## Προσαρμογή δεδομένων με μη-πολυωνυμικές συναρτήσεις - Eκθετικές Προσεγγίσεις
# Πολλές φόρες προκύπτει το ενδεχόμενο όπου δεν θέλουμε να προσαρμόσουμε πολυώνυμο στα δεδομένα μας και θα ήταν πιο βολικό σε αυτά να προσαρμόσουμε μια συνάρτηση που τα αντιπροσωπεύει καλύτερα και πιο αποτελεσματικά. Για παράδειγμα σε μια έρευνα που έγινε από το Indiana University Center for Studies of Law in Action το 2007 και δημοσιεύτηκε στο παγκόσμιο συνέδριο του ICADTS - The International Council on Alcohol, Drugs and Traffic Safety στο Seattle, USA, μελετήθηκε το ρίσκο αυτοκινητιστικού ατυχήματος υπό την επήρεια αλκοόλης. Δεδομένα από 2871 αυτοκινητιστικά ατυχήματα / δυστυχήματα χρησιμοποιήθηκαν ώστε να υπολογιστεί κατα πόσο συσχετίζεται η Συγκέντρωση Αλκοόλ στο αίμα (BAC – Blood Alcohol Concentration) με το ρίσκο να γίνει ένα αυτοκινητιστικό δυστήχημα. Ο παρακάτω πίνακας παρουσιάζει τα αποτελέσματα της έρευνας:
#
# | BAC | Relative Risk of crashing |
# | ----- | --------------------------------- |
# | 0 | 1 |
# | 0.01 | 1.03 |
# | 0.03 | 1.06 |
# | 0.05 | 1.38 |
# | 0.07 | 2.09 |
# | 0.09 | 3.54 |
# | 0.11 | 6.41 |
# | 0.13 | 12.6 |
# | 0.15 | 22.1 |
# | 0.17 | 39.05 |
# | 0.19 | 65.32 |
# | 0.21 | 99.78 |
#
# To *"Relative Risk"* είναι μια ποσότητα που προσδιορίζει το πόσες φορές πιο πιθανό είναι ένα άτομο υπο την καθορισμένη ποσότητα BAC να προκαλέσει αυτοκινητιστικό ατύχημα σε σύγκριση με ένα άτομο με *μηδενικό* BAC. Για παράδειγμα, ένα άτομο με 0.09 BAC είναι 3.54 φορές πιο πιθανό να προκαλέσει ένα αυτοκινητιστικό ατύχημα σε σύγκριση με ένα άτομο με 0 BAC. Εάν σχεδιάσουμε τα δεδομένα μας στο Καρτεσιανό επίπεδο με τον $x$-άξονα να αναπαριστά την στάθμη BAC ενώ ο $y$-άξονας να αναπαριστά το αντίστοιχο "relative risk" (σχετικό ρίσκο) παρατηρούμε ότι αυτά προσαρμόζονται ιδιαίτερα αποτελεσματικά απο μια **εκθετική συνάρτηση** (καλύτερα απότι θα τα προσέγγιζε ένα πολυώνυμο 2ου ή 3ου ή 4ου βαθμού) (**exponential regression**).
# +
# %matplotlib inline
# FIX: the config class is InlineBackend ("InlineBachend" was a typo, so the magic had no effect).
# %config InlineBackend.figure_format='svg' #saving the figure in vector graphics format for better quality
import matplotlib.pyplot as plt
import numpy as np
# BAC levels: 0, 0.01 and then 0.03 up to 0.21 in steps of 0.02; y holds the
# corresponding relative crash risks from the study table above.
x = np.concatenate((np.array([0, 0.01], float), np.arange(0.03,0.23,0.02)),axis=0)
y = np.array([1, 1.03, 1.06, 1.38, 2.09, 3.54, 6.41, 12.6, 22.1, 39.05, 65.32, 99.78], float)
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure(figsize=(10,5))
_ = plt.scatter(x,y , marker='*', c='red', s=80, label='Our Data')
_ = plt.xlabel('BAC level', fontsize=14)
_ = plt.ylabel('Relative Risk of Crashing', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([-0.05,0.25])
axes.set_ylim([-5,110])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='upper left', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
_ = plt.axvline(0, color='black', lw=2)
# BUG FIX: plt.show must be *called*; `_ = plt.show` only referenced the function.
plt.show()
# -
# Καθώς όπως βλέπουμε τα δεδομένα μας σχετίζονται με εκθετικό τρόπο τότε θέλουμε να βρούμε σταθερές $c$ και $a$, τέτοιες ώστε η εκθετική συνάρτηση $y=ce^{ax}$ να προσαρμόζει τα δεδομένα μας. Πάλι θα χρησιμοποιήσουμε (για καλή μας τύχη) στην $MATLAB$ την συνάρτηση την οποία δημιουργήσαμε, **lin_lsq()** ή για ευκολία την εντολή **polyfit()** με arguement στην παράμετρο του βαθμού του πολυωνύμου, τον αριθμό $1$. Υπενθυμίζουμε ότι όταν εάν $n+1$ είναι το πλήθος των δεδομένων μας τότε η συνάρτηση **polyfit(x,y,m)** της $MATLAB$ επιστρέφει το πoλυώνυμο βαθμού $m<n$ που προκύπτει από την **Μέθοδο Ελαχίστων Τετραγώνων**. Για να πάρουμε αυτές τις σταθερές $c$ και $a$, αρκεί να δώσουμε σαν arguements στην εντολή polyfit() τα διανύσματα $\vec{x} \text{ και } \ln{\vec{y}}$ και να ζητήσουμε το πολυώνυμο **1ου βαθμού** με το syntax:
#
# $$
# \text{polyfit(x,log(y),1)} \nonumber
# $$
#
# στην $MATLAB$. **Yπενθυμίζουμε ότι η συνάρτηση polyfit(), με την ίδια λειτουργικότητα, βρίσκεται και στο NumPy module της Python. Σε αυτό το παράδειγμα, αυτή θα χρησιμοποιήσουμε (για σκοπούς του παραδείγματος), αντί για την lin_lsq() που δημιουργήσαμε πιο πάνω στο Παράδειγμα 1**
#
# Ο λόγος που αυτό λειτουργεί είναι απλός:
#
# Εάν $y=ce^{ax}$ τότε,
# $$
# \ln{y} = \ln{ce^{ax}} \Rightarrow \overbrace{\ln{y}}^{Y} = \overbrace{\ln{c}}^{C} + ax \Leftrightarrow Y=ax+C \nonumber
# $$
#
# που μας λέει ότι θέλουμε να βρούμε ένα πολυώνυμο 1ου βαθμού, με την Μέθοδο Ελαχίστων Τετραγώνων για τα δεδομένα μας $(x,Y)=(x,\ln{y})$ και αυτό γιατι έχουμε βάσιμες πληροφορίες (κυρίως γεωμετρικές απο την αναπαράσταση των δεδομένων ή από προηγούμενες έρευνες που έγινας στο θέμα μας) ότι τα δεδομένα μας συσχετίζονται με εκθετικό τρόπο.
# +
# %matplotlib inline
# FIX: the config class is InlineBackend ("InlineBachend" was a typo, so the magic had no effect).
# %config InlineBackend.figure_format='svg' #saving the figure in vector graphics format for better quality
import matplotlib.pyplot as plt
import numpy as np
# BAC levels and the corresponding relative crash risks from the study table.
x = np.concatenate((np.array([0, 0.01], float), np.arange(0.03,0.23,0.02)))
y = np.array([1, 1.03, 1.06, 1.38, 2.09, 3.54, 6.41, 12.6, 22.1, 39.05, 65.32, 99.78], float)
# BUG FIX: n was originally computed *before* x was (re)defined in this cell,
# so it silently relied on the previous cell's x having the same length.
n = np.prod(x.shape)
# Fit a line to (x, ln y): ln y = a*x + C, hence y = e^C * e^(a*x) = c*e^(a*x).
g = np.polyfit(x,np.log(y),1)
a = g[0]; upper_c = g[1]; c = np.exp(upper_c);
g_fun = lambda t: c*np.exp(a*t)
g_x = g_fun(x)
err = y-g_x
# Column vectors so they can be glued side by side into the output table.
x = x.reshape(n,1)
y = y.reshape(n,1)
g_x = g_x.reshape(n,1)
err = err.reshape(n,1)
t = np.linspace(0,0.25,num=1000)
print('| x | y | g(x) | y-g(x) | \n ----------------------------------------')
table = np.concatenate((x, y, g_x, err), axis=1)
for (x_i, y_i, g_xi, err_i) in table:
    print(f'| {x_i:5.2f} | {y_i:5.2f} | {g_xi:5.2f} | {err_i:6.2f} |')
print(f"Also the total squared error is {sum(err**2)[0]:.2f} \n")
print(f"The coefficients of the exponential regression are: c = {c:.2f}, a = {a:.2f}")
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure(figsize=(10,5))
_ = plt.scatter(x,y , marker='*', c='red', s=80, label='Our Data')
_ = plt.plot(t, g_fun(t), c='blue', linewidth='2.0', label=r'$g(x)=ce^{ax}$')
_ = plt.xlabel('BAC level', fontsize=14)
_ = plt.ylabel('Relative Risk of Crashing', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([-0.05,0.25])
axes.set_ylim([-5,110])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='upper left', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
_ = plt.axvline(0, color='black', lw=2)
# BUG FIX: plt.show must be *called*; `_ = plt.show` only referenced the function.
plt.show()
# -
# <hr style="border:2px solid gray"> </hr>
# ## Προσαρμογή δεδομένων με μη-πολυωνυμικές συναρτήσεις - Κλασματικές Προσεγγίσεις
# Πολλές είναι και οι περιπτώσεις όμως όπου τα δεδομένα μας μπορούν να προσαρμοστούν από μια κλασματική συνάρτηση (**reciprocal regression**) εαν υπάρχει κλασματική σχέση μεταξύ των δεδομένων (*reciprocal relation*). Δηλαδή εαν παρατηρήσουμε ότι τα δεδομένα μας συσχετίζονται με τέτοιο τρόπο τότε μπορούμε να τα προσεγγίσουμε, όχι με ένα πολυώνυμο, αλλά με ένα πηλίκο - κλασματική συνάρτηση της μορφής:
#
# $$
# y=\frac{1}{\gamma x+\delta} \nonumber
# $$
#
# Έστω τα πιο κάτω δεδομένα τα οποία προήρθαν από ένα σχολικό πείραμα, όπου οι μαθητές υπολόγιζαν την ισχύ μιας πηγής φωτός (intensity of light) σε candela (cd) σε συνάρτηση με την απόσταση από την πηγή φωτός σε inches ("):
#
# | d | i |
# |-------|---------|
# | 30" | 0.85 cd|
# | 35" | 0.67 cd|
# | 40" | 0.52 cd|
# | 45" | 0.42 cd|
# | 50" | 0.34 cd|
# | 55" | 0.28 cd|
# | 60" | 0.24 cd|
# | 65" | 0.21 cd|
# | 70" | 0.18 cd|
# | 75" | 0.15 cd|
#
# Για να βρούμε μια τέτοια συνάρτηση, χρησιμοποιηούμε την ακόλουθη εντολή / συναρτήση με το καθορισμένο syntax, όπως είδαμε και στην Εκθετική Προσέγγιση, είτε στην Python είτε στην $MATLAB$:
#
# $$
# \text{Στην MATLAB: } polyfit(x,1./y,1) \\ \text{ και στην Python: } numpy.polyfit(x,1/y,1) \nonumber
# $$
#
# Ο λόγος που λειτουργεί αυτό, είναι και εδώ απλός:
#
# Έστω $y=\frac{1}{\gamma x+\delta}$ τότε,
#
# $$
# \overbrace{\frac{1}{y}}^{Y} = \gamma x+\delta \Leftrightarrow Y=\gamma x+\delta \nonumber
# $$
#
# Άρα η εντολή θα μας δώσει το πολυώνυμο $\gamma x+\delta$ το οποίο χρησιμοποιούμε για την προσέγγιση των δεδομένων μας. **Μπορούμε φυσικά να προσεγγίσουμε τα δεδομένα μας και με κλασματικές συναρτήσεις όπου ο παρανομαστής του πηλίκου αποτελεί πολυώνυμο βαθμού μεγαλύτερου του $1$, απλά αλλάζοντας το arguement του βαθμού της προσέγγισης στο input της εντολής polyfit() στο ποθητό αριθμό.** Για παράδειγμα, για τα δεδομένα αυτού του πειράματος, θα δούμε ότι χρησιμοποιόντας **Quadratic** Reciprocal Regression με την Μέθοδο LSQ, επιτυγχάνουμε καλύτερα προσαρμογή και προσέγγιση των δεδομένων μας!
# +
# %matplotlib inline
# FIX: the config class is InlineBackend ("InlineBachend" was a typo, so the magic had no effect).
# %config InlineBackend.figure_format='svg' #saving the figure in vector graphics format for better quality
import matplotlib.pyplot as plt
import numpy as np
# Distances (inches) and measured light intensities (candela) from the table above.
x = np.arange(30, 80, 5)
y = np.array([0.85, 0.67, 0.52, 0.42, 0.34, 0.28, 0.24, 0.21, 0.18, 0.15], float)
n = np.prod(x.shape)
# Reciprocal regressions: fit a polynomial (degree 1 and 2) to 1/y, then invert it.
g_1 = np.polyfit(x,1/y,1)
g_2 = np.polyfit(x,1/y,2)
gamma = g_1[0]; delta = g_1[1]; a = g_2[0]; b = g_2[1]; c = g_2[2];
gfun_1 = lambda t: 1/(gamma*t+delta)
gfun_2 = lambda t: 1/(a*t**2+b*t+c)
g_1_x = gfun_1(x); g_2_x = gfun_2(x)
err_1 = y-g_1_x
err_2 = y-g_2_x
# Column vectors so they can be glued side by side into the output table.
x = x.reshape(n,1)
y = y.reshape(n,1)
g_1_x = g_1_x.reshape(n,1)
g_2_x = g_2_x.reshape(n,1)
err_1 = err_1.reshape(n,1)
err_2 = err_2.reshape(n,1)
t = np.linspace(25,80,num=1000)
print("g_1(x)=γx+δ and g_2(x)=ax^2+bx+c \n")
print('| x | y | g_1(x) | y-g_1(x) | g_2(x) | y-g_2(x) | \n \
       ---------------------------------------------------------------------')
table = np.concatenate((x, y, g_1_x, err_1, g_2_x, err_2), axis=1)
for (x_i, y_i, g_1_xi, err_1_i, g_2_xi, err_2_i) in table:
    print(f'| {x_i:5.2f} | {y_i:4.2f} | {g_1_xi:7.2f} | {err_1_i:8.2f} | {g_2_xi:7.2f} | {err_2_i:8.2f} |')
print(f"The total squared error for the Linear reciprocal regression is {sum(err_1**2)[0]:.2f} \n")
print(f"The total squared error for the Quadratic reciprocal regression is {sum(err_2**2)[0]:.6f} \n")
print(f"The coefficients of the LINEAR reciprocal regression are: γ = {gamma:.2f} and δ = {delta:.2f}")
print(f"The coefficients of the QUADRATIC reciprocal regression are: a = {a:.3f}, b = {b:.3f} and c = {c:.3f}")
# Creating the plot and editing some of its attributes for clarity. We will just copy and paste them for future use.
# We also assign every modification to our plot to a dummy/garbage collecting variable; '_' to prevent unwanted outputs
_ = plt.figure(figsize=(10,5))
_ = plt.scatter(x, y, marker='*', c='red', s=80, label='Our Data')
_ = plt.plot(t, gfun_1(t), c='blue', linewidth='1.0', label=r'$g_1(x)=\frac{1}{\gamma x+\delta}$')
_ = plt.plot(t, gfun_2(t), c='purple', linewidth='1.0', label=r'$g_2(x)=\frac{1}{ax^2+bx+c}$')
_ = plt.xlabel('Distance in inches (")', fontsize=14)
_ = plt.ylabel('Intensity of Light Source (cd)', fontsize=14)
_ = plt.grid(True)
axes = plt.gca() #gca stands for get current axes
axes.set_xlim([25,80])
axes.set_ylim([-0.1,1.4])
_ = plt.rcParams['xtick.labelsize']=18
_ = plt.rcParams['ytick.labelsize']=18
_ = plt.legend(loc='best', fontsize=14) #Sets the legend box at the best location
_ = plt.axhline(0, color='black', lw=2)
# BUG FIX: plt.show must be *called*; `_ = plt.show` only referenced the function.
plt.show()
# -
# <hr style="border:2px solid gray"> </hr>
# ## Παράδειγμα 3: Εφαρμογή όλων των μεθόδων που είδαμε σε ένα πρακτικό πρόβλημα.
# Έστω ότι κατα την μοντελοποιήση μιας δεξαμενής πετρελαίου, μας αναθέτετε το έργο εύρεσης μια σχέσης μεταξύ την σταθερά ισορροπίας μιας (χημικής) αντίδρασης και της πίεσης, υπό μια σταθερή θερμοκρασία. Τα δεδομένα στον πιο κάτω πίνακα, συσχετίζουν τις σταθερές ισορροπίας (*K-values, Vapor-Liquid Equilibrium (VLE)*) με την πίεση (σε μονάδες ΚPSIA (1000 PSIA), δηλαδή σε μονάδες S.I.; $6.895E+6$ Pascal) και προέκυψαν απο μια πειραματική Pressure volume temperature (PVT) ανάλυση.
#
# | Pressure | K-value |
# |----------|---------|
# | 0.635 | 7.5 |
# | 1.035 | 5.58 |
# | 1.435 | 4.35 |
# | 1.835 | 3.55 |
# | 2.235 | 2.97 |
# | 2.635 | 2.53 |
# | 3.035 | 2.2 |
# | 3.435 | 1.93 |
# | 3.835 | 1.7 |
# | 4.235 | 1.46 |
# | 4.635 | 1.28 |
# | 5.035 | 1.11 |
# | 5.435 | 1.0 |
#
# Αναπαριστούμε τα δεδομένα μας και τα προσεγγίζουμε / προσαρμόζουμε με **6** διαφορετικές συναρτήσεις, αξιοποιώντας τα subplots της Matplotlib (εύκολα αυτό το πρόγραμμα στην Python μετατρέπεται σε Script m-file στην $MATLAB$). Tέλος, υπολογίζουμε και συγκρίνουμε, το συνολικό τετραγωνικό σφάλμα από την κάθε προσαρμογή, ώστα να παρθεί ποιά από τις πιο κάτω αποτελεί την best-fit καμπύλη για αυτό το συγκεκριμένο σύνολο δεδομένων.
#
# 1. Γραμμική Συνάρτηση / Πολυώνυμο βαθμού 1: $g_1(x)=mx+d$
#
#
# 2. Πολυώνυμο βαθμού 2: $g_2(x)=ax^2+bx+c$
#
#
# 3. Πολυώνυμο βαθμού 12 (=13-1, n=12): $g_3(x)=a_{12}x^{12}+ \cdots +a_1x+a_0$
#
#
# 4. Eκθετική συνάρτηση με βάση την σταθερά του Euler: $g_4(x)=re^{lx}$
#
#
# 5. Κλασματική συνάρτηση με γραμμική συνάρτηση στον παρονομαστή: $g_5(x)=\frac{1}{\lambda x+\delta}$
#
#
# 6. Κλασματική συνάρτηση με πολυώνυμο βαθμού 2 στον παρονομαστή: $g_6(x)=\frac{1}{\alpha x^2+\beta x+\gamma}$
#
# +
# %matplotlib inline
# FIX: the config class is InlineBackend ("InlineBachend" was a typo, so the magic had no effect).
# %config InlineBackend.figure_format='svg' #saving the figure in vector graphics format for better quality
import matplotlib.pyplot as plt
import numpy as np
# Pressures (KPSIA) and K-values from the PVT table; arange yields the 13
# pressures 0.635, 1.035, ..., 5.435, matching the 13 K-values.
x = np.arange(0.635, 5.5, 0.4)
y = np.array([7.5, 5.58, 4.35, 3.55, 2.97, 2.53, 2.2, 1.93, 1.7, 1.46, 1.28, 1.11, 1.0], float)
n = np.prod(x.shape)
# The six fits described in the list above.
g_1 = np.polyfit(x,y,1)
g_2 = np.polyfit(x,y,2)
g_3 = np.polyfit(x,y,12)
# This, as we saw at the corresponding section, will need some tweaking first
g_4 = np.polyfit(x,np.log(y),1)
r = np.exp(g_4[1]); l = g_4[0]
g_5 = np.polyfit(x,1/y,1)
g_6 = np.polyfit(x,1/y,2)
gfun_exp = lambda t: r*np.exp(l*t)
gfun_rec1 = lambda t: 1/(g_5[0]*t+g_5[1])
gfun_rec2 = lambda t: 1/(g_6[0]*t**2+g_6[1]*t+g_6[2])
err_1 = y-np.polyval(g_1,x)
err_2 = y-np.polyval(g_2,x)
err_3 = y-np.polyval(g_3,x)
err_4 = y-gfun_exp(x)
err_5 = y-gfun_rec1(x)
err_6 = y-gfun_rec2(x)
print("The total squared errors for each regression are:\n")
print(f"Linear Approximation of LSQ: {sum(err_1**2):.4f} \n")
print(f"Quadratic Approximation of LSQ: {sum(err_2**2):.4f} \n")
print(f"Polyonomial Interpolation (Lagrange Polyonomial): {sum(err_3**2):.4f} \n")
print(f"Exponential Approximation of LSQ: {sum(err_4**2):.4f} \n")
# FIX: where the fitted denominator γx+δ has its root the reciprocal blows up,
# which is a *vertical* asymptote (the original message said "horizontal").
print(f"Linear Reciprocal Approximation of LSQ: {sum(err_5**2):.4f} <---- This is quite large because at approximately \
x=0.222... we have a vertical asymptote \n")
print(f"Quadratic Reciprocal Approximation of LSQ: {sum(err_6**2):.4f} \n")
# We could visualise these to be honest much better with the SymPy module but that could be a bit time consuming
print("The functions in the order we defined them: \n")
print(f"g_1(x)={g_1[0]:.2f}x+{g_1[1]:.2f}\n")
print(f"g_2(x)={g_2[0]:.2f}x^2{g_2[1]:.2f}x+{g_2[2]:.2f}\n")
print(f"g_3(x)={g_3[0]:.3f}x^12{g_3[1]:.2f}x^11+{g_3[2]:.2f}x^10{g_3[3]:.2f}x^9+{g_3[4]:.2f}x^8{g_3[5]:.2f}x^7+\
{g_3[6]:.2f}x^6{g_3[7]:.2f}x^5+{g_3[8]:.2f}x^4{g_3[9]:.2f}x^3+{g_3[10]:.2f}x^2{g_3[11]:.2f}x+{g_3[12]:.2f} \n")
print(f"g_4(x)={r:.2f}*exp({l:.2f}x) \n")
print(f"g_5(x)=1/({g_5[0]:.2f}*x{g_5[1]:.2f}) \n")
print(f"g_6(x)=1/({g_6[0]:.2f}*x^2+{g_6[1]:.2f}*x+{g_6[2]:.2f}) \n")
# Grid used by the subplot grid further below.
t = np.linspace(-0.5,6,num=1000)
#We change the figure size here instantly instead from the fig, Figure instance like we would do in a Python program
fig, axes = plt.subplots(nrows=2,ncols=3, figsize=(14,10))
axes[0,0].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[0,0].plot(t,np.polyval(g_1,t), '-b', linewidth=1.0, label=r'$g_1(x)=mx+d$')
axes[0,0].set_xlabel('Pressure', fontsize=12)
axes[0,0].set_ylabel('K-value', fontsize=12)
axes[0,0].legend(loc='best', fontsize=12)
axes[0,0].set_title(r"Polyonomial of deg 1, $g_1(x)=mx+d$", fontsize=14)
axes[0,0].axhline(0, color='black', lw=2)
axes[0,0].axvline(0, color='black', lw=2)
axes[0,0].set_xlim([-0.5,6])
axes[0,0].set_ylim([-0.5,8])
axes[0,1].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[0,1].plot(t,np.polyval(g_2,t), '-g', linewidth=1.0, label=r'$g_2(x)=ax^2+bx+c$')
axes[0,1].set_xlabel('Pressure', fontsize=12)
axes[0,1].set_ylabel('K-value', fontsize=12)
axes[0,1].legend(loc='best', fontsize=12)
axes[0,1].set_title(r"Polyonomial of deg 2, $g_1(x)=ax^2+bx+c$", fontsize=14)
axes[0,1].axhline(0, color='black', lw=2)
axes[0,1].axvline(0, color='black', lw=2)
axes[0,1].set_xlim([-0.5,6])
axes[0,1].set_ylim([-0.5,8])
axes[0,2].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[0,2].plot(t,np.polyval(g_3,t), '-c', linewidth=1.0, label=r'$g_3(x)=a_{12}x^{12}+ \cdots +a_1x+a_0$')
axes[0,2].set_xlabel('Pressure', fontsize=12)
axes[0,2].set_ylabel('K-value', fontsize=12)
axes[0,2].legend(loc='best', fontsize=12, mode='expand') #Because this is a really big legend box, we expand it horizontally
axes[0,2].set_title(r"Polyonomial of deg 12, $g_3(x)=a_{12}x^{12}+ \cdots +a_1x+a_0$", fontsize=10)
axes[0,2].axhline(0, color='black', lw=2)
axes[0,2].axvline(0, color='black', lw=2)
axes[0,2].set_xlim([-0.5,6])
axes[0,2].set_ylim([-0.5,8])
axes[1,0].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[1,0].plot(t,gfun_exp(t), '-k', linewidth=1.0, label=r'$g_4(x)=re^{lx}$')
axes[1,0].set_xlabel('Pressure', fontsize=12)
axes[1,0].set_ylabel('K-value', fontsize=12)
axes[1,0].legend(loc='best', fontsize=12)
axes[1,0].set_title(r"Exponential function, $g_4(x)=re^{lx}$", fontsize=14)
axes[1,0].axhline(0, color='black', lw=2)
axes[1,0].axvline(0, color='black', lw=2)
axes[1,0].set_xlim([-0.5,6])
axes[1,0].set_ylim([-0.5,8])
axes[1,1].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[1,1].plot(t,gfun_rec1(t), '-m', linewidth=1.0, label=r'$g_5(x)=\frac{1}{\lambda x+\delta}$')
axes[1,1].set_xlabel('Pressure', fontsize=12)
axes[1,1].set_ylabel('K-value', fontsize=12)
axes[1,1].legend(loc='best', fontsize=12)
axes[1,1].set_title(r"Linear Reciprocal Regression, $g_5(x)=\frac{1}{\lambda x+\delta}$", fontsize=14)
axes[1,1].axhline(0, color='black', lw=2)
axes[1,1].axvline(0, color='black', lw=2)
axes[1,1].set_xlim([-0.5,6])
axes[1,1].set_ylim([-0.5,8])
axes[1,2].scatter(x,y, marker='*', c='red', s=80, label="Data")
axes[1,2].plot(t,np.polyval(g_2,t), '-y', linewidth=1.0, label=r'$g_6(x)=\frac{1}{\alpha x^2+\beta x+\gamma}$')
axes[1,2].set_xlabel('Pressure', fontsize=12)
axes[1,2].set_ylabel('K-value', fontsize=12)
axes[1,2].legend(loc='best', fontsize=12)
axes[1,2].set_title(r"Quadratic Reciprocal Regression, $g_6(x)=\frac{1}{\alpha x^2+\beta x+\gamma}$", fontsize=14)
axes[1,2].axhline(0, color='black', lw=2)
axes[1,2].axvline(0, color='black', lw=2)
axes[1,2].set_xlim([-0.5,6])
axes[1,2].set_ylim([-0.5,8])
fig2, axis = plt.subplots(figsize=(10,8))
axis.scatter(x,y, marker='*', c='red', s=100, label="Data")
axis.plot(t,np.polyval(g_1,t), '-b', linewidth=1.0, label=r'$g_1(x)=mx+d$')
axis.plot(t,np.polyval(g_2,t), '-g', linewidth=1.0, label=r'$g_2(x)=ax^2+bx+c$')
axis.plot(t,np.polyval(g_3,t), '-c', linewidth=1.0, label=r'$g_3(x)=a_{12}x^{12}+ \cdots +a_1x+a_0$')
axis.plot(t,gfun_exp(t), '-k', linewidth=1.0, label=r'$g_4(x)=re^{lx}$')
axis.plot(t,gfun_rec1(t), '-m', linewidth=1.0, label=r'$g_5(x)=\frac{1}{\lambda x+\delta}$')
axis.plot(t,np.polyval(g_2,t), '-y', linewidth=1.0, label=r'$g_6(x)=\frac{1}{\alpha x^2+\beta x+\gamma}$')
axis.set_xlabel('Pressure', fontsize=12)
axis.set_ylabel('K-value', fontsize=12)
axis.legend(loc='best', fontsize=12)
axis.set_title("Comparing all the data", fontsize=16)
axis.axhline(0, color='black', lw=2)
axis.axvline(0, color='black', lw=2)
axis.set_xlim([-0.5,6])
axis.set_ylim([-0.5,8])
axis.annotate(r"Vertical asymptote of $g_5(x)=\frac{1}{\lambda x+\delta}$ at appr. $x=0.22\dots$", xy=(0.22,0.5),
xytext=(0.5,0.5), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left', verticalalignment='center') #Plotting an annotation for the asymptote of g_5(x)
fig.tight_layout()
plt.show()
# -
# <hr style="border:2px solid gray"> </hr>
# ## Μικρή συνοπτική ανασκόπιση της Συνεχούς Εκδοχής της Μεθόδου Ελαχίστων Τετραγώνων
#
# Ξεκινήσαμε το Notebook μας, και *σχεδον* το κλείνουμε, με την μελέτη του προβλήματος προσαρμογής κάποιου συνόλου σημείων $\{(x_0,y_0),(x_1,y_1),\dots,(x_{n},y_{n})\}$ από ένα πολυώνυμο (ή εκθετική ή κλασματική συνάρτηση) βαθμού $m \leq n$. Mε άλλα λόγια, προσεγγίζαμε μια συνάρτηση $f \in \mathcal{C}[a,b]$ στα *διακριτά* σημεία $x_0,x_1,\dots,x_m$ μελετώντας την θεωρια και διαφορά παραδείγματα της **Διακριτής** εκδοχής ελαχίστων τετραγώνων. Όπως είχαμε αναφέρει στην αρχή του Notebook, (βλ. \eqref{eq:4} και \eqref{eq:5}), θα μπορούσαμε για τον ορισμό της συνάρτησης σφάλματος να χρησιμοποιούσαμε οποιοδήποτε άλλο μέγεθος θέλουμε για προσδιορισμό του σφάλματος της προσέγγισης από τα δεδομένα. Στην πραγματικότητα, και η επιλογή της προσέγγισης μιας συνάρτησης σε μόνο $n+1$ σημεία είναι κάπως αυθαίρετη. Εάν ήμασταν φιλόδοξοι, (αρκετά!) θα απαιτούσαμε από το πολυώνυμο της Μεθόδου των Ελαχίστων Τετραγώνων να προσαρμοζει / προσεγγίζει, σε υψηλο βαθμό, την συνάρτηση $f$ επί όλου του διαστήματος $[a,b]$. Πόσα σημεία θα διαλέγαμε τότε? (Δηλαδή τι τιμή θα έπαιρνε τότε το $n$? Προφανώς θα ήπρεπε να είναι αρκετά μεγάλη...) Με ποιό τρόπο θα διαλέγαμε τα σημεία $x_i$? Eύλογο θα ήτανε να κατανέμαμε τα σημεία $x_i \in [a,b]$ ομοιόμορφα επί του $[a,b]$ Δηλαδή παίρνουμε την **κανονική διαμέριση** του $[a,b]$ μεγέθους $n+1$;
#
# $$
# \text{Oρίζουμε: } h:=\frac{b-a}{n} \text{ και έστω } x_k=a+kh, \forall k=0,1,\dots,n \nonumber
# $$
#
# Πίσω τώρα στην συνάρτηση σφάλματος, \eqref{eq:4}. Eαν πολλαπλασιάσουμε την συνάρτηση σφάλματος $Α(a_0,a_1,\dots,a_m)$, με την σταθερά $h$, η οποία είναι η απόσταση μεταξύ 2 διαδοχικών σημείων αυτής της κανονικής διαμέρισης του $[a,b]$ και πάρουμε το όριο της καθώς $n \to \infty$, δηλαδή προσπαθούμε να προσεγγίσουμε την συνάρτηση $f$ με το πολυώνυμο $g(x)$ (βαθμού $m$) σε όλο και περισσότερα σημεία, τότε η συνάρτηση $Α(a_0,a_1,\dots,a_m)$ παίρνει την μορφή ενός αθροίσματος Riemann. Δηλαδή το όριο της συνάρτησης $A$, καθώς $n \to \infty$ *προσεγγίζει* ένα ορισμένο ολοκλήρωμα:
#
# $$
# \lim_{n \to \infty} hΑ(a_0,a_1,\dots,a_m) = \lim_{n \to \infty} h \sum_{i=0}^n(g(x_i)-f(x_i))^2 = \int_a^b (g(x)-f(x))^2dx \nonumber
# $$
#
# Άρα σε αυτή την περίπτωση, όπου προσπαθούμε να μειώσουμε το σφάλμα μεταξύ της συνάρτησης και της προσέγγισης, επί άπειρα ομοιόμορφα κατανεμημενα σημεία, προσπαθούμε στην πραγματικότητα να ελαχιστοποιήσουμε ένα ολοκλήρωμα, αντί ένα άθροισμα πλέον. Θα δούμε ότι το πιο πάνω πρόβλημα ανάγεται σε πρόβλημα επίλυσης ενός γραμμικού συστήματος διάστασης, $(m+1) \times (m+1)$ αξιοποιώντας την θεωρία συναρτησιακών χώρων με εσωτερικό γινόμενο.
#
# Είναι γνωστό ότι ο χώρος $\mathcal{C}[a,b]$, όπου είναι το σύνολο όλων των πραγματικών συναρτήσεων επί του $[a,b]$ όπου είναι *συνεχείς*, είναι **γραμμικός (διανυσματικός)**. Αυτό γιατί κάθε γραμμικός συνδυασμός, συνεχών συναρτήσεων επί του $[a,b]$, είναι με την σειρά του συνεχής συνάρτηση επί του $[a,b]$. Ορίζουμε τώρα το **εσωτερικό γινόμενο** 2 **πραγματικών** (για μιγαδικές συναρτήσεις, ορίζουμε το *γενικευμένο εσωτερικό γινόμενο*) συναρτήσεων, $f,g \in \mathcal{C}[a,b]$:
#
# $$
# \langle f,g \rangle = \int_a^b f(x)g(x)dx \tag{6} \label{eq:12}
# $$
#
# Όπως γνωρίζουμε από την Γραμμική Άλγεβρα, ένας γραμμικός (διανυσματικός) χώρος $V$, ονομάζεται *χώρος με εσωτερικό γινόμενο* εάν σε κάθε ζεύγος $(x,y) \in V \times V$ μπορούμε να αντιστοιχίσουμε ακριβώς ένα πραγματικό αριθμό $\langle x,y \rangle$ τέτοιος ώστε, $\forall x,y,z \in V \text{ και } \lambda \in \mathbb{R}$ να ισχύει:
#
# * $\langle \lambda x, y \rangle = \lambda \langle x,y \rangle$
#
# * $\langle x+y,z \rangle = \langle x,z \rangle + \langle y,z \rangle$
#
# * $\langle x,y \rangle = \langle y,x \rangle$
#
# * $\langle x,x \rangle \geq 0 \text{ και } \langle x,x \rangle = 0 \Leftrightarrow x = 0_V$
#
# Άρα θα αποδείξουμε τώρα ότι ικανοιποιούνται οι πιο πάνω 4 ιδιότητες στον γραμμικό χώρο $\mathcal{C}[a,b]$
#
# * $\langle \lambda f,g \rangle \overset{\text{def.}}{=} \int_a^b \lambda f(x)g(x)dx = \lambda \int_a^b f(x)g(x)dx = \lambda \langle f,g \rangle$
#
# * $\langle f+g,h \rangle \overset{\text{def.}}{=} \int_a^b [f(x)+g(x)]h(x)dx = \int_a^b f(x)h(x)dx + \int_a^b g(x)h(x)dx \overset{\text{def.}}{=} \langle f,h \rangle + \langle g,h \rangle$
#
# * $\langle f,g \rangle = \langle g,f \rangle \text{, προφανώς}$
#
# * $\langle f,f \rangle = \int_a^b f^2(x)dx \geq 0 \because f^2(x) \geq 0 \text{. Επίσης, } \langle f,f \rangle = \int_a^b f^2(x)dx = 0 \Leftrightarrow f(x) \equiv 0 \\ \text{ λόγω του ορισμού ολοκληρώματος (απόδειξη με άνω αθροίσματα Riemann και ορισμό ολοκληρωσιμότητας κατά Riemann)}$
#
# Άρα ο διανυσματικός χώρος $\mathcal{C}[a,b]$ είναι χώρος με εσωτερικό γινόμενο. Επίσης ορίζουμε την $L^2$ ή Ευκλείδια νόρμα και στο $\mathcal{C}[a,b]$ ως το μέγεθος:
#
# $$
# ||f||_{L^2} := \langle f,f \rangle^{1/2} = \begin{pmatrix} \int_a^b f^2(x)dx \end{pmatrix}^{1/2} \tag{7} \label{eq:13}
# $$
# ## Eλαχιστοποίηση Ελάχιστων Τετραγώνων με χρήση Απειροστικού Λογισμού
#
# Έστω μια συνάρτηση, $f \in \mathcal{C}[a,b]$, η βασική $L^2$ προσέγγιση της, βασίζεται στην εύρεση ενός πολυωνύμου $g(x) \in \mathbb{R}_m[x]$ όπου ελαχιστοποιεί το σφάλμα $f-g$ στην Ευκλείδια νόρμα επί του $\mathcal{C}[a,b]$. Δηλαδή θέλουμε να βρούμε του συντελεστές του πολυωνύμου $g(x)$ όπου έχουμε:
#
# $$
# \min_{g(x) \in \mathbb{R}_m[x]} ||f-g||_{L^2} \equiv \min_{a_0,a_1,\dots,a_m \in \mathbb{R}} ||f-(a_mx^m+\cdots+a_1x+a_0)||_{L^2} \nonumber
# $$
#
# Έστω το πολυώνυμο που επιτυγχάνει αυτό το ελάχιστο; $\gamma(x)$. Θα δούμε πρώτα την περίπτωση $m=1$ και έπειτα την γενική περίπτωση. Καθώς η συνάρτηση $y=x^2$ είναι **αύξουσα** στο $\mathbb{R}^+$, τότε ισοδύναμα θα ελαχιστοποιήσουμε την συνάρτηση (2 μεταβλητών; $a_0,a_1$), $||f-g||_{L^2}^2$ καθώς:
#
# $$
# \underset{g(x) \in \mathbb{R}_m[x]}{arg \ min} \{||f-g||_{L^2}\} = \underset{g(x) \in \mathbb{R}_m[x]}{arg \ min} \{||f-g||_{L^2}^2\} \nonumber
# $$
#
# Άρα, $\forall g(x) \in \mathbb{R}_m[x]$, η συνάρτηση σφάλματος ορίζεται ώς:
#
# \begin{align}
# A(a_0,a_1):=||f(x)-a_0-a_1x||_{L^2}^2 &= \int_a^b (f(x)-a_0-a_1x)^2dx && \because \eqref{eq:13} \nonumber \\
# &= \int_a^b (f^2(x)-2f(x)(a_0+a_1x)+(a_0^2+2a_0a_1x+a_1^2x^2))dx \nonumber \\
# &= \int_a^b f^2(x)dx-2a_0\int_a^b f(x)dx-2a_1 \int_a^b xf(x)dx+a_0^2(b-a)+a_0a_1(b^2-a^2)+\frac{1}{3}a_1^2(b^3-a^3) \nonumber
# \end{align}
#
# Για να βρούμε το πολυώνυμο $\gamma (x)$, βρίσκουμε όπως και στην διακριτή Μέθοδο Ελαχίστων Τετραγώνων τις τιμές των $a_0,a_1$ όπου μηδενίζονται οι μερικές παραγώγοι $\frac{\partial A}{\partial a_0} \text{ και } \frac{\partial A}{\partial a_1}$. Πρώτα υπολογίζουμε αυτές τις παραγώγους:
#
# $$
# \frac{\partial A}{\partial a_0} = -2\int_a^b f(x)dx+2a_0(b-a)+a_1(b^2-a^2) \\
# \frac{\partial A}{\partial a_1} = -2\int_a^b xf(x)dx+a_0(b^2-a^2)+\frac{2}{3} a_1(b^3-a^3) \nonumber
# $$
#
# Θέτοντας τις πιο πάνω ίσες με το 0, προκύπτει το ακόλουθο ισοδύναμο $2 \times 2$ γραμμικό σύστημα:
#
# $$
# \mathbf{M}\vec{\mathcal{a}}=\vec{d} \Leftrightarrow \tag{8} \label{eq:14}
# \begin{pmatrix}
# 2(b-a) & b^2-a^2 \\
# b^2-a^2 & \frac{2}{3}(b^3-a^3)
# \end{pmatrix}
# \begin{pmatrix}
# a_0 \\
# a_1
# \end{pmatrix}
# =
# \begin{pmatrix}
# 2\int_a^b f(x)dx \\
# 2\int_a^b xf(x)dx
# \end{pmatrix}
# $$
#
# Προφανώς το πιο πάνω σύστημα έχει μοναδική λύση **αν.ν** $b \neq a$. Τότε εάν το πιο πάνω σύστημα έχει την λύση, $(\alpha_0,\alpha_1)$ τότε το ζητούμενο πολυώνυμο είναι το $\gamma (x)=\alpha_1 x+\alpha_0$, εκφραζόμενο ώς προς την κανονική βάση του πολυωνυμικού χώρου, $\mathbb{R}_m[x]$, όπου αποτελεί την **Συνεχής Γραμμική Προσέγγιση Ελαχίστων Τετραγώνων** της $f$ επί του $[a,b]$.
#
# ## Παράδειγμα εφαρμογής: Εφαρμόζουμε την μεθοδολογία μας στην $f(x)=\sin{x}$ επί του διαστήματος $[0,\pi/2]$
#
# Έχουμε ότι $\int_0^{\pi/2} \sin{x}dx = [-\cos{x}]_{x=0}^{x=\pi/2} = 1$ και
# $\int_0^{\pi/2} x\sin{x}dx = [-x\cos{x}]_{x=0}^{x=\pi/2} + \int_0^{\pi/2} \cos{x}dx = 0 + [\sin{x}]_{x=0}^{x=\pi/2} = 1$. Άρα το σύστημα \eqref{eq:14} σε αυτή την περίπτωση είναι το:
# $$
# \begin{pmatrix}
# \pi & \frac{\pi^2}{4} \\
# \frac{\pi^2}{4} & \frac{\pi^3}{12}
# \end{pmatrix}
# \begin{pmatrix}
# a_0 \\
# a_1
# \end{pmatrix}
# =
# \begin{pmatrix}
# 2 \\
# 2
# \end{pmatrix} \nonumber
# $$
#
# Το οποίο επιλύωντας το στην $MATLAB$ με την εντολή **linsolve()**, προκύπτουν οι συντελεστές του προσεγγιστικού πολυωνύμου όπως φαίνεται και πιο κάτω:
#
#
# 
# 
# Από το γράφημα βλέπουμε ότι η προσέγγιση μας είναι ικανοποιητική, όμως το σφάλμα δεν είναι και τόσο μικρο! Για την ακρίβεια, με ένα γρήγορο υπολογισμό απο την $MATLAB$ προκύπτει ότι: $||f-\gamma (x)||_{L^2}= 0.3544$
#
# Στην $MATLAB$:
#
# fun = @(x) (sin(x)-a(2)*x-a(1)).^2;
#
# sqrt(integral(fun,0,pi/2))
#
# ans =
#
# 0.3544
#
# Όπως είδαμε και στην Διακριτή εκδοχή της Μεθόδου ελαχίστων τετραγώνων, αυξάνοντας τον βαθμό του προσεγγιστικού πολυωνύμου; $m$, μειώνεται το σφάλμα, $||f-\gamma (x)||_{L^2}$. Γενικά για να βρούμε το $L^2$-optimal βαθμού $m$ πολυωνύμο της συνεχής εκδοχής της Μεθόδου ελαχίστων τετραγώνων, θα πρέπει να λύσουμε ένα $(m+1) \times (m+1)$ γραμμικό σύστημα.
# ## Γενίκευση της Συνεχής Μεθόδου Ελάχιστων Τετραγώνων για $m \in \mathbb{N}$
#
# Στην γενίκευση της Συνεχόυς μεθόδου Ελ.Τ., θα επιχειρήσουμε κάτι διαφορετικό. Στο προηγούμενο παράδειγμα όπου $m=1$ εκφράσαμε το προσεγγίστικό πολυώνυμο που ελαχιστοποιεί την ποσότητα $||f-g(x)||_{L^2}$, $\gamma (x)$ ως προς την **κανονική βάση** του $\mathbb{R}_1[x]$, $\{1,x\}$. Τώρα θα κατασκευάσουμε μια μέθοδο εύρεσης του προσεγγίστικου πολυωνύμου $g(x) \in \mathbb{R}_m[x]$, εκφράζοντας το ως προς μια αυθαίρετη βάση του πολυωνυμικού χώρου, $\mathbb{R}_m[x]$, καθώς έτσι θα προκύψουν κάποιες σημαντικές αριθμητικές ιδιότητες προς όφελος μας, σε αυτό τον διαφορετικό προσεγγιστικό αλγόριθμο.
#
# Έστω το σύνολο $\{q_0(x),q_1(x),\dots,q_m(x)\}$ αποτελεί μια βάση του $\mathbb{R}_m[x]$. Tότε, εξ ορισμού, προκύπτει ότι, $\forall g(x) \in \mathbb{R}_m[x]$:
#
# $$
# g(x)=\sum_{k=0}^m c_kq_k(x) \ , \ \text{ για κάποια } c_k \in \mathbb{R} \nonumber
# $$
#
# H συνάρτηση σφάλματος παίρνει την μορφή:
#
# \begin{align}
# A(c_0,\dots,c_m):=||f(x)-g(x)||^2_{L^2} &= \int_a^b (f(x) -\sum_{k=0}^m c_kq_k(x))^2dx \nonumber \\
# &= \langle f(x),f(x) \rangle -2\sum_{k=0}^m c_k\langle f(x),q_k(x) \rangle+\sum_{k=0}^m\sum_{l=0}^m c_kc_l\langle q_k(x),q_l(x) \rangle && \because \eqref{eq:12} \text{ and } \eqref{eq:13} \nonumber
# \end{align}
#
# Όπως και πριν υπολογίζουμε και θέτουμε τις μερικές παραγώγους, $\frac{\partial A}{\partial c_0},\dots,\frac{\partial A}{\partial c_m}$ ίσες με 0:
#
# $$
# \frac{\partial A}{\partial c_k}=-2\langle f(x),q_k(x) \rangle+\sum_{l=0}^m 2c_l\langle q_l(x),q_k(x) \rangle, \ \forall k=0,\dots,m \\ \xrightarrow[\text{gives us the m+1 equations}]{setting \ \frac{\partial A}{\partial c_k}=0}
# \langle f(x),q_k(x) \rangle = \sum_{l=0}^m c_l\langle q_l(x),q_k(x) \rangle, \ \forall k=0,\dots,m \nonumber
# $$
#
# Aυτό είναι σε συμπαγές μορφή το γραμμικό σύστημα:
#
# $$
# \begin{pmatrix}
# \langle q_0(x),q_0(x) \rangle & \langle q_0(x),q_1(x) \rangle & \cdots & \langle q_0(x),q_m(x) \rangle \\
# \langle q_1(x),q_0(x) \rangle & \langle q_1(x),q_1(x) \rangle & \cdots & \langle q_1(x),q_m(x) \rangle \\
# \vdots & \vdots & \ddots & \vdots \\
# \langle q_m(x),q_0(x) \rangle & \langle q_m(x),q_1(x) \rangle & \cdots & \langle q_m(x),q_m(x) \rangle
# \end{pmatrix}
# \begin{pmatrix}
# c_0 \\
# c_1 \\
# \vdots \\
# c_m
# \end{pmatrix}
# =
# \begin{pmatrix}
# \langle f(x),q_0(x) \rangle \\
# \langle f(x),q_1(x) \rangle \\
# \vdots \\
# \langle f(x),q_m(x) \rangle
# \end{pmatrix} \nonumber
# $$
#
# Το πιο πάνω συνήθως το συμβολίζουμε για οικονομία, $\mathbf{H} \vec{c} = \vec{b}$ του οποίου η επίλυση του επιφέρει του συντελεστές του πολυωνύμου, $\gamma (x)=c_mq_m(x)+\cdots+c_1q_1(x)+c_0q_0(x)$ **ΕΚΦΡΑΖΌΜΕΝΟ ΣΤΗΝ ΑΥΘΑΊΡΕΤΗ ΒΆΣΗ ΤΟΥ $\mathbb{R}_m[x]$, $\{q_0(x),q_1(x),\dots,q_m(x)\}$** το οποίο ελαχιστοποιεί την $L^2$-νόρμα, $||f-g(x)||_{L^2}$, δηλαδή επιτυγχάνει το $\min_{g(x) \in \mathbb{R}_m[x]} ||f-g||_{L^2}$
#
# Εύλογο ερώτημα είναι τώρα γιατί χρησιμοποιήσαμε, μια αυθαίρετη βάση του $\mathbb{R}_m[x]$, αντί την κανονική βάση, $\{1,x,x^2,\dotsm,x^m\}$? Aς εφαρμόσουμε την γενικευμένη μας μέθοδο στο διάστημα $[0,1]$ με την κανονική βάση; $q_k(x)=x^k$. Τότε:
#
# $$
# \langle q_l(x),q_k(x) \rangle = \langle x^l,x^k \rangle = \int_0^1 x^{k+l}dx = \frac{1}{k+l+1} \nonumber
# $$
#
# Με άλλα λόγια:
#
# $$
# \mathbf{H}:=(h_{kl})_{k,l=0}^m=\frac{1}{k+l+1} \equiv \text{ Τετραγωνικός Πίνακας Hilbert τάξης (m+1).} \nonumber
# $$
#
# Eίναι γνωστοί οι πίνακες Hilbert για την κακή τους στάθμη κατάστασης, με αποτέλεσμα να είναι ειδικά δύσκολο να βρούμε ακριβές λύσεις στα συστήματα μας όπου έχουν ως πίνακα συντελεστών αυτούς, ειδικά όταν ο αριθμός $m$ είναι μεγάλος (μονο για $m=5$ , $\kappa(\mathbf{H})=1.495 \times 10^7$ !!). **Αυτό επιβεβαιώνει το γεγονός ότι, η επιλογή της κανονικής βάσης του $\mathbb{R}_m[x]$, επί το $[0,1]$ είναι κακή ιδέα..**
# <hr style="border:2px solid gray"> </hr> <center><b>ΤΕΛΟΣ NOTEBOOK</b></center> <hr style="border:2px solid gray"> </hr>
| FINAL VERSION - 31_8_20/.ipynb_checkpoints/Method LSQ FINAL-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://github.com/Harvard-IACS/2018-CS109A/blob/master/content/styles/iacs.png?raw=true"> CS109A Introduction to Data Science
#
# ## Lab 2: Pandas and Web Scraping with Beautiful Soup
#
# **Harvard University**<br>
# **Fall 2019**<br>
# **Instructors:** <NAME>, <NAME>, and <NAME><br>
# **Lab Instructors:** <NAME> and <NAME> <br>
# **Authors:** <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# ---
## RUN THIS CELL TO GET THE RIGHT FORMATTING
from IPython.core.display import HTML
def css_styling():
    """Read the course CSS stylesheet and return it wrapped as displayable HTML."""
    # Context manager guarantees the file handle is closed (the original
    # open(...).read() left it dangling).
    with open("../../../styles/cs109.css", "r") as f:
        styles = f.read()
    return HTML(styles)
css_styling()
# # Table of Contents
# <ol start="0">
# <li> Learning Goals </li>
# <li> Loading and Cleaning with Pandas</li>
# <li> Parsing and Completing the Dataframe </li>
# <li> Grouping </li>
# </ol>
# ## Learning Goals
#
# About 6,000 odd "best books" were fetched and parsed from [Goodreads](https://www.goodreads.com). The "bestness" of these books came from a proprietary formula used by Goodreads and published as a list on their web site.
#
# We parsed the page for each book and saved data from all these pages in a tabular format as a CSV file. In this lab we'll clean and further parse the data. We'll then do some exploratory data analysis to answer questions about these best books and popular genres.
#
#
# By the end of this lab, you should be able to:
#
# - Load and systematically address missing values, ancoded as `NaN` values in our data set, for example, by removing observations associated with these values.
# - Parse columns in the dataframe to create new dataframe columns.
# - Use groupby to aggregate data on a particular feature column, such as author.
#
# *This lab corresponds to lectures #1, #2, and #3 and maps on to homework #1 and further.*
# ### Basic EDA workflow
#
# (From the lecture, repeated here for convenience).
#
# The basic workflow is as follows:
#
# 1. **Build** a DataFrame from the data (ideally, put all data in this object)
# 2. **Clean** the DataFrame. It should have the following properties:
# - Each row describes a single object
# - Each column describes a property of that object
# - Columns are numeric whenever appropriate
# - Columns contain atomic properties that cannot be further decomposed
# 3. Explore **global properties**. Use histograms, scatter plots, and aggregation functions to summarize the data.
# 4. Explore **group properties**. Use groupby and small multiples to compare subsets of the data.
#
# This process transforms your data into a format which is easier to work with, gives you a basic overview of the data's properties, and likely generates several questions for you to followup in subsequent analysis.
# ## Part 1: Loading and Cleaning with Pandas
# Read in the `goodreads.csv` file, examine the data, and do any necessary data cleaning.
#
# Here is a description of the columns (in order) present in this csv file:
#
# ```
# rating: the average rating on a 1-5 scale achieved by the book
# review_count: the number of Goodreads users who reviewed this book
# isbn: the ISBN code for the book
# booktype: an internal Goodreads identifier for the book
# author_url: the Goodreads (relative) URL for the author of the book
# year: the year the book was published
# genre_urls: a string with '|' separated relative URLS of Goodreads genre pages
# dir: a directory identifier internal to the scraping code
# rating_count: the number of ratings for this book (this is different from the number of reviews)
# name: the name of the book
# ```
#
# Let us see what issues we find with the data and resolve them.
#
#
#
# ----
#
#
#
# After loading appropriate libraries
#
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
# ### Cleaning: Reading in the data
# We read in and clean the data from `goodreads.csv`.
# +
# First attempt: read the raw CSV; without an explicit header pandas treats
# the first data row as column names.
df = pd.read_csv("../data/goodreads.csv", encoding='utf-8')
# Peek at the first ten rows — note the bogus header.
df.head(10)
# -
# Oh dear. That does not quite seem to be right. We are missing the column names. We need to add these in! But what are they?
#
# Here is a list of them in order:
#
# `["rating", 'review_count', 'isbn', 'booktype','author_url', 'year', 'genre_urls', 'dir','rating_count', 'name']`
#
# <div class="exercise"><b>Exercise</b></div>
# Use these to load the dataframe properly! And then "head" the dataframe... (you will need to look at the read_csv docs)
#
# +
# your code here
# The file has no header row, so supply the column names explicitly.
goodreads_columns = ["rating", 'review_count', 'isbn', 'booktype', 'author_url',
                     'year', 'genre_urls', 'dir', 'rating_count', 'name']
df = pd.read_csv("../data/goodreads.csv", header=None, names=goodreads_columns)
# Peek again: the columns are now labelled correctly.
df.head()
# -
# ### Cleaning: Examing the dataframe - quick checks
#
# We should examine the dataframe to get a overall sense of the content.
#
# <div class="exercise"><b>Exercise</b></div>
# Lets check the types of the columns. What do you find?
# your code here
#######
df.dtypes
#######
# *your answer here*
#
# Notice that `review_count` and `rating_counts` are objects instead of ints, and the `year` is a float!
# There are a couple more quick sanity checks to perform on the dataframe.
print(df.shape)
df.columns
# ### Cleaning: Examining the dataframe - a deeper look
# Beyond performing checking some quick general properties of the data frame and looking at the first $n$ rows, we can dig a bit deeper into the values being stored. If you haven't already, check to see if there are any missing values in the data frame.
#
# Let's see for a column which seemed OK to us.
# Count NaN entries per column.
missing_cols = {col: np.sum(df[col].isna()) for col in df.columns}
missing_cols
#Get a sense of how many missing values there are in the dataframe.
# One count per column, printed in file order.
for col in ["rating", "review_count", "isbn", "booktype", "author_url",
            "year", "genre_urls", "dir", "rating_count", "name"]:
    print(np.sum([df[col].isnull()]))
#Try to locate where the missing values occur
df[df.rating.isnull()]
# 'None' is treated as a regular string, not as missing value
# replace 'None' with np.nan, so it is treated as a missing value
df_cleaned = df.replace('None', np.nan)
# Re-count after normalising 'None' to NaN.
missing_cols_cleaned = {col: np.sum(df_cleaned[col].isna()) for col in df_cleaned.columns}
missing_cols_cleaned
# How does `pandas` or `numpy` handle missing values when we try to compute with data sets that include them?
# We'll now check if any of the other suspicious columns have missing values. Let's look at `year` and `review_count` first.
#
# One thing you can do is to try and convert to the type you expect the column to be. If something goes wrong, it likely means your data are bad.
# Lets test for missing data:
df[df.year.isnull()]
# ### Cleaning: Dealing with Missing Values
# How should we interpret 'missing' or 'invalid' values in the data (hint: look at where these values occur)? One approach is to simply exclude them from the dataframe. Is this appropriate for all 'missing' or 'invalid' values?
# +
#Treat the missing or invalid values in your dataframe
#######
# Keep only rows with a known publication year.
df = df[df.year.notnull()]
# -
# Ok so we have done some cleaning. What do things look like now? Notice the float has not yet changed.
df.dtypes
print(np.sum(df.year.isnull()))
df.shape # We removed seven rows
# <div class="exercise"><b>Exercise</b></div>
#
# Ok so lets fix those types. Convert them to ints. If the type conversion fails, we now know we have further problems.
# your code here
# Cast the count columns and year to int; .astype(int) raises if any value is
# still non-numeric, which doubles as a data-quality check.
df.rating_count=df.rating_count.astype(int)
df.review_count=df.review_count.astype(int)
df.year=df.year.astype(int)
# Once you do this, we seem to be good on these columns (no errors in conversion). Lets look:
df.dtypes
# Sweet!
# Some of the other colums that should be strings have NaN.
# Fill the missing string columns with "" so later string operations don't choke on NaN.
df.loc[df.genre_urls.isnull(), 'genre_urls']=""
df.loc[df.isbn.isnull(), 'isbn']=""
# ## Part 2: Parsing and Completing the Data Frame
#
# We will parse the `author` column from the author_url and `genres` column from the genre_urls. Keep the `genres` column as a string separated by '|'.
#
# We will use panda's `map` to assign new columns to the dataframe.
# Examine an example `author_url` and reason about which sequence of string operations must be performed in order to isolate the author's name.
#Get the first author_url
test_string = df.author_url[0]
test_string
# Last path segment, then the text after the *last* dot.
test_string.split('/')[-1].split('.')[-1]
# +
#Test out some string operations to isolate the author name
# Same idea, but take the piece right after the *first* dot instead.
test_string.split('/')[-1].split('.')[1:][0]
# -
df['author'] = df['author_url'].map(lambda x: x.split('/')[-1].split('.')[1:][0])
# <div class="exercise"><b>Exercise</b></div>
#
# Lets wrap the above code into a function which we will then use
# Write a function that accepts an author url and returns the author's name based on your experimentation above
def get_author(url):
    """Extract the author's name from a Goodreads author URL.

    Takes the URL's final path segment and returns the piece immediately
    after its first dot (the part before the dot is dropped).
    """
    last_segment = url.rsplit('/', 1)[-1]
    return last_segment.split('.')[1]
#Apply the get_author function to the 'author_url' column using '.map'
#and add a new column 'author' to store the names
df['author'] = df.author_url.map(get_author)
# Sanity check: show the first five parsed author names.
df.author[0:5]
# <div class="exercise"><b>Exercise</b></div>
#
# Now parse out the genres from `genre_url`.
#
# This is a little more complicated because there be more than one genre.
#
# +
df.genre_urls.head()
# -
# Quick attempt: strip every literal '/genres/' substring from the
# '|'-separated URL string (this column is rebuilt with a helper further below).
df['genres'] = df['genre_urls'].str.replace('/genres/', '')
# +
# your code here
#Examine some examples of genre_urls
#Test out some string operations to isolate the genre name
test_genre_string=df.genre_urls[0]
genres=test_genre_string.strip().split('|')
for e in genres:
    # Genre name is the last path segment of each URL.
    print(e.split('/')[-1])
"|".join(genres)
# -
# <div class="exercise"><b>Exercise</b></div>
#
# Write a function that accepts a genre url and returns the genre name based on your experimentation above
#
#
def split_and_join_genres(url):
    """Convert a '|'-separated string of genre URLs into '|'-separated genre names.

    Each URL contributes its final path segment; an empty input yields "".
    """
    names = [part.split('/')[-1] for part in url.strip().split('|')]
    return "|".join(names)
# Test your function
split_and_join_genres("/genres/young-adult|/genres/science-fiction")
split_and_join_genres("")
# <div class="exercise"><b>Exercise</b></div>
#
# Use map again to create a new "genres" column
# +
# Rebuild 'genres' properly using the helper defined above.
df['genres']=df.genre_urls.map(split_and_join_genres)
df.head()
# -
# Finally, let's pick an author at random so we can see the results of the transformations. Scroll to see the `author` and `genre` columns that we added to the dataframe.
df[df.author == "Marguerite_Yourcenar"]
# Let us delete the `genre_urls` column.
del df['genre_urls']
# And then save the dataframe out!
# NOTE(review): the input was read from "../data/..." but this writes to
# "data/..." — confirm the intended output directory exists.
df.to_csv("data/cleaned-goodreads.csv", index=False, header=True)
# ---
# drop 'author_url' column
df.drop(columns=['author_url'], inplace=True)
df.head()
# ## Part 3: Grouping
# It appears that some books were written in negative years! Print out the observations that correspond to negative years. What do you notice about these books?
# your code here
df[df.year < 0].name
#These are books written before the Common Era (BCE, equivalent to BC).
# We can determine the "best book" by year! For this we use Panda's `groupby()`. `Groupby()` allows grouping a dataframe by any (usually categorical) variable. Would it make sense to ever groupby integer variables? Floating point variables?
# + jupyter={"outputs_hidden": true}
# number of books by year
dfgb_year = df.groupby('year')['year'].count()
print(dfgb_year[:5])
# Bar chart of the publication-year distribution.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.bar(dfgb_year.index, dfgb_year.values)
ax.set_xlabel('year')
ax.set_xlim([1000, 2020])
ax.set_ylabel('number of books')
ax.set_title('# of books by year')
plt.show();
# -
# best book by year
# FIX: select the columns with a list — GroupBy['a', 'b'] tuple indexing was
# removed in modern pandas. Note that .max() is taken per column
# independently, so the rating/name/author in one row need not belong to the
# same book.
dfgb_year = df.groupby('year')[['rating', 'name', 'author']].max()
print(dfgb_year)
# groupby returns a lazy GroupBy object; selecting a column gives a SeriesGroupBy.
dfgb_author = df.groupby('author')
type(dfgb_author), type(dfgb_author['year'])
# Perhaps we want the number of books each author wrote
dfgb_author.count().head()
# Lots of useless info there. One column should suffice
# ### Exercise:
#
# - Group the dataframe by `author`. Include the following columns: `rating`, `name`, `author`. For the aggregation of the `name` column which includes the names of the books create a list with the strings containing the name of each book. Make sure that the way you aggregate the rest of the columns make sense!
#
# - Create a new column with number of books for each author and find the most prolific author!
# Per-author aggregate: mean rating, plus all book names joined with '|'.
# FIX: list-based column selection and the 'mean' string — GroupBy tuple
# indexing and passing np.mean to .aggregate are removed/deprecated in modern
# pandas (result is identical).
dfgb_author = df.groupby('author')[['rating', 'name']].aggregate({'rating': 'mean', 'name': '|'.join})
authors = dfgb_author.reset_index()
# Turn the joined string back into a list of book titles per author.
authors['name'] = authors['name'].str.split('|')
# Book count per author = length of the title list.
authors['num_books'] = authors['name'].map(len)
authors.sort_values(by='num_books', ascending=False, inplace=True)
authors.head()
###### Before we start : what do we do about these titles where 'name' is unreadable? Try different encodings?
auth_name = 'A_id_al_Qarni'
df[df.author == auth_name].head()
# NOTE(review): .iat[0,8] addresses a column purely by position — fragile
# after columns have been added/dropped; confirm it selects the intended column.
df[df.author == auth_name].iat[0,8].encode('UTF-16')
# let's examine the columns we have
df.columns
# Create the GroupBy table
authors = df.copy()
# FIX: use the 'mean' string — passing np.mean to .agg is deprecated in
# modern pandas (identical result).
authors = authors[['rating','name','author']].groupby('author').agg({'rating' : 'mean',
                                                                    'name' : '|'.join})
authors = authors.reset_index()
authors.head()
# split the column string and make a list of string book names
authors['name'] = authors.name.str.split('|')
authors.head()
# count the books - create new column
len(authors.name[0])
authors['num_books'] = authors['name'].str.len()
authors
# sort for more prolific
authors.sort_values(by='num_books', ascending=False).iloc[0]
# #### Winner is <NAME> with 56 books! OMG!!!
# Perhaps you want more detailed info...
df.groupby('author')['rating', 'rating_count', 'review_count', 'year'].describe()
# You can also access a `groupby` dictionary style.
ratingdict = {}
for author, subset in dfgb_author:
ratingdict[author] = (subset['rating'].mean(), subset['rating'].std())
ratingdict
# <div class="exercise"><b>Exercise</b></div>
#
# Lets get the best-rated book(s) for every year in our dataframe.
#Using .groupby, we can divide the dataframe into subsets by the values of 'year'.
#We can then iterate over these subsets
# your code here
# Walk the per-year groups and report the top-rated book(s) of each year.
for year, books in df.groupby('year'):
    # Every row tied for the year's maximum rating counts as a "best" book.
    winners = books[books.rating == books.rating.max()]
    if winners.shape[0] == 1:
        print(year, winners.name.values[0], winners.rating.values[0])
    else:
        print(year, winners.name.values, winners.rating.values)
| content/labs/lab02/notebook/cs109a_lab2_more_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Node:
    """One node of a binary tree.

    All links and the payload default to None; the tree-building code fills
    them in on each newly allocated instance.
    """
    element = None                # payload stored at this node
    parent_address = None         # reference to the parent node (unused so far)
    left_child_address = None     # reference to the left child
    right_child_address = None    # reference to the right child
import numpy as np
# Draw ten random integers in [10, 150) to insert into the tree
# (np.random.randint's high bound is exclusive; duplicates are possible).
random_numbers = np.random.randint(low =10, high = 150 ,size =10)
random_numbers
# +
# Root of the binary search tree, built up by the insertion loop below.
root_node = None
# -
def create_binary_search_tree(node, element):
    """Insert `element` into the subtree rooted at `node`; return its root.

    A fresh Node is allocated when `node` is None (empty subtree), so callers
    must rebind their reference to the returned node.
    """
    if node is None:  # idiom fix: identity check, not `== None`
        node = Node()
        node.element = element
        return node
    # FIX: BST invariant — strictly smaller keys go left, larger-or-equal keys
    # go right. The original sent larger keys left, producing a mirrored tree
    # that standard BST search logic would not traverse correctly.
    if element < node.element:
        node.left_child_address = create_binary_search_tree(node.left_child_address, element)
    else:
        node.right_child_address = create_binary_search_tree(node.right_child_address, element)
    # NOTE(review): parent_address is never set here — confirm whether later
    # exercises need the parent link maintained.
    return node
# Insert each sample value; rebind root_node because insertion returns the root.
for single_element in random_numbers:
    global root_node
    # NOTE(review): `global` is a no-op at module level — it is only needed
    # inside a function body; harmless here in a notebook cell.
    root_node = create_binary_search_tree(root_node, single_element)
root_node
root_node.element
root_node.left_child_address.element
root_node.right_child_address.element
# NOTE(review): these deep attribute chains depend on the random draw above
# and raise AttributeError whenever a child on the path does not exist.
root_node.left_child_address.left_child_address.right_child_address.left_child_address.left_child_address.element
root_node.left_child_address.right_child_address.element
| Binary_Tree_Day_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="8g4rFjz83o6J"
# # Predict Boston Housing Prices
#
# This python program predicts the price of houses in Boston using Linear Regression.
#
# # Linear Regression
# Linear regression is a linear approach to modeling the relationship between a scalar response (or dependent variable) and one or more explanatory variables (or independent variables).
#
# ## Pros:
# 1. Simple to implement.
# 2. Used to predict numeric values.
#
# ## Cons:
# 1. Prone to overfitting.
# 2. Cannot be used when the relation between independent and dependent variable are non linear.
# 3. Not suitable for data with higher Dimension
# + colab={} colab_type="code" id="SpnJzKHkswHf"
#import the libraries
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# + colab={"base_uri": "https://localhost:8080/", "height": 1040} colab_type="code" id="GS7O0-iAu7pO" outputId="ca2223e3-9c64-4dc2-bc46-351e16d45055"
#Load the Boston Housing data
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — this cell only runs on scikit-learn < 1.2.
from sklearn.datasets import load_boston
boston = load_boston()
# -
#get sample feature names
boston.feature_names
#get the target variable (median house value) for the first five samples
boston.target[:5]
# get all feature values of the single sample at index 5 (the trailing [:] is
# a no-op copy); NOTE(review): the original comment said "first five rows" —
# that would be boston.data[:5]. Confirm which was intended.
boston.data[5][:]
# +
# We will use boston.data , boston.feature_names and boston.target to create a new df
# + colab={} colab_type="code" id="HkwSY2vHv0E_"
#build the feature DataFrame and the target labels from the bunch
df = pd.DataFrame(boston.data, columns = boston.feature_names)
y = pd.DataFrame(boston.target)
# + colab={"base_uri": "https://localhost:8080/", "height": 317} colab_type="code" id="55yPVpfeweVT" outputId="a7e1c471-1012-49b0-ddbe-a39849a18739"
#Get some statistics (transposed so each feature is one row)
df.describe().T
# + colab={} colab_type="code" id="HrZKX4J_wtgg"
#init the model
reg = linear_model.LinearRegression()
# + colab={} colab_type="code" id="xWqM85e6w7G_"
#split with 80 to 20; fixed random_state makes the split reproducible
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.2, random_state=42)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="36zhLfzKxthN" outputId="10bd8f5f-9281-47e0-dec9-d1e2ca2035a3"
#Train our model
reg.fit(x_train, y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="s0vgePv-x5jC" outputId="e5e26f9a-a99e-4e00-dc85-282b5ed35a73"
#Print the coefficients (one per feature)
print(reg.coef_)
# + colab={"base_uri": "https://localhost:8080/", "height": 2856} colab_type="code" id="A8VH_VYsyHF0" outputId="4a0fd61f-3512-48cc-cecd-1fba768881b9"
#predict for xtest
y_pred = reg.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="7DjKafwryYcP" outputId="cd7323b7-0d9b-4439-b654-628c47dbe6c8"
#check the score: root-mean-squared error on the held-out 20%
from sklearn.metrics import mean_squared_error as mse
print(f"Rmse is {np.sqrt(mse(y_test , y_pred))}")
# + colab={"base_uri": "https://localhost:8080/", "height": 1071} colab_type="code" id="PO_z7lVwydrh" outputId="dad60000-ede2-40ee-949e-ee13d195f4bf"
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="MA-oMX41y7sB" outputId="606dcf97-a454-4dc9-f729-28ccc421a1fb"
# -
| projects/.ipynb_checkpoints/BostonHousing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# * 本例展示如何在alpha-mind中使用机器学习模型
#
# * 请在环境变量中设置`DB_URI`指向数据库
# +
# %matplotlib inline
import os
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.api import *
from PyFin.api import *
# -
# ## 数据配置
# ------------
# +
# Rebalance/prediction frequency: every 20 business days.
freq = '20b'
# Stock pool for training and backtest — 'zz800' (presumably CSI 800; verify).
universe = Universe('zz800')
# Number of past periods bundled together as training features.
batch = 8
# Neutralize factor exposures against these industry style factors.
neutralized_risk = industry_styles
# Risk model horizon selector ('short') — see alpha-mind conventions.
risk_model = 'short'
# Factor pre-processing pipeline: winsorize, then standardize.
pre_process = [winsorize_normal, standardize]
# Post-processing applied to model outputs.
post_process = [standardize]
# 0 = retrain from scratch each period (no warm start).
warm_start = 0
# Database connection string must be supplied via the DB_URI env var.
data_source = os.environ['DB_URI']
# Forward-return horizon implied by the rebalance frequency.
horizon = map_freq(freq)
engine = SqlEngine(data_source)
# -
# 我们使用当期的`[ROE, EPS, ETOP]`因子,来尝试预测未来大概一个月以后的收益。
#
# * 训练的股票池为`zz800`;;
# * 因子都经过中性化以及标准化等预处理;
# * 对于线性模型,我们以20个工作日为一个时间间隔,用过去8期的数据作为训练用特征。
# +
# Whether the linear alpha model fits an intercept term.
fit_intercept = True
# Current-period factors used to predict roughly one-month-forward returns.
features = ['ROE', 'EPS', 'ETOP']
# Bundle all the data-preparation settings defined above.
data_meta = DataMeta(freq=freq,
                     universe=universe,
                     batch=batch,
                     neutralized_risk=neutralized_risk,
                     risk_model=risk_model,
                     pre_process=pre_process,
                     post_process=post_process,
                     warm_start=warm_start,
                     data_source=data_source)
# Consistency fix: honor the fit_intercept flag above instead of a hard-coded
# True (behavior is unchanged while the flag is True).
alpha_model = LinearRegression(features=features, fit_intercept=fit_intercept)
composer = Composer(alpha_model=alpha_model, data_meta=data_meta)
# Backtest window.
start_date = '2011-01-01'
end_date = '2019-01-15'
# -
# ## 模型预测
# -----------
ref_date = '2017-01-31'
# Snap the reference date to the nearest Shanghai exchange trading day.
ref_date = adjustDateByCalendar('china.sse', ref_date).strftime('%Y-%m-%d')
# Train at ref_date; composer.train appears to return the fitted model plus
# its training data (x, y) — confirm against alphamind.Composer.
model, x, y = composer.train(ref_date)
# Information coefficient at ref_date (first element of the returned tuple).
print("Testing IC: {0:.4f}".format(composer.ic(ref_date=ref_date)[0]))
# ## 模型对比 (线性回归模型 v.s. Naive - 常数线性模型)
# ------------------
# +
# Naive benchmark: constant, equal (1.0) weights on every factor.
const_model = ConstLinearModel(features=features, weights={f: 1. for f in features})
# Fitted linear-regression alternative, sharing the same feature set.
regression_model = LinearRegression(features=features, fit_intercept=fit_intercept)
const_composer = Composer(alpha_model=const_model, data_meta=data_meta)
regression_composer = Composer(alpha_model=regression_model, data_meta=data_meta)
# -
const_composer.train(ref_date)
regression_composer.train(ref_date)
# `pass` suppresses the notebook's echo of the last expression's value.
pass
# Compare both models' information coefficients on the same date.
print("\nConst. Testing IC: {0:.4f}".format(const_composer.ic(ref_date=ref_date)[0]))
print("Regression Testing IC: {0:.4f}".format(regression_composer.ic(ref_date=ref_date)[0]))
# ## 模型时间序列
# -------------------------
# 通过比较在测试集的结果,我们观察如下两个模型的表现:
#
# * Naive Model:简单的使用因子上期值作为当期值的预测;
# * 线性回归模型:利用过去四期的因子值回归后得到模型,然后用这个模型预测当期值;
# +
# Build the rebalance schedule over the backtest window (SSE trading calendar).
model_dates = makeSchedule(start_date, end_date, freq, 'china.sse')
model_dates = [d.strftime("%Y-%m-%d") for d in model_dates]
# One row per date: model snapshots plus each model's IC on that date.
model_df = pd.DataFrame(columns=['naive', 'regress', 'naive ic.', 'regress ic.'])
test_data = {}
for ref_date in model_dates:
    # Re-train both composers at every rebalance date.
    regression_composer.train(ref_date)
    const_composer.train(ref_date)
    model_df.loc[ref_date, 'naive'] = const_composer[ref_date]
    model_df.loc[ref_date, 'regress'] = regression_composer[ref_date]
    # Reuse the (x, y) produced by the first ic() call so both models are
    # scored on identical data; cache it for the backtest loop below.
    model_df.loc[ref_date, 'naive ic.'], x, y = const_composer.ic(ref_date=ref_date)
    model_df.loc[ref_date, 'regress ic.'], _, _ = regression_composer.ic(ref_date=ref_date, x=x, y=y)
    test_data[ref_date] = (x, y)
    print("\n{1} Const. Testing IC: {0:.4f}".format(model_df.loc[ref_date, 'naive ic.'], ref_date))
    print("{1} Regression Testing IC: {0:.4f}".format( model_df.loc[ref_date, 'regress ic.'], ref_date))
# -
# Mean/std of the IC series summarizes each model's predictive power.
model_df[['naive ic.', 'regress ic.']].agg(['mean', 'std'])
# 在这个例子中,线性回归模型的IC值略微高于Naive模型。
# ## 回测( simple long short strategy)
# --------------------------
# +
# Shenwan (adjusted) level-1 industry classification for portfolio construction.
industry_name = 'sw_adj'
industry_level = 1
industry_names = industry_list(industry_name, industry_level)
# Industry membership matrix and forward ('dx') returns for every model date.
industry_total = engine.fetch_industry_matrix_range(universe, dates=model_dates, category=industry_name, level=industry_level)
dx_return_data = engine.fetch_dx_return_range(universe=universe,
                                              dates=model_dates,
                                              horizon=horizon)
# -
dx_return_data.head()
# +
rets1 = []  # per-period log returns of the naive (const) strategy
rets2 = []  # per-period log returns of the regression strategy
for i, ref_date in enumerate(model_dates):
    py_ref_date = dt.datetime.strptime(ref_date, '%Y-%m-%d')
    # Industry labels and forward returns for this rebalance date only.
    industry_matrix = industry_total[industry_total.trade_date == ref_date]
    dx_returns = dx_return_data[dx_return_data.trade_date == py_ref_date][['code', 'dx']]
    # Keep only codes that have both a forward return and an industry label.
    res = pd.merge(dx_returns, industry_matrix, on=['code']).dropna()
    codes = res.code.values.tolist()
    alpha_logger.info('{0} full re-balance: {1}'.format(ref_date, len(codes)))
    # Features cached during the IC loop above — the same x feeds both models.
    x_test, _ = test_data[ref_date]
    ## naive model
    raw_predict1 = const_composer.predict(ref_date, x=x_test)[0].reindex(codes)
    # Codes without a prediction fall back to the cross-sectional median.
    er1 = raw_predict1.fillna(raw_predict1.median()).values
    # Long-short ('ls') portfolio from expected returns, industry-aware.
    target_pos1, _ = er_portfolio_analysis(er1,
                                           res.industry_name.values,
                                           None,
                                           None,
                                           False,
                                           None,
                                           method='ls')
    target_pos1['code'] = codes
    result1 = pd.merge(target_pos1, dx_returns, on=['code'])
    # assumes dx holds log returns (hence exp-1 to get simple returns before
    # the weighted sum, and log1p-style storage after) — TODO confirm.
    ret1 = result1.weight.values @ (np.exp(result1.dx.values) - 1.)
    rets1.append(np.log(1. + ret1))
    ## regression model (identical pipeline, different expected returns)
    raw_predict2 = regression_composer.predict(ref_date, x=x_test)[0].reindex(codes)
    er2 = raw_predict2.fillna(raw_predict2.median()).values
    target_pos2, _ = er_portfolio_analysis(er2,
                                           res.industry_name.values,
                                           None,
                                           None,
                                           False,
                                           None,
                                           method='ls')
    target_pos2['code'] = codes
    result2 = pd.merge(target_pos2, dx_returns, on=['code'])
    ret2 = result2.weight.values @ (np.exp(result2.dx.values) - 1.)
    rets2.append(np.log(1. + ret2))
    alpha_logger.info('{0} is finished'.format(ref_date))
# +
# Assemble the per-period log-return series for both strategies.
ret_df = pd.DataFrame({'naive': rets1, 'regress': rets2}, index=model_dates)
# Append one trading period past the last model date as a zero-return stub.
ret_df.loc[advanceDateByCalendar('china.sse', model_dates[-1], freq).strftime('%Y-%m-%d')] = 0.
# Shift by one period: a position opened at date t earns its return at t+1.
ret_df = ret_df.shift(1)
ret_df.iloc[0] = 0.
ret_df[['naive', 'regress']].cumsum().plot(figsize=(12, 6),
                                           title='Fixed freq rebalanced: {0}'.format(freq))
# -
| notebooks/Example 12 - Machine Learning Model Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import gensim
import sys
# Make the project's modules/ directory importable from this notebook.
if "../modules/" not in sys.path: sys.path.append ("../modules/")
from semshift import measures, embeddings, alignment
# Word-embedding models trained on two time slices of the corpus
# (0 = earliest slice, 7 = a later slice — presumably time bins; verify).
early_model = embeddings.TrainedModel("/hg191/corpora/legaldata/sc-docs/0.model")
later_model = embeddings.TrainedModel("/hg191/corpora/legaldata/sc-docs/7.model")
# Align the later vector space onto the earlier one so word vectors from both
# models are directly comparable.
aligned = alignment.smart_procrustes_align_gensim(early_model.m, later_model.m)
vocab = set (list (early_model.m.wv.vocab.keys()))
# Per-word change scores — presumably the measures of Hamilton et al.;
# confirm the exact definitions in semshift.measures.
linguistic_drift_scores = {w: measures.HamiltonMeasures.linguistic_drift(early_model.m, later_model.m, w) for w in vocab}
# k nearest neighbours used by the second (neighbourhood-based) measure.
k=25
cultural_shift_scores = {w: measures.HamiltonMeasures.cultural_shift (early_model.m, later_model.m, w, k) for w in vocab}
# +
namefiles = ["/hg191/corpora/legaldata/sc-docs/names.neural", "/hg191/corpora/legaldata/sc-docs/names.tagging"]
# Union the stripped lines of every name file into a single set of names.
names = set()
for path in namefiles:
    with open(path) as handle:
        names.update(line.strip() for line in handle)
# -
# Sanity check: both measures were computed over the same vocabulary.
assert (len (linguistic_drift_scores) == len (cultural_shift_scores))
print (len (linguistic_drift_scores))
# How many vocabulary entries are recognised names (excluded from the output).
print (len(names & set (list (linguistic_drift_scores.keys()))))
print (len (linguistic_drift_scores) - len(names & set (list (linguistic_drift_scores.keys()))))
# Persist each measure as "word<TAB>score", highest score first, skipping names.
with open ("/hg191/corpora/legaldata/sc-docs/vocab.linguistic_drift", "w") as fout:
    for word,score in sorted(linguistic_drift_scores.items(), key=lambda x:x[1], reverse=True):
        if word not in names:
            fout.write ("{0}\t{1}\n".format (word,score))
with open ("/hg191/corpora/legaldata/sc-docs/vocab.cultural_shift", "w") as fout:
    for word,score in sorted (cultural_shift_scores.items(), key=lambda x:x[1], reverse=True):
        if word not in names:
            fout.write ("{0}\t{1}\n".format (word,score))
| notebooks/semantic-changes.ipynb |