code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tutorial
# language: python
# name: tutorial
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# %matplotlib inline
# +
# Load the tab-separated sample dataset.
df = pd.read_csv('../data/sample.csv',
sep = '\t')
df.head()
# -
# Quick sanity checks: dimensions and presence of missing values.
df.shape
df.isnull().values.any()
# +
# Marginal distribution of the target variable.
df['output'].hist(bins = 100)
plt.ylabel('count')
plt.xlabel('output')
# +
# Marginal distribution of the first feature.
df['input0'].hist(bins = 100)
plt.ylabel('count')
plt.xlabel('input0')
# +
# Marginal distribution of the second feature.
df['input1'].hist(bins = 100)
plt.ylabel('count')
plt.xlabel('input1')
# +
# Empirical CDF of the target (cumulative, normalised histogram).
df['output'].hist(cumulative = True,
density = 1,
bins = 100)
plt.ylabel('fraction')
plt.xlabel('output')
# +
# Empirical CDF of input0.
df['input0'].hist(cumulative = True,
density = 1,
bins = 100)
plt.ylabel('fraction')
plt.xlabel('input0')
# +
# Empirical CDF of input1.
df['input1'].hist(cumulative = True,
density = 1,
bins = 100)
plt.ylabel('fraction')
plt.xlabel('input1')
# -
# Pairwise scatter plots: each feature vs the target, then feature vs feature.
ax1 = df.plot.scatter(x = 'input0',
y = 'output')
ax2 = df.plot.scatter(x = 'input1',
y = 'output')
ax3 = df.plot.scatter(x = 'input0',
y = 'input1')
# Correlation matrix, printed and also shown as a heat map.
print(df.corr())
plt.matshow(df.corr())
plt.show()
# +
# 3D scatter of both inputs against the output to inspect the joint relationship.
from mpl_toolkits.mplot3d import Axes3D
# FIX: `plt.figure().gca(projection='3d')` was deprecated in matplotlib 3.4 and
# removed in 3.6; `add_subplot(projection='3d')` is the supported replacement.
threedee = plt.figure().add_subplot(projection='3d')
threedee.scatter(df['input0'], df['input1'], df['output'])
threedee.set_xlabel('input0')
threedee.set_ylabel('input1')
threedee.set_zlabel('output')
plt.show()
# +
import statsmodels.api as sm
# Simple linear regression of output on input0.
# NOTE: without sm.add_constant the model has NO intercept (fit through the
# origin); the final cell below adds the constant term.
X = df['input0']
y = df['output']
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
model.summary()
# +
# Same no-intercept fit using input1 only.
X = df['input1']
y = df['output']
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
model.summary()
# +
# Multiple regression on both inputs, still without an intercept.
X = df[['input0','input1']]
y = df['output']
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
model.summary()
# +
# Multiple regression WITH an intercept (add_constant appends a 'const' column).
X = df[['input0', 'input1']]
y = df['output']
X = sm.add_constant(X)
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
model.summary()
# -
| exploring-data/src/data_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Функция правдоподобия
# +
import matplotlib.pyplot as plt
from scipy.stats import gamma, norm, uniform
from functools import reduce
import numpy as np
import matplotlib
matplotlib.rc('font', size=22)
matplotlib.rc('animation', html='html5')
bigfontsize=20
labelfontsize=16
tickfontsize=16
plt.rcParams.update({'font.size': bigfontsize,
'axes.labelsize':labelfontsize,
'xtick.labelsize':tickfontsize,
'ytick.labelsize':tickfontsize,
'legend.fontsize':tickfontsize,
})
# -
# ## Доверительные интервалы
# Допустим что наша выборка состоит всего из одного значения $a$ случайной величины сгенерированого из нормального распределения $\mathcal{N}(3, 1)$.
# Также допустим что мы знаем что эта случайная величина сгенерирована из нормального распределения с шириной 1 и неизвестным средним. Функция правдоподобия для среднего значения будет иметь вид
#
# $$\mathcal{L}(\mu) = \frac{1}{\sqrt{2\pi}}e^{-\frac{(\mu-a)^2}{2}} $$
#
# (мы подставили известное значение выборки $x=a$ и известную ширину $\sigma = 1$)
#
# Соответственно, отрицательный логарифм функции правдоподобия:
#
# $$-\log\mathcal{L}(\mu) = \frac{(\mu-a)^2}{2} + const $$
#
# +
# Sampling a single random value from the true model N(3, 1)
a = norm(3,1).rvs(1)
# Defining plotting axes: left = the sample, right = the NLL scan over mu
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting the (one-element) sample
axs[0].hist(a, bins = 10, range = [1, 5])
# Plotting NLL
## Setting grid of candidate mu values
x = np.linspace(1, 5, 100)
axs[0].set_ylabel("Counts")
axs[0].set_xlabel("x")
## Calculating negative log likelihood of the sample for each candidate mu
y = [-np.log(norm(i, 1).pdf(a)) for i in x]
## Subtracting minimum of the NLL so the curve shows -delta log L
y = y - min(y)
axs[1].plot(x,y)
axs[1].grid(True)
axs[1].set_xlabel("$\mu$")
axs[1].set_ylabel("$-\Delta log \mathcal{L}$")
# -
# Минимум NLL находится в точке $\mu = a$ - как и следовало ожидать.
# Давайте рассмотрим как будут выглядеть распределения плотности вероятности для нескольких значений $\mu$ и оценим насколько правдоподобно полученное значение случайной величины.
# +
# Getting grid on x
x_val = np.linspace(1, 5, 100)
pdf_2 = norm(2, 1) # PDF for mu = 2
pdf_3 = norm(3, 1) # PDF for mu = 3
pdf_4 = norm(4, 1) # PDF for mu = 4
pdf_a = norm(a[0], 1) # PDF for mu at MLE
# Defining plottin axis
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting variable
axs[0].hist(a, bins = 100, range = [1, 5])
axs[0].plot(x_val, pdf_2.pdf(x_val), label = "$\mu = 2$, L = {:.3f}".format(pdf_2.pdf(a[0])))
axs[0].plot(x_val, pdf_3.pdf(x_val), label = "$\mu = 3$, L = {:.3f}".format(pdf_3.pdf(a[0])))
axs[0].plot(x_val, pdf_4.pdf(x_val), label = "$\mu = 4$, L = {:.3f}".format(pdf_4.pdf(a[0])))
axs[0].plot(x_val, pdf_a.pdf(x_val), label = "MLE $\mu$, L = {:.3f}".format(pdf_a.pdf(a[0])))
axs[0].legend()
# Plotting NLL
## Setting grid of x values
x = np.linspace(1, 5, 100)
axs[0].set_ylabel("Counts")
axs[0].set_xlabel("x")
## Calculating negative log likelihood
y = [-np.log(norm(i, 1).pdf(a)) for i in x]
## Subtracting minimum of the NLL
y = y - min(y)
axs[1].plot(x,y)
axs[1].axvline(2, color = "tab:orange")
axs[1].axvline(3, color = "tab:green")
axs[1].axvline(4, color = "tab:red")
axs[1].axvline(a[0], color = "tab:purple")
axs[1].grid(True)
axs[1].set_xlabel("$\mu$")
axs[1].set_ylabel("$-\Delta log \mathcal{L}$")
# -
# Для наглядности, заменим NLL на непосредственно функцию правдоподобия.
# +
# Getting grid on x
x_val = np.linspace(1, 5, 100)
pdf_2 = norm(2, 1) # PDF for mu = 2
pdf_3 = norm(3, 1) # PDF for mu = 3
pdf_4 = norm(4, 1) # PDF for mu = 4
pdf_a = norm(a[0], 1) # PDF for mu at MLE
# Defining plottin axis
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting variable
axs[0].hist(a, bins = 100, range = [1, 5])
axs[0].plot(x_val, pdf_2.pdf(x_val), label = "$\mu = 2$, L = {:.3f}".format(pdf_2.pdf(a[0])))
axs[0].plot(x_val, pdf_3.pdf(x_val), label = "$\mu = 3$, L = {:.3f}".format(pdf_3.pdf(a[0])))
axs[0].plot(x_val, pdf_4.pdf(x_val), label = "$\mu = 4$, L = {:.3f}".format(pdf_4.pdf(a[0])))
axs[0].plot(x_val, pdf_a.pdf(x_val), label = "MLE $\mu$, L = {:.3f}".format(pdf_a.pdf(a[0])))
axs[0].legend()
# Plotting NLL
## Setting grid of x values
x = np.linspace(1, 5, 100)
axs[0].set_ylabel("Counts")
axs[0].set_xlabel("x")
## Calculating negative log likelihood
y = [norm(i, 1).pdf(a) for i in x]
axs[1].plot(x,y)
axs[1].axvline(2, color = "tab:orange")
axs[1].axvline(3, color = "tab:green")
axs[1].axvline(4, color = "tab:red")
axs[1].axvline(a[0], color = "tab:purple")
axs[1].grid(True)
axs[1].set_xlabel("$\mu$")
axs[1].set_ylabel("$\mathcal{L}$")
# -
# Пусть теперь наша выборка состоит более чем из одного значения. Давайте посмотрим на функцию правдоподобия в этом случае.
# +
# Samplinng random value
a = norm(3,1).rvs(100)
# Defining plottin axis
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting variable
axs[0].hist(a, bins = 10, range = [1, 5])
# Plotting NLL
## Setting grid of x values
x = np.linspace(1, 5, 100)
axs[0].set_ylabel("Counts")
axs[0].set_xlabel("x")
## Calculating negative log likelihood
y = [-np.sum(np.log(norm(i, 1).pdf(a))) for i in x]
## Subtracting minimum of the NLL
y = y - min(y)
axs[1].plot(x,y)
axs[1].grid(True)
axs[1].set_xlabel("$\mu$")
axs[1].set_ylabel("$-\Delta log \mathcal{L}$")
# -
# Обратите внимание на то что масштаб оси $y$ на правом графике изменился. Чтобы сделать это более явным, давайте посмотрим на функции правдоподобия полученные на разных размерах выборки
# +
# Samplinng random value
a = norm(3,1).rvs(1000)
# Defining plottin axis
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting variable
axs[0].hist(a[:1], bins = 10, range = [1, 5], density = True, label = "1 значение", alpha = 0.2)
axs[0].hist(a[:10], bins = 10, range = [1, 5], density = True, label = "10 значений", alpha = 0.4)
axs[0].hist(a[:100], bins = 10, range = [1, 5], density = True, label = "100 значений", alpha = 0.6)
# Plotting NLL
## Setting grid of x values
x = np.linspace(1, 5, 100)
axs[0].set_ylabel("Counts")
axs[0].set_xlabel("x")
## Calculating negative log likelihood
### for 1 event
y_1 = [-np.sum(np.log(norm(i, 1).pdf(a[:1]))) for i in x]
y_1 = y_1 - min(y_1)
### for 10 events
y_10 = [-np.sum(np.log(norm(i, 1).pdf(a[:10]))) for i in x]
y_10 = y_10 - min(y_10)
### for 100 events
y_100 = [-np.sum(np.log(norm(i, 1).pdf(a[:100]))) for i in x]
y_100 = y_100 - min(y_100)
axs[1].plot(x,y_1, label = "1 значение")
axs[1].plot(x,y_10, label = "10 значений")
axs[1].plot(x,y_100, label = "100 значений")
axs[1].legend()
axs[1].grid(True)
axs[1].set_xlabel("$\mu$")
axs[1].set_ylabel("$-\Delta log \mathcal{L}$")
# -
# С ростом числа событий функция правдоподобия становится все более крутой. __Правдоподобие набора данных убывает при удалении параметра от MLE тем быстрее, чем больше данных в нашей выборке.__
# ## Случай нескольких параметров
# В предыдущем примере мы фиксировали один из параметров модели - $\sigma = 1$. Но в общем случае модель может содержать несколько параметров. Соответственно, функция правдоподобия будет функцией от нескольких переменных.
# Рассмотрим предыдущий пример, но в этот раз мы оставим параметр $\sigma$ нашей модели так же свободным.
# +
# Samplinng random value
a = norm(3,1).rvs(100)
# Function to caltculate NLL
def nll_func(mu, sigma, d):
    """Negative log-likelihood of the sample `d` under a Normal(mu, sigma) model."""
    log_probs = np.log(norm(mu, sigma).pdf(d))
    return -np.sum(log_probs)
# Setting grid of x values
mus = np.linspace(2, 4, 101)
sigmas = np.linspace(0.7, 1.5, 101)
# Building NLL
nll = np.zeros((101,101))
for i,mu in enumerate(mus):
for j,sigma in enumerate(sigmas):
nll[j,i] = nll_func(mu, sigma, a)
nll = nll - np.min(nll)
# -
pos_max = np.where(nll == np.min(nll))
mle_mu = mus[pos_max[1]]
mle_sigma = sigmas[pos_max[0]]
# +
# Defining plotting axes: left = sample + fitted PDFs, right = 2-D NLL scan
fig, axs = plt.subplots(1, 2, figsize = (10, 5))
# Plotting variable
axs[0].hist(a, bins = 10, range = [1, 5], density = True)
x = np.linspace(1, 5, 100)
# BUG FIX: the two labels were swapped -- norm(3, 1) is the TRUE generating
# model, norm(mle_mu, mle_sigma) is the best (MLE) fit.
axs[0].plot(x, norm(3,1).pdf(x),'r',label = 'True model')
axs[0].plot(x, norm(mle_mu,mle_sigma).pdf(x),'-',label = 'Best model')
axs[0].legend()
# Plotting the 2-D NLL scan (x = mu, y = sigma, colour = NLL)
lscan = axs[1].contourf(mus, sigmas,nll, levels = 20)
# Plotting MLE estimate
axs[1].plot(mle_mu,
            mle_sigma,
            '-ro', linewidth=2, markersize=12, label = "Best estimate")
axs[1].plot(3,
            1,
            '-P', color = "orange", linewidth=5, markersize=12, label = "True value")
fig.colorbar(lscan, ax=axs[1])
axs[1].legend()
axs[1].set_xlabel("$\mu$")
# BUG FIX: the y axis of the scan is sigma; the NLL value is the colour scale.
axs[1].set_ylabel("$\sigma$")
# -
# ## Маргинализация параметров
# В случае если нам не интересны функции правдоподобия всех параметров, мы можем маргинализовать (проинтегрировать) ненужные параметры:
#
# $$ p(\theta_1|\mathcal{D}) = \int p(\theta_1,\theta_2|\mathcal{D}) d \theta_2 $$
# Допустим, что в предыдущем примере нам интересна только функция правдоподобия ширины распределения из предыдущего примера. Маргинализуем параметр $\mu$
nll = nll.T
# +
dsigma = (sigmas.max()-sigmas.min())/len(sigmas)
nll_mu = np.zeros((101))
for i,mu in enumerate(mus):
likelihood = np.sum(np.exp(-nll[i]))*dsigma
nll_mu[i] = -np.log(likelihood)
nll_mu = nll_mu - min(nll_mu )
# -
# Сравним полученную функцию правдоподобия со сканом двумерной функции правдоподобия с параметром $\sigma$ зафиксированным в максимуме.
# BUG FIX: the two legend labels were swapped -- `nll_mu` is the NLL with
# sigma marginalised out (built in the cell above), while
# `nll.T[pos_max[0]][0]` is the 1-D slice of the 2-D NLL at the MLE sigma.
plt.plot(mus, nll_mu, label = "Marginalised $\sigma$")
plt.plot(mus, nll.T[pos_max[0]][0], label = "Scan at MLE $\sigma$")
plt.legend()
# ## Метод моментов
# Помимо минимизации NLL, можно использовать метод моментов: мы находим моменты выборки и приравниваем их к моментам распределения. Из полученных уравнений мы находим параметры распределения.
# Для некоторых распределений (гауссиана) это работает, а для некоторых - нет.
# Рассмотрим метод моментов примененный к равномерному распределению:
# $$ p(y|\theta) = \frac{1}{\theta_2-\theta_1}I(\theta_1\leq y\leq \theta_2) $$
# Первые два момента распределения записыаются как:
# $$\mu_1 = \frac{1}{2}(\theta_1+\theta_2)$$
# $$\mu_2 = \frac{1}{3}(\theta_1^2+\theta_1\theta_2+\theta_2^2)$$
# Отсюда:
# $$\theta_1 = \mu_1-\sqrt{3(\mu_2-\mu_1^2)}$$
# $$\theta_2 = \mu_1+\sqrt{3(\mu_2-\mu_1^2)}$$
# Давайте определим функции для определения этих параметров из мометов:
#
# +
def theta_1(mu_1, mu_2):
    """Lower bound of a uniform distribution from its first two raw moments."""
    half_width = np.sqrt(3.0 * (mu_2 - mu_1 ** 2))
    return mu_1 - half_width
def theta_2(mu_1, mu_2):
    """Upper bound of a uniform distribution from its first two raw moments."""
    half_width = np.sqrt(3.0 * (mu_2 - mu_1 ** 2))
    return mu_1 + half_width
# -
# Рассмотрим случайную выборку из равномерного распределения и оценим моменты.
# BUG FIX: a stray `vals[vals<theta_1(mu_1, mu_2)]` expression used to sit
# here, BEFORE `vals` is defined -- a NameError when the script is run
# top-to-bottom. It had no effect (bare expression), so it was removed.
# +
# vals = np.array([0, 0, 0, 0, 1])
vals = uniform(0, 1).rvs(5)
# Sample estimates of the first two raw moments.
mu_1 = np.mean(vals)
mu_2 = np.mean(vals**2)
plt.hist(vals, range = [0, 1], bins = 20)
# Highlight sample values that fall OUTSIDE the moment-estimated support.
plt.hist(vals[vals<theta_1(mu_1, mu_2)], range = [0, 1], bins = 20)
plt.hist(vals[vals>theta_2(mu_1, mu_2)], range = [0, 1], bins = 20)
plt.axvline(theta_1(mu_1, mu_2), label = "Low = {:.3f}".format(theta_1(mu_1, mu_2)))
plt.axvline(theta_2(mu_1, mu_2), label = "Hi = {:.3f}".format(theta_2(mu_1, mu_2)))
plt.legend()
# -
# При оценке методом моентов может сложиться ситуация при которой существующие данные не могли бы быть произведены из данного распределения. В то же время мтожно показать, что оценка методом наибольшего правдоподобия дает минимальные и максимальные значения выборки как границы модели.
# ## Регуляризация
# Оценка методом наибольшего правдоподобия позволяет найти модель которая наилучшим образом опиывает данные. Но такая модель не всегда оптимальна.
# Рассмотрим выборку из распределения Бернулли
# Посмотрим как будет меняться оценка вероятности положительного исхода методом максимального прадоподобия.
# +
from scipy.stats import bernoulli
p_true = 0.1
x = bernoulli(p_true).rvs(100)
MLE = [np.mean(x[:i+1]) for i, j in enumerate(x)]
plt.plot(np.arange(0, 100),x,"o")
plt.plot(np.arange(0, 100),MLE,label = "MLE")
plt.axhline(p_true, ls = '--',c = 'r')
plt.xlabel("Номер эксперимента")
plt.legend()
# -
# С течением времени оценка приближается к истинному значению, но сначала она принимет экстремальное значение и не меняется до тех пор пока не появится хотя бы одно положительное значение.
# Интуитивно, мы бы ожидали что оценка будет меняться с каждым новым измерением.
# В данном случае мы можем улучшить поведение модели добавив небольшое число к числителю и знаменателю модели.
MAP = [(x[:i+1].sum()+1)/(i+1+2) for i, j in enumerate(x)]
plt.plot(np.arange(0, 100),x,"o")
plt.plot(np.arange(0, 100),MLE,label = "MLE")
plt.plot(np.arange(0, 100),MAP,label = "MAP")
plt.axhline(p_true, ls = '--',c = 'r')
plt.xlabel("Номер эксперимента")
plt.legend()
# Оценка MAP дает лучший результат в начале и сходится к MLE с набором данных.
# ## Апостериорная вероятность
# Итак, функция правдоподобия описывает вероятность получить наши значения выборки $\mathcal{D}$ при условии того что модель задается какими-то параметрами $\theta$:
#
# $$ \mathcal{L}(\mathcal{D}|\theta) $$
# Используя формулу Байеса, можно преобразовать функцию правдоподобия в функцию плотности вероятности значений модели при условии наблюдения нашего набора данных:
#
# $$p(\theta|\mathcal{D}) = \frac{\mathcal{L}(\mathcal{D}|\theta)p(\theta)}{p(\mathcal{D})}\equiv \frac{\mathcal{L}(\mathcal{D}|\theta)p(\theta)}{\int p(\theta^\prime)\mathcal{L}(\mathcal{D}|\theta^\prime)d\theta^\prime}$$
# $p(\theta)$ называется априроным распределением и отражает наши представления о параметре до начала измерений, а $\int p(\theta^\prime)\mathcal{L}(\mathcal{D}|\theta^\prime)d\theta^\prime$ назыается маргинальной функцие правдоподобия и по сути яваляется нормировкой. Вероятность $p(\theta|\mathcal{D})$ называется апостериорной вероятностью.
# Максимизация апостериорной вероятности дает оценку параметров с учетом априорного знания - в отличие от наибольшего правдоподобия.
# Как и функция правдоподобия, априорная вероятность максимизируется через минимизацию отрицательного логарифма:
#
# $$ - \log p(\theta|\mathcal{D}) = -\log\mathcal{L}(\mathcal{D}|\theta)-\log p(\theta)+\log p(\mathcal{D}) \equiv -\log\mathcal{L}(\mathcal{D}|\theta)-\log p(\theta)+C$$
# Разница с NLL заключается в дополнительном слагаемом $-\log p(\mathcal{D})$ и константе
# Вернемся к распределению Бернулли из предыдущего примера. Можно показать, что выбранная регуляризация превращает оценку максимального правдоподобия в оценку максимальной апостериорной вероятности с априорным бета распределением параметра $p$: $\mathcal{B}(2,2)$
| Lecture5-Likelihood-and-Posterior.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img style="float: left; margin: 30px 15px 15px 15px;" src="https://pngimage.net/wp-content/uploads/2018/06/logo-iteso-png-5.png" width="300" height="500" />
#
#
# ### <font color='navy'> Simulación de procesos financieros.
#
# **Nombres:** <NAME> y <NAME>.
#
# **Fecha:** 10 de febrero del 2021.
#
# **Expediente** : if726622, .
# **Profesor:** <NAME>.
#
# # Tarea 2: Clase 4
# ### Ejercicio 1:
#
# 1. Imagine una rutina contable utilizada en una librería. Funciona en una lista con sublistas, que se ven así:
# 
# Escriba un programa en Python, que devuelve una lista con tuplas de tamaño 2. Cada tupla consiste en el número de pedido y el costo total del pedido. El producto debe aumentarse en 10€ si el valor de la orden es inferior a 100,00 €.
# Escribe un programa Python usando unicamente las funciones **lambda y map**.
#
# `orders = [ ["34587", "Learning Python, Mark Lutz", 4, 40.95],
# ["98762", "Programming Python, Mark Lutz", 5, 56.80],
# ["77226", "Head First Python, <NAME>", 3,32.95],
# ["88112", "Einführung in Python3, <NAME>", 3, 24.99]]`
#
# 2. La misma librería, pero esta vez trabajamos en una lista diferente. Las sublistas de nuestras listas se ven así:
# [número de orden, (número de artículo, cantidad, precio por unidad), ... (número de artículo, cantidad, precio por unidad)]
#
# `orders = [[1, ("5464", 4, 9.99), ("8274",18,12.99), ("9744", 9, 44.95)],
# [2, ("5464", 9, 9.99), ("9744", 9, 44.95)],
# [3, ("5464", 9, 9.99), ("88112", 11, 24.99)],
# [4, ("8732", 7, 11.99), ("7733",11,18.99), ("88112", 5, 39.95)] ]`
#
# Escriba un programa que devuelva una lista de dos tuplas que tengan la información de (número de pedido, cantidad total de pedido). Utilice la función `Reduce`. Recuerde el precio del pedido del producto debe aumentarse en 10€ si el valor de la orden es inferior a 100,00 €.
#
# 3. Resolver los anteriores ejercicios usando pandas (DataFrame)
# ### Solución Alumno 1
# Código de solución
# Ejercicio 1
# esta es la lista que recibe el programa
orders1 = [ ["34587", "Learning Python, <NAME>", 4, 40.95],
["98762", "Programming Python, <NAME>", 5, 56.80],
["77226", "Head First Python, <NAME>", 3,32.95],
["88112", "Einführung in Python3, <NAME>", 3, 24.99]]
# solución: (número de pedido, total), con recargo de 10 EUR cuando el total
# es inferior a 100 EUR.
# BUG FIX: el umbral original `> 101` recargaba también los totales en
# (100, 101]; el enunciado pide el recargo solo cuando el valor es < 100 EUR.
# (Las respuestas esperadas de abajo no cambian.)
sol_ej1 = list(map(lambda o: [o[0],o[-1]*o[-2]] if (o[-1]*o[-2] >= 100) else [o[0],(o[-1]*o[-2])+10],orders1))
sol_ej1
# ### Respuesta planteada en el ejercicio usando (markdown)
# La respuesta es:
#
# `[['34587', 163.8],
# ['98762', 284.0],
# ['77226', 108.85000000000001],
# ['88112', 84.97]]`
# +
# Ejercicio 2
from functools import reduce
import pandas as pd
import numpy as np
# Cada sublista: [número de orden, (artículo, cantidad, precio), ...]
orders2 = [[1, ("5464", 4, 9.99), ("8274",18,12.99), ("9744", 9, 44.95)],
[2, ("5464", 9, 9.99), ("9744", 9, 44.95)],
[3, ("5464", 9, 9.99), ("88112", 11, 24.99)],
[4, ("8732", 7, 11.99), ("7733",11,18.99), ("88112", 5, 39.95)] ]
# Total por artículo con recargo de 10 EUR.
# NOTE(review): `> 101` parece que debería ser `>= 100` según el enunciado
# ("inferior a 100,00 EUR") -- confirmar contra la salida esperada.
tot = lambda o: o[-1]*o[-2] if (o[-1]*o[-2] > 101) else (o[-1]*o[-2])+10
prod = lambda a,b: a*b
# NOTE(review): `reduce(prod, ...)` MULTIPLICA los totales por artículo;
# el enunciado pide la cantidad total del pedido (una suma). La respuesta
# publicada abajo coincide con el producto, así que se conserva tal cual.
r = [[tot(orders2[i][j]) for j in range(1,len(orders2[i]))] for i in range(len(orders2))]
result = [[orders2[i][0],reduce(prod,r[i])]for i in range(len(orders2))]
result
# -
# -
# ### Respuesta planteada en el ejercicio usando (markdown)
# La respuesta es:
#
# `[[1, 4725810.37476],
# [2, 40418.5905],
# [3, 27464.259899999997],
# [4, 3919302.280575]]`
#
sol1 = pd.DataFrame(list(map(lambda o: [o[0],o[-1]*o[-2]] if (o[-1]*o[-2] > 101) else[o[0],(o[-1]*o[-2])+10],orders1)),columns=["Order Number","Total"])
sol1
sol2 = pd.DataFrame(result, columns=["Order number","Total"])
sol2.set_index("Order number",inplace=True)
sol2.round(3)
# 4. Use filter to eliminate all words that are shorter than 4 letters from a list of words
# `list='the notion of a lambda function goes all the way back to the origin of computer science'`
lista='the notion of a lambda function goes all the way back to the origin of computer science'
# BUG FIX: the task asks to ELIMINATE words shorter than 4 letters, i.e. keep
# words with len >= 4. The original predicate `len(x) <= 4` kept the short
# words instead and contradicted the expected answer shown below.
sol_filter = list(filter(lambda x: len(x) >= 4,lista.split()))
sol_filter
# ### Respuesta planteada en el ejercicio usando (markdown)
# La respuesta es:
#
# `['notion',
# 'lambda',
# 'function',
# 'goes',
# 'back',
# 'origin',
# 'computer',
# 'science']`
#
# 5. Use filter to determine the percentage of Fahrenheit temperatures in a list are within the range 32 to 80
import numpy as np
# Fixed seed so the drawn temperatures (and the percentage) are reproducible.
np.random.seed(55555)
# 30 Fahrenheit temperatures drawn uniformly from [25, 110).
temperatures = np.random.uniform(25,110,30)
print(temperatures)
# Keep only temperatures strictly inside (32, 80).
# NOTE(review): "within the range 32 to 80" may be intended as inclusive --
# confirm whether the bounds should be >= / <=.
x = list(filter(lambda x: x>32 and x<80,temperatures))
# Matching values plus the percentage of the sample they represent.
print(x, len(x)*100/len(temperatures),"%")
# ### Respuesta planteada en el ejercicio usando (markdown)
# La respuesta es:
#
# `[35.97387961379455,
# 63.211195349983264,
# 73.90387696517755,
# 36.982512376595906,
# 68.75395211338406,
# 45.86132066984196,
# 36.58307493022292,
# 56.322083650154724,
# 33.9621930206693,
# 45.29071317816652,
# 74.86289938282341,
# 66.51091382521807,
# 46.831316244170544,
# 44.55796498523391]`
#
# 6. Use reduce to find the lower left corner (minimum x and minimum y value) for a list of point locations
# > **Hint**: Explore the command `np.minimum.reduce` [link](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduce.html)
np.random.seed(55555)
# 30 random integer (x, y) points in [0, 100).
point_xy = np.random.randint(0,100,[30,2])
print(point_xy)
# Column-wise minimum: (min x, min y) = lower-left corner of the bounding box.
x = np.minimum.reduce(point_xy)
x
# ### Respuesta planteada en el ejercicio usando (markdown)
# La respuesta es:
#
# `array([0, 2])`
#
| ProyectoConjunto_DPintor_RMartinez.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import glob
import random
import shutil
from tempfile import gettempdir
from IPython.display import clear_output, Image
from torchvision.datasets.folder import pil_loader
from model import Net
from utils import pil_to_model_tensor_transform
import consts
# UTKFace constants
MALE = 0
FEMALE = 1
WHITE = 0
BLACK = 1
ASIAN = 2
INDIAN = 3
OTHER = 4
# User constants
dset_path = os.path.join('.', 'data', 'UTKFace', 'unlabeled')
tempdir = gettempdir()
# -
# Select which of the two pre-trained checkpoints to use (z = 50 or 100).
consts.NUM_Z_CHANNELS = 100 # we have two trained models, with 50 and 100
net = Net()
# NOTE(review): both checkpoint paths are machine-specific Windows paths --
# they must be adapted before this notebook can run elsewhere.
load_path = {50: r".\trained_models\2018_09_08\01_44\epoch76", 100: r"C:\Users\Mattan\Downloads\epoch_200_no_tf"}[consts.NUM_Z_CHANNELS]
net.load(load_path, slim=True) # slim tells the net to load only the encoder and generator
# +
# Game 3: Kids
# Set the attributes of the first random person you want to test
age_1 = 30
gender_1 = FEMALE
race_1 = WHITE
# UTKFace filenames start with "<age>_<gender>_<race>"; pick a random match.
image_path_1 = random.choice(glob.glob(os.path.join(dset_path, '{a}_{g}_{r}*'.format(a=age_1, g=gender_1, r=race_1))))
Image(filename=image_path_1) # Will select and show a person with the attributes you selected
# +
# Game 3: Kids
# Set the attributes of the second random person you want to test
age_2 = 35
gender_2 = MALE
race_2 = BLACK
image_path_2 = random.choice(glob.glob(os.path.join(dset_path, '{a}_{g}_{r}*'.format(a=age_2, g=gender_2, r=race_2))))
Image(filename=image_path_2) # Will select and show a person with the attributes you selected
# -
# Convert both images to model tensors and render the generated "kids" strip.
image_tensor_1 = pil_to_model_tensor_transform(pil_loader(image_path_1))
image_tensor_2 = pil_to_model_tensor_transform(pil_loader(image_path_2))
Image(filename=net.kids(image_tensors=(image_tensor_1, image_tensor_2), length=10, target=tempdir))
| .ipynb_checkpoints/kids_game-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="plx25OnQ4vUe"
# ## Importing Required Modules
# + colab={"base_uri": "https://localhost:8080/"} id="KS0WygoF4vUf" outputId="3293de69-6523-46f4-c8b7-58d6d9dcfca8"
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import NearestNeighbors
import tensorflow as tf
print(tf.__version__)
# + [markdown] id="oT5MD5Y54vUi"
# ## Fetching the Dataset as a Pandas DataFrame
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="hHBLXeTH4vUj" outputId="75774b33-c1aa-4685-8f88-54d7c924ec6b"
anime_df = pd.read_csv('anime.csv')
anime_df.head()
# -
anime_df['type'].unique()
anime_df_new = pd.read_csv('anime_2.csv')
anime_df_new.head()
# ### A second dataset to merge with the first one for a wider range of data and a more accurate recommendation system
anime_df_new = anime_df_new[['anime_id' , 'title' , 'type' , 'episodes', 'genre' , 'score' , 'members']]
anime_df_new.rename(columns = {'title' : 'name' , 'score' : 'rating'} , inplace = True)
anime_df_new.head()
anime_df_new = anime_df_new[anime_df_new['type'] != 'Unknown']
anime_df_new['type'].unique()
# + tags=[]
print(len(anime_df_new.index))
print(len(anime_df.index))
# Add rows from the second dataset whose anime_id is not already present.
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# select all missing rows and concatenate once (also avoids quadratic
# row-by-row growth). drop_duplicates keeps the first occurrence of an id,
# matching the original loop's "skip ids already added" behaviour.
new_rows = anime_df_new[~anime_df_new['anime_id'].isin(anime_df['anime_id'])]
new_rows = new_rows.drop_duplicates(subset='anime_id')
anime_df = pd.concat([anime_df, new_rows], ignore_index=True)
# -
print(len(anime_df.index))
# + [markdown] id="klLiS8c845wo"
# Analysing all the types in dataset , replacing the nan type is required
# + colab={"base_uri": "https://localhost:8080/"} id="IiM3m1Qu46C2" outputId="adfd47f1-51ee-4776-94c8-bba9d89f6284"
types = anime_df['type'].unique().tolist()
print("Number of Types : {} \nTypes: \n".format(len(types)) , types)
# + [markdown] id="CJ_Z0ut74vUm"
# ## Tuning the Dataset
# First , We will fill all the empty/'Unknown data' data cells with data individually.
# Like for the type movie , the episodes should be 1 so I will fill all the empty episode cells of type movie with 1
# Then , for Hentai also it has mostly 1 ep so I will do the same thing for hentai as well.
#
# Getting the number of None type datacells in each column
# + colab={"base_uri": "https://localhost:8080/"} id="81XdEt2I4vUn" outputId="c00eb677-8e74-48ba-be77-a42e99b6fb3d"
anime_df.isnull().sum()
# + id="0H5N36Su4vUq"
# Hentai titles with an unknown episode count default to 1 episode.
# BUG FIX: the original `anime_df.loc[mask] = 1` overwrote EVERY column of
# the matching rows with 1 (destroying name/genre/rating); only the
# 'episodes' column should be assigned.
anime_df.loc[(anime_df['genre'] == 'Hentai') & (anime_df['episodes'] == 'Unknown'), 'episodes'] = 1
# + id="SBGpGClc7ajS"
# Movies have exactly one episode.
# BUG FIX: 'Movie' is a value of the 'type' column (see the `types` list
# above), not of 'genre' -- the original filter on 'genre' matched nothing.
anime_df.loc[(anime_df['type'] == 'Movie') & (anime_df['episodes'] == 'Unknown'), 'episodes'] = 1
# + [markdown] id="vkymjqsG85Aj"
# As you can see below songs also have only one episode so we will replace the empty datacells in episodes column of Music type by 1
# + colab={"base_uri": "https://localhost:8080/", "height": 136} id="3yncjtJR8um_" outputId="c2bb7404-4692-4f45-a449-32699423e87a"
anime_df[anime_df['type'] == 'Music'].head(3)
# + id="IkhrYaN68lZC"
anime_df.loc[(anime_df['type'] == 'Music') & (anime_df['episodes'] == 'Unknown'), 'episodes'] = 1
# + [markdown] id="6S9z8Ev49o6T"
# #### Replacing the remaining 'Unknown' data cells in episodes by nan type
# + id="TIQtDqBo9I1h"
anime_df['episodes'] = anime_df['episodes'].map(lambda x: np.nan if x == 'Unknown' else x)
# + [markdown] id="ZmG1KhHt-Gpb"
# Filling all the nan types by the episodes column's median
# + id="ah4_TeoI-M-k"
anime_df['episodes'].fillna(anime_df['episodes'].median() , inplace = True)
# + colab={"base_uri": "https://localhost:8080/"} id="_gu0e_OG9V8Z" outputId="46cf31ad-6228-4538-e04c-9ad1d0f5428b"
anime_df['episodes'].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="h6LASxQ8-dm3" outputId="5f3cd9f9-d5c1-4c56-fe87-396d6cc65b1f"
anime_df.info()
# + [markdown] id="tGfGZVMA_5tL"
# Changing data types of columns
# + colab={"base_uri": "https://localhost:8080/"} id="fU9FH3l6Zx_x" outputId="8ba08fd7-f7d4-44a5-f3fb-373d50414707"
# BUG FIX: the original line used `==` (a comparison whose result was
# discarded), so the rename never happened here. Assign the replaced
# column back with `=`.
anime_df['name'] = anime_df['name'].replace(['Itadaki! Seieki♥'] , 'Itadaki! Seieki')
# + id="-RIoM2pP-9gR"
anime_df['members'] = anime_df['members'].astype(float)
# + id="F9DCBva5OKuS"
anime_df['rating'].fillna(anime_df['rating'].median() , inplace = True)
# + colab={"base_uri": "https://localhost:8080/"} id="uNLH1PJYBVlx" outputId="193e2259-07dc-47e6-cc0c-bd0772406f6d"
anime_df.info()
# + id="DecvE-hfbKCf"
anime_df.replace(to_replace ="Itadaki! Seieki♥",
value ='Itadaki! Seieki' , inplace = True)
# + [markdown] id="Lo5qxqnib5MZ"
# ## Creating another dataset with Relevant Features and one hot encoding using get_dummies on genre and type column
# + id="qeurd7N_BoDW"
anime_data = pd.concat([anime_df['genre'].str.get_dummies(sep = ','),
pd.get_dummies(anime_df['type']),
anime_df['episodes'],
anime_df['rating'],
anime_df['members'],
] , axis = 1)
# + [markdown] id="7ZN41lU5cIkT"
# ## How the Dataset looks after one hot encoding
# + colab={"base_uri": "https://localhost:8080/", "height": 278} id="UkjraYyzEkn4" outputId="f4035af8-467d-4760-e118-a380590ddae9"
anime_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="K4Moh-vUF1u_" outputId="228fb206-40dc-4e9c-94d2-a45abc433e46"
scaler = MinMaxScaler(feature_range=(0,1))
anime_data_scaled = scaler.fit_transform(anime_data)
np.round(anime_data_scaled , decimals = 2)
anime_data_scaled
# + colab={"base_uri": "https://localhost:8080/"} id="4DghhTEsJKhf" outputId="8527f5ed-dadb-4965-c6fd-35a47d7f0718"
anime_data_scaled.shape
# + [markdown] id="UwtqeoPWcRen"
# ## Using Nearest Neighbours unsupervised learning with ball_tree algorithm and 6 nearest neighbors
#
# Here the nearest neighbours will represent the animes that are similar to an anime
# + [markdown] id="jF8bJuBkc6_N"
# ### Fitting Data
# + id="PALou5sQKT0h"
nn_bt = NearestNeighbors(n_neighbors=6 , algorithm='ball_tree').fit(anime_data_scaled)
# + [markdown] id="1g3IUsq6c8f8"
# ### Taking Distances and Indices of the 5 closest Animes and itself (5+1 = 6) from it
# + id="paAkXEacNE4m"
distances , indices = nn_bt.kneighbors(anime_data_scaled)
# + colab={"base_uri": "https://localhost:8080/"} id="JfSpz3d4PqwI" outputId="c19de547-7420-46e6-90d1-8720cfa05188"
print("Distances shape : {} \nIndices Shape: {} \nDistances data overview : {} \nIndices data overview : {}".format(distances.shape , indices.shape , distances[0], indices[0]))
# + [markdown] id="KI2WcGjDdIIz"
# ### Creating a function that would return the index of the anime when provided its name
# + colab={"base_uri": "https://localhost:8080/"} id="1uGwkxF3SLfT" outputId="32c4f52d-098a-4d1c-8b9c-404ee1ea494c"
def get_index(name):
    """Return the row index of the anime titled `name`.

    Returns the string "Could not find the Anime" when the title is absent
    (kept for backward compatibility with the calling cells).
    """
    try:
        return anime_df[anime_df['name'] == name].index.tolist()[0]
    except IndexError:
        # Empty match -> `.tolist()` is [] and `[0]` raises IndexError.
        # FIX: the original bare `except:` also swallowed unrelated errors
        # (typos, NameError); only the "not found" case should be handled.
        return "Could not find the Anime"
get_index('Steins;Gate')
# + [markdown] id="7Eqmvw8IdSrJ"
# ### Creating a function that would return the name of the anime when provided its index
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="KKYg0y3VUizJ" outputId="d3f8221c-919a-4b8b-c3e9-528b2154dda5"
def get_name(id):
    """Return the title of the anime at row index `id`.

    Returns the string "Could not find the Anime" when the index is absent
    (kept for backward compatibility with the calling cells).
    """
    try:
        return anime_df[anime_df.index == id]['name'].tolist()[0]
    except IndexError:
        # No matching row -> `[0]` raises IndexError. FIX: narrowed from the
        # original bare `except:`, which also hid unrelated errors.
        return "Could not find the Anime"
get_name(2)
# + [markdown] id="NBTg2Z_hdVIl"
# ## Creating a function that prints all the relevant data about the anime
# + colab={"base_uri": "https://localhost:8080/"} id="U1Fyqj7cVnFT" outputId="23bfdc97-70ce-43d8-9e50-604188ec77e0"
def get_info(id):
    """Pretty-print the stored attributes of the anime at row index `id`."""
    row = anime_df[anime_df.index == id]
    # Labels are reproduced exactly (including the trailing space on the
    # last one) so the printed output is unchanged.
    fields = [("Name :", 'name'),
              ("Rating :", 'rating'),
              ("Number of Episodes :", 'episodes'),
              ("Genre :", 'genre'),
              ("Type :", 'type'),
              ("Number of Members : ", 'members')]
    for label, column in fields:
        print(label, row[column].tolist()[0])
get_info(3)
# + [markdown] id="Lo93vnoPda6f"
# ## Creating a function that will recommend the user anime based on an anime the user likes
# + colab={"base_uri": "https://localhost:8080/"} id="AMlOX22pSq8Z" outputId="8775db85-a6d1-4892-a4d8-8bd1982277c0"
def recommend_me(name = None , id = None):
    """Print nearest-neighbour recommendations for an anime.

    Accepts either the anime's name or its catalog index. Previously an
    unknown name made get_index return its sentinel string, which then
    crashed the `indices[id]` lookup; now we bail out with a message.
    """
    if name is not None:
        id = get_index(name)
        if id == "Could not find the Anime":
            print("Could not find the Anime")
            return
    print("Here are some of the Animes you would like to watch :")
    # indices[id][0] is the queried anime itself, so skip it.
    for index in indices[id][1:]:
        print("------------------------------------------------------------------")
        get_info(index)
recommend_me('Shingeki no Kyojin')
# + [markdown] id="l4JU-_dYYz9h"
# ## Run this cell if you want Recommendations
# + colab={"base_uri": "https://localhost:8080/"} id="pP6p_tOiYqfj" outputId="691dbae4-055a-4528-e8a0-88de0e444b46"
anime_name = input("Enter Name of an anime you like : ")
# A failed lookup returns an empty DataFrame rather than raising, so the
# original try/except could never fire; test for emptiness explicitly and
# only recommend when the title actually exists.
if anime_df[anime_df['name'] == anime_name].empty:
    print("NO SUCH ANIME FOUND")
else:
    recommend_me(anime_name)
# + [markdown] id="betl2Yv7XUEI"
# ## Trying the recommendation system on Dragon Ball Z (shounen , action)
# + colab={"base_uri": "https://localhost:8080/"} id="orcGtArTU_Db" outputId="ee2f1569-65f7-4278-ccdc-561dcb759a8e"
# Spot-check recommendations for a few titles from different genres.
recommend_me('Dragon Ball Z')
# + [markdown] id="HbbinMIUYFRW"
# ## Trying recommendation system on Haikyuu!! (Sports)
# + colab={"base_uri": "https://localhost:8080/"} id="e-ZQpAs6XhQp" outputId="a4c0a5d9-200f-412e-b50f-40f8c07c1cbf"
recommend_me('Haikyuu!!')
# + [markdown] id="FXWVAtkWbowR"
# ## Trying recommendation system on Hentai genre
# + colab={"base_uri": "https://localhost:8080/"} id="b_T4cmFNYR6o" outputId="81d02ef6-b7ea-48f3-ebf1-5358e7dfeff3"
recommend_me('Itadaki! Seieki')
| anime_recommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import pymc3 as pm
import theano.tensor as tt
import theano
from scipy.stats import norm, invgamma
from tqdm.notebook import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import logging
# Keep pymc3 progress output visible but silence theano's noisy warnings.
logger = logging.getLogger("pymc3")
logger.setLevel(logging.INFO)
logger = logging.getLogger("theano")
logger.setLevel(logging.ERROR)
np.random.seed(12345)  # reproducible draws
# -
# ### Generate Ideal Data
n_days = 400   # days in the simulated season
n_teams = 32   # teams in the simulated league
gpd = 8        # presumably games per day -- TODO confirm against the data generator
lv_df = pd.read_csv('results/lv_df.csv')   # per-day latent values (plotted later as the "true" series)
games = pd.read_csv('results/games.csv')   # simulated game results
# ### Model 1: Daily Updates, No Deltas
# +
def get_m1_posteriors(trace, n_teams=None):
    """Fit normal approximations to the posterior samples of each parameter.

    Parameters
    ----------
    trace : mapping of parameter name -> posterior sample array (e.g. a
        PyMC3 trace). 'h' and 'i' are 1-D; 'o' and 'd' are (draws, teams).
    n_teams : int, optional
        Number of teams. Inferred from the width of trace['o'] when
        omitted, which keeps the call backward compatible while removing
        the dependency on the module-level ``n_teams`` global.

    Returns
    -------
    dict mapping parameter name to ``[mu, sigma]`` (arrays of per-team
    values for 'o' and 'd').
    """
    if n_teams is None:
        n_teams = np.asarray(trace['o']).shape[1]
    posteriors = {}
    h_mu, h_sigma = norm.fit(trace['h'])
    posteriors['h'] = [h_mu, h_sigma]
    i_mu, i_sigma = norm.fit(trace['i'])
    posteriors['i'] = [i_mu, i_sigma]
    o_mu, o_sigma = [], []
    d_mu, d_sigma = [], []
    for t in range(n_teams):
        mu, sigma = norm.fit(trace['o'][:, t])
        o_mu.append(mu)
        o_sigma.append(sigma)
        mu, sigma = norm.fit(trace['d'][:, t])
        d_mu.append(mu)
        d_sigma.append(sigma)
    posteriors['o'] = [np.array(o_mu), np.array(o_sigma)]
    posteriors['d'] = [np.array(d_mu), np.array(d_sigma)]
    # Unified o and d variances (kept for reference; currently unused)
    #o_σ_α, _, o_σ_β = invgamma.fit(trace['o_σ'])
    #posteriors['o_σ'] = [o_σ_α, o_σ_β]
    #d_σ_α, _, d_σ_β = invgamma.fit(trace['d_σ'])
    #posteriors['d_σ'] = [d_σ_α, d_σ_β]
    return posteriors
def fatten_priors(prev_posteriors, init_priors, ratio):
    """Widen posteriors into the next iteration's priors, without mutating inputs.

    Each sigma is multiplied by `ratio`, capped at ``ratio *`` the
    corresponding starting-prior sigma. The original used a shallow dict
    copy, so it silently mutated the ``[mu, sigma]`` lists inside
    `prev_posteriors`; copying each entry avoids that side effect.
    """
    priors = {key: list(value) for key, value in prev_posteriors.items()}
    for key in ('h', 'i', 'o', 'd'):
        priors[key][1] = np.minimum(priors[key][1] * ratio,
                                    init_priors[key][1] * ratio)
    #priors['o_σ'][1] = priors['o_σ'][1] * ratio
    #priors['d_σ'][1] = priors['d_σ'][1] * ratio
    return priors
def m1_iteration(obs_data, priors):
    """Run one Bayesian updating step of model 1.

    Builds the Poisson scoring / Bernoulli home-win model with the supplied
    normal priors, samples the posterior over the games in `obs_data`, and
    returns the normal-approximated posteriors (see get_m1_posteriors).
    """
    idₕ = obs_data['idₕ'].to_numpy()    # home team ids
    sₕ_obs = obs_data['sₕ'].to_numpy()  # home scores
    idₐ = obs_data['idₐ'].to_numpy()    # away team ids
    sₐ_obs = obs_data['sₐ'].to_numpy()  # away scores
    hw_obs = obs_data['hw'].to_numpy()  # home-win indicators
    with pm.Model() as model:
        # Global model parameters
        h = pm.Normal('h', mu=priors['h'][0], sigma=priors['h'][1])
        i = pm.Normal('i', mu=priors['i'][0], sigma=priors['i'][1])
        # Team-specific poisson model parameters
        o_star = pm.Normal('o_star', mu=priors['o'][0], sigma=priors['o'][1], shape=n_teams)
        d_star = pm.Normal('d_star', mu=priors['d'][0], sigma=priors['d'][1], shape=n_teams)
        # Center offence/defence around zero so the scale stays identified.
        o = pm.Deterministic('o', o_star - tt.mean(o_star))
        d = pm.Deterministic('d', d_star - tt.mean(d_star))
        # Expected scoring rates; home side gets the home advantage h.
        λₕ = tt.exp(i + h + o[idₕ] - d[idₐ])
        λₐ = tt.exp(i + o[idₐ] - d[idₕ])
        # OT/SO home win bernoulli model parameter
        # P(T < Y), where T ~ a, Y ~ b: a/(a + b)
        pₕ = λₕ/(λₕ + λₐ)
        # Likelihood of observed data
        sₕ = pm.Poisson('sₕ', mu=λₕ, observed=sₕ_obs)
        sₐ = pm.Poisson('sₐ', mu=λₐ, observed=sₐ_obs)
        hw = pm.Bernoulli('hw', p=pₕ, observed=hw_obs)
        trace = pm.sample(500, tune=500, cores=2, progressbar=True)
    posteriors = get_m1_posteriors(trace)
    return posteriors
# -
start_day = 170  # first day to fit; the pickled priors cover the season up to here
starting_priors = pickle.load(open('results/starting_priors.pkl', 'rb'))
# +
window_sizes = [1] #[30, 60, 90]
fattening_factors = [1.5] #, 1.001, 1.01]
for ws in window_sizes:
    for f in fattening_factors:
        print('ws:{} and f:{}'.format(ws, f))
        priors = starting_priors.copy()
        iv1_rows = []
        for t in tqdm(range(start_day, n_days+1)):
            # Fit on the sliding window of the most recent `ws` days of games.
            obs_data = games[((games['day'] <= t) & (games['day'] > (t - ws)))]
            posteriors = m1_iteration(obs_data, priors);
            # Flatten all [mu, sigma] pairs into a single row for this day.
            iv_row = posteriors['h'] + posteriors['i'] + list(posteriors['o'][0]) + list(posteriors['o'][1]) + \
                list(posteriors['d'][0]) + list(posteriors['d'][1])
            iv1_rows.append(iv_row)
            # Widen the posteriors before reusing them as the next day's priors.
            priors = fatten_priors(posteriors, starting_priors, f)
        col_names = ['h_μ', 'h_σ', 'i_μ', 'i_σ'] + ['o{}_μ'.format(i) for i in range(n_teams)] + \
            ['o{}_σ'.format(i) for i in range(n_teams)] + ['d{}_μ'.format(i) for i in range(n_teams)] + \
            ['d{}_σ'.format(i) for i in range(n_teams)]
        iv1_df = pd.DataFrame(iv1_rows, columns=col_names)
        iv1_df['day'] = list(range(start_day, start_day+len(iv1_rows)))
        iv1_df.to_csv('results/m1_{}d_f{}_iv_df.csv'.format(ws, f))
# -
# Column layout matching the rows produced above: one μ/σ pair per parameter.
# Recomputed here so the cells below can run without re-running the grid loop.
col_names = ['h_μ', 'h_σ', 'i_μ', 'i_σ'] + ['o{}_μ'.format(i) for i in range(n_teams)] + \
    ['o{}_σ'.format(i) for i in range(n_teams)] + ['d{}_μ'.format(i) for i in range(n_teams)] + \
    ['d{}_σ'.format(i) for i in range(n_teams)]
def plot_parameter_estimate(param):
    """Plot the true series (blue) against the estimated posterior mean (red)
    for one parameter, shading the +/-1 sd and +/-2 sd bands."""
    mu = iv1_df[param + '_μ']
    sd = iv1_df[param + '_σ']
    plt.figure(figsize=(10, 6))
    plt.title('Estimates for: ' + param)
    plt.plot(lv_df['day'], lv_df[param], color='blue')
    plt.plot(iv1_df['day'], mu, color='red')
    # Draw the wider band first; the 1-sd band overlays it more opaquely.
    plt.fill_between(iv1_df['day'], mu + 2 * sd, mu - 2 * sd, color='red', alpha=0.2)
    plt.fill_between(iv1_df['day'], mu + sd, mu - sd, color='red', alpha=0.2)
    plt.show()
def plot_multi_parameter_estimate(param_list, y_lim=(-0.6, 0.6), grid_lines=0.10):
    """Plot a square grid of true-vs-estimated curves, one subplot per parameter.

    Blue: true latent values (lv_df); red: posterior mean (iv1_df) with
    shaded +/-1 and +/-2 sd bands. `y_lim` fixes a common y-axis range and
    `grid_lines` sets the spacing of the dotted horizontal guide lines.
    (Also removes the unused ``imgsize`` local from the original.)
    """
    rows = int(np.ceil(np.sqrt(len(param_list))))
    fig, axs = plt.subplots(rows, rows, figsize=(15, 15))
    ax = axs.flatten()
    for i, param in enumerate(param_list):
        # Hoist the repeated column lookups for this parameter.
        mu = iv1_df[param + '_μ']
        sd = iv1_df[param + '_σ']
        ax[i].set_title('Estimates for: ' + param)
        ax[i].plot(lv_df['day'], lv_df[param], color='blue')
        ax[i].plot(iv1_df['day'], mu, color='red')
        # Wider band first so the 1-sd band overlays it more opaquely.
        ax[i].fill_between(iv1_df['day'], mu + 2 * sd, mu - 2 * sd, color='red', alpha=0.2)
        ax[i].fill_between(iv1_df['day'], mu + sd, mu - sd, color='red', alpha=0.2)
        for y in np.arange(y_lim[0] + grid_lines, y_lim[1], grid_lines):
            ax[i].hlines(y, 1, n_days, colors='k', linestyles='dotted', alpha=0.4)
        ax[i].set_ylim(y_lim[0], y_lim[1])
    fig.tight_layout()
    plt.show()
# Visual check of the sequential fits against the known latent values.
plot_parameter_estimate('i')
plot_parameter_estimate('h')
plot_multi_parameter_estimate(['o{}'.format(i) for i in range(32)])
plot_multi_parameter_estimate(['d{}'.format(i) for i in range(32)])
# A few individual teams for a closer look.
plot_parameter_estimate('o4')
plot_parameter_estimate('o19')
plot_parameter_estimate('d10')
plot_parameter_estimate('d2')
| _drafts/modeling-the-nhl-better/Hockey Model Ideal Data RC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["hide-input"]
import numpy as np
# +
#print("\n\033[1m Provided are:\033[0m\n") # \033[1m \033[0m bold font, \n - new line
K_a = 8.5e-05 # m/s, Hydraulic conductivity
Z_bot = 120 # m, aquifer bottom
Z_top = 150 # m, aquifer top
h_a = 139 # m, hydraulic head in aquifer
# interim calculation
A_t = Z_top-Z_bot # m, Aquifer thickness
A_wt = h_a - Z_bot # m, water_table level
S_t = min(A_t, A_wt) # m, saturated thickness (water table below the top caps it)
# result
# Head below the aquifer top means the water table lies inside the aquifer,
# i.e. the aquifer is unconfined.
if h_a<Z_top:
    print("It is Unconfined Aquifer \n")
else:
    print("It is Confined Aquifer \n")
T_a = K_a*S_t # m^2/s, transmissivity = conductivity x saturated thickness
print("The required transmissivity is {0:1.2e}".format(T_a), "m\u00b2/s")
# +
print("\n\033[1m Provided are:\033[0m\n")
Q = 9 # m^3/min, Given discharge
r1 = 8 # m, distance from well to point 1
h1 = 9 # m, head at well 1
R2 = 22 # m, distance from well to point 2
H2 = 10 # m, head at well 2
print(" The given dscharge is: {}".format(Q), "m\u00b3/min \n")
print(" The distance to Well 1 and well 2 are: {}m and {}m \n".format(r1, R2))
print(" The head at Well 1 and well 2 are: {}m and {}m".format(h1, H2))
#interim calculation
Q_min = Q * 1440 # m^3/d, discharge converted from m^3/min (1440 min per day)
#Calculation
# Inverted Thiem equation. Bug fix: use the daily discharge Q_min so the
# result really is in m^2/d as printed -- the original plugged in Q in
# m^3/min (yielding m^2/min) and never used Q_min at all.
T = Q_min/(2*np.pi*(H2-h1))*np.log(R2/r1) # m^2/d, Transmissivity
print("\n\033[1m Result:\033[0m\n")
print("The transmissivity in the aquifer is {0:0.2f} m\u00b2/d".format(T))
# +
print("\n\033[1m Provided are:\033[0m\n")
K = 24.50 # m/d, conductivity
r_1 = 0.23 # m, distance from well to point 1
h_1 = 12 # m, head at well 1
R_2 = 275 # m, distance from well to point 2
H_2 = 18 # m, head at well 2
print(" The given conductivity is: {}".format(K), "m/d \n")
print(" The distance to Well 1 and well 2 are: {} m and {} m are \n".format(r_1, R_2))
print(" The head at Well 1 and well 2 are: {} m and {} m".format(h_1, H_2))
#Calculation
Q_1 = (np.pi*K*(H_2**2-h_1**2))/(np.log(R_2/r_1)) # m^3/d, well discharge from the Thiem (Dupuit) unconfined equation
print("\n\033[1m Result:\033[0m\n")
print("Discharge from the well is {0:0.2f} m\u00b3/d".format(Q_1))
# -
| _build/.jupyter_cache/executed/8d82911682c846405d0238cb675d953f/base.ipynb |
-- -*- coding: utf-8 -*-
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- # Testudinal Haskell Notebook 2
--
-- ## More Turtle Drawing
--
-- This is the second Testudinal Haskell Notebook. It has more turtle drawing examples.
--
-- “Testudinal” means “having to do with turtles.” This Haskell learning
-- notebook shows you how to do computer graphics in a style called
-- “[turtle graphics](https://en.wikipedia.org/wiki/Turtle_graphics).”
--
-- ## Run this code cell first
--
-- You must run next code cell first, before running any other code cells.
-- The next code cell contains some commands to make this notebook work right.
--
-- Now hold down <kbd>🡅 Shift</kbd> and press <kbd>↵ Enter</kbd> two times to run the next code cell.
-- IHaskell setup: enable the language extensions the turtle DSL relies on,
-- then load and import the notebook's turtle-drawing prelude.
:extension NoMonomorphismRestriction FlexibleContexts TypeFamilies FlexibleInstances BlockArguments
:load lib/TestudinalPrelude
import TestudinalPrelude
-- Default numeric literals to exact/likely types for the drawing functions.
default (Rational, Integer, Double)
putStrLn "Notebook works right!"
-- # Running the Notebook Cells
--
-- Here is a reference of keyboard commands. You'll want to look at this later. For now, just press <kbd>🡅 Shift</kbd><kbd>↵ Enter</kbd> again.
--
-- | Keyboard Command | Action |
-- |-------------:|---------------|
-- |<kbd>🡅 Shift</kbd><kbd>↵ Enter</kbd> | Run the selected Jupyter cell and advance to the next cell |
-- |<kbd>Ctrl</kbd><kbd>↵ Enter</kbd> | Run the selected Jupyter cell and don't advance |
-- |<kbd>Alt</kbd><kbd>↵ Enter</kbd> | Run the selected Jupyter cell and insert a new cell after |
-- |<kbd>↵ Enter</kbd> | Edit the selected Jupyter cell |
-- |<kbd>Ctrl</kbd><kbd>🡅 Shift</kbd><kbd>-_</kbd> | Split the Jupyter cell at the cursor position |
-- The `resetTurtle` function will make the turtle do some action and then return to the same place where it started.
--
-- Here is a function to draw a [fern](http://www.pool.rnd.team/en/Galeria).
--
-- The fern is “recursive.” The word “recursive” means “a function which calls itself.” The `fern1` function calls itself three times, can you see where?
-- Draw a recursive fern frond of the given segment length. `sign` (+1/-1)
-- picks which way the frond curls; it alternates for the first side branch.
-- Recursion stops once the segment length drops below 0.1.
fern1 size sign = do
    setPenColor orange
    setPenWidth 1
    if size < 0.1
        then pure ()
        else resetTurtle do
            forward size
            -- first side branch, mirrored handedness (negate sign)
            resetTurtle do
                right (70 * sign)
                fern1 (0.5 * size) (negate sign)
            forward size
            -- second side branch, same handedness
            resetTurtle do
                left (70 * sign)
                fern1 (0.5 * size) sign
            -- continue the main stem, slightly curved and one unit shorter
            resetTurtle do
                right (7 * sign)
                fern1 (size - 1) sign
turtle do fern1 21 (-1)
-- A second fern: branches sprout at ~80 degrees and the 2-degree
-- asymmetries (80 vs 82, 80 vs 78) give the stem a gentle rightward curl.
-- Recursion stops once the segment length drops below 0.2.
fern2 size = do
    setPenColor green
    setPenWidth 1
    if size < 0.2
        then pure ()
        else resetTurtle do
            forward size
            left 80
            fern2 (0.3 * size)
            right 82
            forward size
            right 80
            fern2 (0.3 * size)
            left 78
            fern2 (0.9 * size)
turtle do fern2 24
-- +
-- Interactive demo: a slider in [-100, 100] controls how sharply the
-- turtle's 30-segment path curls; moving it redraws via the ChangeHandler.
sliderCat <- mkFloatSlider
setField sliderCat MinFloat (-100.0)
setField sliderCat MaxFloat 100.0
setField sliderCat StepFloat (Just 1.0)
setField sliderCat FloatValue 0.0
outCat <- mkOutput
-- Redraw the figure into outCat using the slider's current value.
drawCat = do
    val <- getField sliderCat FloatValue
-- replaceOutput outCat $ diagram $ rotate ((-90.0) @@ deg) $ sizeDiagram $ drawTurtle do
    replaceOutput outCat $ turtle do
        setPenWidth 4
        for_ [1.0,2.0..30.0] \i -> do
            forward 10.0
            left (val * i / 100.0)
-- where
--     sizeDiagram = withEnvelope (fromPoints [mkP2 0.0 (-180.0), mkP2 300.0 180.0])
setField sliderCat ChangeHandler drawCat
sliderCat
outCat
drawCat
| TestudinalHaskell2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Catalog utilities
# The following snippets will accomplish these goals:
# 1. Catalog propagation to a given date.
# 2. Catalog selection of stars with a VMag lesser than a given value.
# 3. Generate a guide star catalog with the angular distance to each other.
# 4. Plot the guide stars
#
# ## Catalog propagation to a given date
# ### Input parameters
# - (-i) *Hipparcos catalog file path*: can be downloaded from https://cdsarc.u-strasbg.fr/viz-bin/Cat
# - (-d) *Date to propagate the catalog to*: e.g. "2020 1 1 12.00"
# - (-o) *File name of the CSV file with propagated catalog*
# +
from typing import NamedTuple
import math
import numpy as np
class CatEntry(NamedTuple):
    """Represents a catalog entry"""
    # NOTE(review): `ra` units vary by call site -- the propagation cell
    # stores novas output (decimal hours), while the CSV-reading cell stores
    # degrees. Confirm before mixing entries from both sources.
    starname: str           # star designation (HIP number as a string here)
    catalog: str            # source catalog tag, e.g. "HIP"
    starnumber: int         # catalog number
    ra: float               # right ascension (hours or degrees -- see note above)
    dec: float              # declination, degrees
    promora: float          # proper motion in RA
    promodec: float         # proper motion in dec
    parallax: float         # parallax (milliarcseconds for HIP -- TODO confirm)
    radialvelocity: float   # radial velocity (0.0 where unknown)
    vmag: float             # visual magnitude
def convertRADEC(ra, dec):
    """Map the angle pair (ra, dec), in degrees, onto the unit sphere.

    Note: `dec` is treated as a polar angle measured from the +z axis
    (z = cos(dec)); callers in this notebook pass declination + 90 so the
    convention is consistent for their angular-distance computations.
    """
    ra_rad = np.deg2rad(ra)
    dec_rad = np.deg2rad(dec)
    return [
        math.sin(dec_rad) * math.cos(ra_rad),
        math.sin(dec_rad) * math.sin(ra_rad),
        math.cos(dec_rad),
    ]
# +
def data_complete(star):
    """Return True when catalog row `star` has a parsable RA field.

    Field 8 (RA, degrees) is required downstream. Rows whose field is
    blank/non-numeric -- or that are too short to have a field 8 at all --
    are reported incomplete instead of raising (the original crashed with
    IndexError on truncated rows).
    """
    try:
        float(star[8])
    except (ValueError, IndexError):
        return False
    return True
def deg_to_time(degress):
    """Format an angle in degrees as an "H M S.SS" right-ascension string.

    Bug fix: minutes is now truncated to an integer. Previously the
    fractional minutes were printed too, double-counting the part already
    expressed by the seconds field (e.g. "1 2.04 2.40").
    """
    decimal_time = deg_to_decimal_time(degress)
    hours = int(decimal_time)
    minutes = int((decimal_time*60) % 60)
    seconds = (decimal_time*3600) % 60
    return f"{hours} {minutes} {seconds:.2f}"
def deg_to_decimal_time(degrees):
    """Convert degrees to decimal hours (15 degrees per hour)."""
    return degrees / 15.0
def decimal_time_to_degrees(decimal_time):
    """Convert decimal hours back to degrees (inverse of deg_to_decimal_time)."""
    return decimal_time * 15.0
def deg_to_deg_min_sec(degrees):
    """Convert decimal degrees to a signed "DD MM SS.S" string.

    Bug fix: negative inputs are now decomposed on their magnitude with a
    single leading sign. Previously int() truncation produced negative
    minutes/seconds (e.g. -10.5 -> "-10 -30 0.0") and values between -1
    and 0 were formatted as positive.
    """
    sign = "-" if degrees < 0 else "+"
    magnitude = abs(degrees)
    d = int(magnitude)
    minutes = int((magnitude - d) * 60)
    seconds = (magnitude - d - minutes / 60) * 3600
    return f"{sign}{d:02d} {minutes} {seconds:.1f}"
# -
# ## Propagate the Catalog
# +
import csv
from novas import compat as novas
original_catalog_filepath = "hip_main.dat"
converted_catalog_filepath = "./out/hip_main_converted.csv"
convert_date = "2020 1 1 12.00"
date = convert_date.split(' ')
leap_secs = 37           # TAI-UTC leap seconds valid for 2020; update if convert_date changes
epoc_hip = 2448349.0625  # Julian date of the Hipparcos catalog epoch (J1991.25)
# Date to convert the catalog to
jd_utc = novas.julian_date(int(date[0]), int(date[1]),
                           int(date[2]), float(date[3]))
jd_tt = jd_utc + (leap_secs + 32.184) / 86400  # UTC -> TT (TT = TAI + 32.184 s)
# Read the original catalog content (pipe-delimited Hipparcos main file)
with open(original_catalog_filepath, 'r') as raw:
    content = [
        [field.strip() for field in line.split('|')]
        for line in raw
    ]
incomplete_stars = []
# Bug fix: `converted` was never created or appended to, so the CSV writer
# below raised a NameError and the propagated entries were thrown away.
converted = []
for line in content:
    if not data_complete(line):
        incomplete_stars.append(line[1])
        continue
    ra_hours = deg_to_decimal_time(float(line[8]))
    # Negative parallaxes are measurement noise; clamp them to zero.
    parallax = float(line[11]) if float(line[11]) > 0 else 0.0
    vmag = float(line[5])
    star = novas.make_cat_entry(line[1], "HIP", int(line[1]),
                                float(ra_hours), float(line[9]),
                                float(line[12]), float(line[13]),
                                parallax, 0.0)
    # Transform from the Hipparcos epoch to the requested date.
    star_con = novas.transform_cat(1, epoc_hip, star, jd_tt, "HP2")
    star_entry = CatEntry(star_con.starname, star_con.catalog,
                          star_con.starnumber, star_con.ra,
                          star_con.dec, star_con.promora,
                          star_con.promodec, star_con.parallax,
                          star_con.radialvelocity, vmag)
    converted.append(star_entry)  # bug fix: collect the propagated entries
# Write new catalog into a file
with open(converted_catalog_filepath, mode='w') as csv_file:
    fieldnames = ['HIP_number', 'ra_degrees', 'dec_degrees',
                  'promora', 'promodec', 'parallax', 'vmag']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    for star in converted:
        writer.writerow({
            'HIP_number': star.starnumber,
            'ra_degrees': "{0:.8f}".format(decimal_time_to_degrees(star.ra)),
            'dec_degrees': "{0:.8f}".format(star.dec),
            'promora': "{0:.8f}".format(star.promora),
            'promodec': "{0:.8f}".format(star.promodec),
            'parallax': "{0:.8f}".format(star.parallax),
            'vmag': "{0:.2f}".format(star.vmag),
        })
# -
# ## At this point we have a new CSV file with the stars propagated to the date 01/01/2020 at 12:00.
#
# +
catalog = []
catalog_vmag = []
# Bug fix: CVmag was only defined in a later cell, so running this cell in
# order raised a NameError; define the magnitude cutoff here.
CVmag = 4.5
with open("./out/hip_main_converted.csv", 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    # Bug fix: DictReader already consumes the header line, so the old
    # "skip line 0" guard silently dropped the first star in the file.
    for row in csv_reader:
        star = CatEntry(row['HIP_number'], "HIP",
                        row['HIP_number'], float(row['ra_degrees']),
                        float(row['dec_degrees']), row['promora'],
                        row['promodec'], row['parallax'],
                        0.0, float(row['vmag']))
        catalog.append(star)
        if float(row['vmag']) < CVmag:
            catalog_vmag.append(star)
print("Total number of stars: {}".format(len(catalog)))
print("Total number of stars with Vmag < 4.5: {}".format(len(catalog_vmag)))
# -
# ## There are some stars with incomplete data which we are not going to include in our new propagated catalog.
# ### The total number and their identifier in the HIP catalog is shown below.
print("There are {} incomplete stars.".format(len(incomplete_stars)))
print(incomplete_stars)
# ## Guide star Catalog
# ### Having a propagated catalog, we can create a relation between a set of stars (CVmag < 4.5) and write down their angular distance
#
# +
import math
import itertools
guide_catalog = []
CVmag = 4.5
FOV_h = 2 * 14.455
FOV_v = 2 * 10.94
FOV = math.sqrt(FOV_h**2 + FOV_v**2)  # diagonal field of view, degrees
for a, b in itertools.combinations(catalog_vmag, 2):
    a_car = convertRADEC(a.ra, a.dec + 90)
    b_car = convertRADEC(b.ra, b.dec + 90)
    # Clamp the dot product into acos's domain [-1, 1]; floating-point
    # rounding can push it just past 1 for (near-)identical directions,
    # which would raise a ValueError.
    dot = a_car[0] * b_car[0] + a_car[1] * b_car[1] + a_car[2] * b_car[2]
    dab = math.degrees(math.acos(max(-1.0, min(1.0, dot))))
    if dab < FOV:
        guide_catalog.append([a.starnumber, b.starnumber, dab])
guide_catalog.sort(key=lambda x: x[2])
# -
# size of the guide star Catalog
print(len(guide_catalog))
# ## Plot the guide stars
# Save the propagated catalog in a dictionary to access stars data quickly
hip_catalog_dic = {}
with open(converted_catalog_filepath, 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    # Bug fix: DictReader never yields the header row, so the old
    # line_count guard dropped the first star from the dictionary.
    for row in csv_reader:
        hip_catalog_dic[row['HIP_number']] = [
            row['ra_degrees'],
            row['dec_degrees'],
            row['promora'],
            row['promodec'],
            row['parallax'],
            row['vmag']
        ]
# +
# Get the unique stars from the guide_catalog
# Bug fix: the original took only the first column of the pairs
# ([set(x) for x in zip(*guide_catalog)][0]), so stars appearing solely as
# the second member of a pair were never plotted, and an empty catalog
# raised IndexError. Collect both star-number columns instead.
guide_stars = set()
for a_star, b_star, _dist in guide_catalog:
    guide_stars.add(a_star)
    guide_stars.add(b_star)
# Build their cartesian coordinates to plot them
stars_car = []
for star in guide_stars:
    star_ra = float(hip_catalog_dic[star][0])
    star_dec = float(hip_catalog_dic[star][1])
    star_car = convertRADEC(star_ra, star_dec + 90)
    stars_car.append(star_car)
# -
# Check the number of guide stars
print(len(guide_stars))
print(len(stars_car))
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
# 3-D scatter of the guide stars' unit-sphere positions.
plt.figure()
ax = plt.axes(projection="3d")
# Transpose the list of [x, y, z] triples into three coordinate lists.
list_zip = [x for x in zip(*stars_car)]
ax.scatter(list_zip[0], list_zip[1], list_zip[2], color='g')
ax.set_xlabel('X Axes')
ax.set_ylabel('Y Axes')
ax.set_zlabel('Z Axes')
plt.show()
# -
| startrackerpy/server/startracker/catalogs/catalog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook Intentions
#
# The purpose of the notebook is to start exploring the types of utilization trends we can analyze with the new RespondentHistoryGenerator class. We will start off following utilization rates across years. At the time of generation only 2011-2018 data was accessible.
# +
import os
from os.path import expanduser
import sys
# Make the MEPS project importable and configure Django before any model imports.
sys.path.append(os.path.join(expanduser("~"), "meps", "meps_dev"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meps_db.settings")
# Jupyter runs inside an event loop; allow ORM calls from it.
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
import django
django.setup();
import random
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
from datetime import date
from dateutil.relativedelta import *
from scipy.stats import zscore
from statsmodels.stats.weightstats import DescrStatsW
from meps_db.components.reference import DATA_FILES_YEARS
from meps_db.processors import RespondentHistoryGenerator
# -
# Build per-respondent histories for the first 8 data years (2011-2018).
rhg = RespondentHistoryGenerator(years=DATA_FILES_YEARS[:8])
respondent_history = rhg.run()  # {year: {resp_id: {util_type: [events], "characteristics": {...}}}} -- inferred from usage below
# ## Sample Patient Timeline
#
# We want to build a function that will plot all the events in a respondent's year. This will help to illustrate the different types of respondents in the dataset.
#
# The code below is about as good as we can get a dynamic timeline to perform. There are multiple challenges, as most respondent have very sparse timelines, yet we will likely be highlighting outlier timelines in the future. Additionally all event counts are treated equally, which affect the scatterplot point sizes. One month with many office based visits can make the entire timeline seem less impactful.
#
# Note: presciption_medicines and other_medical utilization types are not currently linked to events so there are no dates associated with these events.
# +
HIGH_UTILIZATION_SAMPLE = True
rng_year = random.choice(list(respondent_history.keys()))
if HIGH_UTILIZATION_SAMPLE:
    # Restrict the draw to respondents with at least one event of every
    # dated utilization type, so the sample timeline is interesting.
    skip_types = {"characteristics", "presciption_medicines", "other_medical"}
    high_utils = [
        resp_id
        for resp_id, util_dict in respondent_history[rng_year].items()
        if all(
            len(util_list) > 0
            for util_type, util_list in util_dict.items()
            if util_type not in skip_types
        )
    ]
    rng_resp = random.choice(high_utils)
else:
    rng_resp = random.choice(list(respondent_history[rng_year].keys()))
# +
# aggregate events by date
date_tracker = {}
empty_util_types = []
for util_type, history in respondent_history[rng_year][rng_resp].items():
    # Skip non-event keys; prescription/other-medical records carry no dates.
    if util_type in {"characteristics", "presciption_medicines", "other_medical"}:
        continue
    if len(history) > 0:
        for event in history:
            edate = event["date"]
            if edate not in date_tracker:
                date_tracker[edate] = {}
            if util_type not in date_tracker[edate]:
                date_tracker[edate][util_type] = 0
            date_tracker[edate][util_type] += 1
    else:
        empty_util_types.append(util_type)
# convert to list of dictionary
util_list = []
for date_obj, util_type_dict in date_tracker.items():
    for util_type, count in util_type_dict.items():
        util_list.append(
            {
                "date": date_obj,
                "util_type": util_type.replace("_" ," ").title(),
                "count": count,
            }
        )
# Types with no events get a Jan-1 placeholder row (count None) so they
# still appear on the timeline's y-axis.
for util_type in empty_util_types:
    util_list.append(
        {
            "date": date(year=rng_year, month=1, day=1),
            "util_type": util_type.replace("_" ," ").title(),
            "count": None,
        }
    )
# Enforce Order of Categories when plotting
# NOTE(review): "Hosptial Inpatient" mirrors the misspelled key in the
# underlying data -- do not "fix" it here or the category won't match.
util_df = pd.DataFrame(util_list)
util_df["util_type"] = pd.Categorical(
    util_df['util_type'],
    categories=[
        "Office Based",
        "Outpatient",
        "Emergency Room",
        "Hosptial Inpatient",
        "Home Health",
        "Dental Care",
    ],
    ordered=True
)
util_df.sort_values(["util_type"], inplace=True)
util_df.head(10)
# +
# Initialize
plt.figure(figsize=(10, 6))
ax = sns.scatterplot(
    data=util_df,
    x="date",
    y="util_type",
    hue="util_type",
    size="count",
    alpha=1,
    sizes = (20,200)
)
# Clean labels
plt.xticks(rotation=45)
ax.set_xlabel("Year-Month", fontsize=15)
# Enforce full year is visible
tracker = []
start_date = date(year=rng_year, month=1, day=1)
for month in range(12):
    tracker.append(str(start_date+relativedelta(months=+month)))
# Keep only the "YYYY-MM" prefix of each ISO date string for the ticks.
tracker = [date_str[:7] for date_str in tracker]
ax.set(xticks=tracker)
ax.set_xticklabels(labels=tracker, rotation=45, ha='right')
ax.set_ylabel("Utilization Type", fontsize=15)
# split legends
# seaborn merges the hue and size legends; split them back apart so the
# categories and the point-size key can be placed separately.
handles, labels = ax.axes.get_legend_handles_labels()
clean_labels = [
    'Event',
    'Office Based',
    'Outpatient',
    'Emergency Room',
    'Hosptial Inpatient',
    'Home Health',
    'Dental Care',
]
fixed_labels, fixed_handles = [],[]
for label, handle in zip(clean_labels, handles):
    fixed_labels.append(label)
    fixed_handles.append(handle)
fixed_size_labels, fixed_size_handles = [],[]
for size, handle in zip(labels[len(clean_labels):], handles[len(clean_labels):]):
    fixed_size_labels.append(size)
    fixed_size_handles.append(handle)
fixed_size_labels[0] = "Size"
# Put the legends out of the figure
leg1 = plt.legend(fixed_handles, fixed_labels, bbox_to_anchor=(1.01, 1.), loc=2, borderaxespad=0.1)
leg2 = plt.legend(fixed_size_handles, fixed_size_labels, bbox_to_anchor=(1.01, .65), loc=2, borderaxespad=0.1)
ax.add_artist(leg1)
ax.add_artist(leg2)
# Display
plt.tight_layout()
plt.show()
# -
# ## Average Population Utilization Per Year
#
# We want to plot the average persons utilization across years. For this we will incorporate the "weight" variable of each respondent. We will experiment with different types of plots to determine which builds the clearest picture. One thing we want to handle is the distribution of data, we know that event's like ER visits are very rare but significant. The average number of ER visits will likely be close to zero, but we are likely going to be more interested in the number of ER visits for the 75% percentile.
def get_utilization_year(year, resp_id):
    """ Takes a year and a respondent id. Return a list of dictionaries. Each dictionary items contains a year,
    a utilization type and the total number of events. """
    skip = {"characteristics", "presciption_medicines", "other_medical"}
    totals = {}
    for util_type, history in respondent_history[year][resp_id].items():
        if util_type in skip:
            continue
        # An empty history still yields a zero-count entry.
        totals[util_type] = len(history)
    weight = respondent_history[year][resp_id]["characteristics"]["weight"]
    return [
        {
            "util_type": util_type.replace("_", " ").title(),
            "count": count,
            "year": year,
            "weight": weight,
        }
        for util_type, count in totals.items()
    ]
# +
# One row per respondent x utilization type for the chosen year.
year = 2018
util_list = []
for resp_id in respondent_history[year]:
    util_list.extend(get_utilization_year(year=year, resp_id=resp_id))
# Enforce Order of Categories when plotting
# NOTE(review): "Hosptial Inpatient" mirrors the misspelled key in the
# underlying data -- keep it so the category matches the generated labels.
util_df = pd.DataFrame(util_list)
util_df["util_type"] = pd.Categorical(
    util_df['util_type'],
    categories=[
        "Office Based",
        "Outpatient",
        "Emergency Room",
        "Hosptial Inpatient",
        "Home Health",
        "Dental Care",
    ],
    ordered=True
)
util_df.sort_values(["util_type"], inplace=True)
util_df.head(10)
# -
# ### Violin Plots
#
# Violin plots are essentially a mix of KDEs and Box plots
#
# We had hoped these would be useful for displaying transitions in utilization over years, but they are not well suited for this data. In most utilization types the overwhelming majority of respondents did not have an event during the year. This makes it such that the distribution is heavily left skewed. On the other hand, a small minority of respondents are heavy utilizers for an event type. This forces the Y axis to be very large, squishing the distribution down. Trying to compensate for both of these factors results in a conflict between relevance and readablility. We'll need to identify another type of plot. However violin plots may be useful when we start to evaluate spending.
# Basic violinplot for 2018, unreadable due to extreme outliers such as the office based member
plt.figure(figsize=(10, 10))
ax = sns.violinplot(data=util_df, x="util_type", y="count")
# +
# filter out outliers
# NOTE(review): z-scores are computed on the pooled "count" column across
# all utilization types, so the threshold is dominated by office visits.
z_scores = zscore(util_df["count"])
abs_z_scores = np.abs(z_scores)
util_df["abs_z_scores"] = abs_z_scores
# Violin plot with lazy outlier removal, still dominated by extreme members
plt.figure(figsize=(10, 10))
ax = sns.violinplot(data=util_df.loc[(util_df["abs_z_scores"]<3)], x="util_type", y="count")
# +
# filter out outliers (recomputed so this cell can run standalone)
z_scores = zscore(util_df["count"])
abs_z_scores = np.abs(z_scores)
util_df["abs_z_scores"] = abs_z_scores
# Violin plot with lazy outlier removal, and logarithm scaling. Closer to what we want but the log scale makes it
# less interpretable. However we can now identify some issues. Essentially most respondents have close to no
# activity during a year except for office based visits.
plt.figure(figsize=(10, 10))
plt.yscale('log')
ax = sns.violinplot(data=util_df.loc[(util_df["abs_z_scores"]<3)], x="util_type", y="count")
# -
util_df
# ## Facet Grids
#
# Facet grids can be useful when stacked graphs would create a lot of noise. However now it seems like the big problem is our distributions. The right skew is simply too long to display anything meaningful. A table of descriptive statistics seems like the best option at this point.
g = sns.FacetGrid(util_df, col="util_type", col_wrap=3, height=3)
g.map(sns.kdeplot, "count")
# ## Table
#
# Using statsmodels we can quickly calculate weight statistics. We are able to generate a table that displays all utilization types, across years. We generate the following weighted statistics:
#
# - mean: average annual events
# - std: standard deviation of annual events
# - quantile_25: 25% of the population has x or less annual events
# - quantile_50: 50% of the population has x or less annual events
# - quantile_75: 75% of the population has x or less annual events
# - quantile_95: 95% of the population has x or less annual events
def get_utilization_year(year, resp_id, respondent_data=None):
    """Summarize one respondent's annual utilization.

    Parameters
    ----------
    year : hashable
        Year key into the respondent-history mapping.
    resp_id : hashable
        Respondent id within that year.
    respondent_data : dict, optional
        Mapping of year -> resp_id -> utilization type -> event list.
        Defaults to the module-level ``respondent_history``, preserving
        the original call signature.

    Returns
    -------
    list of dict
        One dict per utilization type containing the type, the total
        number of events (0 when the respondent had none), the year,
        and the respondent's survey weight.
    """
    if respondent_data is None:
        respondent_data = respondent_history
    record = respondent_data[year][resp_id]
    # Non-event entries that must not be counted as utilization.
    skipped = {"characteristics", "presciption_medicines", "other_medical"}
    weight = record["characteristics"]["weight"]
    # The original counted events one by one; each list element is one
    # event, so the count is simply len(history).
    return [
        {
            "util_type": util_type,
            "count": len(history),
            "year": year,
            "weight": weight,
        }
        for util_type, history in record.items()
        if util_type not in skipped
    ]
annual_util_type = {}
# Collect per-year, per-utilization-type vectors of event counts and survey
# weights for the first eight survey years.
for year in DATA_FILES_YEARS[:8]:
    util_list = []
    for resp_id in respondent_history[year]:
        util_list.extend(get_utilization_year(year=year, resp_id=resp_id))
    annual_util_type[year] = {}
    for resp_util in util_list:
        if resp_util["util_type"] not in annual_util_type[year]:
            annual_util_type[year][resp_util["util_type"]] = {"counts": [], "weights": []}
        annual_util_type[year][resp_util["util_type"]]["counts"].append(resp_util["count"])
        annual_util_type[year][resp_util["util_type"]]["weights"].append(resp_util["weight"])
annualized_data = []
# Build one row per (year, utilization type) of weighted descriptive stats.
# NOTE(review): "hosptial_inpatient" is the key as spelled in the upstream
# data structure — do not "fix" the spelling here without changing the source.
for year in DATA_FILES_YEARS[:8]:
    for util_type in [
        "office_based",
        "outpatient",
        "emergency_room",
        "hosptial_inpatient",
        "home_health",
        "dental_care",
    ]:
        # statsmodels weighted stats; ddof=1 gives the sample standard deviation.
        wdf = DescrStatsW(
            annual_util_type[year][util_type]["counts"],
            weights=annual_util_type[year][util_type]["weights"],
            ddof=1
        )
        annualized_data.append(
            {
                "year": year,
                "util_type": util_type,
                "mean": wdf.mean,
                "std": wdf.std,
                # .quantile returns a Series; take the scalar value.
                "quantile_25": list(wdf.quantile([0.25]))[0],
                "quantile_50": list(wdf.quantile([0.5]))[0],
                "quantile_75": list(wdf.quantile([0.75]))[0],
                "quantile_95": list(wdf.quantile([0.95]))[0],
            }
        )
annualized_df = pd.DataFrame(annualized_data)
annualized_df.sort_values(["util_type", "year"], ascending=False, inplace=True)
annualized_df.reset_index(inplace=True, drop=True)
annualized_df
| sandbox/2021_03_test_history_generator/simple_trends.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Run this cell to install the necessary dependencies
import pandas as pd
import numpy as np
from datascience import *
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# -
# # Project 2: Spotify
# ## The Data Science Life Cycle - Table of Contents
#
# <a href='#section 0'>Background Knowledge</a>
#
# <a href='#subsection 1a'>Formulating a question or problem</a>
#
# <a href='#subsection 1b'>Acquiring and preparing data</a>
#
# <a href='#subsection 1c'>Conducting exploratory data analysis</a>
#
# <a href='#subsection 1d'>Using prediction and inference to draw conclusions</a>
# <br><br>
# ### Background Knowledge <a id='section 0'></a>
#
# If you listen to music, chances are you use Spotify, Apple Music, or another similar streaming service. This new era of the music industry curates playlists, recommends new artists, and is based on the number of streams more than the number of albums sold. The way these streaming services do this is (you guessed it) data!
#
# Spotify, like many other companies, hires many full-time data scientists to analyze all the incoming user data and uses it to make predictions and recommendations for users. If you're interested, feel free to check out [Spotify's Engineering Page](https://engineering.atspotify.com/) for more information!
# <img src="images/spotify.png" width = 700/>
#
# <center><a href=https://hrblog.spotify.com/2018/02/08/amping-up-diversity-inclusion-at-spotify/>Image Reference</a></center>
# # The Data Science Life Cycle <a id='section 1'></a>
# ## Formulating a Question or Problem <a id='subsection 1a'></a>
# It is important to ask questions that will be informative and can be answered using the data. There are many different questions we could ask about music data. For example, there are many artists who want to find out how to get their music on Spotify's Discover Weekly playlist in order to gain exposure. Similarly, users love to see their *Spotify Wrapped* listening reports at the end of each year.
# <div class="alert alert-warning">
# <b>Question:</b> Recall the questions you developed with your group on Tuesday. Write down that question below, and try to add on to it with the context from the articles from Wednesday. Think about what data you would need to answer your question. You can review the articles on the bCourses page under Module 4.3.
# </div>
# **Original Question(s):** *here*
#
#
# **Updated Question(s):** *here*
#
#
# **Data you would need:** *here*
#
# ## Acquiring and Cleaning Data <a id='subsection 1b'></a>
#
# We'll be looking at song data from Spotify. You can find the raw data [here](https://github.com/rfordatascience/tidytuesday/tree/master/data/2020/2020-01-21). We've cleaned up the datasets a bit, and we will be investigating the popularity and the qualities of songs from this dataset.
#
# The following table, `spotify`, contains a list of tracks identified by their unique song ID along with attributes about that track.
#
# Here are the descriptions of the columns for your reference. (We will not be using all of these fields):
#
# |Variable Name | Description |
# |--------------|------------|
# |`track_id` | Song unique ID |
# |`track_name` | Song Name |
# |`track_artist `| Song Artist |
# |`track_popularity` | Song Popularity (0-100) where higher is better |
# |`track_album_id`| Album unique ID |
# |`track_album_name` | Song album name |
# |`track_album_release_date`| Date when album released |
# |`playlist_name`| Name of playlist |
# |`playlist_id`| Playlist ID |
# |`playlist_genre`| Playlist genre |
# |`playlist_subgenre `| Playlist subgenre |
# |`danceability`| Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable. |
# |`energy`| Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale. Perceptual features contributing to this attribute include dynamic range, perceived loudness, timbre, onset rate, and general entropy. |
# |`key`| The estimated overall key of the track. Integers map to pitches using standard Pitch Class notation . E.g. 0 = C, 1 = C♯/D♭, 2 = D, and so on. If no key was detected, the value is -1. |
# |`loudness`| The overall loudness of a track in decibels (dB). Loudness values are averaged across the entire track and are useful for comparing relative loudness of tracks. Loudness is the quality of a sound that is the primary psychological correlate of physical strength (amplitude). Values typical range between -60 and 0 db. |
# |`mode`| Mode indicates the modality (major or minor) of a track, the type of scale from which its melodic content is derived. Major is represented by 1 and minor is 0. |
# |`speechiness`| Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks. |
# |`acousticness`| A confidence measure from 0.0 to 1.0 of whether the track is acoustic. 1.0 represents high confidence the track is acoustic. |
# |`instrumentalness`| Predicts whether a track contains no vocals. “Ooh” and “aah” sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly “vocal”. The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. Values above 0.5 are intended to represent instrumental tracks, but confidence is higher as the value approaches 1.0. |
# |`liveness`| Detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live. |
# |`valence`| A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry). |
# |`tempo`| The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration. |
# |`duration_ms`| Duration of song in milliseconds |
# |`creation_year`| Year when album was released |
#
#
#
# Load the Spotify tracks table (one row per track) and preview the first 10 rows.
spotify = Table.read_table('data/spotify.csv')
spotify.show(10)
# <div class="alert alert-info">
# <b>Question:</b> It's important to evalute our data source. What do you know about the source? What motivations do they have for collecting this data? What data is missing?
# </div>
# *Insert answer here*
# <div class="alert alert-info">
# <b>Question:</b> Do you see any missing (nan) values? Why might they be there?
# </div>
# *Insert answer here*
# <div class="alert alert-info">
# <b>Question:</b> We want to learn more about the dataset. First, how many total rows are in this table? What does each row represent?
#
# </div>
total_rows = ...
total_rows
# *Insert answer here*
# ## Conducting Exploratory Data Analysis <a id='subsection 1c'></a>
# Visualizations help us to understand what the dataset is telling us. We will be using bar charts, scatter plots, and line plots to try to answer questions like the following:
# > What audio features make a song popular and which artists have these songs? How have features changed over time?
# ### Part 1: We'll start by looking at the length of songs using the `duration_ms` column.
# Right now, the `duration` array contains the length of each song in milliseconds. However, that's not a common measurement when describing the length of a song - often, we use minutes and seconds. Using array arithmetic, we can find the length of each song in seconds and in minutes. There are 1000 milliseconds in a second, and 60 seconds in a minute. First, we will convert milliseconds to seconds.
#
#Access the duration column as an array.
duration = ...
duration
#Divide the milliseconds by 1000
duration_seconds = ...
duration_seconds
#Now convert duration_seconds to minutes.
duration_minutes = ...
duration_minutes
# <div class="alert alert-info">
# <b>Question:</b> How would we find the average duration (in minutes) of the songs in this dataset?
# </div>
avg_song_length_mins = ...
avg_song_length_mins
# Now, we can add in the duration for each song (in minutes) by adding a column to our `spotify` table called `duration_min`. Run the following cell to do so.
#This cell will add the duration in minutes column we just created to our dataset.
spotify = spotify.with_columns('duration_min', duration_minutes)
spotify
# ### Artist Comparison
# Let's see if we can find any meaningful difference in the average length of song for different artists.
# <div class="alert alert-success">
# <b>Note: </b>Now that we have the average duration for each song, you can compare average song length between two artists. Below is an example!
# </div>
sam_smith = spotify.where("track_artist", are.equal_to("<NAME>"))
sam_smith
sam_smith_mean = ...
sam_smith_mean
#In this cell, choose an artist you want to look at.
artist_name = ...
artist_name
#In this cell, choose another artist you want to compare it to.
artist_name_2 = ...
artist_name_2
# This exercise was just one example of how you can play around with data and answer questions.
# ### Top Genres and Artists
# In this section, we are interested in the categorical information in our dataset, such as the playlist each song comes from or the genre. There are almost 33,000 songs in our dataset, so let's do some investigating. What are the most popular genres? We can figure this out by grouping by the playlist genre.
# <div class="alert alert-info">
# <b>Question:</b> How can we group our data by unique genres?
# </div>
genre_counts = spotify.group('...')
genre_counts
# <div class="alert alert-info">
# <b>Question:</b> In our dataset, it looks like the most popular genre is EDM. Make a barchart below to show how the other genres compare.
# </div>
genre_counts.barh('...', '...')
# Notice that it was difficult to analyze the above bar chart because the data wasn't sorted first. Let's sort our data and make a new bar chart so that it is much easier to make comparisons.
genre_counts_sorted = genre_counts.sort('...', descending = ...)
genre_counts_sorted
genre_counts_sorted.barh('...', '...')
# <div class="alert alert-info">
# <b>Question:</b> Was this what you expected? Which genre did you think would be the most popular?
# </div>
# *Insert answer here.*
# <div class="alert alert-info">
# <b>Question:</b> Let's take a look at all the artists in the dataset. We can take a look at the top 25 artists based on the number of songs they have in our dataset. We'll follow a similar method as we did when grouping by genre above. First, we will group our data by artist and sort by count.
# </div>
# +
#Here, we will group and sort in the same line.
artists_grouped = spotify.group('...').sort('...', ...)
artists_grouped
# -
top_artists = artists_grouped.take(np.arange(0, 25))
top_artists
top_artists.barh('track_artist', '...')
# <div class="alert alert-info">
# <b>Question:</b> What do you notice about the top 25 artists in our dataset?
# </div>
# *insert answer here*
# ### Playlist Popularity
# In our dataset, each song is listed as belonging to a particular playlist, and each song is given a "popularity score", called the `track_popularity`. Using the `track_popularity`, we can calculate an *aggregate popularity* for each playlist, which is just the sum of all the popularity scores for the songs on the playlist.
#
# In order to create this aggregate popularity score, we need to group our data by playlist, and sum all of the popularity scores. First, we will create a subset of our `spotify` table using the `select` method. This lets us create a table with only the relevant columns we want. In this case, we only care about the name of the playlist and the popularity of each track. Keep in mind that each row still represents one track, even though we no longer have the track title in our table.
spotify_subset = spotify.select(['playlist_name', 'track_popularity'])
spotify_subset
# <div class="alert alert-success">
# <b>Note:</b> By grouping, we can get the number of songs from each playlist.
# </div>
playlists = spotify_subset.group('playlist_name')
playlists
# <div class="alert alert-info">
# <b>Question:</b> We can use the <code>group</code> method again, this time passing in a second argument <code>collect</code>, which says that we want to take the sum rather than the count when grouping. This results in a table with the total aggregate popularity of each playlist.
# </div>
#Run this cell.
total_playlist_popularity = spotify_subset.group('playlist_name', collect = sum)
total_playlist_popularity
# Similar to when we found duration in minutes, we can once again use the `column` method to access just the `track_popularity sum` column, and add it to our playlists table using the `with_column` method.
agg_popularity = total_playlist_popularity.column('track_popularity sum')
playlists = playlists.with_column('aggregate_popularity', agg_popularity)
playlists
# <div class="alert alert-info">
# <b>Question:</b> Do you think that the most popular playlist would be the one with the highest aggregate_popularity score, or the one with the highest number of songs? We can sort our playlists table and compare the outputs.
playlists.sort('...', descending=True)
# <div class="alert alert-info">
# <b>Question:</b> Now sort by aggregate popularity.
# </div>
playlists.sort('...', descending=True)
# Comparing these two outputs shows us that the "most popular playlist" depends on how we judge popularity. If we have a playlist that has only a few songs, but each of those songs are really popular, should that playlist be higher on the popularity rankings? By way of calculation, playlists with more songs will have a higher aggregate popularity, since more popularity values are being added together. We want a metric that will let us judge the actual quality and popularity of a playlist, not just how many songs it has.
#
# In order to take into account the number of songs on each playlist, we can calculate the "average popularity" of each song on the playlist, or the proportion of aggregate popularity that each song takes up. We can do this by dividing `aggregate_popularity` by `count`. Remember, since the columns are just arrays, we can use array arithmetic to calculate these values.
#Run this cell to get the average.
# Per-song average: aggregate popularity normalized by playlist size (array arithmetic).
avg_popularity = playlists.column('aggregate_popularity') / playlists.column('count')
#Now add it to the playlists table.
playlists = playlists.with_column('average_popularity', avg_popularity)
playlists
# Let's see if our "most popular playlist" changes when we judge popularity by the average popularity of the songs on a playlist.
playlists.sort('average_popularity', descending=True)
# Looking at the table above, we notice that 8/10 of the top 10 most popular playlists by the `average_popularity` metric are playlists with less than 100 songs. Just because a playlist has a lot of songs, or a high aggregate popularity, doesn't mean that the average popularity of a song on that playlist is high. Our new metric of `average_popularity` lets us rank playlists where the size of a playlist has no effect on its overall score. We can visualize the top 25 playlists by average popularity in a bar chart.
top_25_playlists = playlists.sort('average_popularity', descending=True).take(np.arange(25))
top_25_playlists.barh('...', '...')
# Creating a new metric like `average_popularity` helps us more accurately and fairly measure the popularity of a playlist.
#
# We saw before when looking at the top 25 artists that they were all male. Now looking at the top playlists, we see that the current landscape of popular playlists and music may have an effect on the artists that are popular. For example, the RapCaviar is the second most popular playlist, and generally there tends to be fewer female rap artists than male. This shows that the current landscape of popular music can affect the types of artists topping the charts.
# ## Using prediction and inference to draw conclusions <a id='subsection 1a'></a>
# Now that we have some experience making these visualizations, let's go back to the visualizations others are working on to analyze Spotify data using more complex techniques.
#
# [Streaming Dashboard](https://public.tableau.com/profile/vaibhavi.gaekwad#!/vizhome/Spotify_15858686831320/Dashboard1)
#
# [Audio Analysis Visualizer](https://developer.spotify.com/community/showcase/spotify-audio-analysis/)
# Music and culture are very intertwined so it's interesting to look at when songs are released and what is popular during that time. In this last exercise, you will be looking at the popularity of artists and tracks based on the dates you choose.
#
# Let's look back at the first five rows of our `spotify` table once more.
spotify.show(5)
# <div class="alert alert-info">
# <b>Question:</b> Fill in the following cell the data according to the <code>creation_year</code> you choose.
# </div>
#Fill in the year as an integer.
by_year = spotify.where("creation_year", are.equal_to(...))
by_year
# Based on the dataset you have now, use previous techniques to find the most popular song during that year. First group by what you want to look at, for example, artist/playlist/track.
your_grouped = by_year.group("...")
pop_track = your_grouped.sort("...", descending = True)
pop_track
pop_track.take(np.arange(25)).barh("...", "count")
# <div class="alert alert-info">
# <b>Question:</b> Finally, use this cell if you want to look at the popularity of a track released on a specific date. It's very similar to the process above.
# </div>
by_date = spotify.where("track_album_release_date", are.equal_to("..."))
your_grouped = by_date.group("...")
pop_track = your_grouped.sort("count", descending = True)
pop_track.take(np.arange(10)).barh("track_artist", "count")
# <div class="alert alert-info">
# <b>Question:</b> Tell us something interesting about this data.
# </div>
# *Insert answer here.*
# Notebook Authors: <NAME>, <NAME>
| Project_2/Spotify/Spotify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import pandas as pd
data = pd.read_csv("0_1_classification_data.csv")
data[0:1]
# + pycharm={"name": "#%%\n"}
import numpy as np
eta = 0.2  # learning rate for gradient descent
w2_1 = np.random.normal(size=12) # 12 weights from layer 1 into unit 1 of layer 2
w2_2 = np.random.normal(size=12) # 12 weights from layer 1 into unit 2 of layer 2
w2_3 = np.random.normal(size=12) # 12 weights from layer 1 into unit 3 of layer 2
w3_1 = np.random.normal(size=3) # 3 weights from layer 2 into unit 1 of layer 3
w3_2 = np.random.normal(size=3) # 3 weights from layer 2 into unit 2 of layer 3
b2 = np.random.normal(size=3) # biases for the 3 units of layer 2
b3 = np.random.normal(size=2) # biases for the 2 units of layer 3
# + pycharm={"name": "#%%\n"}
def a(z):
    """Sigmoid activation function: 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-z))

def ap(z):
    """Derivative of the sigmoid, via the identity a'(z) = a(z) * (1 - a(z))."""
    s = a(z)
    return s * (1 - s)
# + pycharm={"name": "#%%\n"}
CT_history = [0]*50  # total cost per epoch, recorded for the convergence plot
for N in range(50): # run batch gradient descent for 50 epochs
    # Gradient accumulators, summed over every training row.
    dw2_1 = [0]*12
    dw2_2 = [0]*12
    dw2_3 = [0]*12
    dw3_1 = [0]*3
    dw3_2 = [0]*3
    db2 = [0]*3
    db3 = [0]*3
    CT = 0
    for idx, items in data.iterrows():
        x = items[:12].to_numpy()
        t1 = items.t1
        t2 = items.t2
        # Input x and target values t1, t2
        z2_1 = np.dot(w2_1,x)+b2[0]
        z2_2 = np.dot(w2_2,x)+b2[1]
        z2_3 = np.dot(w2_3,x)+b2[2]
        # Pre-activations of layer 2
        a2_1 = a(z2_1) # a is the sigmoid activation function
        a2_2 = a(z2_2)
        a2_3 = a(z2_3)
        a2 = [a2_1,a2_2,a2_3]
        # Activations of layer 2
        z3_1 = np.dot(w3_1,a2)+b3[0]
        z3_2 = np.dot(w3_2,a2)+b3[1]
        a3_1 = a(z3_1)
        a3_2 = a(z3_2)
        a3 = [a3_1,a3_2]
        # Activations of layer 3 (network outputs)
        C = 0.5*( (t1 - a3[0])**2 + (t2 - a3[1])**2 )
        CT+= C
        # Squared-error cost for this row
        delta3_1 = (a3[0]-t1) * ap(z3_1) # simplified using the sigmoid-derivative identity
        delta3_2 = (a3[1]-t2) * ap(z3_2)
        delta3 = [delta3_1, delta3_2]
        # Output-layer deltas
        delta2_1 = np.dot(delta3,[w3_1[0],w3_2[0]] ) * ap(z2_1)
        delta2_2 = np.dot(delta3,[w3_1[1],w3_2[1]] ) * ap(z2_2)
        delta2_3 = np.dot(delta3,[w3_1[2],w3_2[2]] ) * ap(z2_3)
        delta2 = [delta2_1, delta2_2, delta2_3]
        # Backpropagate the layer-3 deltas to obtain the layer-2 deltas
        for i in range(12):
            dw2_1[i] += delta2_1*x[i]
            dw2_2[i] += delta2_2*x[i]
            dw2_3[i] += delta2_3*x[i]
        # Accumulate the 36 layer-1 -> layer-2 weight gradients
        for i in range(3):
            dw3_1[i] += delta3_1*a2[i]
            dw3_2[i] += delta3_2*a2[i]
        # Accumulate the 6 layer-2 -> layer-3 weight gradients
        db2[0] += delta2_1
        db2[1] += delta2_2
        db2[2] += delta2_3
        db3[0] += delta3_1
        db3[1] += delta3_2
        # Accumulate the 5 bias gradients
    CT_history[N]= CT # record this epoch's total cost
    # Having summed the partial derivatives over all rows, step every
    # parameter downhill by the learning rate times its gradient.
    for i in range(12):
        w2_1[i] += -eta*dw2_1[i]
        w2_2[i] += -eta*dw2_2[i]
        w2_3[i] += -eta*dw2_3[i]
    # Update the 36 layer-1 -> layer-2 weights
    for i in range(3):
        w3_1[i] += -eta*dw3_1[i]
        w3_2[i] += -eta*dw3_2[i]
    # Update the 6 layer-2 -> layer-3 weights
    # BUG FIX: the biases must be stepped by their accumulated gradients
    # (db2/db3). The original used -eta*b2[i] / -eta*b3[i], which merely
    # decayed the biases toward zero and ignored the computed gradients.
    b2[0] += -eta*db2[0]
    b2[1] += -eta*db2[1]
    b2[2] += -eta*db2[2]
    b3[0] += -eta*db3[0]
    b3[1] += -eta*db3[1]
# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
plt.plot(CT_history)
plt.show()
# + pycharm={"name": "#%%\n"}
# A hand-drawn 3x4 binary image (the digit "0") to run through the trained network.
test = [
    0,1,0,
    1,0,1,
    1,0,1,
    1,0,1
]
x = test
# Forward pass with the trained weights: input x (no targets needed at inference time)
z2_1 = np.dot(w2_1,x)+b2[0]
z2_2 = np.dot(w2_2,x)+b2[1]
z2_3 = np.dot(w2_3,x)+b2[2]
# Pre-activations of layer 2
a2_1 = a(z2_1) # a is the sigmoid activation function
a2_2 = a(z2_2)
a2_3 = a(z2_3)
a2 = [a2_1,a2_2,a2_3]
# Activations of layer 2
z3_1 = np.dot(w3_1,a2)+b3[0]
z3_2 = np.dot(w3_2,a2)+b3[1]
a3_1 = a(z3_1)
a3_2 = a(z3_2)
# The two outputs: class scores for the two categories
print(a3_1)
print(a3_2)
| back_propagation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SentencePiece and BPE
# ## Introduction to Tokenization
# In order to process text in neural network models it is first required to **encode** text as numbers with ids, since the tensor operations act on numbers. Finally, if the output of the network is to be words, it is required to **decode** the predicted tokens ids back to text.
#
# To encode text, the first decision that has to be made is to what level of graularity are we going to consider the text? Because ultimately, from these **tokens**, features are going to be created about them. Many different experiments have been carried out using *words*, *morphological units*, *phonemic units*, *characters*. For example,
#
# - Tokens are tricky. (raw text)
# - Tokens are tricky . ([words](https://arxiv.org/pdf/1301.3781))
# - Token s _ are _ trick _ y . ([morphemes](https://arxiv.org/pdf/1907.02423.pdf))
# - t oʊ k ə n z _ ɑː _ ˈt r ɪ k i. ([phonemes](https://www.aclweb.org/anthology/W18-5812.pdf), for STT)
# - T o k e n s _ a r e _ t r i c k y . ([character](https://www.aclweb.org/anthology/C18-1139/))
# But how to identify these units, such as words, is largely determined by the language they come from. For example, in many European languages a space is used to separate words, while in some Asian languages there are no spaces between words. Compare English and Mandarin.
#
# - Tokens are tricky. (original sentence)
# - 标记很棘手 (Mandarin)
# - Biāojì hěn jíshǒu (pinyin)
# - 标记 很 棘手 (Mandarin with spaces)
#
#
# So, the ability to **tokenize**, i.e. split text into meaningful fundamental units is not always straight-forward.
#
# Also, there are practical issues of how large our *vocabulary* of words, `vocab_size`, should be, considering memory limitations vs. coverage. A compromise may be need to be made between:
# * the finest-grained models employing characters which can be memory intensive and
# * more computationally efficient *subword* units such as [n-grams](https://arxiv.org/pdf/1712.09405) or larger units.
#
# In [SentencePiece](https://www.aclweb.org/anthology/D18-2012.pdf) unicode characters are grouped together using either a [unigram language model](https://www.aclweb.org/anthology/P18-1007.pdf) (used in this week's assignment) or [BPE](https://arxiv.org/pdf/1508.07909.pdf), **byte-pair encoding**. We will discuss BPE, since BERT and many of its variants use a modified version of BPE and its pseudocode is easy to implement and understand... hopefully!
# ## SentencePiece Preprocessing
# ### NFKC Normalization
# Unsurprisingly, even using unicode to initially tokenize text can be ambiguous, e.g.,
# Two visually identical strings built from different code points:
# U+00E9 (precomposed é) vs U+0065 U+0301 (e + combining acute accent).
eaccent = '\u00E9'
e_accent = '\u0065\u0301'
print(f'{eaccent} = {e_accent} : {eaccent == e_accent}')
# SentencePiece uses the Unicode standard normalization form, [NFKC](https://en.wikipedia.org/wiki/Unicode_equivalence), so this isn't an issue. Looking at our example from above but with normalization:
# +
from unicodedata import normalize
norm_eaccent = normalize('NFKC', '\u00E9')
norm_e_accent = normalize('NFKC', '\u0065\u0301')
print(f'{norm_eaccent} = {norm_e_accent} : {norm_eaccent == norm_e_accent}')
# -
# Normalization has actually changed the unicode code point (unicode unique id) for one of these two characters.
# +
def get_hex_encoding(s):
    """Return the hex code point of each character in s, space-separated."""
    return ' '.join(f'{ord(ch):#x}' for ch in s)

def print_string_and_encoding(s):
    """Print s alongside its hex code-point encoding."""
    print(f'{s} : {get_hex_encoding(s)}')
# -
for s in [eaccent, e_accent, norm_eaccent, norm_e_accent]:
print_string_and_encoding(s)
# This normalization has other side effects which may be considered useful such as converting curly quotes “ to " their ASCII equivalent. (<sup>*</sup>Although we *now* lose directionality of the quote...)
# ### Lossless Tokenization<sup>*</sup>
# SentencePiece also ensures that when you tokenize your data and detokenize your data the original position of white space is preserved. <sup>*</sup>However, tabs and newlines are converted to spaces, please try this experiment yourself later below.
# To ensure this **lossless tokenization**, SentencePiece replaces white space with _ (U+2581). So that a simple join of the tokens by replace underscores with spaces can restore the white space, even if there are consecutive symbols. But remember first to normalize and then replace spaces with _ (U+2581). As the following example shows.
s = 'Tokenization is hard.'
s_ = s.replace(' ', '\u2581')
s_n = normalize('NFKC', 'Tokenization is hard.')
print(get_hex_encoding(s))
print(get_hex_encoding(s_))
print(get_hex_encoding(s_n))
# So the special unicode underscore was replaced by the ASCII unicode. Reversing the order of the second and third operations, we see that the special unicode underscore was retained.
s = 'Tokenization is hard.'
sn = normalize('NFKC', 'Tokenization is hard.')
# Fixed: replace on the *normalized* string `sn`, not on `s` — the point of
# this cell is to reverse the order of the two operations (normalize first,
# then replace). For this ASCII input the output is identical either way,
# but the original code did not actually demonstrate the reversed order.
sn_ = sn.replace(' ', '\u2581')
print(get_hex_encoding(s))
print(get_hex_encoding(sn))
print(get_hex_encoding(sn_))
# ## BPE Algorithm
#
# Now that we have discussed the preprocessing that SentencePiece performs, we will go get our data, preprocess, and apply the BPE algorithm. We will show how this reproduces the tokenization produced by training SentencePiece on our example dataset (from this week's assignment).
#
# ### Preparing our Data
# First, we get our Squad data and process as above.
# +
import ast

def convert_json_examples_to_text(filepath):
    """Read newline-delimited dict literals from filepath, decode each byte
    'text' field as UTF-8, NFKC-normalize, and return the joined text.

    Side effect: writes the resulting text to 'example.txt'.
    """
    # Fixed: the original passed open(filepath) straight into map() and never
    # closed the handle; a context manager guarantees the file is released.
    with open(filepath) as fr:
        example_jsons = [ast.literal_eval(line) for line in fr]  # one dict per line
    texts = [example_json['text'].decode('utf-8') for example_json in example_jsons]  # decode the byte sequences
    text = '\n\n'.join(texts)  # separate different articles by two newlines
    text = normalize('NFKC', text)  # apply NFKC unicode normalization
    with open('example.txt', 'w') as fw:
        fw.write(text)
    return text
# -
text = convert_json_examples_to_text('./data/data.txt')
print(text[:900])
# In the algorithm the `vocab` variable is actually a frequency dictionary of the words. Further, those words have been prepended with an *underscore* to indicate that they are the beginning of a word. Finally, the characters have been delimited by spaces so that the BPE algorithm can group the most common characters together in the dictionary in a greedy fashion. We will see how that is done shortly.
# +
from collections import Counter
# Word frequencies, with U+2581 prepended to mark word beginnings (SentencePiece style).
vocab = Counter(['\u2581' + word for word in text.split()])
# Space-delimit the characters of each word so BPE can greedily merge adjacent symbols.
vocab = {' '.join([l for l in word]): freq for word, freq in vocab.items()}
# -
def show_vocab(vocab, end='\n', limit=20):
    """Show word frequencies in vocab, up to `limit` words."""
    shown = 0
    for word, freq in vocab.items():
        print(f'{word}: {freq}', end=end)
        shown += 1
        # Fixed off-by-one: the original tested `shown > limit` after the
        # increment, which printed limit + 1 entries.
        if shown >= limit:
            break
show_vocab(vocab)
# We check the size of the vocabulary (frequency dictionary) because this is the one hyperparameter that BPE depends on crucially on how far it breaks up a word into SentencePieces. It turns out that for our trained model on our small dataset that 60% of 455 merges of the most frequent characters need to be done to reproduce the upperlimit of a 32K `vocab_size` over the entire corpus of examples.
print(f'Total number of unique words: {len(vocab)}')
print(f'Number of merges required to reproduce SentencePiece training on the whole corpus: {int(0.60*len(vocab))}')
# ### BPE Algorithm
# Directly from the BPE paper we have the following algorithm.
# +
import re, collections
def get_stats(vocab):
    """Count the frequency of every adjacent symbol pair in *vocab*.

    Each key of *vocab* is a space-delimited sequence of symbols; each
    neighbouring pair is weighted by that word's frequency.
    """
    pair_counts = collections.defaultdict(int)
    for word, freq in vocab.items():
        symbols = word.split()
        for left, right in zip(symbols, symbols[1:]):
            pair_counts[(left, right)] += freq
    return pair_counts
def merge_vocab(pair, v_in):
    """Return a copy of *v_in* with every occurrence of *pair* merged.

    The two symbols of *pair* (appearing space-separated in the keys of
    *v_in*) are joined into a single symbol wherever they occur as whole,
    whitespace-bounded tokens.
    """
    spaced = re.escape(' '.join(pair))
    # Match the pair only at token boundaries: not preceded and not
    # followed by a non-whitespace character.
    pattern = re.compile(r'(?<!\S)' + spaced + r'(?!\S)')
    merged = ''.join(pair)
    return {pattern.sub(merged, word): freq for word, freq in v_in.items()}
def get_sentence_piece_vocab(vocab, frac_merges=0.60):
    """Run greedy BPE merges on *vocab* and return the resulting vocabulary.

    Performs ``int(len(vocab) * frac_merges)`` merge steps, each joining
    the currently most frequent adjacent symbol pair across all words.
    """
    sp_vocab = vocab.copy()
    num_merges = int(len(sp_vocab) * frac_merges)
    for _ in range(num_merges):
        pair_counts = get_stats(sp_vocab)
        best = max(pair_counts, key=pair_counts.get)
        sp_vocab = merge_vocab(best, sp_vocab)
    return sp_vocab
# -
# To understand what's going on first take a look at the third function `get_sentence_piece_vocab`. It takes in the current `vocab` word-frequency dictionary and the fraction, `frac_merges`, of the total `vocab_size` to merge characters in the words of the dictionary, `num_merges` times. Then for each *merge* operation it `get_stats` on how many of each pair of character sequences there are. It gets the most frequent *pair* of symbols as the `best` pair. Then it merges that pair of symbols (removes the space between them) in each word in the `vocab` that contains this `best` (= `pair`). Consequently, `merge_vocab` creates a new `vocab`, `v_out`. This process is repeated `num_merges` times and the result is the set of SentencePieces (keys of the final `sp_vocab`).
# ### Additional Discussion of BPE Algorithm
# Please feel free to skip the below if the above description was enough.
#
# In a little more detail then, we can see in `get_stats` we initially create a list of bigram (two character sequence) frequencies from our vocabulary. Later, this may include trigrams, quadgrams, etc. Note that the key of the `pairs` frequency dictionary is actually a 2-tuple, which is just shorthand notation for a pair.
#
# In `merge_vocab` we take in an individual `pair` (of character sequences, note this is the most frequency `best` pair) and the current `vocab` as `v_in`. We create a new `vocab`, `v_out`, from the old by joining together the characters in the pair (removing the space), if they are present in a word of the dictionary.
#
# [Note](https://regex101.com/): the expression `(?<!\S)` requires that the `bigram` be preceded by a whitespace character or by nothing at all (i.e. the beginning of the word); similarly, `(?!\S)` requires that the bigram be followed by whitespace or the end of the word.
sp_vocab = get_sentence_piece_vocab(vocab)
show_vocab(sp_vocab)
# ## Train SentencePiece BPE Tokenizer on Example Data
# ### Explore SentencePiece Model
# First let us explore the SentencePiece model provided with this week's assignment. Remember you can always use Python's built in `help` command to see the documentation for any object or method.
import sentencepiece as spm
sp = spm.SentencePieceProcessor(model_file='./data/sentencepiece.model')
# +
# help(sp)
# -
# Let's work with the first sentence of our example text.
s0 = 'Beginners BBQ Class Taking Place in Missoula!'
# +
# encode: text => id
print(sp.encode_as_pieces(s0))
print(sp.encode_as_ids(s0))
# decode: id => text
print(sp.decode_pieces(sp.encode_as_pieces(s0)))
print(sp.decode_ids([12847, 277]))
# -
# Notice how SentencePiece breaks the words into seemingly odd parts, but we've seen something similar from our work with BPE. But how close were we to this model trained on the whole corpus of examples with a `vocab_size` of 32,000 instead of 455? Here you can also test what happens to white space, like '\n'.
#
# But first let us note that SentencePiece encodes the SentencePieces, the tokens, and has reserved some of the ids as can be seen in this week's assignment.
# +
uid = 15068
spiece = "\u2581BBQ"
unknown = "__MUST_BE_UNKNOWN__"
# id <=> piece conversion
print(f'SentencePiece for ID {uid}: {sp.id_to_piece(uid)}')
print(f'ID for Sentence Piece {spiece}: {sp.piece_to_id(spiece)}')
# returns 0 for unknown tokens (we can change the id for UNK)
print(f'ID for unknown text {unknown}: {sp.piece_to_id(unknown)}')
# -
print(f'Beginning of sentence id: {sp.bos_id()}')
print(f'Pad id: {sp.pad_id()}')
print(f'End of sentence id: {sp.eos_id()}')
print(f'Unknown id: {sp.unk_id()}')
print(f'Vocab size: {sp.vocab_size()}')
# We can also check what are the ids for the first part and last part of the vocabulary.
# +
print('\nId\tSentP\tControl?')
print('------------------------')
# <unk>, <s>, </s> are defined by default. Their ids are (0, 1, 2)
# <s> and </s> are defined as 'control' symbol.
for uid in range(10):
print(uid, sp.id_to_piece(uid), sp.is_control(uid), sep='\t')
# for uid in range(sp.vocab_size()-10,sp.vocab_size()):
# print(uid, sp.id_to_piece(uid), sp.is_control(uid), sep='\t')
# -
# ### Train SentencePiece BPE model with our example.txt
# Finally, let's train our own BPE model directly from the SentencePiece library and compare it to the results of our implementation of the algorithm from the BPE paper itself.
# +
spm.SentencePieceTrainer.train('--input=example.txt --model_prefix=example_bpe --vocab_size=450 --model_type=bpe')
sp_bpe = spm.SentencePieceProcessor()
sp_bpe.load('example_bpe.model')
print('*** BPE ***')
print(sp_bpe.encode_as_pieces(s0))
# -
show_vocab(sp_vocab, end = ', ')
# Our implementation of BPE's code from the paper matches up pretty well with the library itself! The differences are probably accounted for by the `vocab_size`. There is also another technical difference in that in the SentencePiece implementation of BPE a priority queue is used to more efficiently keep track of the *best pairs*. Actually, there is a priority queue in the Python standard library called `heapq` if you would like to give that a try below!
# ## Optionally try to implement BPE using a priority queue below
from heapq import heappush, heappop
def heapsort(iterable):
    """Return a sorted list of *iterable*'s items using a binary heap."""
    heap = []
    for item in iterable:
        heappush(heap, item)
    return [heappop(heap) for _ in range(len(heap))]
a = [1,4,3,1,3,2,1,4,2]
heapsort(a)
# For a more extensive example consider looking at the [SentencePiece repo](https://github.com/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb). The last few sections of this code was repurposed from that tutorial. Thanks for your participation! Next stop BERT and T5!
| Week-3/Ungraded-Assignments/C4_W3_SentencePiece_and_BPE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/moviedatascience/DS-Unit-1-Sprint-3-Statistical-Tests-and-Experiments/blob/master/LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="11OzdxWTM7UR" colab_type="text"
# ## Assignment - Build a confidence interval
#
# A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.
#
# 52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.
#
# In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.
#
# But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.
#
# How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."
#
# For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.
#
# Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.
#
# Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):
#
#
# ### Confidence Intervals:
# 1. Generate and numerically represent a confidence interval
# 2. Graphically (with a plot) represent the confidence interval
# 3. Interpret the confidence interval - what does it tell you about the data and its distribution?
#
# ### Chi-squared tests:
# 4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data
# - By hand using Numpy
# - In a single line using Scipy
#
# + id="Ckcr4A4FM7cs" colab_type="code" colab={}
import pandas as pd
# + id="3cOUmw4obeH7" colab_type="code" outputId="482df674-fc9f-416f-f737-e270c7e0f731" colab={"base_uri": "https://localhost:8080/", "height": 224}
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
# + id="GQoyneT_biIp" colab_type="code" colab={}
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel
import scipy.stats
import seaborn as sns
from matplotlib import style
import numpy as np
# + id="JVwx4zVqbmkN" colab_type="code" outputId="899635bb-4385-49e1-833e-532124a184f8" colab={"base_uri": "https://localhost:8080/", "height": 275}
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
# + id="JM-j9ohBeffn" colab_type="code" outputId="e925b6bb-bc75-4f1b-9229-f9f0a3df1665" colab={"base_uri": "https://localhost:8080/", "height": 258}
#replacing the question marks with NaN
#I don't know what the n set to zero means though
#or why the Y is set to 1
df = df.replace({'?':np.NaN, 'n':0, 'y':1}) #can I drop here???
df.head()
# + id="SGRGrIIbbp_g" colab_type="code" outputId="13b03201-54a5-424e-df3f-5af18b94d1a1" colab={"base_uri": "https://localhost:8080/", "height": 323}
df.isnull().sum()
# + id="Bb4nTqs3euz0" colab_type="code" outputId="b3fe3cce-8d8a-4b65-e164-8cd72b9f98f2" colab={"base_uri": "https://localhost:8080/", "height": 275}
rep = df[df.party == 'republican']
print(rep.shape)
rep.head()
# + id="4WizENVSeyvD" colab_type="code" outputId="6a62b030-607f-474e-fbed-dab5d61f1997" colab={"base_uri": "https://localhost:8080/", "height": 275}
dem = df[df.party == 'democrat']
print(dem.shape)
dem.head()
# + id="Z2tYPFqke4eT" colab_type="code" outputId="3730f2e7-3c30-43bb-e3f4-08371f9bc871" colab={"base_uri": "https://localhost:8080/", "height": 68}
df.party.value_counts()
# + id="gSSs_vM6fC6A" colab_type="code" outputId="359a7b2e-ae6c-462e-af4e-32c9ffc63e60" colab={"base_uri": "https://localhost:8080/", "height": 306}
dem_mean = dem.mean()
dem.mean()
# + id="eZtoqIqLf7AF" colab_type="code" outputId="38ada639-36e9-41d7-eca4-9c847fc2ce94" colab={"base_uri": "https://localhost:8080/", "height": 306}
mean_rep = rep.mean()
mean_rep
# + id="AbpT085EhZ9E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ae91f443-f96c-4d3a-bcab-d9e9a134122c"
sample_size_rep = len(rep)
sample_size_dem = len(dem)
print(sample_size_dem)
print(sample_size_rep)
# + id="-3ZWbHR5S81S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="26884e9b-e067-4ce4-8670-747048197ff9"
# NOTE(review): ddof=16 is almost certainly wrong here — the unbiased
# sample standard deviation uses ddof=1 (Bessel's correction). Confirm
# the intent; 16 may be a confusion with the number of vote columns.
sample_std_dev_rep = np.std(rep, ddof=16)
sample_std_dev_dem = np.std(dem, ddof=16)
print(sample_std_dev_dem)
print(sample_std_dev_rep)
# + id="lh-fovUtUywM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="2c168f03-9423-42e3-d115-e19e71c3dda5"
standard_error_dem = sample_std_dev_dem / (sample_size_dem**.5)
standard_error_rep = sample_std_dev_rep / (sample_size_rep**.5)
print(standard_error_dem)
print(standard_error_rep)
# + id="qMOOeFr3YQXz" colab_type="code" colab={}
from scipy import stats
# + id="1x0XHH-odQ2z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9fa4d28d-326d-4431-e42c-00c868cab09f"
type('immigrant')
# + id="MqZxe2UqWi1K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="b7c1c661-a297-438e-8d18-dcea4e9afc03"
margin_of_error_dem = standard_error_dem * stats.t.ppf((1 + .95) / 2.0, sample_size_dem - 1)
margin_of_error_rep = standard_error_rep * stats.t.ppf((1 + .95) / 2.0, sample_size_rep - 1)
print(margin_of_error_dem)
print(margin_of_error_rep)
# + id="G720IJMspKPS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="0aaa84dc-bc10-42a6-c5d1-292d87a8a9ae"
dem = dem.dropna()
dem.isnull().sum()
# + id="cgg3HvkIlD8p" colab_type="code" colab={}
# dem_drop = dem.dropna()
# dem_drop.isnull().sum()
# + id="yJIt8KaFZN0K" colab_type="code" colab={}
def confidence_interval(data, confidence_level=0.95):
    """Compute a two-sided t-based confidence interval for the mean.

    Args:
        data: sequence of numeric observations.
        confidence_level: two-sided confidence level (default 0.95).

    Returns:
        Tuple of (sample mean, lower bound, upper bound).
    """
    data = np.array(data)  # ensure numpy vectorized ops work on any sequence
    sample_mean = np.mean(data)
    sample_size = len(data)
    # ddof=1 gives the unbiased sample standard deviation (Bessel's
    # correction); the original ddof=16 inflated the std for no reason.
    sample_std_dev = np.std(data, ddof=1)
    standard_error = sample_std_dev / (sample_size**.5)
    # t critical value for the two-sided interval with n-1 degrees of freedom.
    margin_of_error = standard_error * stats.t.ppf((1 + confidence_level) / 2.0, sample_size - 1)
    return (sample_mean, sample_mean - margin_of_error, sample_mean + margin_of_error)
# + id="0oHGnc1ynHql" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="12b2f20f-333a-4988-f68c-abe6c5b9519e"
dem.head()
# + id="CDwyJ8rAjri3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ee0f19fb-28c8-4498-96d2-d728e9b72e28"
confidence_interval(dem['budget'])
# + [markdown] id="UcTMDDr4peRs" colab_type="text"
#
# + id="NkdIMIqgl16Z" colab_type="code" colab={}
# + [markdown] id="Lx5u_GDZju_4" colab_type="text"
# ###Yikes.
# + [markdown] id="obvluuwKj2P1" colab_type="text"
# ###Why won't this run???
# + [markdown] id="4ohsJhQUmEuS" colab_type="text"
# ## Stretch goals:
#
# 1. Write a summary of your findings, mixing prose and math/code/results. *Note* - yes, this is by definition a political topic. It is challenging but important to keep your writing voice *neutral* and stick to the facts of the data. Data science often involves considering controversial issues, so it's important to be sensitive about them (especially if you want to publish).
# 2. Apply the techniques you learned today to your project data or other data of your choice, and write/discuss your findings here.
# 3. Refactor your code so it is elegant, readable, and can be easily run for all issues.
# + [markdown] id="nyJ3ySr7R2k9" colab_type="text"
# ## Resources
#
# - [Interactive visualize the Chi-Squared test](https://homepage.divms.uiowa.edu/~mbognar/applets/chisq.html)
# - [Calculation of Chi-Squared test statistic](https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test)
# - [Visualization of a confidence interval generated by R code](https://commons.wikimedia.org/wiki/File:Confidence-interval.svg)
# - [Expected value of a squared standard normal](https://math.stackexchange.com/questions/264061/expected-value-calculation-for-squared-normal-distribution) (it's 1 - which is why the expected value of a Chi-Squared with $n$ degrees of freedom is $n$, as it's the sum of $n$ squared standard normals)
| LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mm4sight
# language: python
# name: mm4sight
# ---
# +
from pomegranate import BayesianNetwork
import seaborn, time
import numpy
seaborn.set_style('whitegrid')
# Synthetic binary dataset: 2000 samples over 7 variables, where several
# columns are exact copies of others so the structure learner has obvious
# dependencies to recover (columns 3 and 6 copy 1; 0 copies 2; 4 copies 5).
X = numpy.random.randint(2, size=(2000, 7))
X[:,3] = X[:,1]
X[:,6] = X[:,1]
X[:,0] = X[:,2]
X[:,4] = X[:,5]
# Learn the network structure with exact (exponential-time) search.
model = BayesianNetwork.from_samples(X, algorithm='exact')
print(model.structure)
model.plot()
# +
import networkx

# Constraint graph: restricts which parent -> child links the exact
# structure search may consider. Nodes are 1-tuples of column indices.
G = networkx.DiGraph()
G.add_edge(tuple([3]), tuple([0]))
G.add_edge(tuple([3]), tuple([4]))
G.add_edge(tuple([3]), tuple([1]))
G.add_edge(tuple([2]), tuple([0]))
G.add_edge(tuple([6]), tuple([3]))
G.add_edge(tuple([5]), tuple([4]))
# Re-learn the structure under the constraint graph (prunes the search space).
model = BayesianNetwork.from_samples(X, algorithm='exact', constraint_graph=G)
model.plot()
# -
| server/exploratory/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] code_folding=[]
# # Goal
#
# * Simulating a fullCyc control gradient
# * Not simulating incorporation (all 0% isotope incorp.)
# * Don't know how much true incorporation there is in the empirical data
# * Using parameters inferred from TRIMMED empirical data (fullCyc seq data), or if not available, default SIPSim parameters
# * Determining whether simulated taxa show similar distribution to the empirical data
# -
# ## Input parameters
# * phyloseq.bulk file
# * taxon mapping file
# * list of genomes
# * fragments simulated for all genomes
# * bulk community richness
#
#
# ## workflow
#
# * Creating a community file from OTU abundances in bulk soil samples
# * phyloseq.bulk --> OTU table --> filter to sample --> community table format
# * Fragment simulation
# * simulated_fragments --> parse out fragments for target OTUs
# * simulated_fragments --> parse out fragments from random genomes to obtain richness of interest
# * combine fragment python objects
# * Convert fragment lists to kde object
# * Add diffusion
# * Make incorp config file
# * Add isotope incorporation
# * Calculating BD shift from isotope incorp
# * Simulating gradient fractions
# * Simulating OTU table
# * Simulating PCR
# * Subsampling from the OTU table
# ## Init
import os
import glob
import re
import nestly
# %load_ext rpy2.ipython
# %load_ext pushnote
# + language="R"
# library(ggplot2)
# library(dplyr)
# library(tidyr)
# library(gridExtra)
# library(phyloseq)
# -
# ### BD min/max
# + language="R"
# ## min G+C cutoff
# min_GC = 13.5
# ## max G+C cutoff
# max_GC = 80
# ## max G+C shift
# max_13C_shift_in_BD = 0.036
#
#
# min_BD = min_GC/100.0 * 0.098 + 1.66
# max_BD = max_GC/100.0 * 0.098 + 1.66
#
# max_BD = max_BD + max_13C_shift_in_BD
#
# cat('Min BD:', min_BD, '\n')
# cat('Max BD:', max_BD, '\n')
# -
# # Nestly
#
# * assuming fragments already simulated
# +
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/'
buildDir = os.path.join(workDir, 'rep3')
R_dir = '/home/nick/notebook/SIPSim/lib/R/'
fragFile= '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags.pkl'
nreps = 3
# +
# Build the nestly parameter tree: each `nest.add` registers a parameter;
# only 'rep' varies (create_dir defaults to True), producing one output
# directory per replicate. NOTE: `xrange` means this runs on a Python 2 kernel.
nest = nestly.Nest()
# varying params
nest.add('rep', [x + 1 for x in xrange(nreps)])
## set params (fixed; create_dir=False so they do not nest directories)
nest.add('abs', ['1e9'], create_dir=False)
nest.add('percIncorp', [0], create_dir=False)
nest.add('percTaxa', [0], create_dir=False)
nest.add('np', [2], create_dir=False)
nest.add('subsample_dist', ['lognormal'], create_dir=False)
nest.add('subsample_mean', [9.432], create_dir=False)
nest.add('subsample_scale', [0.5], create_dir=False)
nest.add('subsample_min', [10000], create_dir=False)
nest.add('subsample_max', [30000], create_dir=False)
### input/output files
nest.add('buildDir', [buildDir], create_dir=False)
nest.add('R_dir', [R_dir], create_dir=False)
nest.add('fragFile', [fragFile], create_dir=False)
nest.add('bandwidth', [0.6], create_dir=False)
nest.add('comm_params', ['mean:-7.6836085,sigma:0.9082843'], create_dir=False)
# materialize the directory tree with one control file per leaf
nest.build(buildDir)
# bash file to run in each leaf directory
bashFile = os.path.join(buildDir, 'SIPSimRun.sh')
# +
# %%writefile $bashFile
# #!/bin/bash
export PATH={R_dir}:$PATH
# echo '#-- SIPSim pipeline --#'
# echo '# converting fragments to KDE'
SIPSim fragment_KDE \
{fragFile} \
> ampFrags_KDE.pkl
# echo '# making a community file'
SIPSim KDE_info \
-t ampFrags_KDE.pkl \
> taxon_names.txt
SIPSim communities \
--abund_dist_p {comm_params} \
taxon_names.txt \
> comm.txt
# echo '# adding diffusion'
SIPSim diffusion \
ampFrags_KDE.pkl \
--bw {bandwidth} \
--np {np} \
> ampFrags_KDE_dif.pkl
# echo '# adding DBL contamination'
SIPSim DBL \
ampFrags_KDE_dif.pkl \
--bw {bandwidth} \
--np {np} \
> ampFrags_KDE_dif_DBL.pkl
# echo '# making incorp file'
SIPSim incorpConfigExample \
--percTaxa {percTaxa} \
--percIncorpUnif {percIncorp} \
> {percTaxa}_{percIncorp}.config
# echo '# adding isotope incorporation to BD distribution'
SIPSim isotope_incorp \
ampFrags_KDE_dif_DBL.pkl \
{percTaxa}_{percIncorp}.config \
--comm comm.txt \
--bw {bandwidth} \
--np {np} \
> ampFrags_KDE_dif_DBL_inc.pkl
# echo '# simulating gradient fractions'
SIPSim gradient_fractions \
comm.txt \
> fracs.txt
# echo '# simulating an OTU table'
SIPSim OTU_table \
ampFrags_KDE_dif_DBL_inc.pkl \
comm.txt \
fracs.txt \
--abs {abs} \
--np {np} \
> OTU_abs{abs}.txt
#-- w/ PCR simulation --#
# echo '# simulating PCR'
SIPSim OTU_PCR \
OTU_abs{abs}.txt \
> OTU_abs{abs}_PCR.txt
# echo '# subsampling from the OTU table (simulating sequencing of the DNA pool)'
SIPSim OTU_subsample \
--dist {subsample_dist} \
--dist_params mean:{subsample_mean},sigma:{subsample_scale} \
--min_size {subsample_min} \
--max_size {subsample_max} \
OTU_abs{abs}_PCR.txt \
> OTU_abs{abs}_PCR_sub.txt
# echo '# making a wide-formatted table'
SIPSim OTU_wideLong -w \
OTU_abs{abs}_PCR_sub.txt \
> OTU_abs{abs}_PCR_sub_w.txt
# echo '# making metadata (phyloseq: sample_data)'
SIPSim OTU_sampleData \
OTU_abs{abs}_PCR_sub.txt \
> OTU_abs{abs}_PCR_sub_meta.txt
#-- w/out PCR simulation --#
# echo '# subsampling from the OTU table (simulating sequencing of the DNA pool)'
SIPSim OTU_subsample \
--dist {subsample_dist} \
--dist_params mean:{subsample_mean},sigma:{subsample_scale} \
--min_size {subsample_min} \
--max_size {subsample_max} \
OTU_abs{abs}.txt \
> OTU_abs{abs}_sub.txt
# echo '# making a wide-formatted table'
SIPSim OTU_wideLong -w \
OTU_abs{abs}_sub.txt \
> OTU_abs{abs}_sub_w.txt
# echo '# making metadata (phyloseq: sample_data)'
SIPSim OTU_sampleData \
OTU_abs{abs}_sub.txt \
> OTU_abs{abs}_sub_meta.txt
# -
# !chmod 777 $bashFile
# !cd $workDir; \
# nestrun --template-file $bashFile -d rep3 --log-file log.txt -j 3
# %pushnote SIPsim rep3 complete
# # BD min/max
#
# * what is the min/max BD that we care about?
# + language="R"
# ## min G+C cutoff
# min_GC = 13.5
# ## max G+C cutoff
# max_GC = 80
# ## max G+C shift
# max_13C_shift_in_BD = 0.036
#
#
# min_BD = min_GC/100.0 * 0.098 + 1.66
# max_BD = max_GC/100.0 * 0.098 + 1.66
#
# max_BD = max_BD + max_13C_shift_in_BD
#
# cat('Min BD:', min_BD, '\n')
# cat('Max BD:', max_BD, '\n')
# -
# # Loading non-PCR subsampled OTU tables
# OTU_files = !find $buildDir -name "OTU_abs1e9_sub.txt"
OTU_files
# + magic_args="-i OTU_files" language="R"
# # loading files
#
# df.SIM = list()
# for (x in OTU_files){
# SIM_rep = gsub('/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep3/', '', x)
# SIM_rep = gsub('/OTU_abs1e9_sub.txt', '', SIM_rep)
# df.SIM[[SIM_rep]] = read.delim(x, sep='\t')
# }
# df.SIM = do.call('rbind', df.SIM)
# df.SIM$SIM_rep = gsub('\\.[0-9]+$', '', rownames(df.SIM))
# rownames(df.SIM) = 1:nrow(df.SIM)
# df.SIM %>% head(n=3)
# -
# # BD range where an OTU is detected
#
# * Do the simulated OTU BD distributions span the same BD range as the empirical data?
# comm_files = !find $buildDir -name "comm.txt"
comm_files
# + magic_args="-i comm_files" language="R"
#
# df.SIM.comm = list()
# for (x in comm_files){
# SIM_rep = gsub('/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep3/', '', x)
# SIM_rep = gsub('/comm.txt', '', SIM_rep)
# df.SIM.comm[[SIM_rep]] = read.delim(x, sep='\t')
# }
#
# df.SIM.comm = do.call(rbind, df.SIM.comm)
# df.SIM.comm$SIM_rep = gsub('\\.[0-9]+$', '', rownames(df.SIM.comm))
# rownames(df.SIM.comm) = 1:nrow(df.SIM.comm)
# df.SIM.comm = df.SIM.comm %>%
# rename('bulk_abund' = rel_abund_perc) %>%
# mutate(bulk_abund = bulk_abund / 100)
# df.SIM.comm %>% head(n=3)
# + magic_args="-w 800 -h 400" language="R"
# # Plotting the pre-fractionation abundances of each taxon
#
# df.SIM.comm.s = df.SIM.comm %>%
# group_by(taxon_name) %>%
# summarize(median_rank = median(rank),
# mean_abund = mean(bulk_abund),
# sd_abund = sd(bulk_abund))
#
# df.SIM.comm.s$taxon_name = reorder(df.SIM.comm.s$taxon_name, -df.SIM.comm.s$mean_abund)
#
# ggplot(df.SIM.comm.s, aes(taxon_name, mean_abund,
# ymin=mean_abund-sd_abund,
# ymax=mean_abund+sd_abund)) +
# geom_linerange(alpha=0.4) +
# geom_point(alpha=0.6, size=1.2) +
# scale_y_log10() +
# labs(x='taxon', y='Relative abundance', title='Pre-fractionation abundance') +
# theme_bw() +
# theme(
# text = element_text(size=16),
# axis.text.x = element_blank()
# )
# + language="R"
#
# ## joining SIP & comm (pre-fractionation)
# df.SIM.j = inner_join(df.SIM, df.SIM.comm, c('library' = 'library',
# 'taxon' = 'taxon_name',
# 'SIM_rep' = 'SIM_rep')) %>%
# filter(BD_mid >= min_BD,
# BD_mid <= max_BD)
#
# df.SIM.j %>% head(n=3)
# + language="R"
# # calculating BD range
# df.SIM.j.f = df.SIM.j %>%
# filter(count > 0) %>%
# group_by(SIM_rep) %>%
# mutate(max_BD_range = max(BD_mid) - min(BD_mid)) %>%
# ungroup() %>%
# group_by(SIM_rep, taxon) %>%
# summarize(mean_bulk_abund = mean(bulk_abund),
# min_BD = min(BD_mid),
# max_BD = max(BD_mid),
# BD_range = max_BD - min_BD,
# BD_range_perc = BD_range / first(max_BD_range) * 100) %>%
# ungroup()
#
# df.SIM.j.f %>% head(n=3) %>% as.data.frame
# + magic_args="-h 300 -w 550" language="R"
# ## plotting
# ggplot(df.SIM.j.f, aes(mean_bulk_abund, BD_range_perc, color=SIM_rep)) +
# geom_point(alpha=0.5, shape='O') +
# scale_x_log10() +
# scale_y_continuous() +
# labs(x='Pre-fractionation abundance', y='% of total BD range') +
# #geom_vline(xintercept=0.001, linetype='dashed', alpha=0.5) +
# theme_bw() +
# theme(
# text = element_text(size=16),
# panel.grid = element_blank(),
# legend.position = 'none'
# )
# -
# # Assessing diversity
# ### Asigning zeros
# + language="R"
# # giving value to missing abundances
# min.pos.val = df.SIM.j %>%
# filter(rel_abund > 0) %>%
# group_by() %>%
# mutate(min_abund = min(rel_abund)) %>%
# ungroup() %>%
# filter(rel_abund == min_abund)
#
# min.pos.val = min.pos.val[1,'rel_abund'] %>% as.numeric
# imp.val = min.pos.val / 10
#
#
# # convert numbers
# df.SIM.j[df.SIM.j$rel_abund == 0, 'abundance'] = imp.val
#
# # another closure operation
# df.SIM.j = df.SIM.j %>%
# group_by(SIM_rep, fraction) %>%
# mutate(rel_abund = rel_abund / sum(rel_abund))
#
#
# # status
# cat('Below detection level abundances converted to: ', imp.val, '\n')
# -
# ## Plotting Shannon diversity for each
# + language="R"
# shannon_index_long = function(df, abundance_col, ...){
# # calculating shannon diversity index from a 'long' formated table
# ## community_col = name of column defining communities
# ## abundance_col = name of column defining taxon abundances
# df = df %>% as.data.frame
# cmd = paste0(abundance_col, '/sum(', abundance_col, ')')
# df.s = df %>%
# group_by_(...) %>%
# mutate_(REL_abundance = cmd) %>%
# mutate(pi__ln_pi = REL_abundance * log(REL_abundance),
# shannon = -sum(pi__ln_pi, na.rm=TRUE)) %>%
# ungroup() %>%
# dplyr::select(-REL_abundance, -pi__ln_pi) %>%
# distinct_(...)
# return(df.s)
# }
# + magic_args="-w 800 -h 300" language="R"
# # calculating shannon
# df.SIM.shan = shannon_index_long(df.SIM.j, 'count', 'library', 'fraction') %>%
# filter(BD_mid >= min_BD,
# BD_mid <= max_BD)
#
# df.SIM.shan.s = df.SIM.shan %>%
# group_by(BD_bin = ntile(BD_mid, 24)) %>%
# summarize(mean_BD = mean(BD_mid),
# mean_shannon = mean(shannon),
# sd_shannon = sd(shannon))
#
# # plotting
# p = ggplot(df.SIM.shan.s, aes(mean_BD, mean_shannon,
# ymin=mean_shannon-sd_shannon,
# ymax=mean_shannon+sd_shannon)) +
# geom_pointrange() +
# labs(x='Buoyant density', y='Shannon index') +
# theme_bw() +
# theme(
# text = element_text(size=16),
# legend.position = 'none'
# )
# p
# -
# # Plotting variance
#
# + magic_args="-w 800 -h 350" language="R"
# df.SIM.j.var = df.SIM.j %>%
# group_by(SIM_rep, fraction) %>%
# mutate(variance = var(rel_abund)) %>%
# ungroup() %>%
# distinct(SIM_rep, fraction) %>%
# select(SIM_rep, fraction, variance, BD_mid)
#
# ggplot(df.SIM.j.var, aes(BD_mid, variance, color=SIM_rep)) +
# geom_point() +
# geom_line() +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# -
# #### Notes
#
# * spikes at low & high G+C
# * absence of taxa or presence of taxa at those locations?
# # Plotting absolute abundance distributions
# OTU_files = !find $buildDir -name "OTU_abs1e9.txt"
OTU_files
# + magic_args="-i OTU_files" language="R"
# # loading files
#
# df.abs = list()
# for (x in OTU_files){
# SIM_rep = gsub('/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep3/', '', x)
# SIM_rep = gsub('/OTU_abs1e9.txt', '', SIM_rep)
# df.abs[[SIM_rep]] = read.delim(x, sep='\t')
# }
# df.abs = do.call('rbind', df.abs)
# df.abs$SIM_rep = gsub('\\.[0-9]+$', '', rownames(df.abs))
# rownames(df.abs) = 1:nrow(df.abs)
# df.abs %>% head(n=3)
# + magic_args="-w 800 " language="R"
#
# ggplot(df.abs, aes(BD_mid, count, fill=taxon)) +
# geom_area(stat='identity', position='dodge', alpha=0.5) +
# labs(x='Buoyant density', y='Subsampled community\n(absolute abundance)') +
# facet_grid(SIM_rep ~ .) +
# theme_bw() +
# theme(
# text = element_text(size=16),
# legend.position = 'none',
# axis.title.y = element_text(vjust=1),
# axis.title.x = element_blank()
# )
# + magic_args="-w 800 " language="R"
#
# p1 = ggplot(df.abs %>% filter(BD_mid < 1.7), aes(BD_mid, count, fill=taxon, color=taxon)) +
# labs(x='Buoyant density', y='Subsampled community\n(absolute abundance)') +
# facet_grid(SIM_rep ~ .) +
# theme_bw() +
# theme(
# text = element_text(size=16),
# legend.position = 'none',
# axis.title.y = element_text(vjust=1),
# axis.title.x = element_blank()
# )
#
# p2 = p1 + geom_line(alpha=0.25) + scale_y_log10()
# p1 = p1 + geom_area(stat='identity', position='dodge', alpha=0.5)
#
# grid.arrange(p1, p2, ncol=2)
# + magic_args="-w 800 " language="R"
#
# p1 = ggplot(df.abs %>% filter(BD_mid > 1.72), aes(BD_mid, count, fill=taxon, color=taxon)) +
# labs(x='Buoyant density', y='Subsampled community\n(absolute abundance)') +
# facet_grid(SIM_rep ~ .) +
# theme_bw() +
# theme(
# text = element_text(size=16),
# legend.position = 'none',
# axis.title.y = element_text(vjust=1),
# axis.title.x = element_blank()
# )
#
#
# p2 = p1 + geom_line(alpha=0.25) + scale_y_log10()
# p1 = p1 + geom_area(stat='identity', position='dodge', alpha=0.5)
#
# grid.arrange(p1, p2, ncol=2)
# -
# # Conclusions
#
# * DBL is a bit too permissive
# * low-abundance taxa are spread out a bit more than in the empirical data
# * Variance spiking:
# * abundance distributions are too tight
# * empirical data variance suggests some extra unevenness in heavy fractions
# * some taxon DNA seems to be 'smeared' out into the heavy fractions
# * possible fixes:
# * more abundant, high G+C genomes
# * more diffusion
# * more 'smearing' into the heavy fractions
# * TODO:
# * determine what's changing in the empirical data between Days 1,3,6 & 14,30,48
| ipynb/bac_genome/fullCyc/trimDataset/.ipynb_checkpoints/rep3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Your First GAN
# ### Goal
# In this notebook, you're going to create your first generative adversarial network (GAN) for this course! Specifically, you will build and train a GAN that can generate hand-written images of digits (0-9). You will be using PyTorch in this specialization, so if you're not familiar with this framework, you may find the [PyTorch documentation](https://pytorch.org/docs/stable/index.html) useful. The hints will also often include links to relevant documentation.
#
# ### Learning Objectives
# 1. Build the generator and discriminator components of a GAN from scratch.
# 2. Create generator and discriminator loss functions.
# 3. Train your GAN and visualize the generated images.
#
# ## Getting Started
# You will begin by importing some useful packages and the dataset you will use to build and train your GAN. You are also provided with a visualizer function to help you investigate the images your GAN will create.
#
# +
# Core dependencies: PyTorch for the models, torchvision for the MNIST data
# and image grids, tqdm for progress bars, matplotlib for display.
import torch
from torch import nn
from tqdm.auto import tqdm
from torchvision import transforms
from torchvision.datasets import MNIST # Training dataset
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
torch.manual_seed(0) # Set for testing purposes, please do not change!
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):
    '''
    Visualize a batch of images: reshape the flat batch tensor to `size`,
    arrange up to `num_images` of them in a 5-wide grid, and display it.
    '''
    images = image_tensor.detach().cpu().view(-1, *size)
    grid = make_grid(images[:num_images], nrow=5)
    plt.imshow(grid.permute(1, 2, 0).squeeze())
    plt.show()
# -
# #### MNIST Dataset
# The training images your discriminator will be using is from a dataset called [MNIST](http://yann.lecun.com/exdb/mnist/). It contains 60,000 images of handwritten digits, from 0 to 9, like these:
#
# 
#
# You may notice that the images are quite pixelated -- this is because they are all only 28 x 28! The small size of its images makes MNIST ideal for simple training. Additionally, these images are also in black-and-white so only one dimension, or "color channel", is needed to represent them (more on this later in the course).
#
# #### Tensor
# You will represent the data using [tensors](https://pytorch.org/docs/stable/tensors.html). Tensors are a generalization of matrices: for example, a stack of three matrices with the amounts of red, green, and blue at different locations in a 64 x 64 pixel image is a tensor with the shape 3 x 64 x 64.
#
# Tensors are easy to manipulate and supported by [PyTorch](https://pytorch.org/), the machine learning library you will be using. Feel free to explore them more, but you can imagine these as multi-dimensional matrices or vectors!
#
# #### Batches
# While you could train your model after generating one image, it is extremely inefficient and leads to less stable training. In GANs, and in machine learning in general, you will process multiple images per training step. These are called batches.
#
# This means that your generator will generate an entire batch of images and receive the discriminator's feedback on each before updating the model. The same goes for the discriminator, it will calculate its loss on the entire batch of generated images as well as on the reals before the model is updated.
# ## Generator
# The first step is to build the generator component.
#
# You will start by creating a function to make a single layer/block for the generator's neural network. Each block should include a [linear transformation](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) to map to another shape, a [batch normalization](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm1d.html) for stabilization, and finally a non-linear activation function (you use a [ReLU here](https://pytorch.org/docs/master/generated/torch.nn.ReLU.html)) so the output can be transformed in complex ways. You will learn more about activations and batch normalization later in the course.
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_generator_block
def get_generator_block(input_dim, output_dim):
    '''
    Function for returning a block of the generator's neural network
    given input and output dimensions.
    Parameters:
        input_dim: the dimension of the input vector, a scalar
        output_dim: the dimension of the output vector, a scalar
    Returns:
        a generator neural network layer, with a linear transformation
        followed by a batch normalization and then a relu activation
    '''
    return nn.Sequential(
        # Linear map to the target width, batchnorm over the feature
        # dimension for training stability, then ReLU for non-linearity.
        nn.Linear(input_dim, output_dim),
        nn.BatchNorm1d(output_dim),
        nn.ReLU(inplace=True),
    )
# +
# Verify the generator block function
def test_gen_block(in_features, out_features, num_test=1000):
    """Sanity-check one generator block: structure, output shape, and spread."""
    block = get_generator_block(in_features, out_features)
    # Structure: exactly Linear -> BatchNorm1d -> ReLU
    assert len(block) == 3
    for layer, expected in zip(block, (nn.Linear, nn.BatchNorm1d, nn.ReLU)):
        assert type(layer) == expected
    # Shape and post-batchnorm standard deviation
    sample = torch.randn(num_test, in_features)
    result = block(sample)
    assert tuple(result.shape) == (num_test, out_features)
    assert 0.55 < result.std() < 0.65
test_gen_block(25, 12)
test_gen_block(15, 28)
print("Success!")
# -
# Now you can build the generator class. It will take 3 values:
#
# * The noise vector dimension
# * The image dimension
# * The initial hidden dimension
#
# Using these values, the generator will build a neural network with 5 layers/blocks. Beginning with the noise vector, the generator will apply non-linear transformations via the block function until the tensor is mapped to the size of the image to be outputted (the same size as the real images from MNIST). You will need to fill in the code for final layer since it is different than the others. The final layer does not need a normalization or activation function, but does need to be scaled with a [sigmoid function](https://pytorch.org/docs/master/generated/torch.nn.Sigmoid.html).
#
# Finally, you are given a forward pass function that takes in a noise vector and generates an image of the output dimension using your neural network.
#
# <details>
#
# <summary>
# <font size="3" color="green">
# <b>Optional hints for <code><font size="4">Generator</font></code></b>
# </font>
# </summary>
#
# 1. The output size of the final linear transformation should be im_dim, but remember you need to scale the outputs between 0 and 1 using the sigmoid function.
# 2. [nn.Linear](https://pytorch.org/docs/master/generated/torch.nn.Linear.html) and [nn.Sigmoid](https://pytorch.org/docs/master/generated/torch.nn.Sigmoid.html) will be useful here.
# </details>
#
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: Generator
class Generator(nn.Module):
    '''
    Generator Class
    Values:
        z_dim: the dimension of the noise vector, a scalar
        im_dim: the dimension of the images, fitted for the dataset used, a scalar
          (MNIST images are 28 x 28 = 784 so that is your default)
        hidden_dim: the inner dimension, a scalar
    '''
    def __init__(self, z_dim=10, im_dim=784, hidden_dim=128):
        super(Generator, self).__init__()
        # Build the neural network: four widening generator blocks, then a
        # plain linear map to pixel space.
        self.gen = nn.Sequential(
            get_generator_block(z_dim, hidden_dim),
            get_generator_block(hidden_dim, hidden_dim * 2),
            get_generator_block(hidden_dim * 2, hidden_dim * 4),
            get_generator_block(hidden_dim * 4, hidden_dim * 8),
            # Final layer: no batchnorm or ReLU -- map to im_dim and squash
            # outputs into (0, 1) with a sigmoid so they are valid pixels.
            nn.Linear(hidden_dim * 8, im_dim),
            nn.Sigmoid(),
        )
    def forward(self, noise):
        '''
        Function for completing a forward pass of the generator: Given a noise tensor,
        returns generated images.
        Parameters:
            noise: a noise tensor with dimensions (n_samples, z_dim)
        '''
        return self.gen(noise)
    # Needed for grading
    def get_gen(self):
        '''
        Returns:
            the sequential model
        '''
        return self.gen
# +
# Verify the generator class
def test_generator(z_dim, im_dim, hidden_dim, num_test=10000):
    """Validate the Generator: depth, output shape, and sigmoid-range stats."""
    gen = Generator(z_dim, im_dim, hidden_dim).get_gen()
    # Four blocks plus the final Linear and Sigmoid
    assert len(gen) == 6
    sample_noise = torch.randn(num_test, z_dim)
    fakes = gen(sample_noise)
    # Output shape matches the image dimension
    assert tuple(fakes.shape) == (num_test, im_dim)
    # Range and spread imply a bare sigmoid output layer
    assert fakes.max() < 1, "Make sure to use a sigmoid"
    assert fakes.min() > 0, "Make sure to use a sigmoid"
    assert fakes.min() < 0.5, "Don't use a block in your solution"
    assert fakes.std() > 0.05, "Don't use batchnorm here"
    assert fakes.std() < 0.15, "Don't use batchnorm here"
test_generator(5, 10, 20)
test_generator(20, 8, 24)
print("Success!")
# -
# ## Noise
# To be able to use your generator, you will need to be able to create noise vectors. The noise vector z has the important role of making sure the images generated from the same class don't all look the same -- think of it as a random seed. You will generate it randomly using PyTorch by sampling random numbers from the normal distribution. Since multiple images will be processed per pass, you will generate all the noise vectors at once.
#
# Note that whenever you create a new tensor using torch.ones, torch.zeros, or torch.randn, you either need to create it on the target device, e.g. `torch.ones(3, 3, device=device)`, or move it onto the target device using `torch.ones(3, 3).to(device)`. You do not need to do this if you're creating a tensor by manipulating another tensor or by using a variation that defaults the device to the input, such as `torch.ones_like`. In general, use `torch.ones_like` and `torch.zeros_like` instead of `torch.ones` or `torch.zeros` where possible.
#
# <details>
#
# <summary>
# <font size="3" color="green">
# <b>Optional hint for <code><font size="4">get_noise</font></code></b>
# </font>
# </summary>
#
# 1.
# You will probably find [torch.randn](https://pytorch.org/docs/master/generated/torch.randn.html) useful here.
# </details>
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_noise
def get_noise(n_samples, z_dim, device='cpu'):
    '''
    Function for creating noise vectors: Given the dimensions (n_samples, z_dim),
    creates a tensor of that shape filled with random numbers from the normal distribution.
    Parameters:
        n_samples: the number of samples to generate, a scalar
        z_dim: the dimension of the noise vector, a scalar
        device: the device type
    Returns:
        an (n_samples, z_dim) tensor of standard-normal noise on `device`
    '''
    # Create the tensor directly on the target device instead of moving it
    # afterwards -- avoids an extra host->device copy when device='cuda'.
    return torch.randn(n_samples, z_dim, device=device)
# +
# Verify the noise vector function
def test_get_noise(n_samples, z_dim, device='cpu'):
    """Check shape, distribution spread, and device placement of the noise."""
    noise = get_noise(n_samples, z_dim, device)
    assert tuple(noise.shape) == (n_samples, z_dim)
    # Std near 1 => consistent with a standard normal draw
    assert torch.abs(noise.std() - torch.tensor(1.0)) < 0.01
    assert str(noise.device).startswith(device)
test_get_noise(1000, 100, 'cpu')
if torch.cuda.is_available():
    test_get_noise(1000, 32, 'cuda')
print("Success!")
# -
# ## Discriminator
# The second component that you need to construct is the discriminator. As with the generator component, you will start by creating a function that builds a neural network block for the discriminator.
#
# *Note: You use leaky ReLUs to prevent the "dying ReLU" problem, which refers to the phenomenon where the parameters stop changing due to consistently negative values passed to a ReLU, which result in a zero gradient. You will learn more about this in the following lectures!*
#
#
# Rectified Linear Unit (ReLU) | Leaky ReLU
# :-------------------------:|:-------------------------:
#  | 
#
#
#
#
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_discriminator_block
def get_discriminator_block(input_dim, output_dim):
    '''
    Discriminator Block
    Function for returning a neural network of the discriminator given input and output dimensions.
    Parameters:
        input_dim: the dimension of the input vector, a scalar
        output_dim: the dimension of the output vector, a scalar
    Returns:
        a discriminator neural network layer, with a linear transformation
        followed by an nn.LeakyReLU activation with negative slope of 0.2
        (https://pytorch.org/docs/master/generated/torch.nn.LeakyReLU.html)
    '''
    return nn.Sequential(
        nn.Linear(input_dim, output_dim),
        # Leaky ReLU keeps a small gradient (slope 0.2) for negative inputs,
        # avoiding the "dying ReLU" problem in the discriminator.
        nn.LeakyReLU(0.2),
    )
# +
# Verify the discriminator block function
def test_disc_block(in_features, out_features, num_test=10000):
    """Check structure, shape, and leaky-slope statistics of one disc block."""
    block = get_discriminator_block(in_features, out_features)
    # Exactly Linear + LeakyReLU
    assert len(block) == 2
    sample = torch.randn(num_test, in_features)
    result = block(sample)
    # Output shape
    assert tuple(result.shape) == (num_test, out_features)
    # Negative outputs scaled to roughly 0.2 of the positive range
    slope_ratio = -result.min() / result.max()
    assert 0.1 < slope_ratio < 0.3
    assert 0.3 < result.std() < 0.5
test_disc_block(25, 12)
test_disc_block(15, 28)
print("Success!")
# -
# Now you can use these blocks to make a discriminator! The discriminator class holds 2 values:
#
# * The image dimension
# * The hidden dimension
#
# The discriminator will build a neural network with 4 layers. It will start with the image tensor and transform it until it returns a single number (1-dimension tensor) output. This output classifies whether an image is fake or real. Note that you do not need a sigmoid after the output layer since it is included in the loss function. Finally, to use your discriminator's neural network you are given a forward pass function that takes in an image tensor to be classified.
#
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: Discriminator
class Discriminator(nn.Module):
    '''
    Discriminator Class
    Values:
        im_dim: the dimension of the images, fitted for the dataset used, a scalar
            (MNIST images are 28x28 = 784 so that is your default)
        hidden_dim: the inner dimension, a scalar
    '''
    def __init__(self, im_dim=784, hidden_dim=128):
        super(Discriminator, self).__init__()
        self.disc = nn.Sequential(
            get_discriminator_block(im_dim, hidden_dim * 4),
            get_discriminator_block(hidden_dim * 4, hidden_dim * 2),
            get_discriminator_block(hidden_dim * 2, hidden_dim),
            # Final linear map down to one real/fake logit per image. No
            # sigmoid here: BCEWithLogitsLoss applies it inside the loss.
            nn.Linear(hidden_dim, 1),
        )
    def forward(self, image):
        '''
        Function for completing a forward pass of the discriminator: Given an image tensor,
        returns a 1-dimension tensor representing fake/real.
        Parameters:
            image: a flattened image tensor with dimension (im_dim)
        '''
        return self.disc(image)
    # Needed for grading
    def get_disc(self):
        '''
        Returns:
            the sequential model
        '''
        return self.disc
# +
# Verify the discriminator class
def test_discriminator(z_dim, hidden_dim, num_test=100):
    """Validate the Discriminator: depth, logit shape, and final layer kind."""
    disc = Discriminator(z_dim, hidden_dim).get_disc()
    # Three blocks plus the final linear layer
    assert len(disc) == 4
    sample = torch.randn(num_test, z_dim)
    result = disc(sample)
    # One logit per example
    assert tuple(result.shape) == (num_test, 1)
    # The last layer must be a bare Linear, not another block
    assert not isinstance(disc[-1], nn.Sequential)
test_discriminator(5, 10)
test_discriminator(20, 8)
print("Success!")
# -
# ## Training
# Now you can put it all together!
# First, you will set your parameters:
# * criterion: the loss function
# * n_epochs: the number of times you iterate through the entire dataset when training
# * z_dim: the dimension of the noise vector
# * display_step: how often to display/visualize the images
# * batch_size: the number of images per forward/backward pass
# * lr: the learning rate
# * device: the device type, here using a GPU (which runs CUDA), not CPU
#
# Next, you will load the MNIST dataset as tensors using a dataloader.
#
#
# +
# Set your parameters
criterion = nn.BCEWithLogitsLoss()  # sigmoid + binary cross-entropy in one numerically stable op
n_epochs = 200  # full passes over the dataset
z_dim = 64  # dimension of the noise vector fed to the generator
display_step = 500  # how often (in steps) to print losses and show images
batch_size = 128
lr = 0.00001
# Load MNIST dataset as tensors
# NOTE(review): download=False assumes the MNIST files already exist under
# '.'; set download=True when running on a fresh machine.
dataloader = DataLoader(
    MNIST('.', download=False, transform=transforms.ToTensor()),
    batch_size=batch_size,
    shuffle=True)
### DO NOT EDIT ###
device = 'cuda'
# -
# Now, you can initialize your generator, discriminator, and optimizers. Note that each optimizer only takes the parameters of one particular model, since we want each optimizer to optimize only one of the models.
# One Adam optimizer per model so each optimizer updates only that model's
# parameters -- the generator and discriminator are trained alternately.
gen = Generator(z_dim).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = Discriminator().to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
# Before you train your GAN, you will need to create functions to calculate the discriminator's loss and the generator's loss. This is how the discriminator and generator will know how they are doing and improve themselves. Since the generator is needed when calculating the discriminator's loss, you will need to call .detach() on the generator result to ensure that only the discriminator is updated!
#
# Remember that you have already defined a loss function earlier (`criterion`) and you are encouraged to use `torch.ones_like` and `torch.zeros_like` instead of `torch.ones` or `torch.zeros`. If you use `torch.ones` or `torch.zeros`, you'll need to pass `device=device` to them.
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_disc_loss
def get_disc_loss(gen, disc, criterion, real, num_images, z_dim, device):
    '''
    Return the loss of the discriminator given inputs.
    Parameters:
        gen: the generator model, which returns an image given z-dimensional noise
        disc: the discriminator model, which returns a single-dimensional prediction of real/fake
        criterion: the loss function, which should be used to compare
               the discriminator's predictions to the ground truth reality of the images
               (e.g. fake = 0, real = 1)
        real: a batch of real images
        num_images: the number of images the generator should produce,
                which is also the length of the real images
        z_dim: the dimension of the noise vector, a scalar
        device: the device type
    Returns:
        disc_loss: a torch scalar loss value for the current batch
    '''
    # 1) Sample standard-normal noise directly on the target device and
    #    generate a batch of fake images.
    noise = torch.randn(num_images, z_dim, device=device)
    fake = gen(noise)
    # 2) Score the fakes against an all-zeros ground truth. detach() stops
    #    gradients from flowing back into the generator, so this loss only
    #    ever updates the discriminator.
    disc_fake_pred = disc(fake.detach())
    fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred))
    # 3) Score the real batch against an all-ones ground truth.
    disc_real_pred = disc(real)
    real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred))
    # 4) Average the two halves (no concatenation, per the notebook's note).
    disc_loss = (fake_loss + real_loss) / 2
    return disc_loss
# +
def test_disc_reasonable(num_images=10):
    """Unit-test get_disc_loss with hand-computable stand-ins: criterion=torch.mul
    makes each 'loss' just prediction * label, so expected values are exact."""
    # Don't use explicit casts to cuda - use the device argument
    import inspect, re
    lines = inspect.getsource(get_disc_loss)
    assert (re.search(r"to\(.cuda.\)", lines)) is None
    assert (re.search(r"\.cuda\(\)", lines)) is None
    z_dim = 64
    # Case 1: fake preds are 0 and real preds are 1 -> averaged loss is 0.5
    gen = torch.zeros_like
    disc = lambda x: x.mean(1)[:, None]
    criterion = torch.mul # Multiply
    real = torch.ones(num_images, z_dim)
    disc_loss = get_disc_loss(gen, disc, criterion, real, num_images, z_dim, 'cpu')
    assert torch.all(torch.abs(disc_loss.mean() - 0.5) < 1e-5)
    # Case 2: both halves multiply out to zero
    gen = torch.ones_like
    criterion = torch.mul # Multiply
    real = torch.zeros(num_images, z_dim)
    assert torch.all(torch.abs(get_disc_loss(gen, disc, criterion, real, num_images, z_dim, 'cpu')) < 1e-5)
    # Case 3: constant +10 offset discriminator -> averaged loss mean is 5
    gen = lambda x: torch.ones(num_images, 10)
    disc = lambda x: x.mean(1)[:, None] + 10
    criterion = torch.mul # Multiply
    real = torch.zeros(num_images, 10)
    assert torch.all(torch.abs(get_disc_loss(gen, disc, criterion, real, num_images, z_dim, 'cpu').mean() - 5) < 1e-5)
    # Case 4: gradients must reach the discriminator weights.
    # NOTE(review): uses the module-level `lr` defined in the parameters cell.
    gen = torch.ones_like
    disc = nn.Linear(64, 1, bias=False)
    real = torch.ones(num_images, 64) * 0.5
    disc.weight.data = torch.ones_like(disc.weight.data) * 0.5
    disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
    criterion = lambda x, y: torch.sum(x) + torch.sum(y)
    disc_loss = get_disc_loss(gen, disc, criterion, real, num_images, z_dim, 'cpu').mean()
    disc_loss.backward()
    assert torch.isclose(torch.abs(disc.weight.grad.mean() - 11.25), torch.tensor(3.75))
def test_disc_loss(max_tests = 10):
    """Integration-test get_disc_loss on real MNIST batches: checks the loss
    magnitude, that the generator stays detached, and that disc weights move."""
    z_dim = 64
    gen = Generator(z_dim).to(device)
    gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
    disc = Discriminator().to(device)
    disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
    num_steps = 0
    for real, _ in dataloader:
        cur_batch_size = len(real)
        real = real.view(cur_batch_size, -1).to(device)
        ### Update discriminator ###
        # Zero out the gradient before backpropagation
        disc_opt.zero_grad()
        # Calculate discriminator loss
        disc_loss = get_disc_loss(gen, disc, criterion, real, cur_batch_size, z_dim, device)
        # With untrained models the BCE loss should sit near ln(2) ~= 0.693
        assert (disc_loss - 0.68).abs() < 0.05
        # Update gradients
        disc_loss.backward(retain_graph=True)
        # Check that they detached correctly
        assert gen.gen[0][0].weight.grad is None
        # Update optimizer
        old_weight = disc.disc[0][0].weight.data.clone()
        disc_opt.step()
        new_weight = disc.disc[0][0].weight.data
        # Check that some discriminator weights changed
        assert not torch.all(torch.eq(old_weight, new_weight))
        num_steps += 1
        if num_steps >= max_tests:
            break
test_disc_reasonable()
test_disc_loss()
print("Success!")
# -
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_gen_loss
def get_gen_loss(gen, disc, criterion, num_images, z_dim, device):
    '''
    Return the loss of the generator given inputs.
    Parameters:
        gen: the generator model, which returns an image given z-dimensional noise
        disc: the discriminator model, which returns a single-dimensional prediction of real/fake
        criterion: the loss function, which should be used to compare
               the discriminator's predictions to the ground truth reality of the images
               (e.g. fake = 0, real = 1)
        num_images: the number of images the generator should produce,
                which is also the length of the real images
        z_dim: the dimension of the noise vector, a scalar
        device: the device type
    Returns:
        gen_loss: a torch scalar loss value for the current batch
    '''
    # 1) Sample noise on the target device and generate fakes. No detach()
    #    here: gradients must flow back through the generator.
    noise = torch.randn(num_images, z_dim, device=device)
    fake = gen(noise)
    # 2) Ask the discriminator to score the fakes.
    disc_fake_pred = disc(fake)
    # 3) The generator wants its fakes classified as real (label = 1).
    gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))
    return gen_loss
# +
def test_gen_reasonable(num_images=10):
    """Unit-test get_gen_loss with identity/constant stand-ins so the expected
    loss tensor (and its (num_images, z_dim) shape) is exactly known."""
    # Don't use explicit casts to cuda - use the device argument
    import inspect, re
    lines = inspect.getsource(get_gen_loss)
    assert (re.search(r"to\(.cuda.\)", lines)) is None
    assert (re.search(r"\.cuda\(\)", lines)) is None
    z_dim = 64
    # Zero fakes through an identity disc with criterion=mul -> zero loss
    gen = torch.zeros_like
    disc = nn.Identity()
    criterion = torch.mul # Multiply
    gen_loss_tensor = get_gen_loss(gen, disc, criterion, num_images, z_dim, 'cpu')
    assert torch.all(torch.abs(gen_loss_tensor) < 1e-5)
    #Verify shape. Related to gen_noise parametrization
    assert tuple(gen_loss_tensor.shape) == (num_images, z_dim)
    # All-ones fakes -> loss of exactly 1 everywhere
    gen = torch.ones_like
    disc = nn.Identity()
    criterion = torch.mul # Multiply
    real = torch.zeros(num_images, 1)
    gen_loss_tensor = get_gen_loss(gen, disc, criterion, num_images, z_dim, 'cpu')
    assert torch.all(torch.abs(gen_loss_tensor - 1) < 1e-5)
    #Verify shape. Related to gen_noise parametrization
    assert tuple(gen_loss_tensor.shape) == (num_images, z_dim)
def test_gen_loss(num_images):
    """Integration-test get_gen_loss with real models: loss magnitude and
    that a backward + optimizer step actually changes generator weights."""
    z_dim = 64
    gen = Generator(z_dim).to(device)
    gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
    disc = Discriminator().to(device)
    disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
    gen_loss = get_gen_loss(gen, disc, criterion, num_images, z_dim, device)
    # Check that the loss is reasonable (near ln(2) for untrained models)
    assert (gen_loss - 0.7).abs() < 0.1
    gen_loss.backward()
    old_weight = gen.gen[0][0].weight.clone()
    gen_opt.step()
    new_weight = gen.gen[0][0].weight
    assert not torch.all(torch.eq(old_weight, new_weight))
test_gen_reasonable(10)
test_gen_loss(18)
print("Success!")
# -
# Finally, you can put everything together! For each epoch, you will process the entire dataset in batches. For every batch, you will need to update the discriminator and generator using their loss. Batches are sets of images that will be predicted on before the loss functions are calculated (instead of calculating the loss function after each image). Note that you may see a loss to be greater than 1, this is okay since binary cross entropy loss can be any positive number for a sufficiently confident wrong guess.
#
# It’s also often the case that the discriminator will outperform the generator, especially at the start, because its job is easier. It's important that neither one gets too good (that is, near-perfect accuracy), which would cause the entire model to stop learning. Balancing the two models is actually remarkably hard to do in a standard GAN and something you will see more of in later lectures and assignments.
#
# After you've submitted a working version with the original architecture, feel free to play around with the architecture if you want to see how different architectural choices can lead to better or worse GANs. For example, consider changing the size of the hidden dimension, or making the networks shallower or deeper by changing the number of layers.
#
# <!-- In addition, be warned that this runs very slowly on a CPU. One way to run this more quickly is to use Google Colab:
#
# 1. Download the .ipynb
# 2. Upload it to Google Drive and open it with Google Colab
# 3. Make the runtime type GPU (under “Runtime” -> “Change runtime type” -> Select “GPU” from the dropdown)
# 4. Replace `device = "cpu"` with `device = "cuda"`
# 5. Make sure your `get_noise` function uses the right device -->
#
# But remember, don’t expect anything spectacular: this is only the first lesson. The results will get better with later lessons as you learn methods to help keep your generator and discriminator at similar levels.
# You should roughly expect to see this progression. On a GPU, this should take about 15 seconds per 500 steps, on average, while on CPU it will take roughly 1.5 minutes:
# 
# +
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION:
cur_step = 0
mean_generator_loss = 0
mean_discriminator_loss = 0
test_generator = True # Whether the generator should be tested
gen_loss = False
error = False
for epoch in range(n_epochs):
    # Dataloader returns the batches
    for real, _ in tqdm(dataloader):
        cur_batch_size = len(real)
        # Flatten the batch of real images from the dataset
        real = real.view(cur_batch_size, -1).to(device)
        ### Update discriminator ###
        # Zero out the gradients before backpropagation
        disc_opt.zero_grad()
        # Calculate discriminator loss
        disc_loss = get_disc_loss(gen, disc, criterion, real, cur_batch_size, z_dim, device)
        # Update gradients
        disc_loss.backward(retain_graph=True)
        # Update optimizer
        disc_opt.step()
        # For testing purposes, to keep track of the generator weights
        if test_generator:
            old_generator_weights = gen.gen[0][0].weight.detach().clone()
        ### Update generator ###
        # 1) Clear any stale generator gradients.
        gen_opt.zero_grad()
        # 2) Compute the generator loss on a fresh batch of fakes.
        gen_loss = get_gen_loss(gen, disc, criterion, cur_batch_size, z_dim, device)
        # 3) Backprop through the generator and take an optimizer step.
        gen_loss.backward()
        gen_opt.step()
        # For testing purposes, to check that your code changes the generator weights
        if test_generator:
            try:
                assert lr > 0.0000002 or (gen.gen[0][0].weight.grad.abs().max() < 0.0005 and epoch == 0)
                assert torch.any(gen.gen[0][0].weight.detach().clone() != old_generator_weights)
            except:
                error = True
                print("Runtime tests have failed")
        # Keep track of the average discriminator loss
        mean_discriminator_loss += disc_loss.item() / display_step
        # Keep track of the average generator loss
        mean_generator_loss += gen_loss.item() / display_step
        ### Visualization code ###
        if cur_step % display_step == 0 and cur_step > 0:
            print(f"Epoch {epoch}, step {cur_step}: Generator loss: {mean_generator_loss}, discriminator loss: {mean_discriminator_loss}")
            fake_noise = get_noise(cur_batch_size, z_dim, device=device)
            fake = gen(fake_noise)
            show_tensor_images(fake)
            show_tensor_images(real)
            mean_generator_loss = 0
            mean_discriminator_loss = 0
        cur_step += 1
# | .ipynb_checkpoints/gans_mnist-checkpoint.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/junanote/test_deeplearning/blob/master/Notescale_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="h0hngdm-lCr2"
# Melody encoded as note codes; the letter appears to be the pitch and the
# digit the duration (8 = eighth note, 4 = quarter note) -- see code2idx below.
note_seq = ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4',
            'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4',
            'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4',
            'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4']
# + colab={"base_uri": "https://localhost:8080/"} id="sUbVe5Z5ur3d" outputId="07bc78c2-c44f-4c83-cf68-fdecc2ee0cae"
note_seq[0:5]
# + colab={"base_uri": "https://localhost:8080/"} id="WyoJyu0on4qO" outputId="c036ee29-0d3b-43f2-bea2-3683e533af8b"
# Sliding 5-note windows: the first 4 notes of each window become the input,
# the 5th note is the prediction target.
note_seq[0:5], note_seq[1:6], note_seq[2:7]
# + id="qj4Wq-xtldr-"
# Vocabulary: map each note code to an integer index (0..13).
code2idx = {'c4':0, 'd4':1, 'e4':2, 'f4':3, 'g4':4, 'a4':5, 'b4':6,
            'c8':7, 'd8':8, 'e8':9, 'f8':10, 'g8':11, 'a8':12, 'b8':13}
# + [markdown] id="raH_tbYISnl0"
#
# + colab={"base_uri": "https://localhost:8080/"} id="iAZhHoxLlg68" outputId="043eeb40-8508-4658-8a25-609e0a3e34eb"
len(note_seq),range(len(note_seq)-5) #[5, 10, 15.....]
# + colab={"base_uri": "https://localhost:8080/"} id="pv-t9fzwUkFs" outputId="97175c7b-df46-4ba4-b8e4-cbd2273641de"
code2idx['g8']
# + id="DdKhVcXgnjTC"
# Build the training windows: every length-5 slice of the melody, encoded
# as integer indices via code2idx.
# NOTE(review): range(len(note_seq)-5) skips the final window; using
# len(note_seq)-4 would include it -- confirm whether that was intended.
dataset = list()
for i in range(len(note_seq)-5):
    subset = note_seq[i:i+5]
    items = list()
    print(subset)
    for item in subset:
        #print(code2idx[item])
        items.append(code2idx[item])
    #print(items)
    dataset.append(items)
print(dataset)
# + id="0ofpkAo3V1m6"
import numpy as np
datasets = np.array(dataset)
# + id="kvQTGdLHoyyS" colab={"base_uri": "https://localhost:8080/"} outputId="b379c505-c6cb-4dba-ac3c-f46b992bd0de"
# Inputs: the first 4 notes of each window.
x_train = datasets[:,0:4]
x_train.shape, #x_train
# + colab={"base_uri": "https://localhost:8080/"} id="AVxOfIwRWMR2" outputId="067ddfd5-1cde-4ded-9e6b-c2d4e718e445"
# Target: the 5th note of each window.
y_train = datasets[:,4]
y_train.shape,
# + colab={"base_uri": "https://localhost:8080/"} id="sz9vt8RPWWC0" outputId="f6a237e0-5da5-4872-dd4c-171cc89c3511"
len(code2idx)
# + colab={"base_uri": "https://localhost:8080/"} id="19SZDjnlXSEI" outputId="d0a561cd-224e-49c2-cf2c-6fefe548880b"
x_train = x_train / 13 # normalize to [0, 1] by dividing by the maximum index (13); len(code2idx) is 14
x_train[4]
# + [markdown] id="8bLSykxYXs7B"
# # make model
# + id="sMoaFISIXcW2"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="Z-EfOqqBZIjp" outputId="6eb93fe0-2be8-41d8-84f5-f6c0286b8e9e"
x_train.shape, x_train[2] # still a plain, scaled NumPy array; the LSTM needs a (rows, timesteps, 1) tensor
# + colab={"base_uri": "https://localhost:8080/"} id="musvhbyT0xIW" outputId="a838ef49-10df-41bd-df60-ea23faacc781"
X_train = np.reshape(x_train,(-1, 4, 1)) # add the feature axis: (samples, 4 timesteps, 1 feature)
X_train.shape, X_train[2]
# + colab={"base_uri": "https://localhost:8080/"} id="7Kc3XyvLgld1" outputId="78c7b40e-a53d-4a21-8f60-b6d47fd58390"
np.unique(y_train) # distinct target classes actually present in this score
# + id="emJQfDnwX8ef" colab={"base_uri": "https://localhost:8080/"} outputId="9a9820f7-a45b-47ca-be49-6130d6db48cf"
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(4,1)))
model.add(tf.keras.layers.LSTM(128)) # input/hidden layer; time-series input shaped (batch_size, timesteps, input_dim) -> (rows, cols, 1)
model.add(tf.keras.layers.Dense(13, activation='softmax')) # output layer; NOTE(review): 13 < len(code2idx)=14 -- OK only because index 13 ('b8') never occurs in note_seq
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc']) # sparse CE: integer labels, no one-hot needed
# + colab={"base_uri": "https://localhost:8080/"} id="TuUoA21mi0EO" outputId="7690a0d2-bca0-4cd1-e768-3cec27d74299"
hist = model.fit(X_train, y_train, epochs=100, batch_size=5)
# + [markdown] id="M53Nq-LPlqNY"
# # evaluate
# + colab={"base_uri": "https://localhost:8080/"} id="sGPSHcZEjzDp" outputId="d84d3c38-5189-4d9b-c122-ceb3547728ac"
model.evaluate(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="bov-4aOgryyo" outputId="7faca0b0-fd1b-47b8-f0f2-ebcd108f0f7d"
X_train[0:1]
# + id="8AaPDWSvr67Z"
# A hand-entered 4-note window (already scaled by /13) for a manual prediction.
first = 0.84615385
second = 0.69230769
third = 0.15384615
fourth = 0.76923077
# + colab={"base_uri": "https://localhost:8080/"} id="nDXhO0kArgiD" outputId="dee264a8-0a65-4a1f-8377-2e3cee230bad"
pred = model.predict([[[first],[second],[third],[fourth]]])
pred
# + id="23DKKbhqteUX"
pred = model.predict(X_train[0:1])
# + colab={"base_uri": "https://localhost:8080/"} id="l_QT9FKOr5R2" outputId="d5254663-dd09-4464-87ab-c16f79bc0df6"
# Most probable next-note index under the model.
np.argmax(pred)
# + id="epcqwAk21dQ9"
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="W5dre1D-4_3h" outputId="4a48f2ce-2469-4204-de18-ec5cee2a9791"
# Training curves: accuracy (default color) and loss (red).
plt.plot(hist.history['acc'])
plt.plot(hist.history['loss'],'r-')
plt.show()
# + id="Y0fFFsWK5B0M"
# + id="ojGLA1Ag5EPk"
| Notescale_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0m2JWFliFfKT" colab_type="code" colab={}
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# + id="h_Cx9q2QFgM7" colab_type="code" colab={}
class Net(nn.Module):
    """All-convolutional MNIST classifier: 1x28x28 input, 10 log-probabilities out."""

    def __init__(self):
        super(Net, self).__init__()
        # 28x28 stage: two padded 3x3 convs, then 2x2 max-pool -> 14x14
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        # 14x14 stage -> 7x7
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, 3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        # unpadded 3x3 convs shrink 7x7 -> 5x5 -> 3x3 -> 1x1 with 10 channels
        self.conv5 = nn.Conv2d(256, 512, 3)
        self.conv6 = nn.Conv2d(512, 1024, 3)
        self.conv7 = nn.Conv2d(1024, 10, 3)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv2(F.relu(self.conv1(x)))))
        x = self.pool2(F.relu(self.conv4(F.relu(self.conv3(x)))))
        x = F.relu(self.conv6(F.relu(self.conv5(x))))
        # NOTE: the ReLU on the final conv clamps logits to >= 0; kept as in
        # the original architecture.
        x = F.relu(self.conv7(x))
        x = x.view(-1, 10)
        # Fix: the original called F.log_softmax(x) with no dim argument —
        # implicit-dim log_softmax is deprecated and ambiguous; the class
        # dimension is dim=1 here.
        return F.log_softmax(x, dim=1)
# + colab_type="code" id="xdydjYTZFyi3" colab={}
# !pip install torchsummary
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = Net().to(device)
# Layer-by-layer shape/parameter summary for a single-channel 28x28 input.
summary(model, input_size=(1, 28, 28))
# + id="DqTWLaM5GHgH" colab_type="code" colab={}
torch.manual_seed(1)  # fixed seed for repeatable runs
batch_size = 128
# pinned memory / worker processes only pay off when feeding a GPU
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# MNIST loaders normalized with the dataset's mean/std (0.1307, 0.3081).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)
# + id="8fDefDhaFlwH" colab_type="code" colab={}
from tqdm import tqdm
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch, showing the live batch loss in a tqdm bar."""
    model.train()
    progress = tqdm(train_loader)
    for step, (inputs, labels) in enumerate(progress):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        # forward, negative-log-likelihood loss, backward, parameter update
        logits = model(inputs)
        batch_loss = F.nll_loss(logits, labels)
        batch_loss.backward()
        optimizer.step()
        progress.set_description(desc=f'loss={batch_loss.item()} batch_id={step}')
def test(model, device, test_loader):
    """Evaluate on test_loader and print the average NLL loss and accuracy."""
    model.eval()
    total_loss = 0
    hits = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            # accumulate the summed batch loss, normalized at the end
            total_loss += F.nll_loss(logits, labels, reduction='sum').item()
            # index of the max log-probability == predicted class
            guesses = logits.argmax(dim=1, keepdim=True)
            hits += guesses.eq(labels.view_as(guesses)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, hits, len(test_loader.dataset),
        100. * hits / len(test_loader.dataset)))
# + id="MMWbLWO6FuHb" colab_type="code" colab={}
# Fresh model + SGD with momentum; range(1, 2) runs a single epoch (epoch 1).
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(1, 2):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
# + id="So5uk4EkHW6R" colab_type="code" colab={}
| Session 2/Copy of EVA4 - Session 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Polynomial Regression
# ## Libraries are helpful
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
# Fix: LinearRegression lives in sklearn.linear_model, not sklearn.tree
# (the original import raised ImportError).
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
# ## Importing Data
# Path of the file to read
data_path = '../path/to/data.csv'
the_data = pd.read_csv(data_path)
test_data_path = '../path/to/test.csv'
test_data = pd.read_csv(test_data_path)
# ## Checking initial column names
the_data.columns
# ## Checking the first few rows of the data
the_data.head()
# ## Selecting the target variable
y = the_data["Target"]
# ## Setting up the feature list that are used to predict the target
# +
# Create the list of features below
feature_names = ["Feature_1","Feature_2","Feature_3"]
# Select data corresponding to features in feature_names
X = the_data[feature_names]
# -
# ## Reviewing the features
# +
# print description or statistics from X
print(X.describe())
# print the top few lines
print(X.head())
# -
# ## Splitting the data into training and validation sets
# train_test_split defaults to a 75/25 split; the fixed seed makes it reproducible.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 7)
# ## Making the model
# +
# Creating the polynomial model: expand the features to degree-4 polynomial
# terms and fit an ordinary linear regression on the expanded features.
poly_reg = PolynomialFeatures(degree = 4)
# Fix: the original computed X_poly but then fit on the raw train_X, so the
# polynomial expansion was never used. Fit the expansion on the training
# split and reuse the SAME transformer for validation/test data.
train_X_poly = poly_reg.fit_transform(train_X)
val_X_poly = poly_reg.transform(val_X)
# Fix: LinearRegression is deterministic and has no random_state parameter
# (passing one raises TypeError).
model = LinearRegression()
# Fit the model
model.fit(train_X_poly, train_y)
# -
# ## Making predictions and checking how well the model did
val_predictions = model.predict(val_X_poly)
#print(val_predictions[0:20])
print(mean_absolute_error(val_y, val_predictions))
# ## Making test predictions and the output dataframe
# Fix: X_test was previously undefined (NameError); the test features must go
# through the same polynomial expansion as the training data.
X_test = poly_reg.transform(test_data[feature_names])
predictions = model.predict(X_test)
output = pd.DataFrame({"ID": test_data.ID, "Target": predictions})
# +
## Visualizing the linear regression and the data
# -
import matplotlib.pyplot as plt
# Fix: the original plotted `regressor` and `X_train`, which are never defined
# in this notebook, and scattered the multi-column X against y (shape
# mismatch). Plot actual vs. predicted values on the validation split instead:
# a perfect model would place every point on the diagonal reference line.
plt.scatter(val_y, val_predictions, color = 'red')
lims = [val_y.min(), val_y.max()]
plt.plot(lims, lims, color = 'blue')
plt.title('Title of Plot')
plt.xlabel('Actual target')
plt.ylabel('Predicted target')
plt.show()
# # References
# #### Machine Learning A-Z™: Hands-On Python & R In Data Science on Udemy: https://www.udemy.com/course/machinelearning/
#
| Regressions/polinomial-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
"""extract_faces.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1n94ly5DXYIAb4UqV0DOZn9QKlqKTH1-y
"""
from cv2 import imread
from cv2 import waitKey
from cv2 import destroyAllWindows
from cv2 import CascadeClassifier
from cv2 import rectangle
import cv2
import cv2
import numpy as np
# load the pre-trained Haar-cascade frontal-face model shipped with OpenCV
classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# load the photograph
pixels = cv2.imread("photo.jpg")
# detect faces; recs is an array of (x, y, w, h) bounding boxes
recs = classifier.detectMultiScale(pixels)
print( dict(np.ndenumerate(recs)))
# -
# Package the detected face boxes as a JSON-friendly dict
# ({'box0': box, 'box1': box, ...}).
# Fix: the original iterated `bboxes`, which is never defined — the
# detections live in `recs` from the cell above.
ret = {}
cnt = 0
for box in recs:
    ret['box'+str(cnt)] = box
    cnt+=1
print(ret)
| api/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="j96UhgkICKpl" executionInfo={"status": "ok", "timestamp": 1641234337571, "user_tz": -180, "elapsed": 295, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from sklearn import datasets, metrics
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="PplxFVMzCdGl" executionInfo={"status": "ok", "timestamp": 1641234338768, "user_tz": -180, "elapsed": 9, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="a69e185d-507d-43db-ef71-1ba96a451e95"
# Cleveland heart-disease data; the last column is the diagnosis
# (0 = none, 1-4 = disease severity) — TODO confirm against the sheet.
heart_disease = pd.read_excel('Processed_Cleveland.xlsx')
heart_disease
# + id="h6Rd2naoClaZ" executionInfo={"status": "ok", "timestamp": 1641234340243, "user_tz": -180, "elapsed": 4, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}}
# Define the features and the outcome
X = heart_disease.iloc[:, :13]
y = heart_disease.iloc[:, 13]
# Replace missing values (marked by ?) with a 0
X = X.replace(to_replace='?', value=0)
# Binarize y so that 1 means heart disease diagnosis and 0 means no diagnosis.
# In the Cleveland data the raw target is 0 (healthy) or 1-4 (disease), so any
# positive value maps to 1. Fix: the original np.where(y > 0, 0, 1) had the
# branches swapped, inverting the labels relative to this comment. (The ARI
# scores below are invariant to a label swap, so earlier results still hold.)
y = np.where(y > 0, 1, 0)
# + id="sK9EyjZ8DLpw" executionInfo={"status": "ok", "timestamp": 1641234341743, "user_tz": -180, "elapsed": 3, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}}
# Standardize features to zero mean / unit variance before distance-based clustering.
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
# + [markdown] id="CAiUNl2PEuU-"
# # Linkages
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="FVg2JD5AE5xt" executionInfo={"status": "ok", "timestamp": 1641234352179, "user_tz": -180, "elapsed": 7097, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="490ec62c-efc4-4867-9b31-91a7c2ca8437"
# Dendrograms for three hierarchical-clustering linkage strategies.
# Average Linkage
plt.figure(figsize=(25,10))
dendrogram(linkage(X_std, method='average'))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="LETE4IweD4bo" executionInfo={"status": "ok", "timestamp": 1641234359174, "user_tz": -180, "elapsed": 7000, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="39ec172e-f880-4443-ced0-f13111eab5ef"
# Complete Linkage
plt.figure(figsize=(25,10))
dendrogram(linkage(X_std, method='complete'))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="YwrUNc-dEL-J" executionInfo={"status": "ok", "timestamp": 1641234365554, "user_tz": -180, "elapsed": 6487, "user": {"displayName": "<NAME>00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="2f7671ea-0d13-4d93-a10f-ab079bd463a5"
# Ward Linkage
plt.figure(figsize=(25,10))
dendrogram(linkage(X_std, method='ward'))
plt.show()
# + [markdown] id="36OayY7gSyeo"
# The ward linkage seems to be the best linkage, but anyway, we'll check the metrics below.
# + [markdown] id="WKgBCPVTGBEg"
# # ARI & Silhouette Scores
# + [markdown] id="cQuea7K8RQaJ"
# Recall that for 2 clusters in the previous lesson we had;
#
#
# * ARI score:
# 0.7453081340344547
# * Silhouette Score:
# 0.1757847117726187
#
# + [markdown] id="BwsZquWNOpL8"
# **For complete linkage**
# + id="8n62Ejn4De_g" executionInfo={"status": "ok", "timestamp": 1641235941586, "user_tz": -180, "elapsed": 307, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}}
# Define the agglomerative clustering model (complete linkage, cosine distance)
agg_küme = AgglomerativeClustering(linkage='complete',
                                   affinity='cosine',
                                   n_clusters=2)
# Fit the model and get the cluster assignments
kümeler = agg_küme.fit_predict(X_std)
# + colab={"base_uri": "https://localhost:8080/"} id="Zyo4bOi7G4eu" executionInfo={"status": "ok", "timestamp": 1641235942793, "user_tz": -180, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="f4829b6d-d477-41ff-b3b7-ea2684cc28d0"
# The first printed message is Turkish for "Adjusted Rand Index of the
# agglomerative clustering results"; the runtime strings are left unchanged.
print("Yığınsal Kümeleme Sonuçlarının Ayarlanmış Rand Endeksi: {}"
      .format(metrics.adjusted_rand_score(y, kümeler)))
print("The silhoutte score of the Agglomerative Clustering solution: {}"
      .format(metrics.silhouette_score(X_std, kümeler, metric='cosine')))
# + [markdown] id="dpexJF_PRoCg"
# ARI score is lower, whereas silhouette score is greater now.
# + colab={"base_uri": "https://localhost:8080/", "height": 796} id="x7WSA01QDglT" executionInfo={"status": "ok", "timestamp": 1641234455969, "user_tz": -180, "elapsed": 2593, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="f5bc252d-7c27-4f8a-ddf9-e085ee44f32d"
# Project to 2-D with PCA and draw each sample as its cluster label,
# colored by the true diagnosis (colours[y[i]]).
pca = PCA(n_components=2).fit_transform(X_std)
plt.figure(figsize=(10,5))
colours = 'rbg'
for i in range(pca.shape[0]):
    plt.text(pca[i, 0], pca[i, 1], str(kümeler[i]),
             color=colours[y[i]],
             fontdict={'weight': 'bold', 'size': 50}
             )
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.show()
# + [markdown] id="PNX5iYPvOu8S"
# **For average linkage**
# + executionInfo={"status": "ok", "timestamp": 1641235950317, "user_tz": -180, "elapsed": 278, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} id="4OVsbHD7NzbV"
# Define the agglomerative clustering model (average linkage, cosine distance)
agg_küme = AgglomerativeClustering(linkage='average',
                                   affinity='cosine',
                                   n_clusters=2)
# Fit the model and get the cluster assignments
kümeler = agg_küme.fit_predict(X_std)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641235951745, "user_tz": -180, "elapsed": 3, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="34953cc8-8ed4-4af0-ab54-fe34ec4dc0d4" id="UhlsdNsANzbW"
# ARI (label agreement with the diagnosis) and silhouette (cluster separation).
print("Yığınsal Kümeleme Sonuçlarının Ayarlanmış Rand Endeksi: {}"
      .format(metrics.adjusted_rand_score(y, kümeler)))
print("The silhoutte score of the Agglomerative Clustering solution: {}"
      .format(metrics.silhouette_score(X_std, kümeler, metric='cosine')))
# + [markdown] id="iIwOc0DCRyM0"
# ARI score is lower, whereas silhouette score is greater now.
# Also, this looks like the best linkage method among 3.
# + colab={"base_uri": "https://localhost:8080/", "height": 796} executionInfo={"status": "ok", "timestamp": 1641234787790, "user_tz": -180, "elapsed": 2413, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="4fc3c71c-0d2f-4c9d-e604-4c33463e4d2e" id="daSrSLheNzbV"
# Same 2-D PCA visualization for the average-linkage clusters.
pca = PCA(n_components=2).fit_transform(X_std)
plt.figure(figsize=(10,5))
colours = 'rbg'
for i in range(pca.shape[0]):
    plt.text(pca[i, 0], pca[i, 1], str(kümeler[i]),
             color=colours[y[i]],
             fontdict={'weight': 'bold', 'size': 50}
             )
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.show()
# + [markdown] id="TIbal-CJOyjr"
# **For ward linkage**
# + executionInfo={"status": "ok", "timestamp": 1641235955196, "user_tz": -180, "elapsed": 339, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} id="lQySjq1bN4Sy"
# Define the agglomerative clustering model (ward linkage; ward requires
# euclidean distances).
agg_küme = AgglomerativeClustering(linkage='ward',
                                   affinity='euclidean',
                                   n_clusters=2)
# Fit the model and get the cluster assignments
kümeler = agg_küme.fit_predict(X_std)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641235961629, "user_tz": -180, "elapsed": 266, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="01bfc52e-5e40-4057-efda-d1ec8c61fa76" id="jSLFdntIN4Sz"
# ARI (label agreement with the diagnosis) and silhouette (cluster separation).
print("Yığınsal Kümeleme Sonuçlarının Ayarlanmış Rand Endeksi: {}"
      .format(metrics.adjusted_rand_score(y, kümeler)))
print("The silhoutte score of the Agglomerative Clustering solution: {}"
      .format(metrics.silhouette_score(X_std, kümeler, metric='euclidean')))
# + [markdown] id="TeUFX9MsHNo1"
# Both scores are lesser now.
# + colab={"base_uri": "https://localhost:8080/", "height": 796} executionInfo={"status": "ok", "timestamp": 1641234820973, "user_tz": -180, "elapsed": 2541, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="194afb1a-cb01-4e9e-9405-10b60bec00b0" id="ObK2gTXGN4Sy"
# Same 2-D PCA visualization for the ward-linkage clusters.
pca = PCA(n_components=2).fit_transform(X_std)
plt.figure(figsize=(10,5))
colours = 'rbg'
for i in range(pca.shape[0]):
    plt.text(pca[i, 0], pca[i, 1], str(kümeler[i]),
             color=colours[y[i]],
             fontdict={'weight': 'bold', 'size': 50}
             )
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.show()
# + [markdown] id="Mtc3lM4-S9sW"
# # Conclusion
# + [markdown] id="G5djj3F2TBSE"
# The metrics are telling us that the average linkage is the best among all 3.
# However, its ARI score is pretty bad still, but its silhouette score is greater than K-means.
| Clustering Assignments/Hierarchical Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, numpy as np
import pandas as pd
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from sklearn import datasets
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
keras.backend.set_floatx('float64')
from keras.utils import np_utils

def set_reproducible():
    """Pin RNG seeds (Python hash, random, NumPy, TensorFlow) for repeatable runs.

    NOTE(review): uses TF1-era APIs (ConfigProto / set_random_seed / Session);
    this will not run under TensorFlow 2 without tf.compat.v1.
    """
    import tensorflow as tf
    import random as rn
    import os
    os.environ['PYTHONHASHSEED'] = '1960'
    rn.seed(1960)
    np.random.seed(1960)
    # single-threaded ops remove one source of nondeterminism
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    from keras import backend as K
    tf.set_random_seed(1960)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    # K.set_session(sess)

set_reproducible()
# -
# # Build a Keras Model
# +
def create_model():
    """Build and compile a small softmax classifier for 4-feature inputs (3 classes)."""
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Dropout
    from keras.utils import np_utils
    # Identical topology to the original, assembled via the list form of Sequential.
    layers = [
        Dense(5, input_shape=(4,), activation='relu'),
        Dropout(0.3),
        Dense(3),
        Activation('softmax'),
    ]
    net = Sequential(layers)
    net.compile(loss='categorical_crossentropy', optimizer='adam')
    return net
# -
iris = datasets.load_iris()
# 80/20 split with a fixed seed for reproducibility
train_X, test_X, train_y, test_y = train_test_split(iris.data, iris.target, train_size=0.8, test_size=0.2, random_state=1960)
# +
from keras.wrappers.scikit_learn import KerasClassifier
# scikit-learn-compatible wrapper around the Keras model; accepts integer targets
clf = KerasClassifier(build_fn=create_model, epochs=12, verbose=0)
print(train_X.shape , train_y.shape)
clf.fit(train_X, train_y, verbose=0)
# -
print(clf.model.__dict__)
print(test_X.shape)
# Predict the class of a single sample (reshaped to a 1x4 batch).
preds = clf.predict(test_X[0,:].reshape(1,4))
print(preds)
# # Generate SQL Code from the Model
# +
import json, requests, base64, dill as pickle, sys
# dill needs a deep recursion budget to serialize the Keras wrapper object
sys.setrecursionlimit(200000)
pickle.settings['recurse'] = False
def test_ws_sql_gen(pickle_data):
    """POST a pickled model to the sklearn2sql web service and return the generated SQL."""
    WS_URL="https://sklearn2sql.herokuapp.com/model"
    # the service expects the pickle payload base64-encoded inside a JSON body
    b64_data = base64.b64encode(pickle_data).decode('utf-8')
    payload = {"Name":"model1", "PickleData":b64_data , "SQLDialect":"postgresql"}
    response = requests.post(WS_URL, json=payload)
    content = response.json()
    return content["model"]["SQLGenrationResult"][0]["SQL"]
# -
# commented .. see above
# Serialize the fitted classifier and ask the web service for equivalent SQL.
pickle_data = pickle.dumps(clf)
lSQL = test_ws_sql_gen(pickle_data)
# print(lSQL[0:2000])
print(lSQL)
# # Execute the SQL Code
# +
# save the dataset in a database table
import sqlalchemy as sa
#engine = sa.create_engine('sqlite://' , echo=False)
engine = sa.create_engine("postgresql://db:db@localhost/db?port=5432", echo=False)
conn = engine.connect()
lTable = pd.DataFrame(iris.data);
lTable.columns = ['Feature_0', 'Feature_1', 'Feature_2', 'Feature_3']
lTable['TGT'] = iris.target
# KEY is the row identifier used later to align SQL and Keras outputs
lTable['KEY'] = range(iris.data.shape[0])
lTable.to_sql("INPUT_DATA" , conn, if_exists='replace', index=False)
# -
# Run the generated SQL and order rows by KEY for a stable comparison.
sql_output = pd.read_sql(lSQL , conn);
sql_output = sql_output.sort_values(by='KEY').reset_index(drop=True)
conn.close()
sql_output.sample(12, random_state=1960)
# # Keras Prediction
# Assemble the Keras-side predictions in the same column layout as the SQL output.
keras_output = pd.DataFrame()
keras_output_key = pd.DataFrame(list(range(iris.data.shape[0])), columns=['KEY']);
keras_output_score = pd.DataFrame(columns=['Score_0', 'Score_1', 'Score_2']);  # scores intentionally left empty on the Keras side
keras_output_proba = pd.DataFrame(clf.predict_proba(iris.data), columns=['Proba_0', 'Proba_1', 'Proba_2'])
keras_output = pd.concat([keras_output_key, keras_output_score, keras_output_proba] , axis=1)
for class_label in [0, 1, 2]:
    keras_output['LogProba_' + str(class_label)] = np.log(keras_output_proba['Proba_' + str(class_label)])
keras_output['Decision'] = clf.predict(iris.data)
keras_output.sample(12, random_state=1960)
# + [markdown] format="column"
# # Comparing the SQL and Keras Predictions
# -
# NOTE(review): join(on='KEY') matches keras_output['KEY'] against sql_output's
# *index*; this lines up only because sql_output was sorted by KEY and
# reset_index just above — confirm if that invariant ever changes.
sql_keras_join = keras_output.join(sql_output , how='left', on='KEY', lsuffix='_keras', rsuffix='_sql')
sql_keras_join.head(12)
# Rows where the SQL and Keras class decisions disagree (expected: none).
condition = (sql_keras_join.Decision_sql != sql_keras_join.Decision_keras)
sql_keras_join[condition]
| doc/keras_iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# NOTE(review): these tutorial cells rebind the names `str` and `str1`,
# shadowing the builtin str type for the rest of the notebook; kept as-is
# because renaming would change the lesson text.
str="ENTER"
str.lower()  # lowercased copy; strings are immutable, the original is unchanged
str1="learning"
str1[::-1]  # a step of -1 reverses the string
str1="learning the coding is great fun"
str1[2:10:-1]  # empty string: a negative step cannot move from index 2 up to 10
str="hsdnfmfn"
str.capitalize()  # first character upper-cased, the rest lower-cased
str="jhfdsn ghdgh"
str.count(str)  # counting the whole string inside itself -> 1
str=" hgjghkfhji hvikfghky "
str.strip()  # whitespace removed from both ends
str.lstrip()  # ... from the left end only
str.rstrip()  # ... from the right end only
str="Learning the coding is great fun."
str.startswith("learning")  # False: startswith is case-sensitive
str="Learning the coding is great fun."
str.startswith("learning")
str="Learning the coding is great fun."
str.startswith("arn",2,5)  # True: checks the slice str[2:5]
str="Learning the coding is great fun."
str.startswith("coding",13)  # True: starts matching at index 13
# startswith takes optional start/end arguments; it returns True when the
# string (or the given slice) begins with the prefix, otherwise False.
# # split() divides a single string into a list, by default on whitespace; pass sep="i" (or any separator) to split on that text instead.
str="Learning the coding is the great Fun."
str1=str.split()
print(str1)
str="Learning the coding is the great Fun."
str1=str.split(sep="i")
print(str1)
mylist=["Learning","the","coding","is","Great","Fun"]
# the string before .join is the separator; here a single space
str=" ".join(mylist)
print(mylist)
print(str)
mylist=["Learning","the","coding","is","Great","Fun"]
# any string works as the separator, e.g. an underscore
str="_".join(mylist)
print(mylist)
print(str)
str1="learning the codeing is great fun"
str1[0:len(str1)]#slicing: a full copy of the string
str="learning"
help(slice)
str="learning the coding is great fun"
str[::1]  # step 1 over the whole string -> identical copy
str="LearningTheCodingIsGreatFun"
str.find("Coding")#returns the first index where "Coding" occurs (-1 if absent)
str="LearningTheCodingIsGreatFun"
str.find("Coding",2)#second argument: start searching from that index (case-sensitive)
str="LearningTheCodingIsGreatFun"
str.find("Coding",2,17)#third argument: stop searching before that index
print("hello".replace("e","a"))
str="Learning the coding is great fun."
newstr=str.replace("coding","Quran")
print(str)  # unchanged: replace returns a new string
print(newstr)
age=23; name="<NAME>"
print("Mr. {} you are {} years old.".format(name,age))#format replaces the braces with its arguments in order
age=23; name="<NAME>";student="BSIT";salary="none"
# print("Mr. {} you are {} years old.".format(name,age))
# numbered placeholders {0}, {1}, ... pick format() arguments by position
newstr="Mr. {1} you studing in class of {2} in the age of {0} years old and have the salary of {3}".format(age,name,student,salary)
print(newstr)
a="hello";b="hello"
print(a==b);# == compares the contents of the two strings
print(a is b);# `is` compares identity (memory address); True here only because CPython interns short literals — don't rely on it
a="hello";b="bye"
print(a is not b);print(a!=b);# `is not` checks identity, != checks contents
# String membership test using the (in) operator.
str="Learning the coding is great Fun."
new="a"in str
print(new)
# String membership test using the (not in) operator.
str="Learning the coding is great Fun."
new="a"not in str
print(new)
str="python is interpreted language"
str[-2:-9:-2]  # slice backwards from the 2nd-last character, step -2
str="0123456789"
str[-2:-9:-2]  # -> '8642'
| String,strings Methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import copy
df_old = pd.read_excel('chen_shalev_labeld_row_data.xlsx')
# shallow slice-copy of the raw frame; edits below may still raise chained-
# assignment warnings — TODO consider df_old.copy()
df = df_old[:]
# # time_differences
# Isolating relevant columns - first and last entry dates
df_entry_dates = df[['תאריך כניסה ראשונית לתוכנית', 'תאריך השמה אחרונה','תאריך דיווח השמה אחרונה']]
# all the rows with df['תאריך השמה אחרונה'] == None also have df['תאריך דיווח השמה אחרונה'] == None
df.loc[df['תאריך השמה אחרונה'].astype(str)=='NaT','תאריך דיווח השמה אחרונה'].unique()
# +
## calculating the time difference between the first and last entry dates
df_entry_dates.loc[:,'days_diff'] = df['תאריך השמה אחרונה'] - df_entry_dates['תאריך כניסה ראשונית לתוכנית']
## converting the timedelta column to whole days (integer)
df.loc[:,'days_diff'] = df_entry_dates['days_diff'].dt.days
# -
# # Last match score
#
df = df.rename(columns = {'ציון התאמה אחרון':'last_match_score'})
# divide by 10 — presumably rescales a 0-100 score to 0-10 buckets; TODO confirm
df['last_match_score'] = round(df['last_match_score'] /10)
# # unemployment_depth_months
df = df.rename(columns = {'עומק אבטלה בחודשים':'unemployment_depth_months'})
# divide by 100 — presumably the source stores the value x100; TODO confirm
df['unemployment_depth_months'] = round(df['unemployment_depth_months'] /100)
# # age
df = df.rename(columns = {'גיל':'age'})
# Encode age bands as sentinel codes (e.g. 1829 == ages 18-29), then prefix
# with 'age_' to turn them into categorical labels.
df.loc[df['age'].between(18, 29) ,'age'] = 1829
df.loc[df['age'].between(30, 39) ,'age'] = 3039
df.loc[df['age'].between(40, 49) ,'age'] = 4049
df.loc[df['age'].between(50, 54) ,'age'] = 5054
df.loc[df['age'].between(55, 120),'age'] = 55120
df['age'] = 'age_' + df['age'].astype(str)
# # education
df = df.rename(columns = {'רמת השכלה':'education'})
# Collapse fine-grained education levels into coarser buckets.
df.loc[df['education'].str.contains('יסודי חלקי'),'education'] = 'יסודי'
df.loc[df['education'].str.contains('תעודת הוראה'),'education'] = 'לימודי תעודה'
df.loc[df['education'].str.contains('הנדסאי'),'education'] = 'לימודי תעודה'
df.loc[df['education'].str.contains('טכנאי'),'education'] = 'לימודי תעודה'
df.loc[df['education'].str.contains('תעודת מקצוע'),'education'] = 'לימודי תעודה'
# # languages
df = df.rename(columns = {'שפות':'languages'})
# Strip proficiency qualifiers (mother tongue / high / medium / basic),
# dashes and spaces, leaving only the concatenated language names.
df['languages'] = df['languages'].str.replace('שפת אם','')\
                                 .str.replace('גבוהה','')\
                                 .str.replace('בינונית','')\
                                 .str.replace('בסיסית','')\
                                 .str.replace('-','')\
                                 .str.replace('-','')\
                                 .str.replace(' ','')
# # religious
df = df.rename(columns = {'דת':'religious'})
# Normalize religion labels; anything outside the main groups becomes 'other'.
df.loc[df['religious'].str.contains('נוצרי'),'religious'] = 'נוצרי'
df.loc[df['religious'].str.contains('מוסלמי'),'religious'] = 'מוסלמי'
df.loc[df['religious'].str.contains('ללא דת|נוצרי|מוסלמי|יהודי|דרוזי')==False , 'religious'] = 'other'
print(df['religious'].unique())
# # programs
df = df.rename(columns = {'פעילויות שעבר בתכנית':'programs'})
df_programs = copy.copy(df[['programs']])
programs = df_programs['programs']
# +
# Canonical list of program names searched for in the free-text field.
# Fix: the original list contained two duplicates ('יישומי מחשב מעגלי תעסוקה'
# and 'קורס יישומי מחשב' each appeared twice). Duplicates made the matching
# loop append the same token twice per row; the resulting one-hot columns are
# presence-based, so removing them keeps the final features identical.
programs_list = [
    'חיפוש עבודה מונחה',
    'סדנת מכינה השמתית מעגלי תעסוקה',
    'ייעוץ תעסוקתי',
    'סדנת מכינה תהליכית מעגלי תעסוקה',
    'סדנת יישומי מחשב מעגלי תעסוקה',
    'שיחת אימון אישי',
    'ייעוץ פסיכולוגי',
    'סדנת ליווי בקבוצה קטנה',
    'עברית תעסוקתית',
    'זימון למנהל',
    'סדנת מכינה השמתית',
    'סדנת שינוי',
    'סדנת מכינה מחשבים מעגלי תעסוקה',
    'סדנת מכינה מעגלי תעסוקה',
    'יישומי מחשב מעגלי תעסוקה',
    'סדנת השמה פלוס מעגלי תעסוקה',
    'סדנת מכינה תהליכית',
    'סדנת מכינת השמתית מעגלי תעסוקה',
    'סדנת השמה',
    'מכינה תהליכית מעגלי תעסוקה',
    'סדנת תהליך',
    'סדנת מכינה מעורב מעגלי תעסוקה',
    'סדנת השמה מעגלי תעסוקה',
    'סדנת רכבת מעגלי תעסוקה',
    'סדנת תהליך מעגלי תעסוקה',
    'קורס יישומי מחשב',
    'סדנת שינוי מעגלי תעסוקה',
    'מכינה השמתית',
]
# -
# Tag each row with every program name that appears in its free text.
# Matched names are joined with ',' and keep the original trailing ','
# (so the string fed to str.get_dummies is byte-identical to before);
# rows with no match stay None.
l = []
for i in programs:
    matched = [x for x in programs_list if x in str(i)]
    l.append(','.join(matched) + ',' if matched else None)
# Write the matched-programs strings back and one-hot encode them.
df_programs['programs'] = l
prog = df_programs['programs'].str.get_dummies(sep=',')
# prefix the dummy columns and replace '.' so the names stay tidy
prog.columns = ['programs_' + str(x).replace('.',' ') for x in prog.columns]
# number_of_programs = how many distinct programs each row matched
pn = prog.sum(axis=1).to_frame()
pn.columns = ['number_of_programs']
program_summary = pd.concat([prog,pn],axis=1)
# NOTE(review): `l` and `index` below are computed but never used afterwards
l = list(df.columns)
index = l.index('programs')
# Rename the remaining Hebrew demographic columns to English.
df = df.rename(columns = {'מגדר':'gender',
                          'ארץ לידה':'origin',
                          'חד הורי':'single_parent',
                          'ילדים עד גיל 18':'childrens',
                          'אחוזי נכות':'disability',
                          'מגבלה רפואית':'medical_limitation',
                          'רשיונות נהיגה':'licenses',
                          'שירות צבאי':'military_service',
                          'תדירות_התייצבות':'frequency_stabilization',
                          '':''})
# Final feature set fed to the decision tree, plus the label column.
parameters = [
    'gender','age','religious','languages',
    'origin','education','military_service',
    'unemployment_depth_months','last_match_score',
    'medical_limitation','single_parent',
    'childrens','disability','licenses','days_diff',
    'label',]
DF = pd.concat([program_summary,df.loc[:,parameters]],axis = 1)
DF.head()
# Round-trip through CSV so downstream notebooks read a clean, typed copy.
DF.to_csv('df_for_desicion_tree_xlsx.csv',index=False)
DF = pd.read_csv('df_for_desicion_tree_xlsx.csv')
DF.describe()
| step_5_b_a_prepering_data_for_decision_tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Representation
# ## Lab 3: Web Scraping
# ### Lecturer: <NAME>
#
# 8. We want to write this to a CSV file for that we will need the csv package, lets test it. Write a file called PY04-testCSV.py
#
import csv

# Fix: csv.writer requires the file opened with newline='' — without it the
# writer emits a blank row after every record on Windows. The with-block also
# guarantees the file is closed even if a write fails (the original leaked
# the handle on error).
with open('employee_file.csv', mode='w', newline='') as employee_file:
    employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"',
                                 quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['<NAME>', 'Accounting', 'November'])
    employee_writer.writerow(['<NAME>', 'IT', 'March'])
# Look at the directory and check if an employee_file.csv was made
#
# ## References
#
# - Andrew Beatty course material
| labs/week03-webScraping/PY04-testCSV.py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
# Load the Ames housing training data. Raw string: the Windows path contains
# "\P", "\E", "\H" sequences that are invalid escape sequences in a normal
# string literal (DeprecationWarning); the raw form yields the identical path.
df = pd.read_csv(r"D:\Program Files\EnerjiSA_DS_Bootcamp\HW2/train.csv")
df.head(10)
df.info()
# These numeric columns are categorical codes, not quantities.
df[['MSSubClass', 'OverallQual', 'OverallCond', 'MoSold', 'YrSold']] = df[['MSSubClass', 'OverallQual', 'OverallCond', 'MoSold', 'YrSold']].astype('category')
for col in df.columns:
    if df[col].dtype == 'object':
        df[col] = df[col].astype('category')
df.info()
# Missing-value report: absolute counts and fraction missing per column.
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
# Drop the identifier and the mostly-missing columns reported above.
df = df.drop(['Id', 'PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1)
# Convert the remodel year to an age relative to 2010 (assumed last sale
# year in the data set — confirm) and rename accordingly.
df['YearRemodAdd'] = 2010 - df['YearRemodAdd']
df = df.rename(columns={'YearRemodAdd': 'BuiltAge'})
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 — confirm the
# pinned seaborn version before upgrading.
sns.distplot(df['BuiltAge'])
# +
# Bucket the houses by BuiltAge and compare the mean sale price per bucket.
df_historical = df[df['BuiltAge'] > 50]
df_old2 = df[(df['BuiltAge'] <= 50) & (20 < df['BuiltAge'])]
df_old1 = df[(df['BuiltAge'] <= 20) & (10 < df['BuiltAge'])]
df_middle = df[(df['BuiltAge'] <= 10) & (5 < df['BuiltAge'])]
df_new = df[(df['BuiltAge'] <= 5) & (0 <= df['BuiltAge'])]
a = df_historical.SalePrice.mean()
b = df_old2.SalePrice.mean()
c = df_old1.SalePrice.mean()
d = df_middle.SalePrice.mean()
e = df_new.SalePrice.mean()
# Renamed from `dict` — the original name shadowed the builtin dict type.
age_price_means = {"historical": [a], "old2": [b], "old1": [c], "middle": [d], "new": [e]}
builtAge = pd.DataFrame(age_price_means).T
builtAge = builtAge.rename(columns={0: "SalePrice"})
sns.lineplot(data=builtAge)
# -
# Sale-price spread per BuiltAge value.
sns.boxplot(data = df , x= 'BuiltAge' )
# Rank correlation between age and price (monotonic association).
stats.spearmanr(df['BuiltAge'],df['SalePrice'])
# Age-vs-price scatter for each bucket, then for the whole data set.
sns.scatterplot(data = df_historical , x= 'BuiltAge' , y ='SalePrice')
sns.scatterplot(data = df_old2 , x= 'BuiltAge' , y ='SalePrice')
sns.scatterplot(data = df_old1 , x= 'BuiltAge' , y ='SalePrice')
sns.scatterplot(data = df_middle , x= 'BuiltAge' , y ='SalePrice')
sns.scatterplot(data = df_new , x= 'BuiltAge' , y ='SalePrice')
sns.scatterplot(data = df , x= 'BuiltAge' , y ='SalePrice')
sns.distplot(df["SalePrice"])
sns.pairplot(df[["YearBuilt","SalePrice"]])
# NOTE(review): this reassigns df_new, silently replacing the "new houses"
# bucket created above — consider a distinct name such as df_features.
# Drops the first column and the last column (presumably SalePrice — confirm).
df_new = df.iloc[ : , 1:-1]
df_new.head()
# Report columns grouped by Spearman correlation strength with SalePrice:
# strong positive (> 0.6), moderate (0.4-0.6), weak (|rho| < 0.4),
# and negative (< -0.4). Each band is a separate pass so output is grouped.
for col in df_new.columns :
    spearman_rank_coeff1 = stats.spearmanr(df[col],df['SalePrice'])
    if spearman_rank_coeff1[0] > 0.6 :
        print("Corr value between " + str(col) + " - SalePrice is : " + str(spearman_rank_coeff1))
for col in df_new.columns :
    spearman_rank_coeff1 = stats.spearmanr(df[col],df['SalePrice'])
    if 0.4 <= spearman_rank_coeff1[0] <= 0.6 :
        print("Corr value between " + str(col) + " - SalePrice is : " + str(spearman_rank_coeff1))
for col in df_new.columns :
    spearman_rank_coeff1 = stats.spearmanr(df[col],df['SalePrice'])
    if -0.4 <= spearman_rank_coeff1[0] < 0.4 :
        print("Corr value between " + str(col) + " - SalePrice is : " + str(spearman_rank_coeff1))
for col in df_new.columns :
    spearman_rank_coeff1 = stats.spearmanr(df[col],df['SalePrice'])
    if spearman_rank_coeff1[0] < - 0.4 :
        print("Corr value between " + str(col) + " - SalePrice is : " + str(spearman_rank_coeff1))
# +
# Distribution of all correlation coefficients across the feature columns.
corrs = []
for col in df_new.columns :
    corr = spearman_rank_coeff1 = stats.spearmanr(df[col],df['SalePrice'])[0]
    corrs.append(corr)
corr_df = pd.DataFrame(corrs)
sns.distplot(corr_df)
# -
# Pearson correlation heatmap over the data set.
df_corr = df.corr()
plt.figure(figsize=(20,10))
sns.heatmap(df_corr,annot=True,vmin=-1,vmax=1,cmap='coolwarm')
# Assemble numeric feature matrix / target for the feature-importance models.
X_train = df.select_dtypes(include=['number']).copy()
X_train = X_train.drop(['SalePrice'], axis=1)
y_train = df["SalePrice"]
# fill in any missing data with the mean value
X_train = X_train.fillna(X_train.mean())
print(X_train.shape[1])
# Scale each feature by its standard deviation (no mean-centering), so the
# Lasso coefficients below are comparable across features.
std = np.std(X_train, axis=0)
X_train /= std
from sklearn import linear_model
# A strongly-regularised, non-negative Lasso without intercept: the surviving
# non-zero weights indicate the most informative features.
regressor = linear_model.Lasso(alpha=100,
                               positive=True,
                               fit_intercept=False,
                               max_iter=1000,
                               tol=0.0001)
regressor.fit(X_train, y_train)
import eli5
eli5.show_weights(regressor, top=-1, feature_names = X_train.columns.tolist())
# +
from xgboost import XGBRegressor
from matplotlib import pyplot
# NOTE(review): model_list is never used below.
model_list = []
# define the model
model = XGBRegressor()
# fit the model
model.fit(X_train, y_train)
# get importance
importance = model.feature_importances_
# summarize feature importance
for i,v in enumerate(importance):
    print((i,v))
# plot feature importance
pyplot.bar([x for x in range(len(importance))], importance)
pyplot.show()
# -
# -
| HW2/data_preprocessing_houseprice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Variation (변분법)
# ## functional (범함수)
# > ## domain -> functions
# >> # range -> functions
# > ## calculus of variations
# >> ## differential of functional (변분)
# >>> ## cf) differential (미분) of value of function
# >>> ## cf) difference (차분) of independent variable
import sympy as sm
# Cartesian product of two iterables.
list(sm.utilities.iterables.cartes([1,2,3],'ab'))
# All length-3 tuples over [1,2,3], repetition allowed.
list(sm.utilities.iterables.variations([1,2,3],3,repeat=True))
# Flatten exactly one nesting level.
sm.utilities.iterables.flatten([[(1,2)],[(3,4)],[(5,6)]],levels=1)
# Group runs of equal consecutive elements; multiple=False returns
# (element, run-length) pairs instead of the runs themselves.
sm.utilities.iterables.group([1,1,2,2,2,3,1,2,3])
sm.utilities.iterables.group([1,1,1,2,2,2,2,3,3,1,2,3],multiple=False)
sm.utilities.iterables.multiset([1,1,1,2,2,2,2,3,3,1,2,3])
# True when the sequence contains duplicates.
sm.utilities.iterables.has_dups([1,2,3,2])
# Built-in container constructors vs their sympy counterparts.
set()
dict()
sm.Set()
sm.Dict()
# +
import itertools
# itertools variants vs sympy's multiset-aware counterparts.
list(itertools.permutations(range(3)))
list(itertools.combinations(range(3),2))
list(sm.utilities.iterables.multiset_combinations(range(3),2))
list(sm.utilities.iterables.multiset_permutations(range(3),2))
list(sm.utilities.iterables.multiset_partitions(range(4),2))
list(sm.utilities.iterables.subsets([1,2]))
# -
| python/Vectors/iterables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Task 1 create a soup with bs4 and load the data to work later on it
from bs4 import BeautifulSoup
import pandas as pd
# Open the locally-saved Wikipedia page and hand the markup to BeautifulSoup.
fd = open("List of countries by GDP (nominal) - Wikipedia.htm", "r")
soup = BeautifulSoup(fd)
fd.close()
# # Task 2 How many tables are there?
all_tables = soup.find_all("table")
print("Total number of tables are {} ".format(len(all_tables)))
# # Task 3 find the right table using the class attribute
# NOTE(review): the class value '"wikitable"|}' looks mangled, but it is
# matched literally against the saved page — verify against the .htm source
# before "fixing" it.
data_table = soup.find("table", {"class": '"wikitable"|}'})
print(type(data_table))
# # Task 4 Let's separate the source and the actual data
# First top-level <tr> holds the source cells; the second holds the data.
sources = data_table.tbody.findAll('tr', recursive=False)[0]
sources_list = [td for td in sources.findAll('td')]
print(len(sources_list))
data = data_table.tbody.findAll('tr', recursive=False)[1].findAll('td', recursive=False)
# One inner <table> list per source cell.
data_tables = []
for td in data:
    data_tables.append(td.findAll('table'))
len(data_tables)
# # Task 5 Checking how to get the source names
# The source name is the text of the first anchor in each source cell.
source_names = [source.findAll('a')[0].getText() for source in sources_list]
print(source_names)
# # Task 6 Separate the header and data for the first source
header1 = [th.getText().strip() for th in data_tables[0][0].findAll('thead')[0].findAll('th')]
header1
rows1 = data_tables[0][0].findAll('tbody')[0].findAll('tr')[1:]
data_rows1 = [[td.get_text().strip() for td in tr.findAll('td')] for tr in rows1]
df1 = pd.DataFrame(data_rows1, columns=header1)
df1.head()
def find_right_text(i, td):
    """Extract the display text from a table cell.

    The first two columns (index 0 and 1) are returned verbatim (stripped);
    the numeric columns embed a hidden sort key terminated by the "♠"
    character, so everything up to and including that marker is dropped.
    When no marker is present, find() returns -1 and the slice starts at 0,
    so the whole (stripped) cell text is returned.
    """
    # The two identical leading branches of the original are merged here.
    if i in (0, 1):
        return td.getText().strip()
    index = td.text.find("♠")
    return td.text[index + 1:].strip()
data_rows2 = [[find_right_text(i, td) for i, td in enumerate(tr.findAll('td'))] for tr in rows2]
df2 = pd.DataFrame(data_rows2, columns=header2)
df2.head()
# Now for the third one
header3 = [th.getText().strip() for th in data_tables[2][0].findAll('thead')[0].findAll('th')]
header3
rows3 = data_tables[2][0].findAll('tbody')[0].findAll('tr')[1:]
# BUG FIX: parse the rows of the *third* table (rows3). The original
# iterated rows2 again, so df3 silently duplicated source 2's data.
data_rows3 = [[find_right_text(i, td) for i, td in enumerate(tr.findAll('td'))] for tr in rows3]
df3 = pd.DataFrame(data_rows3, columns=header3)
df3.head()
| Lesson05/Activity07/Activity07_ReadingTabularData_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>
#
# In this notebook, I'll build a character-wise RNN trained on <NAME>, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
#
# This network is based off of <NAME>'s [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [<NAME>](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
#
# <img src="assets/charseq.jpeg" width="500">
# +
import time
import numpy as np
# -
# First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
# Read the corpus, build the vocabulary and the char<->int lookup tables,
# then encode the whole text as an int32 array of character ids.
with open('anna.txt', 'r') as f:
    text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
text[:100]
# And we can see the characters encoded as integers.
encoded[:100]
# Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
len(vocab)
# ## Making training mini-batches
#
# Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
#
# <img src="assets/sequence_batching@1x.png" width=500px>
#
#
# <br>
#
# We start with our text encoded as integers in one long array in `encoded`. Let's create a function that will give us an iterator for our batches. I like using [generator functions](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/) to do this. Then we can pass `encoded` into this function and get our batch generator.
#
# The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the total number of batches, $K$, we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
#
# After that, we need to split `arr` into $N$ sequences. You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (`batch_size` below), let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
#
# Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `n_steps`. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character.
#
# The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of steps in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `n_steps` wide.
def one_hot_encode(arr, n_labels):
    """One-hot encode an integer array.

    Returns a float32 array of shape (*arr.shape, n_labels) where
    out[..., k] == 1 exactly when the corresponding entry of arr is k.
    """
    # Row k of the identity matrix is the one-hot vector for label k, so
    # fancy-indexing the identity with arr encodes every element at once.
    identity = np.eye(n_labels, dtype=np.float32)
    return identity[arr]
def get_batches(arr, batch_size, n_steps):
    '''Yield (x, y) training batches of shape batch_size x n_steps.

    x is a window over the encoded sequence and y is x shifted left by one
    character (the prediction targets). The tail of arr that does not fill
    a complete batch is discarded.
    '''
    per_batch = batch_size * n_steps
    n_batches = len(arr) // per_batch
    # Trim the remainder, then lay the data out as batch_size parallel rows
    # so each batch is a contiguous window of columns.
    trimmed = arr[:n_batches * per_batch].reshape((batch_size, -1))
    total_steps = trimmed.shape[1]
    for start in range(0, total_steps, n_steps):
        x = trimmed[:, start:start + n_steps]
        # Targets are the inputs shifted by one. The final batch's shifted
        # slice is one column short, so write it into a zero array of x's
        # shape — a small artifact in the last batch that doesn't matter.
        y = np.zeros_like(x)
        shifted = trimmed[:, start + 1:start + n_steps + 1]
        y[:, :shifted.shape[1]] = shifted
        yield x, y
# Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
# Smoke-test the generator: one batch of 10 sequences x 50 steps, showing
# the top-left 10x10 corner of both the inputs and the shifted targets.
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
# If you implemented `get_batches` correctly, the above output should look something like
# ```
# x
# [[55 63 69 22 6 76 45 5 16 35]
# [ 5 69 1 5 12 52 6 5 56 52]
# [48 29 12 61 35 35 8 64 76 78]
# [12 5 24 39 45 29 12 56 5 63]
# [ 5 29 6 5 29 78 28 5 78 29]
# [ 5 13 6 5 36 69 78 35 52 12]
# [63 76 12 5 18 52 1 76 5 58]
# [34 5 73 39 6 5 12 52 36 5]
# [ 6 5 29 78 12 79 6 61 5 59]
# [ 5 78 69 29 24 5 6 52 5 63]]
#
# y
# [[63 69 22 6 76 45 5 16 35 35]
# [69 1 5 12 52 6 5 56 52 29]
# [29 12 61 35 35 8 64 76 78 28]
# [ 5 24 39 45 29 12 56 5 63 29]
# [29 6 5 29 78 28 5 78 29 45]
# [13 6 5 36 69 78 35 52 12 43]
# [76 12 5 18 52 1 76 5 58 52]
# [ 5 73 39 6 5 12 52 36 5 78]
# [ 5 29 78 12 79 6 61 5 59 63]
# [78 69 29 24 5 6 52 5 63 76]]
# ```
# although the exact numbers will be different. Check to make sure the data is shifted over one step for `y`.
# ## Building the model
#
# Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
#
# <img src="assets/charRNN.png" width=500px>
#
import torch
from torch import nn, optim
from torch.autograd import Variable
import torch.nn.functional as F
class CharRNN(nn.Module):
    """Character-level LSTM language model.

    Maps a one-hot encoded character sequence to per-character
    log-probabilities over the vocabulary.
    """

    def __init__(self, n_tokens, n_steps=50, n_layers=2,
                 n_hidden=256, drop_prob=0.5):
        super().__init__()
        # Hyperparameters kept on the instance (used by init_hidden/sampling).
        self.n_tokens = n_tokens
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        # Layer creation order is kept identical to the original definition
        # so seeded weight initialisation is unchanged.
        self.dropout = nn.Dropout(drop_prob)
        self.lstm = nn.LSTM(self.n_tokens, n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)
        self.fc = nn.Linear(n_hidden, self.n_tokens)

    def forward(self, x, hc):
        """Run one pass; hc is the (hidden, cell) state tuple."""
        out, state = self.lstm(x, hc)
        out = self.dropout(out)
        # Flatten (batch, steps, hidden) -> (batch*steps, hidden) so the
        # linear layer scores every time step at once.
        out = out.contiguous().view(-1, self.n_hidden)
        return F.log_softmax(self.fc(out), dim=1), state
def init_hidden(net, batch_size):
    ''' Return zeroed (hidden, cell) LSTM states for a fresh sequence. '''
    # Allocate on the same device/dtype as the model by cloning storage
    # from an arbitrary parameter tensor.
    weight = next(net.parameters()).data
    shape = (net.n_layers, batch_size, net.n_hidden)
    hidden = Variable(weight.new(*shape).zero_())
    cell = Variable(weight.new(*shape).zero_())
    return hidden, cell
# ## Hyperparameters
#
# Here I'm defining the hyperparameters for the network.
#
# * `batch_size` - Number of sequences running through the network in one pass.
# * `num_steps` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
# * `lstm_size` - The number of units in the hidden layers.
# * `num_layers` - Number of hidden LSTM layers to use
# * `learning_rate` - Learning rate for training
# * `keep_prob` - The dropout keep probability when training. If you're network is overfitting, try decreasing this.
#
# Here's some good advice from <NAME> on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
#
# > ## Tips and Tricks
#
# >### Monitoring Validation Loss vs. Training Loss
# >If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
#
# > - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
# > - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
#
# > ### Approximate number of parameters
#
# > The two most important parameters that control the model are `lstm_size` and `num_layers`. I would advise that you always use `num_layers` of either 2/3. The `lstm_size` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
#
# > - The number of parameters in your model. This is printed when you start training.
# > - The size of your dataset. 1MB file is approximately 1 million characters.
#
# >These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
#
# > - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `lstm_size` larger.
# > - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
#
# > ### Best models strategy
#
# >The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
#
# >It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
#
# >By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
#
# Training hyperparameters; see the markdown notes above for guidance.
batch_size = 100        # Sequences per batch
n_steps = 100           # Number of sequence steps per batch
lstm_size = 512         # Size of hidden layers in LSTMs
n_layers = 2            # Number of LSTM layers
learning_rate = 0.005   # Learning rate
drop_prob = 0.2         # Dropout drop probability
clip = 5                # Gradient clipping
# ## Time for training
# +
epochs = 20
print_every = 10
cuda = True  # assumes a CUDA-capable GPU is available — confirm
net = CharRNN(len(vocab), n_steps=n_steps, n_layers=n_layers,
              n_hidden=lstm_size, drop_prob=drop_prob)
opt = optim.Adam(net.parameters(), lr=learning_rate)
# NLLLoss pairs with the log_softmax output of CharRNN.forward.
criterion = nn.NLLLoss()
if cuda:
    net.cuda()
counter = 0
n_chars = len(vocab)
for e in range(epochs):
    # init hc a tuple of (hidden, cell) states
    hc = init_hidden(net, batch_size)
    for x, y in get_batches(encoded, batch_size, n_steps):
        counter += 1
        # One-hot encode our data and make them Torch tensors
        x = one_hot_encode(x, n_chars)
        x, y = torch.from_numpy(x), torch.from_numpy(y)
        inputs, targets = Variable(x), Variable(y.long())
        if cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # Creating new variables for the hidden/cell state, otherwise
        # we'd backprop through the entire training history
        hc = tuple([Variable(each.data) for each in hc])
        net.zero_grad()
        output, hc = net.forward(inputs, hc)
        # Targets are flattened to match the (batch*steps, n_tokens) output.
        loss = criterion(output, targets.view(batch_size * n_steps))
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        # NOTE(review): clip_grad_norm and loss.data[0] below are pre-0.4
        # torch APIs (clip_grad_norm_ / loss.item() in modern versions) —
        # confirm the installed torch version before running.
        nn.utils.clip_grad_norm(net.parameters(), clip)
        opt.step()
        if counter % print_every == 0:
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Step: {}...".format(counter),
                  "Loss: {:.4f}...".format(loss.data[0]))
# -
# Persist the trained weights.
torch.save(net.state_dict(), 'anna_rnn.net')
# ## Sampling
#
# Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
#
# The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
def predict(net, char, hc=None, cuda=False):
    """Return next-character log-probabilities for a single character id.

    Args:
        net: trained CharRNN.
        char: integer id of the current character.
        hc: optional (hidden, cell) state; a fresh zero state is built
            when omitted.
        cuda: move the input to the GPU before the forward pass.

    Returns:
        (log_probs, (hidden, cell)) for the following time step.
    """
    if hc is None:
        hc = init_hidden(net, 1)
    # Shape (1, 1, n_tokens): one step of one sequence.
    x = one_hot_encode(np.array([[char]]), net.n_tokens)
    # Make sure our variables are volatile so we don't save the history
    # since we're in inference mode here
    # NOTE(review): `volatile` is a pre-0.4 torch flag (replaced by
    # torch.no_grad()) — confirm the installed version.
    inputs = Variable(torch.from_numpy(x), volatile=True)
    hc = tuple([Variable(each.data, volatile=True) for each in hc])
    if cuda:
        inputs = inputs.cuda()
    x, hc = net.forward(inputs, hc)
    return x, hc
def choose_char(x, top_k=None):
    """Pick the next character id from log-probabilities x.

    With top_k=None the argmax character is returned; otherwise a character
    is sampled from the k most likely ones, weighted by their (renormalized)
    probabilities — reduces noise while avoiding repetitive output.
    """
    if top_k is None:
        ps, out = torch.exp(x).max(dim=0)
        # NOTE(review): [0]-indexing the max() result relies on the pre-0.4
        # torch Variable API — confirm before porting to a newer torch.
        out = out[0].data.numpy()[0]
    else:
        probs, idx = torch.exp(x).topk(top_k)
        probs, idx = probs.data.numpy().squeeze(), idx.data.numpy().squeeze()
        # Renormalize so the truncated distribution sums to 1.
        out = np.random.choice(idx, p=probs/probs.sum())
    return out
def sample(net, n_samples, prime="The", cuda=False, top_k=None):
    ''' Sample from a trained network.

    Feeds the `prime` string through the network to build up state, then
    generates n_samples further characters, each conditioned on the previous
    one. Returns the full text (prime included) as a list of characters.
    '''
    # First make sure the network is in inference mode
    net.eval()
    if cuda:
        net.cuda()
    else:
        net.cpu()
    # Initialize hidden state
    hc = init_hidden(net, 1)
    # Build up the hidden state from the priming text
    sample = list(prime)
    for char in sample:
        x, hc = predict(net, vocab_to_int[char], hc=hc, cuda=cuda)
    # Get the first new character
    if cuda:
        x = x.cpu()
    # NOTE(review): the first generated character ignores top_k (argmax is
    # used) — possibly intentional, but likely an oversight; confirm.
    char_int = choose_char(x)
    sample.append(int_to_vocab[char_int])
    for ii in range(n_samples):
        x, hc = predict(net, char_int, hc=hc, cuda=cuda)
        if cuda:
            x = x.cpu()
        char_int = choose_char(x, top_k=top_k)
        sample.append(int_to_vocab[char_int])
    return sample
print(''.join(sample(net, 1000, top_k=5)))
# Here, pass in the path to a checkpoint and sample from the network.
| intro-to-rnns/Anna_KaRNNa_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developer Overview
#
# Data of participants that do not code during their work at DLR are not included in the following charts.
# ## Table of Contents:
# * [What is the highest level of education you have attained?](#What-is-the-highest-level-of-education-you-have-attained?)
# * [In which discipline is your highest academic qualification?](#In-which-discipline-is-your-highest-academic-qualification?)
# * [How much time do you spent on ... ?](#How-much-time-do-you-spent-on-...-?)
# * [How many years of software development experience do you have?](#How-many-years-of-software-development-experience-do-you-have?)
# * [Who uses the code that you write?](#Who-uses-the-code-that-you-write?)
# ### Setting up
# + pycharm={"is_executing": false}
# Import notebook containing sampled dataset
# %run "./00_data-cleaning.ipynb"
# + pycharm={"is_executing": false}
# Filtering the df
# Keep only participants who write code as part of their job; every chart
# below is computed on this subset.
df = df[(df['Do you write code as part of your job?'] =='Yes')]
#x = len(df[df['write_code']== 'Yes'])
#df.drop(['write_code'], axis=1, inplace=True)
# -
# ### What is the highest level of education you have attained?
# + pycharm={"is_executing": false}
# What is the highest level of education you have attained?
# Frequency table of education levels.
count = df['What is the highest level of education you have attained?'].value_counts()
results = pd.DataFrame(count.values, count.index)
results.columns = ['What is the highest level of education you have attained?']
display(results)
# + pycharm={"is_executing": false}
# Levels of education
# Fixed display order, from highest to lowest qualification.
order=['Doctorate','Master degree','Undergraduate degree', 'Final secondary-school examination', 'general qualification for university entrance', 'Other']
plt.figure(figsize=(15,10))
count = df['What is the highest level of education you have attained?'].value_counts()
sns.set(style="darkgrid")
# NOTE(review): positional (x, y) arguments to sns.barplot are deprecated in
# newer seaborn releases — confirm the pinned version.
sns.barplot(count.index, count.values, order=order)
plt.xticks(rotation= 90)
plt.title('What is the highest level of education you have attained?', bbox={'facecolor':'0.8', 'pad':12})
plt.ylabel('Number of participants', fontsize=13)
plt.xlabel('Education level', fontsize=13)
ax = plt.gca()
# Annotate each bar with its percentage of all participants.
totals = []
for i in ax.patches:
    totals.append(i.get_height())
total = sum(totals)
for i in ax.patches:
    ax.text(i.get_x()+ 0.15, i.get_height()+.9, \
            str(round((i.get_height()/total)*100, 2))+'%', fontsize=17,
            color='dimgrey')
plt.show()
# -
# ### In which discipline is your highest academic qualification?
# + pycharm={"is_executing": false}
# In which discipline is your highest academic qualification?
# Frequency table of academic disciplines.
count = df['In which discipline is your highest academic qualification?'].value_counts()
results = pd.DataFrame(count.values, count.index)
display(results)
# + pycharm={"is_executing": false}
# Academic discipline for education and professional development
plt.figure(figsize=(15,10))
count = df['In which discipline is your highest academic qualification?'].value_counts()
sns.set(style="darkgrid")
sns.barplot(count.index, count.values)
plt.xticks(rotation= 90)
plt.title('In which discipline is your highest academic qualification?', bbox={'facecolor':'0.8', 'pad':12})
plt.ylabel('Number of participants', fontsize=15)
plt.xlabel('Academic discipline', fontsize=15)
ax = plt.gca()
# Annotate each bar with its percentage of all participants.
totals = []
for i in ax.patches:
    totals.append(i.get_height())
total = sum(totals)
for i in ax.patches:
    ax.text(i.get_x()+ 0.15, i.get_height()+.9, \
            str(round((i.get_height()/total)*100, 2))+'%', fontsize=13,
            color='dimgrey')
plt.show()
# -
# ### How much time do you spent on ... ?
# + pycharm={"is_executing": false}
# In an average month, how much time do you spend on...
# 4x2 grid: left column = actual time spent, right column = desired time,
# one row each for software development / research / management / other.
fig, ax =plt.subplots(4,2, sharey=True, figsize=(25,20))
count1 = df['In an average month, how much time do you spend on software development?'].value_counts()
sns.set()
ax1 = sns.barplot(count1.index, count1.values, ax=ax[0,0], palette="cubehelix")
ax1.set_title("spend software development")
count2 = df['In an average month, how much time would you like to spend on software development?'].value_counts()
ax2 = sns.barplot(count2.index, count2.values, ax=ax[0,1], palette="cubehelix")
ax2.set_title("would spend software development")
count3 = df['In an average month, how much time do you spend on research?'].value_counts()
ax3 = sns.barplot(count3.index, count3.values, ax=ax[1,0], palette="cubehelix")
ax3.set_title("spend research")
count4 = df['In an average month, how much time would you like to spend on research?'].value_counts()
ax4 = sns.barplot(count4.index, count4.values, ax=ax[1,1], palette="cubehelix")
ax4.set_title("would spend research")
count5 = df['In an average month, how much time do you spend on management?'].value_counts()
ax5 = sns.barplot(count5.index, count5.values, ax=ax[2,0], palette="cubehelix")
ax5.set_title("spend management")
count6 = df['In an average month, how much time would you like to spend on management?'].value_counts()
ax6 = sns.barplot(count6.index, count6.values, ax=ax[2,1], palette="cubehelix")
ax6.set_title("would spend management")
count7 = df['In an average month, how much time do you spend on other activities?'].value_counts()
ax7 = sns.barplot(count7.index, count7.values, ax=ax[3,0], palette="cubehelix")
ax7.set_title("spend other activities")
count8 = df['In an average month, how much time would you like to spend on other activities?'].value_counts()
ax8 = sns.barplot(count8.index, count8.values, ax=ax[3,1], palette="cubehelix")
ax8.set_title("would spend other activities")
fig.suptitle('In an average month, how much time do you/ would you spend on...', bbox={'facecolor':'0.8', 'pad':12})
plt.show()
# -
# ### How many years of software development experience do you have?
# + pycharm={"is_executing": false}
# How many years of software development experience do you have?
count = df['How many years of software development experience do you have?'].value_counts()
results = pd.DataFrame(count.values, count.index)
results.columns = ['Programming experience']
display(results)
# + pycharm={"is_executing": false}
# Bar chart of the programming-experience distribution.
# (Removed a stale block of commented-out preprocessing that was never
# reconciled with the table above.)
plt.figure(figsize=(15,10))
count = df['How many years of software development experience do you have?'].value_counts()
sns.set(style="darkgrid")
# FIX: seaborn >= 0.12 only accepts x/y as keyword arguments.
sns.barplot(x=count.index, y=count.values)
plt.xticks(rotation=90)
plt.title('How many years of software development experience do you have?', bbox={'facecolor':'0.8', 'pad':12})
plt.ylabel('Number of participants', fontsize=15)
plt.xlabel('Programming experience', fontsize=15)
plt.show()
# + pycharm={"is_executing": false}
# How many years of software development experience
# Box plot summarising the experience distribution (median, quartiles, outliers).
plt.figure(figsize=(9,8))
plt.title('How many years of software development experience do you have?', bbox={'facecolor':'0.8', 'pad':12})
bplot = sns.boxplot(y='How many years of software development experience do you have?', data=df, width=0.5, palette="colorblind")
# -
# ### Who uses the code that you write?
# + pycharm={"is_executing": false}
# Frequency table: who uses the respondents' code.
usage_counts = df['Who uses the code that you write?'].value_counts()
results = pd.DataFrame(usage_counts.values, index=usage_counts.index)
results.columns = ['Who use the code']
display(results)
# + pycharm={"is_executing": false}
# Bar chart of "Who uses the code that you write?" with a percentage label
# above each bar.
order=['0 - Mostly me','1','2', '3', '4', '5 - Mostly other people']
plt.figure(figsize=(11,10))
count = df['Who uses the code that you write?'].value_counts()
sns.set(style="darkgrid")
# FIX: seaborn >= 0.12 only accepts x/y as keyword arguments.
sns.barplot(x=count.index, y=count.values, order=order)
plt.title('Who uses the code that you write?', bbox={'facecolor':'0.8', 'pad':12})
plt.ylabel('Number of participants', fontsize=15)
plt.xlabel('who use code', fontsize=15)
ax = plt.gca()
# Total answer count, used to convert each bar height into a percentage.
totals = [patch.get_height() for patch in ax.patches]
total = sum(totals)
for patch in ax.patches:
    ax.text(patch.get_x() + 0.15, patch.get_height() + .9,
            str(round((patch.get_height()/total)*100, 2)) + '%', fontsize=17,
            color='dimgrey')
plt.show()
| 03_survey-analysis/02_developer-overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hough Lines
# ### Import resources and display the image
# +
import numpy as np
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# Read in the image
# NOTE(review): cv2.imread returns None (no exception) when the path is
# missing, so cvtColor below would fail cryptically — confirm
# 'images/phone.jpg' exists relative to the notebook.
image = cv2.imread('images/phone.jpg')
# Change color to RGB (from BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# -
# ### Perform edge detection
# +
# Convert image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Define our parameters for Canny
# (hysteresis thresholds: gradients above high_threshold are strong edges;
# those between the two are kept only if connected to a strong edge)
low_threshold = 50
high_threshold = 100
edges = cv2.Canny(gray, low_threshold, high_threshold)
plt.imshow(edges, cmap='gray')
# -
# ### Find lines using a Hough transform
# +
# Hough transform parameters.
# rho and theta set the resolution of the accumulator grid.
rho = 1
theta = 1 * np.pi / 180  # 1 degree
# Minimum number of accumulator votes required to accept a line.
threshold = 60
min_line_length = 100  # shortest segment that counts as a line
max_line_gap = 5       # largest gap tolerated inside one segment
line_image = np.copy(image)  # draw on a copy, keep the original intact
# Run probabilistic Hough on the edge map; each detection is an array
# holding the segment's two endpoints [x1, y1, x2, y2].
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
print(len(lines))
# Draw every detected segment onto the copy.
for detected in lines:
    x1, y1, x2, y2 = detected[0]
    print("x1: {} - y1: {} - x2: {} - y2:{}".format(x1, y1, x2, y2))
    cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 3)
plt.figure(figsize=(15,10))
plt.imshow(line_image)
# WHERE IS THE 5TH LINE? (First and last very very similar)
# -
| L03-020-Notebook-01-Hough_lines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# +
using SymPy
@vars h x
f = SymFunction("f")
f1 = f(x + im*h)
# -
T1 = series(f1, h, 0, 2)
T1 = T1.removeO()
# imag(T1) = h*f'(x) — the derivative term of the complex-step expansion;
# real(T1) = f(x).
# BUG FIX: the original assigned real(T1) back into T1Imag, silently
# overwriting the imaginary part (dead store); keep the two parts in
# separate variables.
T1Imag = imag(T1)
T1Real = real(T1)
| Code/Chapter 2 - Derivatives and Gradients/2.5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#  | CÂMARA LEGISLATIVA DO DISTRITO FEDERAL | 
# --- | :---: | ---
# | **COORDENADORIA DE MODERNIZAÇÃO E INFORMÁTICA - CMI** |
# | **OFICINA ASI/LABHINOVA** |
# | **ANÁLISE EXPLORATÓRIA DE DADOS ABERTOS USANDO PYTHON** |
# # Acessando Dados Abertos por meio de uma API
# ## Exemplo de fonte de dados abertos por meio de uma API:
# Por exemplo, o Portal da Transparência do Governo Federal fornece dados abertos através de uma API que pode ser acessada no site:
#
# http://www.transparencia.gov.br/swagger-ui.html
# O exemplo de dados em formato JSON foi obtido dessa API em:
#
# http://www.transparencia.gov.br/swagger-ui.html#!/Bolsa32Fam237lia/bolsaFamiliaPorMunicipioUsingGET
# Nesse momento, o instrutor deve mostrar alguns exemplos de requisições feitas à API direto no browser como por exemplo a requisição abaixo que foi usada nos slides para apresentar o formato de dados JSON:
# http://www.transparencia.gov.br/api-de-dados/bolsa-familia-por-municipio?mesAno=202001&codigoIbge=5300108&pagina=1
# ## Importante entender algumas coisas:
# #### 1 - De onde descobrirmos o código para o Distrito Federal?
# Isso é fornecido pelo próprio site do IBGE em: https://www.ibge.gov.br/explica/codigos-dos-municipios.php#DF
# #### 2 - Importante salientar que a API pede a data no formato YYYYMM
# Então pedimos a data 202001 que significa que queremos os dados relativo ao mês de janeiro de 2020.
# O instrutor pode mostrar outras requisições JSON em tempo real mudando as datas ou mesmo pedindo dados de outros municípios.
# Por exemplo, alguns códigos de municípios:
# - Pirenópolis - 5217302
# - Cocalzinho de Goiás - 5205513
# - Caldas Novas - 5204508
# E alguns exemplos de requisições para esses municípios:
#
# http://www.transparencia.gov.br/api-de-dados/bolsa-familia-por-municipio?mesAno=202001&codigoIbge=5217302&pagina=1
#
# http://www.transparencia.gov.br/api-de-dados/bolsa-familia-por-municipio?mesAno=202001&codigoIbge=5205513&pagina=1
#
# http://www.transparencia.gov.br/api-de-dados/bolsa-familia-por-municipio?mesAno=202001&codigoIbge=5204508&pagina=1
# # Ok, mas e agora como faço isso por meio de código python?
# #### Primeiro vamos precisar importar algumas bibliotecas que farão o trabalho duro pra gente:
from urllib.request import Request, urlopen
import json
print('Bibliotecas importadas!')
# #### Se você não executou o código da célula acima, execute! Do contrário, os códigos abaixo não vão funcionar! Clique na célula acima e clique no comando "Run" ou pressione "SHIFT + ENTER" no teclado.
# #### Em seguida, vamos programar um pouquinho.
# O código abaixo define uma função que poderemos usar em seguida.
# O que ela faz é criar uma função que faz a requisição JSON e devolve o objeto JSON para você.
# Você passa como os parâmetros strMesAno e strCodIBGE e ela devolve o objeto JSON.
# +
siteApi = 'http://www.transparencia.gov.br/api-de-dados'
def getJsonData(strMesAno,strCodIBGE):
    """Fetch page 1 of the Bolsa Família data for the given month (format
    YYYYMM) and IBGE municipality code, returning the parsed JSON object."""
    endpoint = f'bolsa-familia-por-municipio?mesAno={strMesAno}&codigoIbge={strCodIBGE}&pagina=1'
    http_request = Request(siteApi + '/' + endpoint, headers={'User-Agent': 'Mozilla/5.0'})
    raw_body = urlopen(http_request).read()
    return json.loads(raw_body)
print('Função criada!')
# -
# #### Agora vamos usar a função!
#
# Por exemplo, vamos pegar os dados de Pirenopolis de Dezembro de 2018.
# Que tal?
dadosBolsaFamiliaPiri = getJsonData(201812,5217302)
# #### Agora vamos dar uma olhada no conteúdo da variável dadosBolsaFamiliaPiri:
dadosBolsaFamiliaPiri
# #### De exercício, quero que pegue os dados de Cocalzinho de Goiás de janeiro de 2020.
#
# Consegue fazer isso sozinho?
# Use a célula a seguir para fazer isso. Vou deixar parte do comando pronto para você:
dadosBolsaFamiliaCocalzinho = getJsonData(?,?)
dadosBolsaFamiliaCocalzinho
# #### Nesse momento o instrutor deveria ensinar como acessar uma determinada informação do objeto JSON fornecido.
# Por exemplo: O campo "valor":
print(dadosBolsaFamiliaPiri[0]['valor'])
# Outro exemplo: O campo "município":
print(dadosBolsaFamiliaPiri[0]['municipio'])
# E dentro do municipio, o campo "nomeIBGE":
print(dadosBolsaFamiliaPiri[0]['municipio']['nomeIBGE'])
# # Legal, mas e se eu quiser obter vários dados?
#
# ## Aí temos que programar!
#
# #### Mas para facilitar o andamento da oficina, nós já criamos o programa que faz uma requisição para cada mês de cada ano desde 2013 até 2020 para Brasília e gerou um arquivo CSV com esses dados!
#
# #### Dessa forma, na próxima etapa da oficina, vamos aprender a ler esse arquivo CSV (processo idêntico para se ler qualquer arquivo desse tipo) e, em seguida, poderemos fazer algumas análises nesses dados.
# # Quando o instrutor autorizar, você vai seguir esse link aqui:
# ## [Explorando dados de um arquivo csv](../csv/csv.ipynb)
| etapas/json/.ipynb_checkpoints/json-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
" Import the libraries "
import os
import sys
import math
import copy
import numpy as np
import pandas as pd
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# +
" Import the scripts of SD for Explaining and the supplementary scripts for neighbors generation"
# Walk three directories up from the notebook's cwd to the repository root,
# then put the SplitSD4X package and its 'supplementary' folder on sys.path.
# NOTE(review): the hard-coded 'SplitSD4X\\' backslash suffix makes this
# Windows-specific — confirm before running on other platforms.
absFilePath = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))
newPath = os.path.join(absFilePath, 'SplitSD4X\\')
sys.path.append(newPath)
newPath_supp = os.path.join(newPath, 'supplementary')
sys.path.append(newPath_supp)
# Star-imports bring in the helpers used below
# (generate_all_neighbors*, SplitBasedSelectionForm, ...).
from fill_missing_values import *
from missing_values_table import *
from subgroups_discovery import *
from neighbors_generation import *
from neighbors_generation_2 import *
from neighbors_generation_3 import *
from neighbors_generation_4 import *
# -
# ## Data Preparation
# +
" Loading the dataset "
datasets_path = os.path.join(absFilePath, 'Datasets\\')
url = datasets_path + 'data_anuran.csv'
df = pd.read_csv(url)
# Drop identifier/taxonomy columns and incomplete rows.
df = df.drop(columns=['RecordID','Genus','Species'])
df = df.dropna()
" Handling data "
# Encode the target labels as integers.
family_mapper = {'Bufonidae' : 0,
                 'Dendrobatidae' : 1,
                 'Hylidae' : 2,
                 'Leptodactylidae' : 3}
df['Family'] = df['Family'].replace(family_mapper)
" separate the data and the target "
data_df = df.drop(columns=['Family'])
target_df = df['Family']
# Split feature names by dtype; object columns are treated as categorical.
categorical_feature_mask = (data_df.dtypes == object)
categorical_cols_names = data_df.columns[categorical_feature_mask].tolist()
numerical_cols_names = data_df.columns[~categorical_feature_mask].tolist()
# Reorder columns: all numerical features (as float) first, then categorical.
data_df = pd.concat([data_df[numerical_cols_names].astype(float), data_df[categorical_cols_names]],axis = 1)
data_target_df = pd.concat([data_df, target_df], axis=1)
# +
" generate the Test SET "
# NOTE(review): sample() has no random_state here, so the split is not
# reproducible across runs — confirm that is acceptable.
nb_test_instances = 1000
test_df = data_target_df.sample(n=nb_test_instances)
data_test_df = test_df.drop(columns=['Family'])
target_test_df = test_df['Family']
" generate the Training SET "
# Training set = everything not sampled into the test set.
# NOTE(review): concat + drop_duplicates(keep=False) also silently discards
# any rows that were duplicated in the original data — confirm the dataset
# has no duplicate rows.
train_df = pd.concat([data_target_df,test_df]).drop_duplicates(keep=False)
data_train_df = train_df.drop(columns=['Family'])
target_train_df = train_df['Family']
data_test = data_test_df.values
target_test = target_test_df.values
# Positional indices of the numerical and categorical columns.
numerical_cols = np.arange(0,len(numerical_cols_names))
categorical_cols = np.arange(len(numerical_cols_names),data_df.shape[1])
# -
# ## Neighbors Generation (*Version 1*)
# +
nb_neighbors = 50
# Generate nb_neighbors synthetic neighbours per test instance (version 1).
list_neigh = generate_all_neighbors(data_test,numerical_cols,categorical_cols,nb_neighbors)
" store all the neighbors together "
# Stack the per-instance neighbour arrays into one flat matrix.
n = np.size(data_test,0)
all_neighbors = list_neigh[0]
for i in range(1,n) :
    all_neighbors = np.concatenate((all_neighbors, list_neigh[i]), axis=0)
" One hot encoding "
# NOTE(review): despite this label, no one-hot encoding occurs here — the
# categorical columns are only cast to int (errors ignored).
df_neigh = pd.DataFrame(data = all_neighbors,columns= numerical_cols_names + categorical_cols_names)
df_neigh[categorical_cols_names] = df_neigh[categorical_cols_names].astype(int,errors='ignore')
" Store the neighbors in a list"
# Re-split the flat matrix into one nb_neighbors-row block per test instance.
data_neigh = df_neigh.values
n = np.size(data_test,0)
list_neigh = []
j = 0
for i in range(0,n):
    list_neigh.append(data_neigh[j:(j+nb_neighbors),:])
    j += nb_neighbors
# -
# ## Neighbors Generation (*Version 2*)
# +
# Number of distinct values of each categorical column (the v2-v4 neighbour
# generators need these cardinalities).
# FIX: dropped a dead "j = 0" store that was never read in this loop, and
# build the list with a comprehension.
mat_nb_categ = [np.size(data_df[name].unique()) for name in categorical_cols_names]
list_neigh_2 = generate_all_neighbors_2(data_test,numerical_cols,categorical_cols,mat_nb_categ,nb_neighbors)
" store all the neighbors together "
# Stack the per-instance neighbour arrays into one flat matrix.
n = np.size(data_test,0)
all_neighbors_2 = list_neigh_2[0]
for i in range(1,n) :
    all_neighbors_2 = np.concatenate((all_neighbors_2, list_neigh_2[i]), axis=0)
df_neigh_2 = pd.DataFrame(data = all_neighbors_2,columns= numerical_cols_names + categorical_cols_names)
df_neigh_2[categorical_cols_names] = df_neigh_2[categorical_cols_names].astype(int,errors='ignore')
data_neigh_2 = df_neigh_2.values
n = np.size(data_test,0)
# Re-split the flat matrix into one nb_neighbors-row block per test instance.
list_neigh_2 = []
j = 0
for i in range(0,n):
    list_neigh_2.append(data_neigh_2[j:(j+nb_neighbors),:])
    j += nb_neighbors
# -
# ## Neighbors Generation (*Version 3*)
# +
# Version-3 neighbour generation; same stack/cast/re-split pattern as above.
list_neigh_3 = generate_all_neighbors_3(data_test,numerical_cols,categorical_cols,mat_nb_categ,nb_neighbors)
" store all the neighbors together "
n = np.size(data_test,0)
all_neighbors_3 = list_neigh_3[0]
for i in range(1,n) :
    all_neighbors_3 = np.concatenate((all_neighbors_3, list_neigh_3[i]), axis=0)
df_neigh_3 = pd.DataFrame(data = all_neighbors_3,columns= numerical_cols_names + categorical_cols_names)
df_neigh_3[categorical_cols_names] = df_neigh_3[categorical_cols_names].astype(int,errors='ignore')
data_neigh_3 = df_neigh_3.values
n = np.size(data_test,0)
# Re-split the flat matrix into one nb_neighbors-row block per test instance.
list_neigh_3 = []
j = 0
for i in range(0,n):
    list_neigh_3.append(data_neigh_3[j:(j+nb_neighbors),:])
    j += nb_neighbors
# -
# ## Neighbors Generation (*Version 4*)
# +
# Version-4 neighbour generation; takes an extra 'special' column list
# (empty here) and then follows the same stack/cast/re-split pattern.
special = []
list_neigh_4 = generate_all_neighbors_4(data_test,numerical_cols,categorical_cols,mat_nb_categ,nb_neighbors,special)
" store all the neighbors together "
n = np.size(data_test,0)
all_neighbors_4 = list_neigh_4[0]
for i in range(1,n) :
    all_neighbors_4 = np.concatenate((all_neighbors_4, list_neigh_4[i]), axis=0)
df_neigh_4 = pd.DataFrame(data = all_neighbors_4,columns= numerical_cols_names + categorical_cols_names)
df_neigh_4[categorical_cols_names] = df_neigh_4[categorical_cols_names].astype(int,errors='ignore')
data_neigh_4 = df_neigh_4.values
n = np.size(data_test,0)
# Re-split the flat matrix into one nb_neighbors-row block per test instance.
list_neigh_4 = []
j = 0
for i in range(0,n):
    list_neigh_4.append(data_neigh_4[j:(j+nb_neighbors),:])
    j += nb_neighbors
# -
# #### One hot encoding for the training and the test sets
# +
# NOTE(review): despite the heading above, no one-hot encoding happens in
# this cell — it only extracts the raw numpy arrays from the dataframes.
data_train = data_train_df.values
target_train = target_train_df.values
data_test = data_test_df.values
target_test = target_test_df.values
# -
# ## Training the MLP model
# +
" Sklearn MLP Classifier : "
# Two hidden layers of 100 units each; fixed seed for reproducibility.
# NOTE(review): learning_rate_init=.1 is very aggressive for adam (sklearn
# default is 1e-3) — confirm the model actually converges on this data.
mlp = MLPClassifier(hidden_layer_sizes=(100,100), max_iter=1000,
                    solver='adam', random_state=1,
                    learning_rate_init=.1)
model_nt = mlp.fit(data_train, target_train)
target_pred_mlp = model_nt.predict(data_test)
# -
# ## Execution of Split Based Selection Form Algorithm :
# Column index where the numerical features end and the categorical begin.
split_point = len(numerical_cols)
nb_models = 100
# Run the split-based subgroup selection once per neighbour-generation
# variant; each call returns a pair (list of subgroups, P) — P's exact
# meaning is defined in subgroups_discovery (not visible here).
(L_Subgroups_1,P_1) = SplitBasedSelectionForm (data_test, target_test, nb_models, model_nt, list_neigh,split_point,4)
(L_Subgroups_2,P_2) = SplitBasedSelectionForm (data_test, target_test, nb_models, model_nt, list_neigh_2,split_point,4)
(L_Subgroups_3,P_3) = SplitBasedSelectionForm (data_test, target_test, nb_models, model_nt, list_neigh_3,split_point,4)
(L_Subgroups_4,P_4) = SplitBasedSelectionForm (data_test, target_test, nb_models, model_nt, list_neigh_4,split_point,4)
# +
" Define the functions to save and load data "
import pickle
def save_obj(obj, name):
    """Pickle obj to '<name>.pkl' using the highest available protocol."""
    with open(name + '.pkl', 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return the object pickled at '<name>.pkl'."""
    with open(name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
# +
'SAVE THE DATA'
# Persist the splits and all four neighbour lists for the downstream
# notebooks.
# NOTE(review): save_obj writes '<path>.pkl' without creating directories —
# confirm './saved_data/' exists before running.
path = './saved_data/'
save_obj(data_train, path + 'data_train')
save_obj(target_train, path + 'target_train')
save_obj(data_test, path + 'data_test')
save_obj(target_test, path + 'target_test')
save_obj(list_neigh, path + 'list_neighbors_1')
save_obj(list_neigh_2, path + 'list_neighbors_2')
save_obj(list_neigh_3, path + 'list_neighbors_3')
save_obj(list_neigh_4, path + 'list_neighbors_4')
# -
'SAVE THE LIST OF THE SUBGROUPS'
save_obj(L_Subgroups_1, path + 'list_subgroups_1')
save_obj(L_Subgroups_2, path + 'list_subgroups_2')
save_obj(L_Subgroups_3, path + 'list_subgroups_3')
save_obj(L_Subgroups_4, path + 'list_subgroups_4')
| tabular data/classification/Benchmarks/3. anuran/supplementary tests/anuran_neigh_generations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Santosh-Gupta/AbstractNet/blob/master/cleaner.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="GWQeUZCvm1e7" colab_type="code" outputId="6c2e9e00-3f90-47e6-cf93-6e96b44154ef" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="_pYCbaVArHHr" colab_type="code" colab={}
import zipfile
import os
import sys
import pandas as pd
import numpy as np
import gc
from urllib import request
import json
import itertools
import gzip
import shutil
import ast
import pickle
# + [markdown] id="GgtStEsjUB_a" colab_type="text"
# # Cleaning
#
# + id="kCPEsfcVsFaS" colab_type="code" colab={}
def dlandopen(url, file, json):
    """Download the gzip archive at *url* to *file*, decompress it to the
    path *json* (a filename — note the parameter shadows the json module,
    kept for interface compatibility), delete the archive, and return the
    decompressed file's lines.
    """
    request.urlretrieve(url, file)
    with gzip.open(file, 'rb') as f_in:
        with open(json, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.remove(file)
    # FIX: the original opened this file and never closed it; a context
    # manager releases the handle deterministically.
    with open(json) as f:
        mylistH = f.readlines()
    return mylistH
# + id="PG-cFBs-sJ10" colab_type="code" colab={}
def firstDataframe(mylist, start, finish):
    """Parse the JSON lines mylist[start:finish] into a DataFrame."""
    records = [json.loads(raw) for raw in itertools.islice(mylist, start, finish)]
    frame = pd.DataFrame(records)
    del records
    gc.collect()
    return frame
# + id="JZ-BEn2FtLX0" colab_type="code" colab={}
def processData(dftbr, type):
    """Filter a raw paper dataframe: keep only papers with at least one
    citation whose fieldsOfStudy contains one of the two fields in *type*
    (a two-element list), dropping metadata columns along the way.
    (Parameter name 'type' shadows the builtin but is kept for interface
    compatibility.)
    """
    #TODO: Should only keep files with inCitations or outCitations + of type specified. Pruning would come next.
    dftbr['totalCitations'] = dftbr['inCitations'] + dftbr['outCitations']
    # BUG FIX: the original called .drop() without assigning the result, so
    # the columns were never actually removed (drop returns a new frame
    # unless inplace=True).
    dftbr = dftbr.drop(['journalVolume','journalPages','year','authors','sources','doiUrl', 'inCitations', 'outCitations'], axis = 1)
    dftbr = dftbr[( dftbr.astype(str)['totalCitations'] != '[]' )] #remove any paper with no citations
    dftbr = dftbr[dftbr.fieldsOfStudy.apply(lambda x: type[0] in x or type[1] in x)]
    return dftbr
# + id="2C6UKSPRn7iC" colab_type="code" colab={}
#Corpus goes from 000 to 180
# NOTE(review): later cells iterate shard indices up to 184 — confirm the
# actual corpus range.
url_template = "https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/2020-04-10/s2-corpus-{:03}.gz"
file_template = 'json{:03}.gz'
json_template = 'json{:03}'
save_template = "/content/gdrive/My Drive/TF2.2Data/CleanedData/df{:03}.json"
# + id="_joePvAUsGap" colab_type="code" outputId="617c1f4e-de38-42e8-a7f7-7d70ffb6a20c" colab={"base_uri": "https://localhost:8080/", "height": 102}
#To Finish
# Download -> parse -> filter -> save one corpus shard at a time. As written
# (i = 140, while i <= 140) the loop processes only shard 140, apparently
# resuming an earlier partial run.
i = 140
while i <= 140:
    first_list = dlandopen(url_template.format(i), file_template.format(i), json_template.format(i))
    print("File Unzipped: " + str(i))
    raw_df = firstDataframe(first_list, 0, len(first_list))
    print("Raw Dataset Created: " + str(i))
    first_clean_df = processData(raw_df, ["Biology", "Medicine"])
    del raw_df
    print("First Clean: " + str(i))
    print(first_clean_df.shape)
    first_clean_df.to_json(save_template.format(i))
    del first_clean_df
    print("Saved: " + str(i))
    i += 1
# + [markdown] id="K8vbbzLHT9IS" colab_type="text"
# # Pruning
# + id="H6qHPXZ4T8c-" colab_type="code" outputId="8f984dee-df66-4499-85fa-d34b0ce4ebb2" colab={"base_uri": "https://localhost:8080/", "height": 1000}
df_template = "/content/gdrive/My Drive/TF2.2Data/CleanedData/df{:03}.json"
# Collect the ids of every paper across all cleaned shards; used below to
# prune citations that point outside the corpus.
paper_set = set()
i = 0
while i <= 184:
    print(i)
    df = pd.read_json(df_template.format(i))
    b = set(df['id'])
    paper_set.update(b)
    i+=1
# + id="IZzkpjqxXbUB" colab_type="code" outputId="9477e598-e2bd-4211-9930-e46e5544c2c9" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(paper_set)
# + id="BzRmI-zWz5Mf" colab_type="code" outputId="ec6b307a-28b5-402d-e35d-0e53eaf10d53" colab={"base_uri": "https://localhost:8080/", "height": 1000}
df = pd.read_json(df_template.format(100))
df
# + id="vg1eeOzW2oEr" colab_type="code" outputId="981bee13-8a40-4943-e27b-a6530c618329" colab={"base_uri": "https://localhost:8080/", "height": 1000}
pruned_file_template = "/content/gdrive/My Drive/TF2.2Data/PrunedData/prdf{:03}.json"
# Keep only papers whose totalCitations intersect the corpus id set, i.e.
# papers connected to at least one other paper we actually have.
i = 0
while i <= 184:
    print(i)
    df = pd.read_json(df_template.format(i))
    print(df.shape)
    df2 = df[df["totalCitations"].transform(lambda x: bool(set(x)&paper_set))]
    print(df2.shape)
    df2.to_json(pruned_file_template.format(i))
    i += 1
# + id="bf6qFCSFBMU-" colab_type="code" outputId="0e392478-0668-4913-a11d-c427df3dcb5d" colab={"base_uri": "https://localhost:8080/", "height": 949}
df2
# + id="mF45WW5QBN-j" colab_type="code" outputId="895f0201-b279-488b-d447-387717d77776" colab={"base_uri": "https://localhost:8080/", "height": 1000}
df2
# + id="zucO0J4LBgOk" colab_type="code" outputId="47bf778e-9f69-4c85-e970-8d9348151f37" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Rebuild the paper-id set from the pruned shards.
# NOTE(review): this path root ('NLRData') differs from the 'TF2.2Data' root
# the pruned files were written to above — confirm which location is current.
df_template = "/content/gdrive/My Drive/NLRData/PrunedData/prdf{:03}.json"
paper_set = set()
i = 0
while i <= 184:
    print(i)
    df = pd.read_json(df_template.format(i))
    b = set(df['id'])
    paper_set.update(b)
    i+=1
print(len(paper_set))
| cleaner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Chapter 10 [Onlinestatsbook.com](onlinestatsbook.com) : "Estimation"
# ------
#
#
# #### Below are selected formulas and exercises from chapter 10 of the infamous onlinestatsbook.com, a highly trusted resource for learning about statistics.
#
# #### The formulas and exercises were chosen based on difficulty and based on if using python to understand the concept or answer the question was deemed useful.
#
# #### Please note the below does not include the questions from the case studies. A separate notebook for each case study can be found in this repository or is forthcoming.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy import stats
# -
# ### Section 1: "Introduction"
#
# One of the major applications of statistics is estimating **population parameters** from **sample statistics**.
#
# **Point estimates** are statistics that are a single value. Point estimates are usually supplemented by interval estimates called confidence intervals.
#
#
# ### Section 2: "Degrees of Freedom"
#
# The degrees of freedom (df) of an estimate is the number of independent pieces of information on which the estimate is based.
#
# http://onlinestatbook.com/2/estimation/df.html
#
# ### Section 3: "Characteristics of Estimators"
#
# **Bias**
# A statistic is biased if the long-term average value of the statistic is not the parameter it is estimating. More formally, a statistic is biased if the mean of the sampling distribution of the statistic is not equal to the parameter. The mean of the sampling distribution of a statistic is sometimes referred to as the expected value of the statistic.
#
# **Sampling Variability**
# The sampling variability of a statistic refers to how much the statistic varies from sample to sample and is usually measured by its standard error ; the smaller the standard error, the less the sampling variability.
#
# ### Section 8: "Confidence Interval on the Mean"
#
# In general, you compute the 95% confidence interval for the mean with the following formula **when we know the standard deviation of the sampling distribution of the mean (which never happens)**:
#
# Lower limit = M - Z.95σM
#
# Upper limit = M + Z.95σM
#
# You should use the t distribution rather than the normal distribution when the variance is not known and has to be estimated from sample data.
#
# However, with smaller sample sizes, the t distribution is leptokurtic, which means it has relatively more scores in its tails than does the normal distribution. As a result, you have to extend farther from the mean to contain a given proportion of the area. Recall that with a normal distribution, with a normal distribution, 95% of the distribution is within 1.96 standard deviations of the mean. Using the t distribution, if you have a sample size of only 5, 95% of the area is within 2.78 standard deviations of the mean. Therefore, the standard error of the mean would be multiplied by 2.78 rather than 1.96.
#
# #### Assume that the following five numbers are sampled from a normal distribution: 2, 3, 5, 6, and 9 and that the standard deviation is not known. The first steps are to compute the sample mean and variance:
#
#
# Five values sampled from a normal distribution; compute the sample mean
# and the unbiased (n - 1 denominator) sample variance.
nums = [2, 3, 5, 6, 9]
sample_mean = sum(nums) / len(nums)
squared_devs = [(value - sample_mean) ** 2 for value in nums]
sample_var = sum(squared_devs) / (len(nums) - 1)
sample_var
# #### The next step is to estimate the standard error of the mean. If we knew the population variance, we could use the following formula:
#
# ![alt text][img1]
#
# [img1]: http://onlinestatbook.com/2/estimation/graphics/sem_pop.jpg
#
# #### Instead we compute an estimate of the standard error (sM):
#
# ![alt text][img2]
#
# [img2]: http://onlinestatbook.com/2/estimation/graphics/sem_sample.jpg
# +
# Estimated standard error of the mean: s / sqrt(N).
std_err_estimate = sample_var ** 0.5 / len(nums) ** 0.5
std_err_estimate
# -
# #### The next step is to find the value of t from the t-distribution table. The value for the 95% interval for df = N - 1 = 4 is 2.776. The confidence interval is then computed just as it is when σM. The only differences are that sM and t rather than σM and Z are used.
#
# ![alt text][img1]
#
# [img1]: https://web.stanford.edu/dept/radiology/cgi-bin/classes/stats_data_analysis/lesson_4/t_table.gif
# +
# 95% CI using t(.975, df = 4) = 2.776, since sigma is estimated from the
# sample (N = 5).
#lower limit
print("lower limit= "+str(sample_mean - 2.776*std_err_estimate))
#upper limit
print("upper limit= "+str(sample_mean + 2.776*std_err_estimate))
# -
# ##### Question 3 out of 5.
# You take a sample (N = 25) of test scores from a population. The sample mean is 38, and the population standard deviation is 6.5. What is the 95% confidence interval on the mean?
# FIX: the POPULATION standard deviation is given (6.5), so the sampling
# distribution of the mean is normal — the 95% multiplier is z = 1.96, not
# the t value 2.064 (df = 24) the original used; t applies only when sigma
# is estimated from the sample.
z = 1.96
se = 6.5 / 25 ** .5  # sigma / sqrt(N) = 1.3
print("lower limit= " + str(38 - z * se))
print("upper limit= " + str(38 + z * se))
# ##### Question 4 out of 5.
# You take a sample (N = 9) of heights of fifth graders. The sample mean was 49, and the sample standard deviation was 4. What is the 99% confidence interval on the mean?
# Sample SD with N = 9 -> df = 8; the 99% two-tailed critical value is
# t(.995, 8) = 3.355. FIX: the original hard-coded 3.334, which does not
# match the df = 8 table value; compute it from scipy (already imported as
# `stats` at the top of this notebook).
t_crit = stats.t.ppf(0.995, df=8)  # ~= 3.3554
print("lower limit= " + str(49 - t_crit * (4 / (9 ** .5))))
print("upper limit= " + str(49 + t_crit * (4 / (9 ** .5))))
# ##### Question 5 out of 5.
# Based on the data below, what is the upper limit of the 95% confidence interval for the mean of A1?
#
# Data for A1 (20 scores), pasted as text and parsed into ints.
nums = """
1
4
5
5
7
9
10
11
12
13
14
14
17
19
20
23
24
24
24
29"""
nums = nums.split()
nums = [int(token) for token in nums]
nums
len(nums)
# +
# 95% CI on the mean: df = 19, so t(.975, 19) = 2.093.
sample_mean = sum(nums) / len(nums)
sample_var = sum((value - sample_mean) ** 2 for value in nums) / (len(nums) - 1)
std_err_est = sample_var ** .5 / (len(nums) ** .5)
print("lower limit= " + str(sample_mean - 2.093 * std_err_est))
print("upper limit= " + str(sample_mean + 2.093 * std_err_est))
# -
#
# ### Section 9: "t Distribution"
#
# consider the case in which you have a normal distribution but you do not know the standard deviation. You sample N values and compute the sample mean (M) and estimate the standard error of the mean (σM) with sM. Determining the probability that M will be within 1.96 sM of the population mean (μ)is a difficult problem because there are two ways in which M could be more than 1.96 sM from μ:
#
# (1) M could, by chance, be either very high or very low and (2) sM could, by chance, be very low.
#
# Intuitively, it makes sense that the probability of being within 1.96 standard errors of the mean should be smaller than in the case when the standard deviation is known (and cannot be underestimated). But exactly how much smaller? Fortunately, the way to work out this type of problem was solved in the early 20th century by William Sealy Gosset (publishing under the pseudonym "Student") who determined the distribution of a mean divided by an estimate of its standard error. This distribution is called the Student's t distribution or sometimes just the t distribution.
#
# The t distribution is very similar to the normal distribution when the estimate of variance is based on many degrees of freedom, but has relatively more scores in its tails when there are fewer degrees of freedom.
#
# The figure below shows t distributions with 2, 4, and 10 degrees of freedom and the standard normal distribution. Notice that the normal distribution has relatively more scores in the center of the distribution and the t distribution has relatively more in the tails.
#
# ![alt text][img1]
#
# [img1]:http://onlinestatbook.com/2/estimation/graphics/t_dist.jpg
# suppose you sampled 9 values from a normal population and estimated the standard error of the mean (σM) with sM. What is the probability that M would be within 1.96sM of μ? Since the sample size is 9, there are N - 1 = 8 df. From Table 1 you can see that with 8 df the probability is 0.95 that the mean will be within 2.306 sM of μ. The probability that it will be within 1.96 sM of μ is therefore lower than 0.95. the figure below shows that 0.086 of the area of a t distribution is more than 1.96 standard deviations from the mean, so the probability that M would be less than 1.96sM from μ is 1 - 0.086 = 0.914.
#
# ![alt text][img1]
#
# [img1]: http://onlinestatbook.com/2/estimation/graphics/t_area.gif
# ##### Question 4 out of 5.
# In a t distribution with 10 degrees of freedom, what is the probability of getting a value within two standard deviations of the mean?
from scipy import stats
# Two-tailed probability of |T| <= 2 for a t distribution with 10 df:
# sf(2) is the upper-tail area, doubled for both tails, subtracted from 1.
1-stats.t.sf(2,df=10)*2
# #### Question 5 out of 5.
# There is a population of test scores with an unknown standard deviation. You sample 21 scores from this population, and you calculate the mean and standard deviation. You get a value for the mean that is 1.5 standard errors greater than what you think is the population mean. What is the probability that you would get a value 1.5 standard deviations or more from the mean in this t distribution?
# P(|T| >= 1.5) with df = 21 - 1 = 20 (two-tailed).
stats.t.sf(1.5,df=20)*2
# #### Section 11: "Confidence Interval on Diff bw Means"
#
# The difference in sample means is used to estimate the difference in population means. The accuracy of the estimate is revealed by a confidence interval.
# In order to construct a confidence interval, we are going to make three assumptions:
#
# * The two populations have the same variance. This assumption is called the assumption of homogeneity of variance.
# * The populations are normally distributed.
# * Each value is sampled independently from each other value.
#
# The consequences of violating these assumptions are discussed in a later section. For now, suffice it to say that small-to-moderate violations of assumptions 1 and 2 do not make much difference.
# A confidence interval on the difference between means is computed using the following formula:
# * Lower Limit = M1 - M2 -(tCL)()
# * Upper Limit = M1 - M2 +(tCL)()
#
# where M1 - M2 is the difference between sample means, tCL is the t for the desired level of confidence, and is the estimated standard error of the difference between sample means.
#
# The first step is to compute the estimate of the standard error of the difference between means.
#
# ![alt text][img1]
#
# [img1]: http://onlinestatbook.com/2/sampling_distributions/graphics/equal_var.gif
#
# Since we are assuming the population variances are the same, we estimate this variance by averaging our two sample variances. Thus, our estimate of variance is:
#
# ![alt text][img2]
#
# [img2]: http://onlinestatbook.com/2/estimation/graphics/MSE.gif
#
#
# The next step is to find the t to use for the confidence interval (tCL). To calculate tCL, we need to know the degrees of freedom. The degrees of freedom is the number of independent estimates of variance on which MSE is based. This is equal to (n1 - 1) + (n2 - 1) where n1 is the sample size of the first group and n2 is the sample size of the second group.
#
#
# #### Computations for Unequal Sample Sizes (optional)
# The calculations are somewhat more complicated when the sample sizes are not equal. One consideration is that MSE, the estimate of variance, counts the sample with the larger sample size more than the sample with the smaller sample size. Computationally this is done by computing the sum of squares error (SSE) as follows:
#
# ![alt text][img3]
#
# [img3]: http://onlinestatbook.com/2/estimation/graphics/sse.gif
#
#
# Then, the formula
#
# ![alt text][img4]
#
# [img4]: http://onlinestatbook.com/2/estimation/graphics/sed.gif
#
# is replaced by
#
# ![alt text][img5]
#
# [img5]: http://onlinestatbook.com/2/estimation/graphics/sed.gif
#
# where nh is the harmonic mean of the sample sizes and is computed as follows:
#
# ![alt text][img6]
#
# [img6]: http://onlinestatbook.com/2/estimation/graphics/nh.gif
#
# ##### Question 2 out of 4.
# You are comparing men and women on hours spent watching TV. You pick a sample of 12 men and 14 women and calculate a confidence interval on the difference between means. How many degrees of freedom does your t value have?
12-1+14-1  # degrees of freedom for two independent samples: (n1 - 1) + (n2 - 1) = 24
# ##### Question 3 out of 4.
# You are comparing freshmen and seniors at your college on hours spent studying per day. You pick a sample of 11 people from each group. For freshmen, the mean was 3 and the variance was 1.2. For seniors, the mean was 2 and the variance was 1. Calculate a 90% confidence interval on the difference between means (freshmen - seniors). What is the lower limit of this CI?
# +
# 90% CI on (freshmen - seniors): pooled-variance two-sample t interval.
mean = 3-2  # difference between the sample means
var_est = (1.2+1)/2  # pooled variance estimate (equal sample sizes, so a simple average)
std_est = ((2 * var_est)/11)**.5  # estimated standard error of the difference between means
df = 11-1+11-1  # (n1 - 1) + (n2 - 1) = 20
t = 1.725  # t for 90% confidence with 20 df (two-tailed, from the t table)
print("lower limit= "+str(mean - t * std_est))
# -
# Question 4 out of 4.
# Scores on a test taken by 1st graders and 2nd graders were compared to look at development. The five 1st graders sampled got the following scores: 4, 3, 5, 7, 4. The five 2nd graders sampled got the following scores: 7, 9, 8, 6, 9. Compute the 95% confidence interval for the difference between means (2nd graders - 1st graders).
first_g = [4, 3, 5, 7, 4]
second_g = [7, 9, 8, 6, 9]
# +
# 95% CI on the difference between means (2nd graders - 1st graders):
# pooled-variance two-sample t interval with equal sample sizes.
# Difference between sample means.
# Fix: each sum must be divided by its OWN group's size (the original divided
# the first-group sums by len(second_g); it only worked because the sizes match).
first_g_mean = sum(first_g)/len(first_g)
second_g_mean = sum(second_g)/len(second_g)
mean = second_g_mean - first_g_mean
# Unbiased sample variance for each group (squared deviations over n - 1).
first_g_var_est = sum([(g-first_g_mean)**2 for g in first_g])/(len(first_g)-1)
second_g_var_est = sum([(g-second_g_mean)**2 for g in second_g])/(len(second_g)-1)
# Pooled (averaged) estimate of the common population variance.
var_est = (first_g_var_est+second_g_var_est)/2
# Estimated standard error of the difference between means.
std_est = ((2 * var_est)/len(first_g))**.5
# Degrees of freedom: (n1 - 1) + (n2 - 1) = 8.
df = len(first_g)-1+len(second_g)-1
# t for df=8 and .05 two tail (from the t table); the original comment said df=10.
t = 2.306
print("lower limit= "+str(mean - t * std_est))
print("upper limit= "+str(mean + t * std_est))
# -
# -
# #### Section 12: "Correlation"
#
# The computation of a confidence interval on the population value of Pearson's correlation (ρ) is complicated by the fact that the sampling distribution of r is not normally distributed. The solution lies with Fisher's z' transformation described in the section on the sampling distribution of Pearson's r.
#
# * Convert r to z'
# * Compute a confidence interval in terms of z'
# * Convert the confidence interval back to r.
#
#
# http://onlinestatbook.com/2/sampling_distributions/samp_dist_r.html
# +
def z_trans(r):
    """Fisher z' transformation of a correlation coefficient r."""
    return np.log((1 + r) / (1 - r)) * .5

# Fisher-z 95% confidence interval for r = -.654 with n = 34.
n = 34
r = -.654
z = z_trans(r)
std_err_z = 1/(n-3)**.5  # standard error of z' is 1/sqrt(n - 3)
print("lower limit= " + str(z - 1.96 * std_err_z))
print("upper limit= " + str(z + 1.96 * std_err_z))
# use the z'-to-r table to convert the limits back to the r scale
# -
np.exp(np.log(10))  # sanity check: exp and log are inverses, so this returns 10
def r_trans(z):
    """Inverse Fisher z' transformation: convert a z' value back to a correlation r."""
    e2z = np.exp(2*z)
    return (e2z - 1) / (e2z + 1)
# convert the z' confidence limits computed above back to the r scale
r_trans(-1.13)
r_trans(-.43)
# ##### Question 2 out of 3.
# A sample of 28 was taken from a population, and r = .45. What is the 95% confidence interval for the population correlation?
# 95% CI for r = .45, n = 28: transform to z', build the interval, transform back.
n=28
r=.45
z = z_trans(r)
std_err_z = 1/(n-3)**.5  # standard error of z'
lower = z - 1.96 * std_err_z
upper= z + 1.96 * std_err_z
print(r_trans(lower))
print(r_trans(upper))
# Question 3 out of 3.
# The sample correlation is -0.8. If the sample size was 40, then the 99% confidence interval states that the population correlation lies between -.909 and
# 99% CI for r = -.8, n = 40 (z ~= 2.58 for 99% confidence).
n=40
r=-.8
z = z_trans(r)
std_err_z = 1/(n-3)**.5  # standard error of z'
lower = z - 2.58 * std_err_z
upper= z + 2.58 * std_err_z
print(r_trans(lower))
print(r_trans(upper))
# #### Section 14: "Exercises"
#
# q11 A population is known to be normally distributed with a standard deviation of 2.8. (a) Compute the 95% confidence interval on the mean based on the following sample of nine: 8, 9, 10, 13, 14, 16, 17, 20, 21. (b) Now compute the 99% confidence interval using the same data.
# +
# q11: CI on the mean when the POPULATION standard deviation is known (z interval).
nums = [8, 9, 10, 13, 14, 16, 17, 20, 21]
sample_mean = sum(nums)/len(nums)
# Fix: 2.8 is the population STANDARD DEVIATION; the variable was misnamed pop_var.
pop_std = 2.8
std_err = pop_std/len(nums)**.5  # sigma / sqrt(N)
#A: 95% interval (z = 1.96)
print("lower limit= "+str(sample_mean - 1.96*std_err))
print("upper limit= "+str(sample_mean + 1.96*std_err))
# -
#B: 99% interval (z ~= 2.58)
print("lower limit= "+str(sample_mean - 2.58*std_err))
print("upper limit= "+str(sample_mean + 2.58*std_err))
# ##### Q12 A person claims to be able to predict the outcome of flipping a coin. This person is correct 16/25 times. Compute the 95% confidence interval on the proportion of times this person can predict coin flips correctly. What conclusion can you draw about this test of his ability to predict the future?
# 95% CI on a proportion (16/25 correct), with the 0.5/N continuity correction.
p = 16/25
s_p = (p*(1-p)/25)**.5  # estimated standard error of the proportion
print("lower limit= "+str(p - 1.96*s_p - .5/25))
print("upper limit= "+str(p + 1.96*s_p +.5/25))
# Fix: the continuity correction WIDENS the interval, so it adds to the margin
# (the original printed 1.96*s_p - .5/25, inconsistent with the limits above).
print("margin of error= "+str(1.96*s_p + .5/25))
# ###### we can conclude this person cannot accurately predict the outcome of a coin flip
# ##### Q13 What does it mean that the variance (computed by dividing by N) is a biased statistic?
#
# It means that the statistic systematically misses the population variance: dividing by N tends to underestimate it, so the average of this variance over a large number of samples would be smaller than the actual population variance. (Dividing by N - 1 instead removes this bias.)
# ##### Q14 A confidence interval for the population mean computed from an N of 16 ranges from 12 to 28. A new sample of 36 observations is going to be taken. You can't know in advance exactly what the confidence interval will be because it depends on the random sample. Even so, you should have some idea of what it will be. Give your best estimation.
#
# we know that we calculated the lower band of the confidence intervals as follows:
#
# 20 - 1.96 x std/16^.5
#
# we can back into the standard error with some algebra:
# +
# Back out sigma from the old interval: half-width 8 = 1.96 * sigma / sqrt(16).
std = ((12-20)/-1.96)*(16**.5)
#therefore the new confidence interval for a sample size of 36 will be:
print("lower limit= "+str(20 - 1.96*std/(36**.5)))
print("upper limit= "+str(20 + 1.96*std/(36**.5)))
# -
# ##### Q15 You take a sample of 22 from a population of test scores, and the mean of your sample is 60. (a) You know the standard deviation of the population is 10. What is the 99% confidence interval on the population mean. (b) Now assume that you do not know the population standard deviation, but the standard deviation in your sample is 10. What is the 99% confidence interval on the mean now?
#
# part A can be answered similarly to q14 replacing 1.96 with 2.576
#
# part B is calculated as follows, after finding that the t value for df = 22 - 1 = 21 at the 99% confidence level is 2.831:
# t interval: t(df=21, 99%) = 2.831; standard error estimated as s / sqrt(N).
print("lower limit= "+str(60 - 2.831*(10/(22**.5))))
print("upper limit= "+str(60 + 2.831*(10/(22**.5))))
# ##### Q16 You read about a survey in a newspaper and find that 70% of the 250 people sampled prefer Candidate A. You are surprised by this survey because you thought that more like 50% of the population preferred this candidate. Based on this sample, is 50% a possible population proportion? Compute the 95% confidence interval to be sure.
# 95% CI on the surveyed proportion (.7 of 250), with the 0.5/N continuity correction.
p = .7
s_p = (p*(1-p)/250)**.5  # estimated standard error of the proportion
print("lower limit= "+str(p - 1.96*s_p - .5/250))
print("upper limit= "+str(p + 1.96*s_p +.5/250))
# Fix: the continuity correction WIDENS the interval, so it adds to the margin
# (the original printed 1.96*s_p - .5/250, inconsistent with the limits above).
print("margin of error= "+str(1.96*s_p + .5/250))
# 50% is way outside the confidence interval
# ##### Q17 Heights for teenage boys and girls were calculated. The mean height for the sample of 12 boys was 174 cm and the variance was 62. For the sample of 12 girls, the mean was 166 cm and the variance was 65. (a) What is the 95% confidence interval on the difference between population means? (b) What is the 99% confidence interval on the difference between population means? (c) Do you think the mean difference in the population could be about 5? Why or why not?
# +
#a: 95% CI on the difference between mean heights (pooled-variance t interval)
mean_diff = 174 - 166
var_est = (62+65)/2  # pooled variance estimate (equal sample sizes)
std_err_est = ((2 * var_est)/12)**.5  # standard error of the difference between means
#find t for df=22 and .05 two tail
t = 2.074
print("lower limit= "+str(mean_diff - t * std_err_est))
print("upper limit= "+str(mean_diff + t * std_err_est))
# +
#b: 99% CI on the same difference
#find t for df=22 and .01 two tail
t = 2.819
print("lower limit= "+str(mean_diff - t * std_err_est))
print("upper limit= "+str(mean_diff + t * std_err_est))
# -
# #c
# the mean difference could be about five because that value is well within the confidence interval
# ##### Q18 You were interested in how long the average psychology major at your college studies per night, so you asked 10 psychology majors to tell you the amount they study. They told you the following times: 2, 1.5, 3, 2, 3.5, 1, 0.5, 3, 2, 4. (a) Find the 95% confidence interval on the population mean. (b) Find the 90% confidence interval on the population mean.
# +
# q18: t interval on the mean study time (population sigma unknown).
nums = [2, 1.5, 3, 2, 3.5, 1, 0.5, 3, 2, 4.]
sample_mean = sum(nums)/len(nums)
sample_var = sum([(n - sample_mean)**2 for n in nums])/(len(nums)-1)  # unbiased (n - 1)
std_err_est = sample_var**.5/(len(nums)**.5)  # s / sqrt(N)
#a: 95% CI; t for df=9 and .05 two tail is 2.262
print("lower limit= "+str(sample_mean - 2.262 * std_err_est))
print("upper limit= "+str(sample_mean + 2.262 * std_err_est))
# +
#extra way to get t value: inverse survival function at alpha/2
n=len(nums)
df = n-1
alpha =.05
stats.t(df).isf(alpha/2)
# +
#extra way to get t confidence interval directly from scipy
stats.t.interval(1-alpha,df,sample_mean,stats.sem(nums))
# -
# -
#
# ##### Q19 True/false: As the sample size gets larger, the probability that the confidence interval will contain the population mean gets higher. (relevant section & relevant section)
#
# true
#
#
# ##### Q21 True/false: You have a sample of 9 men and a sample of 8 women. The degrees of freedom for the t value in your confidence interval on the difference between means is 16. (relevant section & relevant section)
#
# false it is 15 assuming we don't know the population variance
#
# ##### Q22 True/false: Greek letters are used for statistics as opposed to parameters. (relevant section)
#
# false
#
# ##### Q23 True/false: In order to construct a confidence interval on the difference between means, you need to assume that the populations have the same variance and are both normally distributed. (relevant section)
#
# false
#
# #### remaining exercises are from case studies. please go to those individual notebooks to peruse the responses to those exercises. that concludes chapter 10!
| chapters/osb_chap10_estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="aPYMlUtrZB4b"
# # Desafio Codenation: Descubra as melhores notas de matemática do ENEM 2016
#
# Você deverá criar um modelo para prever a nota da prova de matemática de quem participou do ENEM 2016.
# + [markdown] colab_type="text" id="zJopTJkVZPdp"
# O contexto do desafio gira em torno dos resultados do ENEM 2016 (disponíveis no arquivo train.csv). Este arquivo, e apenas ele, deve ser utilizado para todos os desafios. Qualquer dúvida a respeito das colunas, consulte o Dicionário dos Microdados do Enem 2016.
#
# Muitas universidades brasileiras utilizam o ENEM para selecionar seus futuros alunos e alunas. Isto é feito com uma média ponderada das notas das provas de matemática, ciências da natureza, linguagens e códigos, ciências humanas e redação, com os pesos abaixo:
#
# * matemática: 3
# * ciências da natureza: 2
# * linguagens e códigos: 1.5
# * ciências humanas: 1
# * redação: 3
#
# No arquivo test.csv crie um modelo para prever nota da prova de matemática (coluna NU_NOTA_MT) de quem participou do ENEM 2016.
#
# Salve sua resposta em um arquivo chamado answer.csv com duas colunas: NU_INSCRICAO e NU_NOTA_MT.
#
# Faça o upload do arquivo answer.csv usando o botão “Submeter resposta”.
# + [markdown] colab_type="text" id="0R9cfh9xZgVz"
# ## Load libs
# + colab={} colab_type="code" id="-0ZaNvlj7nqn"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
# + [markdown] colab_type="text" id="XDony-LVZjv6"
# ## Load dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" id="g3hYkGSp8Hie" outputId="a360bb0a-0208-46b6-f5d8-b5c56776d85d"
# Load the ENEM 2016 training data; first column is the row index.
traindf = pd.read_csv('train.csv', index_col=0)
traindf.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="1q58NzRhjo34" outputId="c4cf7efa-f3a5-41ab-ba67-5f4502880b57"
# Target variable: math score.
traindf['NU_NOTA_MT'].head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="dRQ83MMv8dCo" outputId="5030defe-eec4-48f6-aff0-2f534cc1f214"
traindf.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 346} colab_type="code" id="uD3o8dTplmqn" outputId="f8d9a691-f894-45f8-8f02-57b7300c2608"
traindf.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" id="X4iEEfLd97UZ" outputId="bfb66314-49fc-40ee-c0fe-fe656c25268d"
# Held-out test data (no NU_NOTA_MT column -- it is what we must predict).
testdf = pd.read_csv('test.csv')
testdf.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="j7KDFLTa-3gV" outputId="5fd6bf2a-9453-4936-9af2-141bc19f8177"
testdf.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 317} colab_type="code" id="1S2T8o_Yl1RC" outputId="870163fd-ee14-4f4c-b2ce-ddcfc40fe34c"
testdf.describe()
# + colab={} colab_type="code" id="WK8y6Gug_tUS"
# Frame that will hold the submission (NU_INSCRICAO, NU_NOTA_MT).
answerdf = pd.DataFrame()
# + [markdown] colab_type="text" id="WsLLt3BGcYdB"
# ## Data analysis
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="MHJK7uH9Z2vY" outputId="1cce36d2-a3a6-4eb1-d525-ec78eacfd2c0"
# Copy the candidate ids from the test set into the submission frame.
answerdf['NU_INSCRICAO'] = testdf['NU_INSCRICAO']
answerdf.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="C6JQSIJFaRob" outputId="b3f32547-9c31-48bb-b9df-a7f07a67fd55"
answerdf.shape
# + [markdown] colab_type="text" id="WTd4UCeS-yKa"
# ### First hypothesis of features
# + [markdown] colab_type="text" id="cGmDBKVS_XQI"
# Foi percebido que as features NU_IDADE e IN_TREINEIRO possui uma baixa correlação com as outras features
# + colab={} colab_type="code" id="lO1rrgXG-sBe"
# Candidate features for the first hypothesis: age, "trainee" flag and the other exam scores.
var = ['NU_IDADE','IN_TREINEIRO','NU_NOTA_CN','NU_NOTA_CH','NU_NOTA_LC','NU_NOTA_REDACAO']
# + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" id="qpvTivxG8_Ua" outputId="57d00e4e-8072-4f7c-9290-0bd1b3622613"
traindf[var].corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" id="RpIpHnUCamnQ" outputId="fe70b65c-8f52-4615-e064-f7df8f5b26ed"
testdf[var].corr()
# + [markdown] colab_type="text" id="SqR_9DTi_TYY"
# ### Second hypothesis of features
# + [markdown] colab_type="text" id="75duHbbw_sjV"
# The NU_IDADE and IN_TREINEIRO features were dropped, as they proved not to be relevant to the model's performance
# + colab={} colab_type="code" id="chkc9-Rwa7um"
# Final feature set: the other four exam scores only.
features = ['NU_NOTA_CN','NU_NOTA_CH','NU_NOTA_LC','NU_NOTA_REDACAO']
# + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="s08iweGO-e04" outputId="efaf2c6f-c8f7-4af3-8185-fc9247627cbe"
traindf[features].corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="ozNtPrFX-mK8" outputId="af6418ef-d9f3-4c80-ce57-697e701ec566"
testdf[features].corr()
# + [markdown] colab_type="text" id="dkqVD8Wg__4F"
# Para facilitar a percepção quanto ao nível de correlação entre as features foi gerado um mapa de calor com as mesmas
# + colab={"base_uri": "https://localhost:8080/", "height": 372} colab_type="code" id="dA1TvHWp9O07" outputId="9fce3e15-3365-4acf-93f8-e2650d8ed891"
# %matplotlib inline
# Heatmap of the feature correlations in the training set.
plt.figure(figsize=(4,4))
plt.title('Train Features')
sns.heatmap(traindf[features].corr(), annot=True, linewidths=0.5, linecolor='black', cmap='Reds')
plt.xticks(rotation=90)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 372} colab_type="code" id="I3QUNmxrbAPQ" outputId="e6157c47-2091-482d-d00f-0e03ff454047"
# %matplotlib inline
# Same heatmap for the test set, to check the correlation structure matches.
plt.figure(figsize=(4,4))
plt.title('Test Features')
sns.heatmap(testdf[features].corr(), annot=True, linewidths=0.5, linecolor='black', cmap='Reds')
plt.xticks(rotation=90)
plt.show()
# + [markdown] colab_type="text" id="xWWa1Zy0cPDF"
# ## Pré-processamento dos dados
# + [markdown] colab_type="text" id="wXs7McwUAaF8"
# Visto que existem dados nulos no dataset existem duas abordagens que podem ser tomadas:
#
# 1. Excluir os dados nulos do dataset. O que poderia diminuir drasticamente as amostras para treinar o modelo;
#
# 2. Substituir os valores nulos por zeros. O que conserva o número de amostras do dataset.
#
# 3. Substituir os valores nulos com o valor da média das features. O que conserva o número de amostras do dataset.
#
# A abordagem escolhida será a segunda, visto que a primeira com certeza irá reduzir o potencial do modelo de entender o padrão e visto que a terceira foi testada e não gerou bons resultados.
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="ueDGjxTUdERx" outputId="c71381a8-7fe2-4810-fef8-d9aca4ac8bfb"
traindf[features].isnull().sum()
# + colab={} colab_type="code" id="RpcV7C-4UDV9"
traindf['NU_NOTA_MT'].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="6PYSXzXvcvAK" outputId="b0c2843a-70e9-467a-eb97-8607a9895fa1"
testdf[features].isnull().sum()
# + colab={} colab_type="code" id="ZTbxFXjrTYYj"
# Fill missing scores with 0 -- treatment for absent/voided exams.
# Fix: Series.fillna(..., inplace=True) on a column selection is chained
# assignment; it is deprecated in pandas 2.x and not guaranteed to write back
# to the frame. Assigning the filled Series to the column is the supported form.
for _col in ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_REDACAO', 'NU_NOTA_LC', 'NU_NOTA_MT']:
    traindf[_col] = traindf[_col].fillna(0)
for _col in ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_REDACAO', 'NU_NOTA_LC']:
    testdf[_col] = testdf[_col].fillna(0)
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="_-y11zQsdIhB" outputId="95348c8c-a27e-454f-db87-cef27fcd5d08"
traindf[features].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="5phxuN2tT-rF" outputId="392aeca7-b0fc-45ad-881a-f754b6491e82"
traindf['NU_NOTA_MT'].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="JmSJppaVdZ1b" outputId="691f333d-9d8f-4d0e-c749-3962cae633ac"
testdf[features].isnull().sum()
# + [markdown] colab_type="text" id="WioV4lWsdk1R"
# ## Train Test Split
# + [markdown] colab_type="text" id="7285MpboD3kN"
# Nesta etapa os dados serão separados da target feature, ou seja, aquela que se deseja prever, além de serem separados em dois conjuntos, um de treino e outro de teste.
#
# Neste experimento os dados serão separados em uma proporção 80/20, ou seja, 80% das amostras do dataset serão selecionadas para o conjunto de treino e os outros 20% para o conjunto de teste, que será usado para validação do modelo.
# + [markdown] colab_type="text" id="QwX-oSkqJ5Q1"
# ### Separação das features do target
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="QyVx0yuCFNgm" outputId="11b2734e-f86c-42b1-8f38-5e5a62151cf7"
# Feature matrix: the four other exam scores.
X = traindf[features]
X.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="wG0RW1BzdbTw" outputId="e0bdd136-ee95-48a9-fe4b-c7194c2506de"
# Target: the math score.
y = traindf['NU_NOTA_MT']
y.head()
# + [markdown] colab_type="text" id="-4bk58nvKBam"
# ### Separação dos dados em treino e teste
# + colab={} colab_type="code" id="Ck6pPklVFDOw"
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# + [markdown] colab_type="text" id="QJ2a1RrlKGK8"
# ### Normalização e padronização das escalas das features
# + colab={} colab_type="code" id="hG7XAP3aJ0oz"
# Standardize the features; fit on the training split only to avoid leakage,
# then apply the same transform to the held-out split.
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
# + [markdown] colab_type="text" id="HHSUqMXDtcw_"
# ## Regression
# + [markdown] colab_type="text" id="g_C86yZLtif3"
# ### Linear regression
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="uXRAo5SofwEh" outputId="2d7b9594-c67d-4215-957f-39ade2d9510d"
# Baseline model: ordinary least squares on the scaled features.
lr = LinearRegression()
lr.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="Kbj4HNvDGRtQ" outputId="95a74a4b-5616-467c-952e-1d3392f1f7e6"
# Retrieve the intercept and the slope coefficients.
lr.intercept_, lr.coef_
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="RLx3biKgGf0v" outputId="1218d9e4-0a66-4ffe-c9d8-c01a7b26f340"
# Predictions on the held-out split.
y_pred = lr.predict(X_test)
y_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="MzpYSspDMtz5" outputId="a678df5c-29f1-4cd0-dfb9-a29982b39459"
y_test
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="lNkDmiIRGsIR" outputId="2bea14ca-281a-4b62-9b95-28fd29098803"
# Coefficient of determination of the baseline.
r2_score(y_test, y_pred)
# + [markdown] colab_type="text" id="vKyPWyOhtoGf"
# ### Random Forest regression
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="cedXmr1-LABT" outputId="8280dab1-516b-453f-ed6c-d7ffd9c24523"
# Random-forest model on the same scaled features.
regr = RandomForestRegressor(max_depth=6, n_estimators=50, random_state=0)
regr.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="SqiOz78bOZCA" outputId="b2b6952a-28df-4d50-96b0-3e3837c9359f"
regr.feature_importances_
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="mt_U9sX3Obh-" outputId="ca8fe698-ddff-4d74-c06d-f17fdc899a42"
y_pred = regr.predict(X_test)
y_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="aCNlws2xOhrt" outputId="68ca650a-2b4e-4c64-c8d1-05fad6011f6c"
r2_score(y_test, y_pred)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="EeRAwbZbSOe6" outputId="083c7e05-c1f0-49db-8c7d-b33ecdfdf8b5"
# Fix: the model was fitted on StandardScaler-transformed features, so the
# competition test set must go through the SAME scaler before predicting
# (the original predicted on raw testdf[features]).
y_pred = regr.predict(sc.transform(testdf[features]))
y_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="clgIn5PuYj62" outputId="eff391e1-2a1b-4387-d6f4-5e6638e244e1"
answerdf['NU_NOTA_MT'] = y_pred
answerdf.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="gl6GGzyXa2Tx" outputId="b1663a8b-e57b-4886-e5f9-99c8efec0722"
testdf.shape, answerdf.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="PhmGSh-ra-4R" outputId="400b5ece-7499-4109-b81e-68650e87ffd2"
answerdf.describe()
# + colab={} colab_type="code" id="b7xOx1F9aOhL"
# Write the submission file.
answerdf.to_csv('answer.csv', index=False)
# + [markdown] colab_type="text" id="0s7P_Y3OtNaO"
# ### Random Forest with Grid Search Cross-Validation
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="eXpImbNcajoQ" outputId="8ea2a108-c447-4944-de13-91fe5984d4aa"
# Perform Grid-Search over tree depth and ensemble size, scored by R^2 (5-fold CV).
# Fix: seed the search estimator so the search is reproducible.
gsc = GridSearchCV(
    estimator=RandomForestRegressor(random_state=0),
    param_grid={
        'max_depth': range(3,7),
        'n_estimators': (10, 50, 100, 1000),
    },
    cv=5, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X, y)
best_params = grid_result.best_params_
# Refit with the best hyper-parameters.
# Fix: random_state and verbose take ints, not booleans (False was silently coerced to 0).
rfr = RandomForestRegressor(max_depth=best_params["max_depth"], n_estimators=best_params["n_estimators"], random_state=0, verbose=0)
# Perform K-Fold CV to estimate generalization of the tuned model.
scores = cross_val_score(rfr, X, y, cv=10, scoring='r2')
scores
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="l6E_YXoslyaC" outputId="a0ba318a-124b-4507-a10f-b57279f6772e"
# Mean cross-validated R^2, as a percentage.
scores.mean() * 100
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="SI86EMxbi2_n" outputId="375e1fea-97b5-4008-f4c4-37318c8be213"
# Refit the tuned model on ALL training data (raw features, consistent with the search).
rfr.fit(traindf[features], traindf['NU_NOTA_MT'])
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="VykoYowvg-N6" outputId="3ee93bb2-aba3-43f0-fa03-dc69b18b2d2d"
# Predict the competition test set (raw features, matching the fit above).
y_pred = rfr.predict(testdf[features])
y_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="ZEd3ucgmhDwx" outputId="fcad904f-4c47-4cf3-a171-806bd51548a6"
answerdf['NU_NOTA_MT'] = y_pred
answerdf.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="azVLWDwjlEGs" outputId="394d1ae2-9073-4c6d-9ed6-da5d63003b68"
answerdf.describe()
# + colab={} colab_type="code" id="-nNgc0HBhIx6"
# Write the submission, rounding scores to one decimal place.
answerdf.to_csv('answer.csv', index=False, float_format='%.1f')
# + colab={} colab_type="code" id="wcteIt8mkPC_"
| module_8/challenge/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Principal Component Analysis
# * `dim1` -- PCA with given number of components
# * `dim2` -- PCA Minka's MLE
# * `dim3` -- PCA required Explained Variance
# %load_ext autoreload
# %autoreload 2
# add path
import sys; import os; sys.path.append(os.path.realpath("../"))
# demo datasets
from datasets.demo1 import X_train, Y_train, fold_ids, X_valid, Y_valid, meta as meta_data
#meta_data
# +
# transformer implementations
typ = 'dim4'

# Fix: string comparison must use '==', not 'is' -- identity of equal string
# literals is an interning accident, not a language guarantee.
if typ == 'dim1':
    from verto.dim1 import trans, meta
    trans.set_params(**{'pca__n_components': 3})
elif typ == 'dim3':
    from verto.dim3 import trans, meta
    trans.set_params(**{'required_ev': 0.8})  # slowly increase from 0.1 towards 100%
else:
    # dynamic import of verto.<typ> (e.g. dim2, dim4)
    tmp = __import__("verto."+typ, fromlist=['trans', 'meta'])
    trans = tmp.trans
    meta = tmp.meta
# -
meta
# ## Transform
# %%time
# Fit the selected dimensionality-reduction transformer on the training data.
trans.fit(X_train, Y_train)
# %%time
# Project the training data into the reduced space.
X_new = trans.transform(X_train)
from seasalt import create_feature_names
# Generate column names like "<prefix>0", "<prefix>1", ... for the components.
feature_names = create_feature_names(meta['feature_names_prefix'], X_new.shape[1])
print(feature_names)
import pandas as pd
df_new = pd.DataFrame(data=X_new, columns=feature_names)
# ## Evaluate
# - check if the PCA components are "good" predictors
# - eyeball the p-values of the logistic regression coefficients
df_new.head()
import statsmodels.api as sm
#lr = sm.Logit(Y_train, sm.add_constant(X_new)).fit()
# L1-regularized logistic regression of the target on the components (plus intercept).
lr = sm.Logit(Y_train, sm.add_constant(X_new)).fit_regularized(method='l1', alpha=.5)
print(lr.summary())
| nbs/dim - PCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd

# Load the full per-site descriptor table from the spreadsheet.
xls = pd.ExcelFile("./data/Fulldata.xlsx")
data = pd.read_excel(xls)
print(data.shape)
#4204 site 1 the one used
snapselected = "4204"
# Collect the rows whose site name contains the selected snapshot id.
# Fix: use >= 0 -- str.find() returns 0 when the match is at the START of the
# string, which the original '> 0' test would have silently skipped.
selected = []
for name in data["name"]:
    if name.find(snapselected) >= 0:
        l = list(data[data["name"] == name].values[0])
        selected.append(l)
# Fixed-width table of the selected sites: header row, then one row per site.
for n in data.columns:
    print("%8s "%(n), end="")
print("")
for v in selected:
    for i, sv in enumerate(v):
        if i == 0:
            print("%8s "%(sv[-8:]), end="")  # keep only the tail of the long site name
        else:
            print("%8.4f "%(sv), end="")
    print("")
# -
# +
# In practice, these are the various sites identified in this snapshot of the dynamics
# -
print(data.columns)
# +
# these are the auto-computed descriptors:
# linearity
# planarity
# sphericity
# anisotropy
import seaborn as sns

# Absolute pairwise correlations between the numeric descriptors.
# Fix: pass numeric_only=True -- the frame contains the string 'name' column,
# and DataFrame.corr() raises on non-numeric columns in pandas >= 2.0
# (older pandas silently dropped them, which this reproduces explicitly).
corr = data.corr(numeric_only=True).abs()
s = corr.unstack()
so = s.sort_values(kind="quicksort", ascending=False)
# Report the strongly (but not perfectly self-) correlated descriptor pairs.
for s in so.index:
    if so[s] < 1.0 and so[s] > 0.7:
        print (s, so[s])
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns)
# +
# For every numeric column, keep the N sites with the highest value.
first = {}
N = 200
for cname in data.columns:
    if cname != "name":
        first[cname] = data.sort_values(by=[cname], ascending=False).head(N)
# Report in which top-N rankings the selected snapshot appears.
print("Looking for ", snapselected)
for k in first:
    d =first[k]
    for name in d["name"]:
        if name.find(snapselected) > 0:
            print(name, " in ", k)
# -
N = 10
# Within the top-200 by sscore, show the 10 most anisotropic sites.
print(first["sscore"].sort_values(by=["anisotropy"], ascending=False).head(N))
# values for the selected site -- the comment said it should be e200k_4204.jrun_site_2,
# but the lookup below queries site_1; NOTE(review): confirm which site is intended
print(data[data["name"] == "e200k_4204.jrun_site_1"])
# +
# Target criteria: sitescore > 0.8, exposure < 0.49, enclosure < 0.78, phobic/philic balance ~ 1.0.
filter1 = data[data["sscore"] > 0.8]
filter12 = filter1[filter1["sexposure"] < 0.49]
filter123 = filter12[filter12["senclosure"] < 0.79]
filter1234 = filter123[filter123["sbalance"] < 1.3]
filter1234 = filter1234[filter1234["sbalance"] > 0.7]
# in reality, slightly different threshold values are used for the filters
# (0.79 and 0.7-1.3) so that the reference site passes them
print("There are ", len(filter1234))
for name in filter1234["name"]:
    if name.find(snapselected) >= 0:
        print(name, " found ")
# +
import matplotlib.pyplot as plt
import numpy
# Overlaid histograms of the four geometric descriptors for the filtered sites.
fig = plt.figure(figsize = (15,5))
ax = fig.add_axes([0,0,1,1])
ax.set_title('Histogram geometric parameters')
ax.set_xlabel('Bins')
ax.set_ylabel('Value')
for ln in ["planarity", "linearity", "anisotropy", "sphericity"]:
    plt.hist(filter1234[ln].values, bins = 20, label=ln)
    print("range for ", ln , " is ", max(filter1234[ln].values), min(filter1234[ln].values))
    print("  ",numpy.mean(filter1234[ln].values))
# Reference values for the selected site, for comparison with the ranges above.
print(data[data["name"] == "e200k_4204.jrun_site_1"])
plt.show()
# +
#N = 10
#print(filter1234.sort_values(by=["planarity"], ascending=False).head(N))
# Selection idea: apply the filters, then keep all sites whose sphericity,
# planarity (and optionally anisotropy) exceed the mean of the filtered set.
# planarity > mean, sphericity > mean
finalselection = filter1234[filter1234["planarity"] > numpy.mean(filter1234["planarity"].values)]
finalselection = finalselection[finalselection["sphericity"] > numpy.mean(filter1234["sphericity"].values)]
#finalselection = finalselection[finalselection["anisotropy"] > numpy.mean(filter1234["anisotropy"].values)]
print(len(finalselection))
print(finalselection.sort_values(by=["sscore"], ascending=False))
# +
# Collect the distinct conformer identifiers: strip the "e200k_" prefix and
# keep everything before the first dot of each selected site name.
conformers = {
    site_name.replace("e200k_", "").split(".")[0]
    for site_name in finalselection["name"].values
}
print("How many conformers ", len(conformers))
# -
# List every selected site name.
for site_name in finalselection["name"].values:
    print(site_name)
# +
# check new data sites
import pandas as pd
import numpy
import matplotlib.pyplot as plt
# Same descriptor histograms/statistics, recomputed on the regenerated sites.
data = pd.read_csv("./data/newsites.csv")
for ln in ["planarity", "linearity", "anisotropy", "sphericity"]:
    plt.hist(data[ln].values, bins = 20, label=ln)
    print("range for ", ln , " is ", max(data[ln].values), min(data[ln].values))
    print(" ",numpy.mean(data[ln].values))
# Reference site values, for comparison with the previous run:
print(data[data["name"] == "e200k_4204_mini_site_1.pdb"])
#0.11096 0.54185 0.34719 0.65281
# senclosure sbalance linearity planarity sphericity anisotropy
#2832 0.78115 0.71482 0.11096 0.54185 0.34719 0.65281
# -
| checkdata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Categorical Data Plots
#
# Now let's discuss using seaborn to plot categorical data! There are a few main plot types for this:
#
# * catplot
# * boxplot
# * violinplot
# * stripplot
# * swarmplot
# * barplot
# * countplot
#
# Let's go through examples of each!
import seaborn as sns
# %matplotlib inline
# Built-in restaurant "tips" dataset used throughout the examples.
tips = sns.load_dataset('tips')
tips.head()
# ## barplot and countplot
#
# These very similar plots allow you to get aggregate data off a categorical feature in your data. **barplot** is a general plot that allows you to aggregate the categorical data based off some function, by default the mean:
sns.barplot(x='sex',y='total_bill',data=tips)
import numpy as np
# You can change the estimator object to your own function, that converts a vector to a scalar:
sns.barplot(x='sex',y='total_bill',data=tips,estimator=np.std)
# ### countplot
#
# This is essentially the same as barplot except the estimator is explicitly counting the number of occurrences. Which is why we only pass the x value:
sns.countplot(x='sex',data=tips)
sns.countplot(x='smoker', data=tips)
# ## boxplot and violinplot
#
# boxplots and violinplots are used to show the distribution of categorical data. **A box plot (or box-and-whisker plot) shows the distribution of quantitative data in a way that facilitates comparisons between variables or across levels of a categorical variable**. The box shows the quartiles of the dataset while the whiskers extend to show the rest of the distribution, except for points that are determined to be “outliers” using a method that is a function of the inter-quartile range.
sns.boxplot(x="day", y="total_bill", data=tips,palette='rainbow')
# Can do entire dataframe with orient='h'
sns.boxplot(data=tips,palette='rainbow',orient='h')
# hue splits each day's box by a second categorical variable.
sns.boxplot(x="day", y="total_bill", hue="smoker",data=tips, palette="coolwarm")
sns.boxplot(x='day', y='total_bill', data=tips, hue='sex', palette='husl')
# ### violinplot
# A violin plot plays a similar role as a box and whisker plot. It shows the distribution of quantitative data across several levels of one (or more) categorical variables such that those distributions can be compared. Unlike a box plot, in which all of the plot components correspond to actual datapoints, the violin plot features a kernel density estimation of the underlying distribution.
sns.violinplot(x="day", y="total_bill", data=tips,palette='rainbow')
sns.violinplot(x="day", y="total_bill", data=tips,hue='sex',palette='Set1')
# to join halves of both sexes
sns.violinplot(x="day", y="total_bill", data=tips,hue='sex',split=True,palette='Set1')
# ## stripplot and swarmplot
# The **stripplot will draw a scatterplot where one variable is categorical**. A strip plot can be drawn on its own, but it is also a good complement to a box or violin plot in cases where you want to show all observations along with some representation of the underlying distribution.
#
# The **swarmplot is similar to stripplot(), but the points are adjusted (only along the categorical axis) so that they don’t overlap**. This gives a better representation of the distribution of values, although it does not scale as well to large numbers of observations (both in terms of the ability to show all the points and in terms of the computation needed to arrange them).
sns.stripplot(x="day", y="total_bill", data=tips)
sns.stripplot(x="day", y="total_bill", data=tips,jitter=True)
sns.stripplot(x="day", y="total_bill", data=tips,jitter=True,hue='sex',palette='Set1')
sns.stripplot(x='day', y='total_bill', data=tips, jitter=True, hue='smoker', palette='Set1')
# The stripplot `split` keyword was renamed to `dodge` in seaborn 0.10 and
# later removed, so use dodge=True (consistent with the swarmplot call below).
sns.stripplot(x="day", y="total_bill", data=tips,jitter=True,hue='sex',palette='Set1',dodge=True)
# Just like combining a scatterplot, stripplot and a violinplot
# PS: Don't use on very large datasets
sns.swarmplot(x="day", y="total_bill", data=tips)
sns.swarmplot(x="day", y="total_bill",hue='sex',data=tips, palette="Set1", dodge=True)
# ### Combining Categorical Plots
# Draw the violins first, then overlay the individual observations as a swarm.
sns.violinplot(x="tip", y="day", data=tips,palette='rainbow')
sns.swarmplot(x="tip", y="day", data=tips,color='black',size=3)
# ## catplot
#
# catplot is the most general form of a categorical plot. It can take in a **kind** parameter to adjust the plot type:
sns.catplot(x='sex',y='total_bill',data=tips,kind='bar')
# #### <font color= #FF4B2B> PS: Use easy to read plots (such as barplots, countplots, violinplots) when presenting to the layman. </font>
# # Great Job!
| Section 9. Seaborn Exercises/Categorical Plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # synpuf disclosure risk
#
# Compare synthetic PUFs trained from a 50% sample, both to the training set and the remaining 50% holdout. Synthetic file (1) is from synthimpute random forests; (2) is from the synthpop R package.
# ## Setup
#
# ### Imports
# +
import pandas as pd
import numpy as np
import synthimpute as si
from scipy import stats
import matplotlib as mpl
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
# -
# ### Graphing
# +
# # !wget https://github.com/MaxGhenis/random/raw/master/Roboto-Regular.ttf -P /usr/local/lib/python3.6/dist-packages/matplotlib/mpl-data/fonts/ttf
# mpl.font_manager._rebuild()
# +
# Global matplotlib/seaborn styling shared by every chart in this notebook.
sns.set_style('white')
DPI = 200
mpl.rc('savefig', dpi=DPI)
mpl.rcParams['figure.dpi'] = DPI
mpl.rcParams['figure.figsize'] = 6.4, 4.8  # Default.
mpl.rcParams['font.sans-serif'] = 'Roboto'
mpl.rcParams['font.family'] = 'sans-serif'
# Set title text color to dark gray (https://material.io/color) not black.
TITLE_COLOR = '#212121'
mpl.rcParams['text.color'] = TITLE_COLOR
# Axis titles and tick marks are medium gray.
AXIS_COLOR = '#757575'
mpl.rcParams['axes.labelcolor'] = AXIS_COLOR
mpl.rcParams['xtick.color'] = AXIS_COLOR
mpl.rcParams['ytick.color'] = AXIS_COLOR
# -
# ### Load data
# Synthetic files plus the train/test halves of the PUF sample, and the
# precomputed nearest-record distance tables for each synthesizer.
rf_synth = pd.read_csv('~/Downloads/puf_synth_50p_sample.csv')
synthpop = pd.read_csv('~/Downloads/puf_synthpop_50p_sample.csv')
train = pd.read_csv('~/Downloads/puf_50p_sample_train.csv')
test = pd.read_csv('~/Downloads/puf_50p_sample_test.csv')
nearest_rf = pd.read_csv('~/Downloads/nearest_rf_50p.csv')
nearest_synthpop = pd.read_csv('~/Downloads/nearest_synthpop_50p.csv')
# ## Examine results
# Records closest to a training record are the highest disclosure risk.
nearest_rf.sort_values('train_dist').head()
nearest_rf.sort_values('dist_diff').head()
nearest_synthpop.sort_values('train_dist').head()
# ## Analysis
nearest_rf[['train_dist', 'test_dist', 'dist_diff']].describe()
nearest_synthpop[['train_dist', 'test_dist', 'dist_diff']].describe()
# Test whether the train/test distance differences are centered at zero.
stats.ttest_1samp(nearest_rf.dist_diff, 0)
stats.ttest_1samp(nearest_synthpop.dist_diff, 0)
# ## Charts
# ### Scatterplots of distance to train and to test
#
# Note we can't use log scales because of the exact zeros.
# Points below the 45-degree line are closer to a training record than to
# any test record, i.e. higher disclosure risk.
ax = nearest_rf.plot.scatter('train_dist', 'test_dist', alpha=0.15, linewidths=0.0)
# 45-degree line.
ax.plot(ax.get_xlim(), ax.get_ylim(), ls='--')
sns.despine(left=True, bottom=True)
ax.grid(color='#eeeeee')
for axis in [ax.xaxis, ax.yaxis]:
    axis.set_major_formatter(ScalarFormatter())
ax.set(xlabel='Shortest distance to a training record',
       ylabel='Shortest distance to a test record')
plt.title('Distance from synthesis to nearest training and test records (RF 50%)',
          loc='left')
plt.show()
# Same chart for the synthpop synthesis.
ax = nearest_synthpop.plot.scatter('train_dist', 'test_dist', alpha=0.15, linewidths=0.0)
# 45-degree line.
ax.plot(ax.get_xlim(), ax.get_ylim(), ls='--')
sns.despine(left=True, bottom=True)
ax.grid(color='#eeeeee')
for axis in [ax.xaxis, ax.yaxis]:
    axis.set_major_formatter(ScalarFormatter())
ax.set(xlabel='Shortest distance to a training record',
       ylabel='Shortest distance to a test record')
plt.title('Distance from synthesis to nearest training and test records (synthpop 50%)',
          loc='left')
plt.show()
# ### Boxplot of distances
# Long-format frame with one row per synthetic record and distance type.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so the
# four pieces are combined with a single pd.concat instead of chained appends.
distances_stacked = pd.concat([
    pd.DataFrame({'type': 'train',
                  'model': 'RF',
                  'dist': nearest_rf.train_dist}),
    pd.DataFrame({'type': 'test',
                  'model': 'RF',
                  'dist': nearest_rf.test_dist}),
    pd.DataFrame({'type': 'train',
                  'model': 'synthpop',
                  'dist': nearest_synthpop.train_dist}),
    pd.DataFrame({'type': 'test',
                  'model': 'synthpop',
                  'dist': nearest_synthpop.test_dist}),
])
f, ax = plt.subplots(figsize=(7, 7))
ax.set(yscale='log')  # distances span several orders of magnitude
sns.boxplot(x='type', y='dist',
            hue='model', data=distances_stacked)
sns.despine(left=True, bottom=True)
ax.yaxis.grid(color='#eeeeee')
plt.title('Nearest distances from synthetic records to train and test, by model')
plt.show()
# ### CDFs of distances
# Plot the four series.
# Very fine cumulative histograms approximate smooth empirical CDFs.
BINS = 1000000
ax = nearest_rf.train_dist.hist(cumulative=True, density=1, bins=BINS, color='#021aee', histtype='step')
nearest_rf.test_dist.hist(cumulative=True, density=1, bins=BINS, histtype='step', color='#021aee', ls='--')
nearest_synthpop.train_dist.hist(cumulative=True, density=1, bins=BINS, histtype='step', color='#7a7a7a')
nearest_synthpop.test_dist.hist(cumulative=True, density=1, bins=BINS, histtype='step', color='#7a7a7a', ls='--')
# Formatting.
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(
    lambda y, _: '{:.0%}'.format(y)))
ax.grid(color='#eeeeee')
sns.despine(left=True, bottom=True)
plt.xscale('log')
plt.legend(['RF train', 'RF test', 'synthpop train', 'synthpop test'])
ax.set(xlabel='Shortest distance from synthetic record to train/test record',
       ylabel='Share of synthetic records')
plt.title('CDF of shortest distances from synthesis to train/test (from 50% PUF samples)')
plt.show()
# ### Exact matches
# Share of synthetic records at distance exactly zero from some real record.
exact_matches = pd.DataFrame({
    'type': ['train', 'train', 'test', 'test'],
    'model': ['RF', 'synthpop', 'RF', 'synthpop'],
    'share_exact': [(nearest_rf.train_dist == 0).mean(),
                    (nearest_synthpop.train_dist == 0).mean(),
                    (nearest_rf.test_dist == 0).mean(),
                    (nearest_synthpop.test_dist == 0).mean()]})
exact_matches
ax = sns.barplot(x='type', y='share_exact', hue='model', data=exact_matches)
sns.despine(left=True, bottom=True)
ax.yaxis.grid(color='#eeeeee')
# Format the y axis as whole percentages.
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(
    lambda y, _: '{:.0%}'.format(y)))
plt.title('Share of synthetic records exactly matching a train or test record, by model')
plt.show()
# Boolean version: one row per synthetic record, so seaborn can compute the
# share (the mean of the booleans) and bootstrap error bars itself.
# Built with pd.concat because DataFrame.append was removed in pandas 2.0.
exact_matches_bool = pd.concat([
    pd.DataFrame({'type': 'train',
                  'model': 'RF',
                  'share_exact': (nearest_rf.train_dist == 0)}),
    pd.DataFrame({'type': 'test',
                  'model': 'RF',
                  'share_exact': (nearest_rf.test_dist == 0)}),
    pd.DataFrame({'type': 'train',
                  'model': 'synthpop',
                  'share_exact': (nearest_synthpop.train_dist == 0)}),
    pd.DataFrame({'type': 'test',
                  'model': 'synthpop',
                  'share_exact': (nearest_synthpop.test_dist == 0)}),
])
ax = sns.barplot(x='type', y='share_exact', hue='model', data=exact_matches_bool)
sns.despine(left=True, bottom=True)
ax.yaxis.grid(color='#eeeeee')
# Format the y axis as whole percentages.
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(
    lambda y, _: '{:.0%}'.format(y)))
plt.title('Share of synthetic records exactly matching a train or test record, by model')
plt.show()
# What share of exact matches to train also exactly matched a test?
# +
def share_exact_matches(df):
    """Of the synthetic records exactly matching a training record
    (train_dist == 0), return the share that also exactly match a test
    record (test_dist == 0).

    The mean of a boolean Series is the share of True values, which is
    equivalent to (and clearer than) summing and dividing by the row count.
    """
    exact_train = df[df.train_dist == 0]
    return (exact_train.test_dist == 0).mean()
share_exact_matches(nearest_rf)
# -
share_exact_matches(nearest_synthpop)
| analysis/disclosure/.ipynb_checkpoints/disclosure_risk_10p_sample-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Copyright 2018 MD.ai, Inc.
# Licensed under the Apache License, Version 2.0*
#
# # Create additional annotations using the MD.ai Annotator
# #### Pneumonia Detection Challenge: https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
#
# For further data exploration and to create your own additional annotations, clone the MD.ai project at:
# https://public.md.ai/annotator/project/LxR6zdR2.
#
# You can then create your own team, add new labels and additional annotations. The “Users“ tab will allow you to create teams and assign exams to team members. You can track progress and export your new annotations in JSON format.
#
# Further instructions and videos are available at https://docs.md.ai.
# + [markdown] colab_type="text" id="z0i1JLTF8_Pu"
# ### Clone the Annotator Project
#
# RSNA Pneumonia Detection Challenge Annotator project URL:
# https://public.md.ai/annotator/project/LxR6zdR2/workspace.
#
# To add annotations to the cloned project, you need to clone the project first.
#
# First, navigate to the original project URL (above), click on "Clone Project" button.
#
# 
#
# + [markdown] colab_type="text" id="FdivV_kX8_PN"
# **Intro to deep learning for medical imaging lessons**
#
# - Lesson 1. Classification of chest vs. adominal X-rays using TensorFlow/Keras [Github](https://github.com/mdai/ml-lessons/blob/master/lesson1-xray-images-classification.ipynb) [Annotator](https://public.md.ai/annotator/project/PVq9raBJ)
#
# - Lesson 2. Lung X-Rays Semantic Segmentation using UNets. [Github](https://github.com/mdai/ml-lessons/blob/master/lesson2-lung-xrays-segmentation.ipynb)
# [Annotator](https://public.md.ai/annotator/project/aGq4k6NW/workspace)
#
# - Lesson 3. RSNA Pneumonia detection using Kaggle data format [Github](https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-kaggle.ipynb) [Annotator](https://public.md.ai/annotator/project/LxR6zdR2/workspace)
#
# - Lesson 3. RSNA Pneumonia detection using MD.ai python client library [Github](https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-mdai-client-lib.ipynb) [Annotator](https://public.md.ai/annotator/project/LxR6zdR2/workspace)
#
# - MD.ai python client libray URL: https://github.com/mdai/mdai-client-py
# - MD.ai documentation URL: https://docs.md.ai/
# + colab={} colab_type="code" id="lVBBH_EF8_PU"
import os
import sys
import json
import pydicom
import pandas as pd
# + [markdown] colab_type="text" id="0a1bLOhX8_PW"
# ### Import the `mdai` library
#
# Run the block below to install the `mdai` client library into your python environment.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="cYvDgipk8_PX" outputId="28c7fa59-b481-4b40-dfe1-3e2f49cbb9a5"
# !pip install --upgrade --quiet mdai
import mdai
mdai.__version__
# + colab={} colab_type="code" id="WZQjgjnK8_Pb"
# Root directory of the project
ROOT_DIR = os.path.abspath('./lesson3-data')
# + [markdown] colab_type="text" id="RrVjmqtJ8_Po"
# ### Create an `mdai` client
#
# The mdai client requires an access token, which authenticates you as the user. To create a new token or select an existing token, navigate to the "Personal Access Tokens" tab on your user settings page at the specified MD.ai domain (e.g., public.md.ai).
#
# **Important: keep your access tokens safe. Do not ever share your tokens.**
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="XB28YDKw8_Pq" outputId="68eed3fe-121f-4966-f53d-bd734025e262"
mdai_client = mdai.Client(domain='public.md.ai', access_token="MY_PERSONAL_ACCESS_TOKEN")
# + [markdown] colab_type="text" id="Zl509Hs38_Pu"
# ### Define project
#
# Define a project you have access to by passing in the project id. The project id can be found in the URL in the following format: `https://public.md.ai/annotator/project/{project_id}`.
#
# For example, `project_id` would be `XXXX` for `https://public.md.ai/annotator/project/XXXX`.
#
# Specify optional `path` as the data directory (if left blank, will default to current working directory).
# + colab={} colab_type="code" id="uXUv3t_98_Pv"
# use cloned project_id!
CLONED_PROJECT_ID = 'EoBKoMBG'
p = mdai_client.project(project_id=CLONED_PROJECT_ID, path=ROOT_DIR)
# + [markdown] colab_type="text" id="PXiRWnxX8_Pz"
# ## Prepare data
#
# ### Grab the label ids. You'll need these to create a label dictionary.
# + colab={"base_uri": "https://localhost:8080/", "height": 107} colab_type="code" id="EDecTsz28_P0" outputId="aeec82e1-7a0b-477f-fd37-6ec1a5a7b4df"
p.show_label_groups()
# + [markdown] colab_type="text" id="gB22GyWU8_P4"
# ### Set label ids
#
# Selected label ids must be explicitly set by `Project#set_label_ids` method in order to prepare datasets.
# -
# ## Note: Your label ids and dataset ids will be different. Use show_label_groups() and show_datasets() to find your specific ids.
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="ChuEtYTz8_P4" outputId="67326c39-cc58-414b-9189-f9337e24a9f2"
# this maps label ids to class ids
# make sure this matches the Kaggle dataset
# target = 0: No Lung Opacity
# target = 1: Lung Opacity
labels_dict = {
    'L_Wdjx2B':0, # target = 0, background
    'L_NBy1aB':1, # target = 1, lung opacity
}
print(labels_dict)
p.set_labels_dict(labels_dict)
# -
# ### Use this formula to find your specific dataset id and use it to load dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="dFJRPTL18_P8" outputId="a198f18a-ab68-4eda-d28e-af888206d35e"
p.show_datasets()
# + colab={} colab_type="code" id="1K-f6JdP8_QB"
# Load and prepare the annotations for the chosen dataset id.
dataset = p.get_dataset_by_id('D_gEX5do')
dataset.prepare()
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="xBIVY6IL8_QE" outputId="833066ff-317a-4c9d-f38b-1e7c9269b16d"
dataset.show_classes()
# +
# generate kaggle labels format (see, stage_1_train_labels.csv)
# use dataset object from above
image_ids = dataset.get_image_ids()
kaggle_data = []
for image_id in image_ids:
    ds = pydicom.dcmread(image_id)
    anns = dataset.get_annotations_by_image_id(image_id)
    for ann in anns:
        labelId = ann['labelId']
        target = int(dataset.label_id_to_class_id(labelId))
        if target == 0:
            # Background: no bounding box, only the patient id and target.
            kaggle_data.append((ds.PatientID, None, None, None, None, target))
        elif target == 1:
            x = ann['data']['x']
            y = ann['data']['y']
            height = ann['data']['height']
            width = ann['data']['width']
            # BUG FIX: this previously appended (..., height, width, ...)
            # while the columns below are (..., 'width', 'height', ...),
            # silently swapping the two values. Append in column order.
            kaggle_data.append((ds.PatientID, x, y, width, height, target))
        else:
            raise ValueError('Target {} is invalid.'.format(target))
kaggle_df = pd.DataFrame(kaggle_data, columns=['patientId', 'x', 'y', 'width', 'height', 'Target'])
# -
# index=False matches the Kaggle stage_1_train_labels.csv layout, which has
# no leading index column.
kaggle_df.to_csv('stage_1_train_cloned.csv', index=False)
kaggle_df.head()
# !cat stage_1_train_cloned.csv
# **At this point, you could merge the training labels in this csv with the original training labels csv (i.e., stage_1_train_labels.csv); either using the command line, or read the csv data via pandas and merge the data.**
| pneumonia-detection-clone-project-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="N6vYLtten6yz"
# # Telegram Bot
#
# Simple Bot to save incoming messages.
# You will find it at https://**t**.me/messagecollector_bot
# + colab={"base_uri": "https://localhost:8080/"} id="15lFy32z6tWh" outputId="024fab09-239d-42cd-fd59-aa268e566c39"
# !pip install python-telegram-bot==12.7 pymongo dnspython==2.0.0 pyTelegramBotAPI==4.0.1 pandas jsonpickle
# + id="RMVgPZnpoMNA"
# importing all dependancies
import logging
import os
import telebot
from telegram import ParseMode
from telegram.ext import CallbackContext, Updater, CommandHandler, JobQueue, Dispatcher
import pymongo
import json
import pandas as pd
import jsonpickle
# + id="kudZp8fgoPs3"
#Override environment variables on dev environment if you test the bot
# Getting environment variables from Heroku configs if not overriden
# SECURITY NOTE(review): the fallback values below embed what looks like a
# real MongoDB username/password in source control — rotate these credentials
# and rely on the environment variables only.
BOT_TELEGRAM_API_TOKEN = os.environ.get('botKey', "<KEY>")
BOT_MONGODB_CONECTION_URL = os.environ.get('mongodbConnectionURL', "mongodb+srv://botaUser:botaPassword@botscluster.j5qlm.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
BOT_DATABASE_NAME = os.environ.get('databaseName', "TelegramBotForAstoneshi")
# + id="fzWhFKltoSJ1"
# Initialize logging for debugging purpose
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s',level=logging.INFO)
logger = logging.getLogger(__name__)
# + id="LN_KLn0Hrcwf"
# Database Class
class Database:
    """Thin wrapper around a pymongo client/database connection."""

    # constructor
    def __init__(self):
        # Connection settings come from the module-level env-var config.
        self.connectionURL = BOT_MONGODB_CONECTION_URL
        self.databaseName = BOT_DATABASE_NAME
        self.dbClient = None  # created lazily by connect()

    # connect to the database
    def connect(self):
        """Create the MongoClient and return the database handle.

        Returns the pymongo database object on success, or None on failure.
        NOTE(review): if a client already exists (i.e. connect() is called a
        second time), this takes the else branch and returns None instead of
        reusing the existing connection — confirm whether that is intended.
        """
        try:
            if not self.dbClient:
                logger.info("Database Client initialized.")
                self.dbClient = pymongo.MongoClient(self.connectionURL)
                database = self.dbClient[str(self.databaseName)]
                if database:
                    logger.info("Database Connected.")
                    return database
                else:
                    logger.info("Database Connection failed.")
                    return None
            else:
                logger.info("Database Client Connection failed.")
                return None
        except Exception as er:
            # Any pymongo/setup error is logged; the caller receives None.
            logger.error(er)
# + id="6gKf6BMPsnxf"
# Message Class
class Message:
    """Persists incoming Telegram messages into the 'messages' collection."""

    # message constructor
    def __init__(self, dbConnection):
        # Live pymongo database handle, or None if the connection failed.
        self.dbConnection = dbConnection

    # save message object
    def save_message(self, messageObj):
        """Insert one message document; return True on success, else False."""
        try:
            if not self.dbConnection:
                logger.error("Database connection error")
                return False
            self.messagesCollection = self.dbConnection["messages"]
            if not self.messagesCollection.insert_one(messageObj):
                logger.error("Failed to save message on database")
                return False
            logger.info("Message saved in Database")
            return True
        except Exception as er:
            logger.error(er)
            return False
# + colab={"base_uri": "https://localhost:8080/"} id="IeJsiyPhBKHO" outputId="2a83a7d5-610c-4e55-9779-be1e343713f2"
# Initializing database
db = Database()
dbConnection = db.connect()
# Initializing a message object
messageContent = Message(dbConnection)
# + id="Cimi9PZcs_f2"
# initialize the bot
bot = telebot.TeleBot(BOT_TELEGRAM_API_TOKEN, parse_mode="markdown")
# + id="d8k1Ckz7oWGE"
# Function to catch incomming command /about
@bot.message_handler(commands=['about'])
def about(message):
    """Reply with a short description of the bot."""
    try:
        bot.reply_to(message, "This is a sample Telegram bot. This bot will store incoming messages in a database")
    except Exception as e:
        logger.error(e)
        pass

# Function to catch incomming command /help
@bot.message_handler(commands=['help'])
def help(message):
    """Point the user at the project README."""
    try:
        bot.reply_to(message, "Send a message. Then have a look https://github.com/w3gen/TelegramBotForAstoneshi")
    except Exception as e:
        logger.error(e)
        pass

# catch all messages and save in database
@bot.message_handler(func=lambda m: True)
def echo_all(message):
    """Persist every non-command message to MongoDB."""
    try:
        #bot.reply_to(message, message.text)
        # Flatten the fields we care about into a plain dict document.
        messageObj = {
            "chat_id": message.chat.id,
            "message_id": message.message_id,
            "date": message.date,
            "type": message.chat.type,
            "text": message.text,
            "user": message.from_user.id,
            "username": message.from_user.username,
            "first_name": message.from_user.first_name,
            "last_name": message.from_user.last_name
        }
        messageContent.save_message(messageObj)
    except Exception as e:
        logger.error(e)
        pass

# + id="riQJ53B4MTNu"
# this function will send a message to a specific chat ID
def sendMessageViaChatId(chat_id, txt):
    bot.send_message(chat_id, txt)

# + id="GlTdOVrdLzGM"
# NOTE(review): the chat id actually sent (-1001545752396) differs from the
# one named in the trailing comment (1664758714) — confirm the intended target.
sendMessageViaChatId(-1001545752396, "Hi") # 1664758714 is the chat ID (For private messages, group ID = Chat ID)
# + colab={"base_uri": "https://localhost:8080/"} id="b6Neolbr3cO7" outputId="8ee35566-13c9-4bd8-a7e8-7cb2f2496b25"
# start polling to continuously listen for messages
bot.polling()
# gracefully stop the bot after ctrl + c
bot.stop_polling()
# + [markdown] id="FQkdDqlOrUrU"
#
#
# ---
#
#
# # View Data
#
# View collected data from the Bot. (First stop the bot to view data)
# + id="PwAXKOoNXsWj"
def viewData():
    """Fetch every stored message and return it as a pandas DataFrame."""
    # Query the whole 'messages' collection and materialize the cursor.
    records = dbConnection["messages"].find()
    return pd.DataFrame(list(records))
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="y5aorekCr3Z8" outputId="2d00e445-28ac-4886-d6f7-e251026ee0bb"
viewData()
| TelegramBotForAstoneshi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''py38'': conda)'
# language: python
# name: python3
# ---
import torch
# In-place masked_scatter_: copies elements of z into y at the positions
# where mask (broadcast against y's shape) is True.
y = torch.rand(4, 4)
print(y)
z = torch.rand(4, 4)
print(z)
x = torch.rand(4, 1)
mask = x.ge(0.5)  # boolean (4, 1) mask, broadcast across the columns of y
print(mask)
y.masked_scatter_(mask, z)
print(y)
# Out-of-place variant: masked_scatter returns a new tensor, so y is rebound
# rather than modified in place.
y = torch.rand(4, 4)
print(y)
z = torch.rand(4, 4)
print(z)
x = torch.rand(4, 1)
mask = x.ge(0.5)
print(mask)
y = y.masked_scatter(mask, z)
print(y)
# Sweep over mask shapes to see how masked_scatter_ broadcasts the mask
# against a (3, 3, 3) destination tensor. Each iteration reproduces one of
# the original hand-written experiments, in the same order, so the RNG
# stream — and therefore every printed tensor — is unchanged.
for mask_shape in [
    (3,),
    (3, 1),
    (3, 3),
    (3, 3, 3),
    (3, 3, 1),
    (3, 1, 3),
    (1, 3, 3),
    (1, 1, 3),
    (1, 3, 1),
    (3, 1, 1),
    (1, 1, 1),
]:
    y = torch.rand(3, 3, 3)
    print(y)
    z = torch.rand(3, 3, 3)
    print(z)
    x = torch.rand(*mask_shape)
    mask = x.ge(0.5)
    print(mask)
    y.masked_scatter_(mask, z)
    print(y)
# Mask supplied directly as a boolean tensor; .ge(0.5) on a bool tensor
# treats True as 1 and False as 0, so the mask is effectively unchanged.
# The source z only needs at least as many elements as there are True
# entries in the broadcast mask.
y = torch.rand(3, 3, 3)
print(y)
z = torch.rand(5, 3)
print(z)
x = torch.tensor([True, False, False])
mask = x.ge(0.5)
print(mask)
y.masked_scatter_(mask, z)
print(y)
import numpy as np
# +
# np.broadcast reports the shape two arrays broadcast to without actually
# materializing the combined result.
x = np.array([[1], [2], [3]])
y = np.array([4, 5, 6])
b = np.broadcast(x, y)
print(b)
out = np.empty(b.shape)  # uninitialized (3, 3) buffer with the broadcast shape
print(out)
# -
# Broadcasting in arithmetic: (3, 1) + (3,) -> (3, 3).
a1 = np.array([[2],[2], [3]])
a2 = np.array([1,3,4])
print(a1 + a2)
| pythonExample/torch_masked_scatter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro. to Snorkel: Extracting Spouse Relations from the News
# ## Part III: Training an End Extraction Model
#
# In this final section of the tutorial, we'll use the noisy training labels we generated in the last tutorial part to train our end extraction model.
#
# For this tutorial, we will be training a Bi-LSTM, a state-of-the-art deep neural network implemented in [TensorFlow](https://www.tensorflow.org/).
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE
# Note that this is necessary for parallel execution amongst other things...
# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'
from snorkel import SnorkelSession
session = SnorkelSession()
# -
# We repeat our definition of the `Spouse` `Candidate` subclass:
# +
from snorkel.models import candidate_subclass
Spouse = candidate_subclass('Spouse', ['person1', 'person2'])
# -
# We reload the probabilistic training labels:
# +
from snorkel.annotations import load_marginals
# split=0 is the training split.
train_marginals = load_marginals(session, split=0)
# -
# We also reload the candidates:
# Splits: 0 = train, 1 = dev, 2 = test. Ordering by id keeps the candidate
# order aligned with the stored marginals/labels.
train_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all()
dev_cands = session.query(Spouse).filter(Spouse.split == 1).order_by(Spouse.id).all()
test_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all()
# Finally, we load gold labels for evaluation:
# +
from snorkel.annotations import load_gold_labels
L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1)
L_gold_test = load_gold_labels(session, annotator_name='gold', split=2)
# -
# Now we can setup our discriminative model. Here we specify the model and learning hyperparameters.
#
# They can also be set automatically using a search based on the dev set with a [GridSearch](https://github.com/HazyResearch/snorkel/blob/master/snorkel/learning/utils.py) object.
# +
from snorkel.learning.disc_models.rnn import reRNN
# Bi-LSTM training hyperparameters (could also be tuned via GridSearch).
train_kwargs = {
    'lr': 0.01,
    'dim': 50,
    'n_epochs': 10,
    'dropout': 0.25,
    'print_freq': 1,
    'max_sentence_length': 100
}
lstm = reRNN(seed=1701, n_threads=None)
# Train on the probabilistic labels, monitoring the dev set as we go.
lstm.train(train_cands, train_marginals, X_dev=dev_cands, Y_dev=L_gold_dev, **train_kwargs)
# -
# Now, we get the precision, recall, and F1 score from the discriminative model:
p, r, f1 = lstm.score(test_cands, L_gold_test)
print("Prec: {0:.3f}, Recall: {1:.3f}, F1 Score: {2:.3f}".format(p, r, f1))
# We can also get the candidates returned in sets (true positives, false positives, true negatives, false negatives) as well as a more detailed score report:
tp, fp, tn, fn = lstm.error_analysis(session, test_cands, L_gold_test)
# Note that if this is the final test set that you will be reporting final numbers on, to avoid biasing results you should not inspect results. However you can run the model on your _development set_ and, as we did in the previous part with the generative labeling function model, inspect examples to do error analysis.
#
# You can also improve performance substantially by increasing the number of training epochs!
#
# Finally, we can save the predictions of the model on the test set back to the database. (This also works for other candidate sets, such as unlabeled candidates.)
lstm.save_marginals(session, test_cands)
# ##### More importantly, you completed the introduction to Snorkel! Give yourself a pat on the back!
| intro_Z/.ipynb_checkpoints/Intro_Tutorial_3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import dependencies
import pandas as pd
import os
# Read in csv file. pd.read_csv already returns a DataFrame, so the
# previous pd.DataFrame(data) wrapper was redundant and has been removed.
data = pd.read_csv('../Resources/cities.csv')
# Keep only the columns to publish on the website.
data_df = data[["City", "Country", "Lat", "Max Temp", "Humidity", "Cloudiness", "Wind Speed"]]
data_df.head()
# save data to html (index=False drops the meaningless row numbers)
data_df.to_html('../Resources/data.html', index=False)
| WebVisualizations/csv_to_html.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Required libraries
import configparser
# %load_ext sql
# ### Redshift Configuration
# +
config = configparser.ConfigParser()
config.read('dwh.cfg')
# Redshift connection settings, read from the [CLUSTER] section of dwh.cfg.
DWH_DB_USER=config.get("CLUSTER", "DB_USER")
DWH_DB_PASSWORD=config.get("CLUSTER", "DB_PASSWORD")
DWH_ENDPOINT=config.get("CLUSTER", "HOST")
DWH_PORT=config.get("CLUSTER", "DB_PORT")
DWH_DB=config.get("CLUSTER", "DB_NAME")
# -
# ### Open Connection in the Redshift
# postgresql://user:password@host:port/dbname -- consumed by the %sql magic below.
conn_string = "postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT, DWH_DB)
# %sql $conn_string
# ### Top 10 Artist Played
# + language="sql"
# SELECT artists.name,
# Count(songplays.user_id) as Count
# FROM songplays
# LEFT JOIN artists
# ON ( songplays.artist_id = artists.artist_id )
# WHERE songplays.artist_id IS NOT NULL
# GROUP BY artists.name
# ORDER BY Count(songplays.user_id) DESC
# LIMIT 10
# -
# ### Top 10 Music Played
# + language="sql"
# SELECT songs.title,
# Count(songplays.user_id) as Count
# FROM songplays
# LEFT JOIN songs
# ON ( songplays.song_id = songs.song_id )
# WHERE songplays.artist_id IS NOT NULL
# GROUP BY songs.title
# ORDER BY Count(songplays.user_id) DESC
# LIMIT 10
# -
| Data Warehouse/examples_queries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## DCTC DCSC feature extraction demo
# First I import packages:
import sys
import numpy as np
import matplotlib.pyplot as P
from scipy.io import wavfile
from HTKFeat import MFCC_HTK
# ## Speech signal waveform and spectrogram
# +
#samplerate, signal = wavfile.read('file.raw') # sample rate is 16000 in this example
samplerate = 16000
mfcc=MFCC_HTK()
signal = mfcc.load_raw_signal('f1nw0000pes_short.wav')
sig_len = signal.size/samplerate # sig_len is in secs
P.figure(figsize = (15,4))
P.xlim(0, int(sig_len))
t = np.linspace(0, sig_len, signal.size)
P.plot(t, signal)
P.figure(figsize=(15,4))
s = P.specgram(signal, Fs = samplerate, cmap = 'jet')
P.xlim(0, int(sig_len))
# -
# ## DC Removal
signal = signal - np.mean(signal)
# ## Speech frames
# +
# frame shift and frame length in samples (10 ms / 25 ms at 16 kHz)
win_shift = 160
win_len = 400
# Number of complete frames that fit in the signal.
# BUG FIX: the original computed this from sig_len, which is measured in
# *seconds* (signal.size / samplerate); the frame count must be derived
# from the number of samples.
win_num = np.floor((signal.size - win_len) / win_shift).astype('int') + 1
wins = []
for w in range(win_num):
    s = w * win_shift
    e = s + win_len
    # copy() so later in-place processing does not modify the raw signal
    win = signal[s:e].copy()
    wins.append(win)
wins = np.asarray(wins)  # (win_num, win_len) array of speech frames
# -
# ## Pre-emphasis
# Pre-emphasis: boost high frequencies via y[t] = x[t] - k*x[t-1]
# (the first sample becomes (1-k)*x[0] because x[-1] is taken as x[0]).
# BUG FIX: `k` was never defined in the original cell; 0.97 is the
# conventional HTK pre-emphasis coefficient (PREEMCOEF).
k = 0.97
for win in wins:
    win -= np.hstack((win[0], win[:-1])) * k
# ## Hamming window
# Taper every frame with a Hamming window to reduce spectral leakage.
hamm = np.hamming(400)  # change to Kaiser?
# In-place broadcast over the rows of `wins` -- equivalent to multiplying
# each frame individually.
wins *= hamm
# ## FFT
# +
# FFT length: next power of two above the window length (512 for 400).
# BUG FIX: np.asscalar() was removed in NumPy 1.23; plain int() is the
# documented replacement.
fft_len = int(2 ** (np.floor(np.log2(win_len)) + 1))
ffts = []
for win in wins:
    # Magnitude spectrum; drop the final (Nyquist) bin, keeping fft_len/2 values.
    mag = np.abs(np.fft.rfft(win, n=fft_len)[:-1])
    ffts.append(mag)
ffts = np.asarray(ffts)
# -
# ## Filtering
# +
# Triangular mel filterbank applied to the frame power spectra.
# BUG FIXES vs. the original cell, which could not run at all:
#   * `numpy` was referenced although the module is imported as `np`
#   * `sample_rate`, `NFFT`, `nfilt` and `pow_frames` were never defined;
#     they are taken here from the surrounding cells (samplerate, fft_len,
#     ffts) or given a conventional default (nfilt = 26)
#   * `bin` shadowed the builtin; renamed to bin_idx
nfilt = 26  # number of triangular mel filters (common MFCC default)
low_freq_mel = 0
high_freq_mel = 2595 * np.log10(1 + (samplerate / 2) / 700)  # Convert Hz to Mel
mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
hz_points = 700 * (10 ** (mel_points / 2595) - 1)  # Convert Mel to Hz
# Filter edge frequencies expressed as FFT bin indices.
bin_idx = np.floor((fft_len + 1) * hz_points / samplerate)
# One row per filter; match the fft_len/2 bins kept in `ffts`
# (the Nyquist bin was dropped in the FFT cell above).
fbank = np.zeros((nfilt, ffts.shape[1]))
for m in range(1, nfilt + 1):
    f_m_minus = int(bin_idx[m - 1])  # left edge
    f_m = int(bin_idx[m])            # center
    f_m_plus = int(bin_idx[m + 1])   # right edge
    for k in range(f_m_minus, f_m):
        fbank[m - 1, k] = (k - bin_idx[m - 1]) / (bin_idx[m] - bin_idx[m - 1])
    for k in range(f_m, f_m_plus):
        fbank[m - 1, k] = (bin_idx[m + 1] - k) / (bin_idx[m + 1] - bin_idx[m])
# Periodogram power spectrum of each frame, then project onto the filterbank.
pow_frames = (ffts ** 2) / fft_len
filter_banks = np.dot(pow_frames, fbank.T)
filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)  # Numerical Stability
filter_banks = 20 * np.log10(filter_banks)  # dB
# How do we apply this filterbank?  (original author's note, translated)
| DCTC-DCSC-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This notebook will explore model data at the Ocean Networks Canada(ONC) VENUS nodes.
#
# Information about the ONC VENUS nodes can be found here:
# * http://venus.uvic.ca/
# # Resources
# We have lots of resources for working in Python. You can always look here for ideas, suggestions and more details.
#
# * http://salishsea-meopar-docs.readthedocs.org/en/latest/work_env/python_notes.html
#
# # Import Libraries
# We need to import some tools for use in our notebooks. The first few lines below are libraries that are used in many of our notebooks. They are developed by the Python community.
#
# That last few lines are developed in house and reflect commonly needed tasks in our group. The source code is stored in tools/SalishSeaTools/salishsea_tools/
#
#
# +
#Python
import matplotlib.pyplot as plt
import numpy as np
import datetime
import netCDF4 as nc
from dateutil import tz
#in-house
from salishsea_tools import (viz_tools,tidetools,nc_tools)
from salishsea_tools.nowcast import (analyze,figures,research_VENUS)
# %matplotlib inline
# -
# # Plot Map
# Where are the VENUS nodes? Plot them on the model map.
# * We have a function that loads temperature and salinity data from either the central or east node.
# * figures.load_VENUS('Central') or figures.load_VENUS('East')
# * Documentation: http://salishsea-meopar-tools.readthedocs.org/en/latest/SalishSeaTools/nowcast.html#module-nowcast.figures
# * This function returns the temperature and salinity data, as well as the longitude, latitude and depth of the node
# +
#load an example file to grab model_depths
T = nc.Dataset('/data/dlatorne/MEOPAR/SalishSea/nowcast/01feb15/SalishSea_1d_20150201_20150201_grid_T.nc')
model_depths = T.variables['deptht'][:]
#load model grid
fB= nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
Y = fB.variables['nav_lat'][:]
X = fB.variables['nav_lon'][:]
bathy = fB.variables['Bathymetry'][:]
#load VENUS node observations/metadata, then plot the node positions
datas={}; lons={}; lats={}; depths={};
for key in ('Central','East'):
    datas[key] = research_VENUS.load_VENUS(key)
    lons[key] = research_VENUS.SITES['VENUS'][key]['lon']
    lats[key] = research_VENUS.SITES['VENUS'][key]['lat']
    depths[key] = research_VENUS.SITES['VENUS'][key]['depth']
    # ensure numeric dtype for the later statistics/plotting
    datas[key][['sal','temp']]=datas[key][['sal','temp']].astype(float)
#plotting (Python 2 notebook -- print statements below)
fig,ax=plt.subplots(1,1,figsize=(5,5))
viz_tools.plot_coastline(ax,fB,coords='map')
for key in ('Central','East'):
    ax.plot(lons[key],lats[key],'o',label=key)
    print key, 'depth:', depths[key], '(m)'
ax.set_xlim([-123.5,-123])
ax.set_ylim([48.7,49.5])
ax.legend(loc=0)
viz_tools.set_aspect(ax)
# -
# # Compare model and observations
# +
def truncate(data, t_o, t_f):
    # Return the rows of `data` whose `date` column lies strictly between
    # t_o and t_f; both bounds are interpreted as UTC.
    start = t_o.replace(tzinfo=tz.tzutc())
    end = t_f.replace(tzinfo=tz.tzutc())
    return data[(data.date > start) & (data.date < end)]
# -
def compare_model_VENUS(station,start_time, end_time):
    """ Compare model temperature and salinity with VENUS observations.

    station specifies 'East' or 'Central';
    start_time and end_time are datetime objects bounding the comparison.
    Relies on module-level variables set earlier in the notebook:
    datas, depths, lons, lats (VENUS data/metadata) and
    model_depths, X, Y, bathy (model grid).
    Returns (fig, means, stds); means/stds are nested dicts keyed by
    variable ('sal'/'temp') then source ('obs'/'model').
    """
    #VENUS observations are already loaded; truncate to the plotting window
    data_trun = truncate(datas[station],start_time,end_time)
    #look up the model grid indices closest to the VENUS node
    k = tidetools.find_model_level(depths[station],model_depths)
    [j,i] = tidetools.find_closest_model_point(lons[station],lats[station],X,Y,bathy)
    #load hourly model data covering the requested period
    fnames = analyze.get_filenames(start_time,end_time,'1h', 'grid_T','/data/dlatorne/MEOPAR/SalishSea/nowcast/')
    sal, time = analyze.combine_files(fnames,'vosaline',k,j,i)
    temp, time = analyze.combine_files(fnames,'votemper',k,j,i)
    #plotting
    #salinity (top panel)
    fig,axs= plt.subplots(2,1,figsize=(15,10))
    ax=axs[0]
    ax.plot(time,sal,label='model')
    ax.plot(data_trun.date[:],data_trun.sal[:],'-',label='obs')
    ax.set_xlim([start_time,end_time])
    ax.legend(loc=0)
    ax.set_title('Salinity comparison - VENUS {}'.format(station))
    ax.set_ylabel('Salinty [PSU]')
    #temperature (bottom panel)
    ax=axs[1]
    ax.plot(time,temp,label='model')
    ax.plot(data_trun.date[:],data_trun.temp[:],'-',label='obs')
    ax.set_xlim([start_time,end_time])
    ax.legend(loc=0)
    ax.set_title('Temp. comparison - VENUS {}'.format(station))
    ax.set_ylabel('Temperature (deg C)')
    #summary statistics for both sources
    means={'sal': {},'temp':{}}; stds={'sal':{},'temp':{}}
    means['sal']['obs']= data_trun.sal.mean(); stds['sal']['obs']=data_trun.sal.std()
    means['temp']['obs']= data_trun.temp.mean(); stds['temp']['obs']=data_trun.temp.std()
    means['sal']['model']= np.mean(sal); stds['sal']['model']=np.std(sal)
    means['temp']['model']= np.mean(temp); stds['temp']['model']=np.std(temp)
    return fig,means,stds
# +
start_time=datetime.datetime(2015,5,1)
end_time=datetime.datetime(2015,5,15)
# Two-week model/observation comparison at the East node
# (Python 2 notebook -- print statements).
fig,means,stds=compare_model_VENUS('East',start_time,end_time)
print 'Observed Salinity: Mean {}, std {}'.format(means['sal']['obs'], stds['sal']['obs'])
print 'Modelled Salinity: Mean {}, std {}'.format(means['sal']['model'], stds['sal']['model'])
print 'Observed Temperature: Mean {}, std {}'.format(means['temp']['obs'], stds['temp']['obs'])
print 'Modelled Temperature: Mean {}, std {}'.format(means['temp']['model'], stds['temp']['model'])
# +
# Same comparison at the Central node over the same period.
fig,means,stds=compare_model_VENUS('Central',start_time,end_time)
print 'Observed Salinity: Mean {}, std {}'.format(means['sal']['obs'], stds['sal']['obs'])
print 'Modelled Salinity: Mean {}, std {}'.format(means['sal']['model'], stds['sal']['model'])
print 'Observed Temperature: Mean {}, std {}'.format(means['temp']['obs'], stds['temp']['obs'])
print 'Modelled Temperature: Mean {}, std {}'.format(means['temp']['model'], stds['temp']['model'])
# -
# Questions
# 1. What improvements can we make to these plots?
# 2. How else can we compare model with observations?
# # Profiles over depth
# We've recently started saving model data at the VENUS nodes every 15 minutes. Let's import and plot temperature and salinity depth profiles over time.
def combine(files, var):
    """Concatenate the model variable `var` across many result files.

    Suitable for the VENUS_east.nc / VENUS_central.nc files, whose
    variables are indexed (time, depth, 1, 1); only variables over
    depth are supported.

    Returns (values, times): values is the concatenated (time, depth)
    array and times holds the matching datetime objects.
    """
    times = np.array([])
    chunks = []
    for fname in files:
        ds = nc.Dataset(fname)
        chunk = ds.variables[var][:, :, 0, 0]
        chunks.append(chunk)
        # convert the file's time counter into datetime objects
        stamps = nc_tools.timestamp(ds, np.arange(chunk.shape[0]))
        times = np.append(times, [s.datetime for s in stamps])
    return np.concatenate(chunks, axis=0), times
# The VENUS files are either VENUS_east.nc or VENUS_central.nc. Make a list and sort them in chronological order.
# +
import glob
import os
path = '/data/dlatorne/MEOPAR/SalishSea/nowcast/'
# BUG FIX: the original assigned `files` from a glob over all grid_T files
# and immediately overwrote it with [] -- that first glob was dead code and
# has been removed.
# Keep only the grid_T file from each nowcast day that also has a
# VENUS_east.nc, then sort chronologically by file name.
files = []
files_East = glob.glob(os.path.join(path,'*','VENUS_east.nc'))
for f in files_East:
    directory=os.path.dirname(f)
    files.append(glob.glob(os.path.join(directory,'*_1h_*_grid_T*'))[0])
files.sort(key=os.path.basename)
# Rebuild the per-node file lists in that same chronological order.
files_East = [];
files_Central=[];
for f in files:
    directory=os.path.dirname(f)
    files_East.append(os.path.join(directory,'VENUS_east.nc'))
    files_Central.append(os.path.join(directory,'VENUS_central.nc'))
# -
# Now grab the temperature and salinity data.
temps={}
sals={}
# Build (time, depth) arrays for each node from the per-day VENUS files.
temps['East'],time=combine(files_East,'votemper')
sals['East'],time=combine(files_East,'vosaline')
temps['Central'],time=combine(files_Central,'votemper')
sals['Central'],time=combine(files_Central,'vosaline')
# Plotting the time series next
# +
#plot East node time series: salinity (top) and temperature (bottom)
key='East'
fig,axs=plt.subplots(2,1,figsize=(15,10))
ax=axs[0]
# depths plotted negative so the surface sits at the top of the axis
mesh=ax.pcolormesh(time,-model_depths,sals[key].T,vmin=10,vmax=32)
cbar=plt.colorbar(mesh,ax=ax)
ax.set_title('{} Salinity'.format(key))
cbar.set_label('Salinity [PSU]')
ax=axs[1]
mesh=ax.pcolormesh(time,-model_depths,temps[key].T,vmin=8,vmax=14)
cbar=plt.colorbar(mesh,ax=ax)
ax.set_title('{} Temperature'.format(key))
cbar.set_label('Temperature (deg C)')
for ax in axs:
    ax.xaxis_date()
    ax.set_ylim([-200,0])
    ax.set_ylabel('Depth (m)')
fig.autofmt_xdate()
# +
#plot Central node time series: salinity (top) and temperature (bottom)
key='Central'
fig,axs=plt.subplots(2,1,figsize=(15,10))
# One pass per panel: (data, colour limits, colourbar label, title).
panels = (
    (sals[key], 15, 32, 'Salinity [PSU]', '{} Salinity'.format(key)),
    (temps[key], 8, 14, 'Temperature (deg C)', '{} Temperature'.format(key)),
)
for ax, (field, lo, hi, cb_label, title) in zip(axs, panels):
    # depths plotted negative so the surface sits at the top of the axis
    quad = ax.pcolormesh(time, -model_depths, field.T, vmin=lo, vmax=hi)
    cb = plt.colorbar(quad, ax=ax)
    ax.set_title(title)
    cb.set_label(cb_label)
    ax.xaxis_date()
    ax.set_ylim([-300, 0])
    ax.set_ylabel('Depth (m)')
fig.autofmt_xdate()
# -
# Questions
# 1. How can we improve these plots?
# 2. Do you notice anything interesting? What other information would help you explain some of the trends in these plots?
# # What's next?
# Here are a few things to try.
#
# 1. Think of ways to improve the above plots.
# 2. Plot the current speed vs depth over time. How can we compare this with observations?
# 3. Read through this notebook where Susan calculated tidal ellipses using output from an early simulation. Start thinking about how we can redo the tidal ellipse calculations at the VENUS East and Central nodes with more recent model data.
#
#
# * http://nbviewer.ipython.org/urls/bitbucket.org/salishsea/analysis/raw/tip/compare_tides/Ellipses.ipynb
| Muriel/old notebooks/Exploring Model Output at the ONC VENUS nodes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/skmisht/CNN/blob/main/CNN_PyTorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="bh7Wik-3D0R6"
# # Melanoma detection - PyTorch
# + id="UPZxs6Q0EC91" colab={"base_uri": "https://localhost:8080/"} outputId="eb55e0e3-e921-412f-ec3f-d58d0e290aac"
import os
import csv
import zipfile
from google.colab import files
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Torch modules
import torch
# Sanity-check the installed PyTorch version before building the model.
print("Current Torch version: ", torch.__version__)
# to install if there is no latest version of torch
# # !pip install -q torch==1.0.0
import torchvision
from torchvision.transforms import transforms
# + id="QKltfGoJZAm-" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="6b475d75-5baf-4344-dabf-ccf9f5efde95"
# test if the notebook is running on GPU
import tensorflow as tf
# Returns a device string like '/device:GPU:0' when a GPU backend is
# attached, or '' when running on CPU only.
tf.test.gpu_device_name()
# + id="PYE0r77thWKr"
# data path: https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Data.zip
# download and unzip the compressed file
# # !wget --no-check-certificate \
# # https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Data.zip \
# # -O /tmp/isic.zip
# local_zip = '/tmp/skin_cancer_data.zip'
# try to extract the zip folder
# try:
# with zipfile.ZipFile(local_zip, 'r') as zip:
# zip.extractall('/tmp')
# print("Extracted all the files into tmp")
# except FileNotFoundError as fnf_error:
# print(fnf_error)
# + id="UEuVG0uGPyQu"
| CNN_PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Sympy, General
# +
import sympy as sp
sp.sqrt(8)  # exact symbolic result (2*sqrt(2)), not a float
# -
# Exact rational arithmetic -- no floating-point rounding.
frac_a = sp.Rational(4, 5)
frac_b = sp.Rational(5, 4)
print(frac_a + frac_b)
print(frac_a / frac_b)
# Symbolic expressions: sympy canonicalises term order, so both of these
# print identically.
x, y = sp.symbols("x y")
expr_a = y + 2 * x
expr_b = 2 * x + y
print(expr_a)
print(expr_b)
# Simple symbolic manipulation.
print(expr_a - x)
print(expr_b + 1)
print(x * expr_a)
print(sp.expand(x * expr_a))
print(sp.factor(x * expr_a))
# ### Derivatives
# +
import sympy as sp
import numpy as np
# NOTE: the original imported scipy.misc.derivative, which was deprecated
# and removed in SciPy 1.12; a local central difference (its default
# behaviour: dx=1.0, 3-point stencil) replaces it below.
x = sp.Symbol('x')
print(sp.diff(3*x**2+1, x))  # symbolic derivative: 6*x
def f(x):
    """Polynomial 3*x**2 + 1; works on scalars and numpy arrays."""
    return 3*x**2 + 1
def d(x):
    """Numerical derivative of f via a central difference.

    Equivalent to scipy.misc.derivative(f, x) with its defaults, and
    exact for a quadratic regardless of the step size.
    """
    dx = 1.0
    return (f(x + dx) - f(x - dx)) / (2.0 * dx)
print(d(2.0))  # 12.0, matching the removed derivative(f, 2.0)
# %matplotlib inline
import matplotlib.pyplot as plt
# 50 evenly spaced sample points on [-3, 3] (np.linspace default count).
y = np.linspace(-3, 3)
print(y)
# Plot the function and its numerical derivative on the same axes.
plt.plot(y, f(y))
plt.plot(y, d(y))
# -
# ### Integrals
# +
# Symbolic antiderivative of 3x^2 + 1 (float coefficients, as written).
print(sp.integrate(3.0*x**2 + 1, x))
# Numerical definite integral of the same function over [0, 2].
from scipy.integrate import quad
def f(x):
    """Integrand 3x^2 + 1."""
    return 3.0 * x**2 + 1
# quad returns (value, estimated absolute error); print just the value.
i = quad(f, 0, 2)
print(i[0])
# -
| notebooks/sympy-samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas as pd
from utils import load_csv_data, calc_path_stats
current_directory = os.getcwd()
# Load the per-provider benchmark CSVs, located relative to this notebook.
cloudflare = load_csv_data(os.path.join(current_directory, "../data/cloudflare-workers"))
cloudfront = load_csv_data(os.path.join(current_directory, "../data/cloudfront-functions"))
lambdaEdge = load_csv_data(os.path.join(current_directory, "../data/lambda@edge"))
# +
# Summarise the combined 'Total' latency column for each provider.
stats = {}
# NOTE(review): calc_path_stats appears to return (p95, p99, max), matching
# `labels` below -- confirm against utils.calc_path_stats.
stats["cloudfront"] = calc_path_stats(cloudfront["combined"]["Total"])
stats["cloudflare"] = calc_path_stats(cloudflare["combined"]["Total"])
stats["lambdaEdge"] = calc_path_stats(lambdaEdge["combined"]["Total"])
labels = ["p95", "p99", "max"]
# One column per provider, one row per percentile label.
stats_df = pd.DataFrame(stats, index = labels)
stats_df
| notebooks/performance_calculations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
from ml_utils.core import *
# # David's ML_UTILS
#
# > My helper lib for Data Analytics/ML
# This file will become your README and also the index of your documentation.
# ## Install
# Go to this lib's repo, then
#
# `pip install -e .`
#
# for an editable installation
# ## How to use
# Fill me in please! Don't forget code examples:
1+1
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Note: for Notebook to work in JupyterLab, run : `conda install hyperspy hyperspy-gui-traitsui -c conda-forge` in the Anaconda prompt
# # EDS-TEM quantification of core shell nanoparticles
#
# Using machine learning methods, such as independent component analysis (ICA), the composition of embedded nanostructures, such as core-shell nanoparticles, can be accurately measured as demonstrated by D. Roussow et al., Nano Letters, 2015 (see the [full article](https://www.repository.cam.ac.uk/bitstream/handle/1810/248102/Roussouw%20et%20al%202015%20Nano%20Letters.pdf?sequence=1)). Using the same data, this notebook reproduces the main results of this article.
#
#
# ## Author
#
# * 13/04/2015 <NAME> - Developed for HyperSpy workshop at University of Cambridge
#
# ## Changes
#
# * 29/05/2016 <NAME>. Update the syntax for HyperSpy 0.8.5 (Python 3 compatibility)
# * 03/08/2016 <NAME>. Update the syntax for HyperSpy 1.1
# * 06/08/2016 <NAME>. Update the syntax for HyperSpy 0.8.1
# * 27/08/2016 <NAME>. Update for workshop at EMC Lyon
#
# ## Requirements
#
# * HyperSpy 1.3
#
# ## <a id='top'></a> Contents
#
# 1. <a href='dat'> Specimen & Data</a>
# 2. <a href='#loa'> Loading</a>
# 3. <a href='#bss'> Blind source separation of core/shell nanoparticles</a>
# 4. <a href='#bare'> Representative spectrum from bare cores</a>
# 5. <a href='#com'> Comparison and quantification</a>
# 6. <a href='#fur'> Going further: Isolating the nanoparticles</a>
#
# # <a id='dat'></a> 1. Specimen & Data
#
# The sample and the data used in this tutorial are described in
# <NAME>, et al., Nano Letters, In Press (2015) (see the [full article](https://www.repository.cam.ac.uk/bitstream/handle/1810/248102/Roussouw%20et%20al%202015%20Nano%20Letters.pdf?sequence=1)).
#
# FePt@Fe$_3$O$_4$ core-shell nanoparticles are investigated with an EDS/TEM experiment (FEI Osiris TEM, 4 EDS detectors). The composition of the core can be measured with ICA (see figure 1c). To prove the accuracy of the results, measurements on bare FePt bimetallic nanoparticles from a synthesis prior to the shell addition step are used.
# <img src="images/core_shell.png" style="height:350px;">
# Figure 1: (a) A spectrum image obtained from a cluster of core-shell nanoparticles. (b) The nanoparticles are comprised of a bi-metallic Pt/Fe core surrounded by an iron oxide shell on a carbon support. (c) ICA decomposes the mixed EDX signals into components representing the core (IC#0), shell (IC#1) and support (IC#2).
# # <a id='loa'></a> 2. Loading
# <a href='#top'> Table of contents</a>
# Import HyperSpy and matplotlib libraries
# %matplotlib qt5
import hyperspy.api as hs
# Load the spectrum images of the bare seeds and the core shell nanoparticles.
c = hs.load('datasets/bare_core.hdf5')    # EDS spectrum image: bare FePt cores
cs = hs.load('datasets/core_shell.hdf5')  # EDS spectrum image: core-shell particles
c.metadata
# Plot the intensity of Fe K${\alpha}$ and Pt L${\alpha}$.
axes = hs.plot.plot_images(hs.transpose(*(c.get_lines_intensity() + cs.get_lines_intensity())),
                           scalebar='all', axes_decor=None, per_row=2, cmap='RdBu')
# ## <a id='bss'></a> 3. Blind source separation of core/shell nanoparticles
# <a href='#top'> Table of contents</a>
# Apply blind source separation (ICA) to obtain a factor (spectrum) corresponding to the core.
cs.change_dtype('float')  # decomposition requires floating-point data
cs.decomposition()  # PCA first, to inspect how many components matter
ax = cs.plot_explained_variance_ratio()
# ICA on the three first components.
cs.blind_source_separation(3)
axes = cs.plot_bss_loadings()
axes = cs.plot_bss_factors()
# The first component corresponds to the core.
s_bss = cs.get_bss_factors().inav[0]
# ## <a id='bare'></a> 4. Representative spectrum from bare cores
# <a href='#top'> Table of contents</a>
# To obtain a representative spectrum of the bare nanoparticles, the low intensity of Pt L${\alpha}$ is masked.
pt_la = c.get_lines_intensity(['Pt_La'])[0]
# Threshold: pixels with more than 6 Pt La counts are treated as particle.
mask = pt_la > 6
axes = hs.plot.plot_images(hs.transpose(*(mask, pt_la * mask)), axes_decor=None, colorbar=None,
                           label=['Mask', 'Pt L${\\alpha}$ intensity'], cmap='RdBu')
# To apply the mask, the navigation dimensions of the mask must be manipulated to match the navigation dimensions of the EDS spectrum image. This is achieved crudely via first generating a mask using the built in vacuum_mask() method and then overwriting the data with the mask generated above.
c_mask = c.sum(-1)
c_mask.data = mask.data  # overwrite with the Pt-based mask computed above
# The sum over the particles is used as a bare core spectrum.
s_bare = (c * c_mask).sum()
# ## <a id='com'></a> 5. Comparison and quantification
# <a href='#top'> Table of contents</a>
# Stack together the spectrum of bare particles and the first ICA component.
s_bare.change_dtype('float')
# Stack the bare-core spectrum with BSS factor #0 so both are processed together.
s = hs.stack([s_bare, s_bss], new_axis_name='Bare or BSS')
s.metadata.General.title = 'Bare or BSS'
axes = hs.plot.plot_spectra(s, style='mosaic', legend=['Bare particles', 'BSS #0'])
# ### Method 1
#
# X-ray intensities measurement with background subtraction.
w = s.estimate_background_windows()
s.plot(background_windows=w)
# Refinement of the windows position (manual tweak of one window's edges).
w
w[1, 0] = 8.44
w[1, 1] = 8.65
s.plot(background_windows=w, navigator='slider')
sI = s.get_lines_intensity(background_windows=w)
# ### Method 2
#
# Measure X-ray intensity by fitting a Gaussian model
m = s.isig[5.:15.].create_model()
# NOTE(review): Cu_Ka/Co_Ka lines are added here while the quantification
# below uses Fe Ka / Pt La k-factors -- confirm the intended element lines.
m.add_family_lines(['Cu_Ka', 'Co_Ka'])
m.components
m.plot()
m.multifit()
m.fit_background()
m.calibrate_energy_axis()
m.plot()
# Keep the intensities of the last two fitted line families.
sI = m.get_lines_intensity()[-2:]
# Set up the kfactors for Fe K${\alpha}$ and Pt L${\alpha}$.
#From Bruker software (Esprit)
kfactors = [1.450226, 5.075602]
# Quantify with the Cliff-Lorimer method (method="CL").
composition = s.quantification(method="CL", intensities=sI, factors=kfactors,
                               plot_result=True)
# ## <a id='fur'></a> 6. Going further
# <a href='#top'> Table of contents</a>
# Further image processing with [scikit-image](http://scikit-image.org/) and [scipy](http://www.scipy.org/). Apply a watershed transformation to isolate the nanoparticles.
# - Transform the mask into a distance map.
# - Find local maxima.
# - Apply the watershed to the distance map using the local maximum as seed (markers).
#
# Adapted from this scikit-image [example](http://scikit-image.org/docs/dev/auto_examples/plot_watershed.html).
from scipy.ndimage import distance_transform_edt, label
import numpy as np
# API FIX: watershed moved from skimage.morphology to skimage.segmentation
# in scikit-image 0.19 (and was later removed from morphology).
try:
    from skimage.segmentation import watershed
except ImportError:  # older scikit-image
    from skimage.morphology import watershed
from skimage.feature import peak_local_max
# Distance map of the particle mask: bright at particle centres.
distance = distance_transform_edt(mask.data)
# API FIX: peak_local_max(indices=False) was removed in scikit-image 0.20;
# it now always returns coordinates, so rebuild the boolean marker image.
peak_coords = peak_local_max(distance, min_distance=2, labels=mask.data)
local_maxi = np.zeros(distance.shape, dtype=bool)
local_maxi[tuple(peak_coords.T)] = True
# Watershed seeded at the local maxima separates touching particles.
labels = watershed(-distance, markers=label(local_maxi)[0],
                   mask=mask.data)
axes = hs.plot.plot_images(
    [pt_la.T, mask.T, hs.signals.Signal2D(distance), hs.signals.Signal2D(labels)],
    axes_decor='off', per_row=2, colorbar=None, cmap='RdYlBu_r',
    label=['Pt L${\\alpha}$ intensity', 'Mask',
           'Distances', 'Separated particles'])
| extra/TEM_EDS_nanoparticles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
from os import listdir
from os.path import isfile, join
path_shake = './complete_works/'
# Strip the '.txt' extension; keep only regular files (skip subdirectories).
all_files = [f.replace('.txt','') for f in listdir(path_shake) if isfile(join(path_shake, f))]
n_files = len(all_files)
print("The total number of Shakespeare's complete work: ",n_files)
print(all_files)
# +
# Classify each work by genre using substrings of its file name.
tragedy = 'tragedy'
n_tragedy = 0
tragedy_files=[]
poetry = 'poetry'
n_poetry = 0
poetry_files = []
sonnet = 'sonnet'
n_sonnet = 0
history = 'hist'
n_hist = 0
hist_files = []
n_comedy = 0
comedy_files = []
# BUG FIX: the original iterated over range(num), but `num` is undefined;
# the file count from the previous cell is n_files.
for i in range(n_files):
    name = all_files[i]
    if(tragedy in name):
        n_tragedy +=1
        tragedy_files.append(name)
    elif(history in name):
        n_hist +=1
        hist_files.append(name)
    elif(poetry in name):
        n_poetry +=1
        poetry_files.append(name)
        # sonnets are a subset of the poetry category
        if(sonnet in name):
            n_sonnet +=1
    else:
        # anything not matched above is counted as a comedy
        n_comedy +=1
        comedy_files.append(name)
# -
# Summary of the classification counts per genre.
print("n_tragedy = ",n_tragedy) # 12
print("The tragedies are: \n",tragedy_files)
print("n_hist = ",n_hist) # 10
# BUG FIX: the original printed tragedy_files under the "histories" heading.
print("The histories are: \n",hist_files)
print("n_poetry = %d,"%n_poetry, "while %d of them are sonnets."%n_sonnet) # 5
print("The poetries are: \n",poetry_files)
print("n_comedy = ",n_comedy) # 15
print("The comedies are: \n",comedy_files)
# +
# words and lines counting: per-work totals of non-empty lines and words
import numpy as np
import codecs
word_count = np.zeros(n_files)
line_count = np.zeros(n_files)
for i in range(n_files):
    file_name = './complete_works/'+ all_files[i]+'.txt'
    # errors='ignore' silently drops undecodable bytes in the source texts
    with codecs.open(file_name, "r",encoding='utf-8', errors='ignore') as fdata:
        data = fdata.read()
    sentences_1 = data.split('\n')
    # drop empty lines before counting
    sentences_2 = [elem for elem in sentences_1 if elem !='']
    for part in sentences_2:
        # NOTE(review): splitting on a single space counts runs of spaces
        # as extra "words" -- confirm whether str.split() was intended
        word_list = part.split(' ')
        word_count[i] += len(word_list)
        line_count[i] += 1
    print(i+1)
    print("Total word numbers of "+all_files[i]+": ", int(word_count[i]))
    print("Total line numbers of "+all_files[i]+": ", int(line_count[i]))
    print('')
# -
| 1-basic-statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="AqJstf0N1O-t"
# # Dogs vs Cats Image Classification using Convolutional Neural Networks
# + [markdown] colab_type="text" id="ohyXPnjQ1O-v"
# In this notebook we are going to create a model to solve the problem of finding whether the image is of a dog or a cat using Convoloutional Neural Networks (CNNs).
# The dataset and problem statement is taken from Kaggle Competition: [dogs-vs-cats-redux-kernels-edition](https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition).
# + [markdown] colab_type="text" id="oounhP4E1O-w"
# ## Setting up Environment and Dependencies
# + [markdown] colab_type="text" id="DzEle7xfkpyx"
# We are going to use Fastai library which runs on top of PyTorch. We'll also install kaggle-cli which will help us download the dataset from kaggle competition.
#
# **Note 1: **
# We need to restart the notebook kernel once these libraries are installed.
#
# **Note 2:**
# In Google Colab, you'll need to run these dependencies every single time you start the notebook.
# In a dedicated machine over AWS, Google Cloud, etc. you need to do this only once as these dependencies will then be installed on the machine's disk storage.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 1856} colab_type="code" executionInfo={"elapsed": 9248, "status": "ok", "timestamp": 1526668075571, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="0cJmPYXg3ODf" outputId="5b54a5f7-39e7-4282-8195-ad591f0ee500"
# Run these commands once to install the necessary dependencies and download the dogs and cats dataset from kaggle
# !pip install torch torchvision
# !pip install fastai
# !pip install kaggle-cli
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="vsGSD6FelXr1"
# !pip install --no-cache-dir -I pillow
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 343} colab_type="code" executionInfo={"elapsed": 73646, "status": "ok", "timestamp": 1526668290201, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="JVKu9zgcg7Q7" outputId="f0fea646-63f7-4734-8bd6-d87c65b194e8"
# To download and extract the weights of Pretrained models such as Resnet.
# !wget --header="Host: files.fast.ai" --header="User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8" --header="Accept-Language: en-US,en;q=0.9" --header="Cookie: _ga=GA1.2.755364775.1526348522; _gid=GA1.2.1192476799.1526616713" --header="Connection: keep-alive" "http://files.fast.ai/models/weights.tgz" -O "weights.tgz" -c
# !tar -xvzf weights.tgz -C /home/anubhavshrimal/anaconda3/envs/fastai/lib/python3.6/site-packages/fastai/
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 32170, "status": "ok", "timestamp": 1526673362411, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="7akSIcTJLSZW" outputId="81961b60-6024-4ba1-e5b2-3873f9065b3f"
# Enter your Kaggle username and password in the below command to download the dataset
# !kg download -u username -p password -c dogs-vs-cats-redux-kernels-edition
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 637585} colab_type="code" executionInfo={"elapsed": 19376, "status": "ok", "timestamp": 1526673398748, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="Fm-V4s-Lmbcc" outputId="5cd0340b-9492-40ca-c7b6-afcb907c2506"
# Create data directory and unzip training and testing data
# !mkdir data && unzip test.zip -d data/
# !unzip train.zip -d data
# + [markdown] colab_type="text" id="nt2YIkphmxcg"
# ## Coding Begins:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="iZePk5Q_lL9f"
# Put these at the top of every notebook, to get automatic reloading and inline plotting
# NOTE: While using colab you should comment the first 2 lines of autoreload
# #%reload_ext autoreload
# #%autoreload 2
# %matplotlib inline
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="e5J8z8nC1O-3"
# This file contains all the main external libraries we'll use
from fastai.imports import *
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="VIs0CRev1O--"
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
# + [markdown] colab_type="text" id="38gPzJw41O_B"
# `PATH` is the path to your dataset unzipped files. `sz` is the size that the images will be resized to in order to ensure that the training runs quickly. `bs` is the batch size.
#
# `arch` is the pretrained architecture that we are using to train the CNN model. Here we are using `resnet34`.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="G5xVKLHB1O_B"
# Root directory of the unzipped Kaggle data (train/ and test/ live here).
PATH = "data/"
# Side length (pixels) that images are resized to for training.
sz=224
# Pretrained backbone architecture (name comes from fastai's star imports).
arch=resnet34
# Mini-batch size.
bs=58
# + [markdown] colab_type="text" id="0bSBHHar1O_F"
# It's important that you have a working NVidia GPU set up. The programming framework used to behind the scenes to work with NVidia GPUs is called CUDA. Therefore, you need to ensure the following line returns `True` before you proceed.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1134, "status": "ok", "timestamp": 1526673646880, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="_YK7-0vZ1O_G" outputId="ee6653d5-ea0a-4dd1-dddf-0adc2db8a93e"
torch.cuda.is_available()
# + [markdown] colab_type="text" id="PBOjMyOk1O_L"
# In addition, NVidia provides special accelerated functions for deep learning in a package called CuDNN. Although not strictly necessary, it will improve training performance significantly, and is included by default in all supported fastai configurations. Therefore, if the following does not return `True`, you may want to look into why.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 943, "status": "ok", "timestamp": 1526673648082, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="TfQkLSd41O_M" outputId="49e8d830-bf39-42a6-c594-67091a44e25e"
torch.backends.cudnn.enabled
# + [markdown] colab_type="text" id="5FW2kfwT1O_c"
# ### Data cleaning, creation and Dataset exploration:
# + [markdown] colab_type="text" id="1XdyTixR1O_d"
# The cat and dog images are kept in a single directory, hence we need to **create a csv file `labels.csv` in the `data/` directory** to store the file names with their corresponding label.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 1304, "status": "ok", "timestamp": 1526673654936, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="_aDFpdhq1O_v" outputId="7b5a64a3-6d6e-48cc-c698-a625376509a7"
# Build data/labels.csv mapping each training file name to its class label.
# File names look like "cat.123.jpg" / "dog.456.jpg", so the first three
# characters of the name are the label.
files = os.listdir(f'{PATH}train')
df = pd.DataFrame({'fname': files, 'label': [name[:3] for name in files]},
                  columns=['fname', 'label'])
df.to_csv(f'{PATH}labels.csv', index=False)
df.head()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 142} colab_type="code" executionInfo={"elapsed": 1276, "status": "ok", "timestamp": 1526673656499, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="cernK6k7FAVy" outputId="45e7c077-abc0-4633-878e-0639e6a08c54"
# See how many images of cats and dogs are present in the training dataset
df.pivot_table(index='label', aggfunc=len).sort_values('fname', ascending=False)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 269} colab_type="code" executionInfo={"elapsed": 2240, "status": "ok", "timestamp": 1526673659020, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="dSBA7WEchl1K" outputId="b5b727e3-9565-42a2-eb6c-617cff4cfac1"
img = plt.imread(f'{PATH}train/{files[3]}')
plt.imshow(img);
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 269} colab_type="code" executionInfo={"elapsed": 1914, "status": "ok", "timestamp": 1526673661226, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="KGxwCC5N_dSW" outputId="9380b059-a37b-40e1-e7e7-4eea832e7ab7"
img = plt.imread(f'{PATH}train/{files[4]}')
plt.imshow(img);
# + [markdown] colab_type="text" id="nCjrqvmn1O_5"
# Here is what the raw data looks like
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1819, "status": "ok", "timestamp": 1526673663306, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="cGgB0UY71O_6" outputId="a211627e-d5b7-4f09-986f-b6c61839f22e"
img.shape
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 340} colab_type="code" executionInfo={"elapsed": 1581, "status": "ok", "timestamp": 1526673665209, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="-4Km8aSP1PAB" outputId="a5abe505-edea-46c5-d761-cb4bbc63f462"
img[:4,:4]
# + [markdown] colab_type="text" id="CFWv4i3F6lHN"
# As there is no validation set provided, we'll create our own `validation set` which will be 20% of the `training set`.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1236, "status": "ok", "timestamp": 1526673666774, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="NIwTm5iUKo8h" outputId="e69c3596-9426-4b67-e556-5ecb2c07f6db"
# Create the cross-validation (hold-out) index set.
label_csv = f'{PATH}labels.csv'
# Count data rows (total lines minus the header).  Use a context manager so
# the file handle is closed instead of leaked by `len(list(open(...)))`.
with open(label_csv) as f:
    n = sum(1 for _ in f) - 1
# get_cv_idxs samples validation indices (fastai default: 20% of n).
val_idxs = get_cv_idxs(n)
val_idxs
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Abe2zNm_KxtR"
# Build the augmentation pipeline (horizontal flips plus up to 10% zoom) and
# the train/validation/test data object driven by the generated labels.csv.
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_csv(PATH, 'train', f'{PATH}labels.csv',
                                    test_name='test', val_idxs=val_idxs,
                                    tfms=tfms, bs=bs)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1263, "status": "ok", "timestamp": 1526673670376, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="HI8EJQI2OqYF" outputId="8e734ab3-b41b-4594-8225-6595c32d95bb"
fn = PATH + data.trn_ds.fnames[3];
fn
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 516} colab_type="code" executionInfo={"elapsed": 2308, "status": "ok", "timestamp": 1526673673004, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="c8CD5_cBOwV8" outputId="3f345789-37f9-412c-9bea-123f64af8dff"
img = PIL.Image.open(fn);
img
# + [markdown] colab_type="text" id="UYXjNd8s70ID"
# Looking at the various image sizes present in the dataset.
#
# As most of the images are nearly of the same size and are of row size 500 we need not perform any size reductions.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 265} colab_type="code" executionInfo={"elapsed": 4332, "status": "ok", "timestamp": 1526673677694, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="4YyFULmeP-3E" outputId="07adb88f-081e-47c1-d23b-1a1f7ebb5073"
# Map each training image path to its (width, height) as reported by PIL.
size_d = {k: PIL.Image.open(PATH+k).size for k in data.trn_ds.fnames}
# Unzip into parallel tuples.
# NOTE(review): PIL's .size is (width, height), so "row_sz" is actually the
# image width — the histogram below is over widths; confirm intent.
row_sz,col_sz = list (zip(*size_d.values()))
row_sz=np.array(row_sz); col_sz=np.array(col_sz)
# Distribution of image widths across the training set.
plt.hist(row_sz);
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 317} colab_type="code" executionInfo={"elapsed": 1097, "status": "ok", "timestamp": 1526673678923, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="gy_9DaRXQINi" outputId="3ae3951c-de9a-4a67-83aa-c7d9c215f9ec"
plt.hist(row_sz[row_sz<1000])
# + [markdown] colab_type="text" id="B_cEqhEZDpMP"
# ### Train Model:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="MevtdZY6QRgA"
def get_data(sz, bs):
    """Build the ImageClassifierData object for image size *sz*, batch size *bs*.

    For smaller target sizes the images are first resized to 340px copies
    under 'tmp' so training epochs read less data from disk.
    """
    augmentations = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
    image_data = ImageClassifierData.from_csv(
        PATH, 'train', f'{PATH}labels.csv', test_name='test',
        num_workers=4, val_idxs=val_idxs, tfms=augmentations, bs=bs)
    if sz > 300:
        return image_data
    return image_data.resize(340, 'tmp')
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 792782, "status": "ok", "timestamp": 1526674472963, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="Imx7zOQzQZ3e" outputId="47b9b6f2-3016-416d-f6e3-ce8f40228b79"
# Get the data and initialize the model with pretrained architecture weights,
# then fit the new head for 2 epochs at learning rate 1e-2.
# precompute=True caches the frozen backbone's activations for speed;
# ps=0.5 is the dropout probability on the classifier head.
data = get_data(sz,bs)
learn = ConvLearner.pretrained(arch, data, precompute=True, ps=0.5)
learn.fit(1e-2, 2)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 170} colab_type="code" executionInfo={"elapsed": 928442, "status": "ok", "timestamp": 1526675402042, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="LsvHmHpobFvP" outputId="3cb40d1d-20c8-4104-ba56-4512b6fca426"
# Turn precomputed activations off so data augmentation actually takes effect,
# then train 5 more epochs; cycle_len=1 restarts the learning-rate schedule
# every epoch (SGDR-style annealing from fastai.sgdr).
learn.precompute=False
learn.fit(1e-2, 5, cycle_len=1)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="P-UU1mvve5Uk"
# Checkpoint the model trained at 224px ('224_pre') and reload it.
learn.save('224_pre')
learn.load('224_pre')
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="jS8-dtom1_WS"
# Continue training on larger (299px) images — a cheap form of progressive
# resizing. freeze() keeps the pretrained backbone fixed; only the head trains.
learn.set_data(get_data(299,bs))
learn.freeze()
learn.fit(1e-2, 3, cycle_len=1)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="Z-_Ms4Nqqcn9" outputId="94190fa2-bc05-4f3f-c0f5-9bb817378670"
learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="50lridBDBKjp"
learn.save('299_pre')
learn.load('299_pre')
# + [markdown] colab_type="text" id="1VR7v0-ID85z"
# ### Validation & Testing:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17} colab_type="code" executionInfo={"elapsed": 166057, "status": "ok", "timestamp": 1526675569668, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="wCZNjo6dkMkE" outputId="db78a4f2-38b5-4e4d-e1c7-d74d2b057339"
# Test-Time Augmentation on the validation set: predictions are averaged over
# the original image plus several augmented versions.
# The model outputs natural-log probabilities, so exponentiate first, then
# average over the augmentation axis (axis 0).
log_preds,y = learn.TTA()
probs = np.mean(np.exp(log_preds),0)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1341, "status": "ok", "timestamp": 1526675571098, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="Qdw4ejaXlmCv" outputId="5de5fabc-69dd-448f-d509-201a57a74fc7"
accuracy_np(probs, y)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="tj_Vdj-Znnf7"
# Build the confusion matrix of predicted vs. true validation labels.
from sklearn.metrics import confusion_matrix
# Hard class predictions = index of the highest averaged probability.
preds = np.argmax(probs, axis=1)
cm = confusion_matrix(y, preds)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 345} colab_type="code" executionInfo={"elapsed": 965, "status": "ok", "timestamp": 1526675573624, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="W8oGDeB2nuq5" outputId="c120e571-c154-4344-ee5a-1beec30238be"
plot_confusion_matrix(cm, data.classes)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 66184, "status": "ok", "timestamp": 1526675640052, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="lTFcuP7YqzTh" outputId="64e70fdb-7556-4e76-fcad-240113b2fcb8"
# Run the trained model on the (unlabeled) test set; the result is a matrix
# of log-probabilities, one row per test image.
log_preds_test = learn.predict(is_test=True)
log_preds_test.shape
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1215, "status": "ok", "timestamp": 1526677563011, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="hcPJf9dyrE9r" outputId="78a4f495-3739-4ea6-cf4c-5bd59bbff876"
# Convert log-probabilities back to probabilities.
test_probs = np.exp(log_preds_test)
# Hard labels per test image: 1 = dog, 0 = cat.
# NOTE(review): assumes column 1 is 'dog' (alphabetical label order from
# data.classes) — verify before submitting.
predictions = np.argmax(test_probs, axis=1)
predictions
# + [markdown] colab_type="text" id="JI0kPuvwFKec"
# ### Kaggle Submission:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1920, "status": "ok", "timestamp": 1526675879736, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="tRxdpAYP9dV7" outputId="9621f926-670d-4b39-aaff-b3fd0bfd0d02"
# Get the dog probabilities column, because kaggle competition has this as the specified format
test_probs = test_probs[:,1]
test_probs
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 1218, "status": "ok", "timestamp": 1526676054804, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="3uh5OP-bt-88" outputId="60064860-a1bd-410f-a649-ea68784d7ef8"
# Create the submission.csv file in the format Kaggle expects (id, label).
raw_submission = {'id': [], 'label': test_probs}
for fname in data.test_ds.fnames:
    # fname[5:-4] drops the leading directory prefix and the 4-char
    # extension — assumes paths of the form 'test/<id>.jpg'; verify.
    raw_submission['id'].append(fname[5:-4])
submission = pd.DataFrame(raw_submission, columns=['id', 'label'])
submission.to_csv(f'{PATH}submission.csv', index = False)
submission.head()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1926, "status": "ok", "timestamp": 1526672572578, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="6lV8pOPOwJt_" outputId="3371e43e-81b4-47b5-b3c9-83865bb6ec4d"
FileLink(f"{PATH}submission.csv")
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 5835, "status": "ok", "timestamp": 1526676105757, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="k4NmHtkFx8Wh" outputId="333efff2-35e5-4176-ef44-e4ac7262839a"
# Use your kaggle username and password to send the results on your kaggle competition and get the score.
# !kg submit data/submission.csv -u username -p password -c dogs-vs-cats-redux-kernels-edition -m "Submission on Test.zip"
# + [markdown] colab_type="text" id="4JNEf85y1PBd"
# ## Choosing a learning rate
# + [markdown] colab_type="text" id="qHLH7vId1PBd"
# The *learning rate* determines how quickly or how slowly you want to update the *weights* (or *parameters*). Learning rate is one of the most difficult parameters to set, because it significantly affects model performance.
#
# The method `learn.lr_find()` helps you find an optimal learning rate. It uses the technique developed in the 2015 paper [Cyclical Learning Rates for Training Neural Networks](http://arxiv.org/abs/1506.01186), where we simply keep increasing the learning rate from a very small value, until the loss stops decreasing. We can plot the learning rate across batches to see what this looks like.
#
# We first create a new learner, since we want to know how to set the learning rate for a new (untrained) model.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="heAG60541PBe"
learn = ConvLearner.pretrained(arch, data, precompute=True)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 8286, "status": "ok", "timestamp": 1526677809472, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="YH7SamD21PBh" outputId="c2d420ab-2c62-4d89-b0d9-5767f85cab5f"
# lr_find() trains while exponentially increasing the learning rate and stops
# once the loss starts to increase; the sweep is plotted in the cells below.
lrf=learn.lr_find()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 279} colab_type="code" executionInfo={"elapsed": 1338, "status": "ok", "timestamp": 1526677817280, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="gdHYHtO21PBm" outputId="c7dd8e97-95e4-40cb-a32f-58759d6cad1d"
learn.sched.plot_lr()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 287} colab_type="code" executionInfo={"elapsed": 1717, "status": "ok", "timestamp": 1526677821893, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-xIMjld9GFZI/AAAAAAAAAAI/AAAAAAAABmA/kk47MyuPAjA/s50-c-k-no/photo.jpg", "userId": "104419005999346430241"}, "user_tz": -330} id="mXDW6RXL1PBs" outputId="d7bad2dc-c0d1-4bcd-ed86-e58a1c57c5d7"
learn.sched.plot()
# + [markdown] colab_type="text" id="jl0gJUbS1PBx"
# The loss is still clearly improving at lr=1e-2 (0.01), so that's what we use. Note that the optimal learning rate can change as we train the model, so you may want to re-run this function from time to time.
| Kaggle-Competitions/Dogs-vs-Cats-Redux-Kernels/Dogs-vs-cats-kaggle.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// [this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/csharp/Docs)
//
// # Math and LaTeX
// Math content and LaTeX are supported
// + dotnet_interactive={"language": "csharp"}
// Casting a verbatim string to LaTeXString makes .NET Interactive render it
// as display math (here: Maxwell's equations).
(LaTeXString)@"\begin{align}
\nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\
\nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\
\nabla \cdot \vec{\mathbf{B}} & = 0
\end{align}"
// + dotnet_interactive={"language": "csharp"}
// MathString renders a single math expression inline.
(MathString)@"H← 60 + \frac{30(B−R)}{Vmax−Vmin} , if Vmax = G"
| samples/notebooks/csharp/Docs/Math-and-LaTeX.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1RbT1N-9N8wq" colab_type="text"
# # Análise de Sentimento utilizando Regressão Logística
# + [markdown] id="NhqP4QXHOTFh" colab_type="text"
# ## Introdução
#
# ### Sobre o conjunto de dados
#
# - O dataset utilizado será do IMDB contendo varias avaliações de filmes, está disponível em: http://ai.stanford.edu/~amaas/data/sentiment
#
# - O dataset contem 25000 avaliações positivas(label=1) e 25000 avaliações negativas(label=0)
# - O conjunto de dados possui apenas duas colunas: review(avaliação) e sentiment(sentimento)
#
#
# ### Modelo de regressão logística
# + [markdown] id="2c5_ZYQTOnew" colab_type="text"
# ### Importando as bibliotecas necessárias para o projeto
# ---
# + id="Bp7x_NBWOxm_" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from nltk.stem.porter import PorterStemmer
import pickle
# + id="oczVeWL9NOGe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="9b22b81d-f0f1-4682-f31c-b65526d03bce"
# Load the IMDB reviews dataset directly from GitHub.
# Columns: review (text) and sentiment (1 = positive, 0 = negative).
dataset_uri = "https://raw.githubusercontent.com/marcostark/Learning-Data-Science/master/desafios/datasets/imdb_movie_data.csv"
df_movies = pd.read_csv(dataset_uri)
df_movies.head()
# + id="5soB9qD5PNvL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a3e37932-159e-4fc9-98f5-09cc3b19e23f"
# Total de avalições
print(df_movies.shape)
# + id="6KOB-ZXbSmSq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="87ba62e8-63e5-42f9-f1dd-cdb3f290de05"
df_movies.columns
# + id="NJzAwG-SRs5I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 125} outputId="0cfa3842-efcb-4eb7-ee04-a8c80561bc3f"
# Labels do dataset: Positivo(label=1), Negativo(label=0)
df_movies.set_index(['review', 'sentiment']).count(level='sentiment')
# + id="AbMJGDSwTEqz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="82a5303c-38d7-45a6-b39c-185e6c040a4f"
# Número de labels que representam sentimento positivo
df_movies[df_movies.sentiment==1].count()
# + id="v_o43P-_TRnd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="6b7e43d4-a01a-4a8b-e16b-0a30813353c3"
# Número de labels que representam sentimento negativo
df_movies[df_movies.sentiment==0].count()
# + [markdown] id="dQFk_8ZSWc48" colab_type="text"
# ## Transformando documentos em vetores
# + id="NdQDgN8Jr4bR" colab_type="code" colab={}
# Toy bag-of-words example: fit a vocabulary on three short documents and
# encode each document as a vector of raw term counts.
docs = np.array(['The sun is shinnig',
                 'The weather is sweet',
                 'The sun if shinning, the weather is sweet and one and one is two'])
count = CountVectorizer()
bag = count.fit_transform(docs)
# + id="cEQwjGXIsah_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5da9374d-351f-475a-e0be-e9c9239f67a3"
print(bag.toarray())
# + id="gdm_GTDGswIR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ee2f02f1-f0e0-41be-a95b-36b3075749fc"
term = df_movies.loc[0,'review'][-50:]
term
# + [markdown] id="CpYVJXGEWrzq" colab_type="text"
# ## Preparação dos dados
# + id="V4wQ8w-Ws5-n" colab_type="code" colab={}
import re


def preprocessor(text):
    """Normalize a raw review string for sentiment analysis.

    Strips HTML tags, lowercases the text, collapses every run of non-word
    characters to a single space, and re-appends any emoticons (e.g. ":)",
    ";-(") found in the original text with their "nose" ("-") removed, so
    they survive as tokens.
    """
    # Raw strings for all regex patterns: '\W' and '\)' in plain strings are
    # invalid escape sequences (SyntaxWarning on modern Python).
    # Remove HTML markup such as "</a>" or "<br />".
    text = re.sub(r'<[^>]*>', '', text)
    # Capture emoticons before punctuation is stripped below.
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text)
    text = (re.sub(r'[\W]+', ' ', text.lower())
            + ' '.join(emoticons).replace('-', ''))
    return text
# + id="k_wWZf10uGx_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6bed8e11-2f2e-4b52-c3a4-e88442ff12d9"
print(preprocessor(term))
# + id="s58HgnN4udiG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d19abeb0-026b-435d-a6fe-3ca6e2bb9666"
print(preprocessor("</a>This ;) is a :( test :-)!"))
# + id="404NAr5ZurIW" colab_type="code" colab={}
df_movies['review'] = df_movies['review'].apply(preprocessor)
# + id="YZlUjlLguM2l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="44419249-bc2e-4847-9919-62d9fcf6f726"
df_movies['review']
# + [markdown] id="Fve2HH1EWu3n" colab_type="text"
# ## Etapa de tokenização dos dados
#
# - Consiste no processo que divide uma sentença em unidades mais básicas
# + id="MxYRI3U7u7KF" colab_type="code" colab={}
porter = PorterStemmer()
def tokenizer(text):
    """Split *text* into tokens on whitespace (no stemming applied)."""
    tokens = text.split()
    return tokens
def tokenizer_porter(text):
    """Whitespace-tokenize *text* and reduce each token to its Porter stem."""
    return list(map(porter.stem, text.split()))
# + id="RFxERDX3vMYO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c8372034-3461-47c0-b34b-5f3eca8e7835"
tokenizer('Luminous beings are we. Not this crude matter.')
# + id="ju4Gfk5yvSTO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f412e8e1-c94a-4fb4-d7f2-5e6bf01c51ea"
tokenizer_porter('Luminous beings are we. Not this crude matter.')
# + id="M9-i5RHWvZDR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="bc25d14b-a232-42b4-a0af-5cc56aa01012"
import nltk
nltk.download('stopwords')
# + id="xq_C_WBYvbrv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0401f50a-2e89-4301-d76b-9c258859534d"
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('Luminous beings are we. Not this crude matter.')[-10:] if w not in stop]
# + id="4nB8INV4tc1n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="facf300b-1374-482d-cbb0-e02649434901"
df_movies.review.head()
# + [markdown] id="QUC95iD-wBX-" colab_type="text"
# ## Transformando documentos em vetores TF-IDF
#
# - TF-IDF (Term Frequency - Inverse Document Frequency) - utilizado para diminuir
# a importância das palavras exibidas em muitos documentos em comum, que são pouco úteis para discernir os documentos, em vez de simplesmente contar a frequência das palavras, como é feito com o CountVectorizer.
# + id="5-78i05dv683" colab_type="code" colab={}
# Encode every review as an L2-normalized TF-IDF vector.  The text was already
# cleaned by `preprocessor`, so no extra preprocessing/lowercasing is done
# here; tokens are Porter-stemmed via `tokenizer_porter`.
tfidf = TfidfVectorizer(
    strip_accents=None, lowercase=False, preprocessor=None,
    tokenizer=tokenizer_porter, use_idf=True, norm='l2', smooth_idf=True)
y = df_movies.sentiment.values
X = tfidf.fit_transform(df_movies.review)
# + [markdown] id="2GeUQWc5W0ql" colab_type="text"
# ## Classficando documentos utilizando modelo de regressão logística
# + id="i0duJ9zvvRnm" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# 50/50 train/test split.  shuffle=False keeps the original row order, which
# makes random_state irrelevant here.
# NOTE(review): with shuffle=False this assumes the CSV rows are not sorted
# by label — verify, or the split classes will be imbalanced.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=0.5, shuffle=False)
# + id="r5TGhQHyW94W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="438364ae-66f5-4585-aa73-c530daee08fc"
from sklearn.linear_model import LogisticRegressionCV
clf = LogisticRegressionCV(
cv=5,
scoring='accuracy',
random_state=0,
n_jobs=-1,
verbose=3,
max_iter=300)
clf.fit(X_train, y_train)
# + [markdown] id="9GWWfip4BdOD" colab_type="text"
# ### Aprende o vocabulário do vetorizador com base nos parametros de treinamento , esse vectorizer será salvo para ser aplicado em uma nova sentença.
# + id="lJuJxu2Y_P9v" colab_type="code" colab={}
## Salvando vectorizer em um arquivo
with open('vectorizer.pkl', 'wb') as f:
pickle.dump(tfidf, f)
# + [markdown] id="grFK_Y4WXF9F" colab_type="text"
# ## Salvando modelo em um arquivo
# + id="vGu6QU0mPIlD" colab_type="code" colab={}
## Salvando modelo em um arquivo
with open('sentiment_analysis_model.pkl', 'wb') as f:
pickle.dump(clf, f)
# + id="rD9Bdwxsq7Jr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cac42809-8880-4fcf-a750-757cad514743"
print('Precisão do teste: {:.3f}'.format(clf.score(X_test, y_test)))
# + [markdown] id="qt6qDPrfC7e7" colab_type="text"
# ## Carregando arquivos do modelo e do vetorizados para ser utilizando em novas predições
# + id="Cj7sjPS4CnWo" colab_type="code" colab={}
# Reload the persisted vectorizer and classifier so new text can be scored
# without refitting anything.
file_model = 'sentiment_analysis_model.pkl'
file_vectorizer = 'vectorizer.pkl'
with open(file_vectorizer, 'rb') as f:
    vectorizer = pickle.load(f)
with open(file_model, 'rb') as f:
    model = pickle.load(f)
# + id="qrw2cG-2tj5y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="db165684-d7b3-4436-a66f-ccfd66436844"
# user_input = "I think I'm a good developer with really good understanding of .NET"
user_input = "I didn't like this movie, it sucks"
# Vectorize the raw text with the *same* fitted vectorizer, then map the
# predicted class index to a human-readable label (0 -> negative).
review = vectorizer.transform([str(user_input)])
prediction = model.predict(review)
result = 'Negativa' if prediction == 0 else 'Positiva'
output = {'Predição': result}
output
# + id="1-kZUjhfFy5C" colab_type="code" colab={}
| web/utils/notebook/Sentiment_Analysis_with_Logistic_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://pythonista.io)
# # Tipos de datos en Python 3.
# Python ha evolucionado para ofrecer poderosos tipos de datos que lo diferencian de otros lenguajes de programación por su sencillez y flexibilidad.
#
# Aún cuando Python cuenta con una amplia biblioteca que incluye muy diversos tipos de datos, ofrece ciertos tipos básicos.
# ## Particularidades de los tipos de datos en Python.
#
# ### Tipos dinámicos.
#
# Python es un lenguaje que no requiere que se defina el tipo de un objeto. El intérprete "infiere" el tipo de dato del que se trata.
#
# ### Fuertemente tipado.
#
# Existen operaciones que no están permitidas entre tipos que no sean compatibles.
#
# ### Los tipos son clases.
#
# En Python todos sus elementos son objetos y los datos, una vez identificados, se convierten en objetos instanciados del tipo al que pertenecen.
# ## Tabla de tipos de datos básicos de Python 3.
#
# La siguiente tabla resume y describe los tipos de datos básicos de Python 3.
#
# |Tipo de dato|Colección|Indexable|Mutable|Contenido|Ejemplo|
# |:-----------:|:--:|:--:|:--:|:----:|:-----|
# |```int```|NO|NO|NO|Números enteros|```-12```|
# |```float```|NO|NO|NO|Números de punto flotante|```4.361```|
# |```complex```|NO|NO|NO|Números complejos|```(41.6-11.3j)```|
# |```bool```|NO|NO|NO|Valores booleanos|```True```|
# |```NoneType```|NO|NO|NO|Sin valor|```None```|
# |```str```|SÍ|Numérico|NO|Caracteres Unicode|```'Gödel'```|
# |```bytes```|SÍ|Numérico|NO|Caracteres ASCII|```b'Hola'```|
# |```bytearray```|SÍ|Numérico|SÍ|Caracteres ASCII|```bytearray(b'Hola')```|
# |```list```|SÍ|Numérico|SÍ|Cualquier objeto|```[1, 2.0, 'Tres']```|
# |```tuple```|SÍ|Numérico|NO|Cualquier objeto|```(1, 2.0, 'Tres')```|
# |```dict```|SÍ|Por clave|Sí|Pares *clave:valor*|```{'nombre':'Juan', 'promedio':10}```|
# |```set```|SÍ|NO|SÍ|Objetos inmutables|```{1, False, 'María'}```|
# |```frozenset```|SÍ|NO|NO|Objetos inmutables|```frozenset({{1, False, 'María'})```|
#
# * **Las colecciones** son objetos que contienen a otros objetos. A los objetos contenidos también se les refiere como elementos.
#
# * **Los tipos indexables** tienen la capacidad de asignar a cada uno de los elementos que contienen un identificador único (índice) que puede consistir en un número entero o una clave dependiendo del tipo del que se trate.
#
# * **Los tipos mutables** permiten eliminar, sustituir e incluso añadir elementos a su contenido.
#
#
# ## Números enteros (```int```).
#
# Python identifica a los número enteros como un tipo de dato el cual puede ser expresado de la siguiente manera.
#
# * Decimal: ```24```, ```60```.
# * Binario: ```0b010011```, ```0b1101```.
# * Hexadecimal: ```0x18```, ```0x3cf4```.
# * Octal: ``0o30``, ```0o74```.
#
# Python 2 también identifica a un tipo llamado entero largo (```long```), al cual se le añade la letra ```L``` al final de número, pero ya no es reconocido por Python 3.
# **Ejemplos:**
# * La siguiente celda define al número ```24```.
24
# * La siguiente celda define al número ```139``` en formato binario.
0b10001011
# * La siguiente celda define al número ```1522``` en formato hexadecimal.
0x5f2
# * La siguiente celda define al número ```159``` en formato octal.
0o237
# ## Números de punto flotante (``float``).
#
# Los objetos tipo ``float`` corresponden al conjunto de los números reales.
#
# **Ejemplos:**
#
# * ```3.141595```
# * ```12.```
# * ```-45.35```
# #### Precisión de los números flotantes.
#
# Hay que tomar en cuenta que la precisión de los números depende en gran medida de la capacidad del equipo de cómputo, por lo que en ocasiones una operación con números de tipo ```float``` no dará el resultado exacto, sino una aproximación.
# **Ejemplo:**
# * La siguiente expresión da por resultado un número racional que corresponde a una sucesión infinita del dígito ```6``` después del punto decimal.
2 / 3
# En este caso, es imposible para Python calcular una sucesión infinita de ```6``` y por ende, el intérprete truncó el número a 16 decimales.
# ## Números complejos (```complex```).
#
# Los objetos de tipo ```complex``` corresponden al conjunto de los números complejos.
#
# Siempre que el componente en los números reales sea distinto de ```0```, los objetos de tipo ```complex``` se expresarán como un par de números de tipo ```float``` separados por el operador de adición ```+```, en el que el primer número corresponde al componente en los números reales y el componente en los números imaginarios es identificado añadiéndole la letra ```j``` al final.
#
# La sintaxis estricta es la siguiente:
#
#
# * Cuando el componente imaginario es positivo.
#
# ```
# (<real>+<imaginario>j)
# ```
#
# * Cuando el componente imaginario es negativo.
#
# ```
# (<real>-<imaginario>j)
#
# ```
#
# * Cuando el componente real es igual a ```0```.
#
# ```
# <imaginario>j
# ```
#
# Donde:
#
# * ```<real>``` es el componente real del número complejo.
# * ```<imaginario>``` es el componente imaginario del número complejo.
# **Ejemplos:**
# * La siguiente celda muestra la forma estricta en la que se define un objeto tipo ```complex```.
(2+5j)
# * Las siguientes expresiones implican operaciones aritméticas con números imaginarios, las cuales dan por resultado objetos tipo ```complex```.
1.323 - 1j
-2.9j - 23.03
12.4 + 0j
# * En caso de que sólo se defina el componente imaginario, el resultado sólo será ```15j```.
15j
# ## Valores booleanos (```bool```).
#
# El tipo ```bool``` es una especie de tipo numérico que es utilizado para evaluar expresiones lógicas y tiene dos valores: ```True``` y ```False```.
#
# * Si una expresión lógica es válida, el resultado es ```True```.
# * Si una expresión lógica NO es válida, el resultado es ```False```.
# * ```False``` equivale numéricamente a ```0```.
# * Cualquier otro valor no vacío equivale a ```True``` y su valor por defecto es ```1```.
#
# **Nota:** Las expresiones lógicas se estudiarán más adelante.
# **Ejemplos:**
True
False
# ## ```NoneType```.
#
# El único objeto de este tipo es ```None``` y representa un valor nulo.
#
# Una expresión que dé por resultado ```None``` no es desplegado por el intérprete.
# **Ejemplo:**
# * La siguiente celda incluye al objeto ```None```, pero al ejecutarla, no se despliega nada.
None
# <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
# <p style="text-align: center">© <NAME>. 2020.</p>
| 06_tipos_de_datos_en_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/C-HARRIETH/Dog-Breed-Image-Classification/blob/main/SuperBrains.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xMxtPsqcTsSH"
# # **DOG BREED IMAGE CLASSIFICATION**
# + [markdown] id="kLG2VTrnTvYL"
# ## 1. Defining the Question
# + [markdown] id="XecOwPNorl2W"
# ### a) Specifying the Question
# + [markdown] id="8l3-hBO6bODP"
# We have been tasked by an upcoming dog shelter to build a model that classifies different dog breeds from given images.
# + [markdown] id="J4wfHZwQrs-t"
# ### b) Defining the Metric for Success
# + [markdown] id="wtuuUD4lhoOp"
# The project will be considered successful when we create a model with an accuracy score of 80%.
# + [markdown] id="a9BPYqunry97"
# ### c) Understanding the context
# + [markdown] id="j1mjh1PZiSTX"
# This project is important to dog shelters because, it will help the workers classify the dogs correctly for better care since different breeds of dogs require different care.
# + [markdown] id="7KMRBJ7zr9HD"
# ### d) Recording the Experimental Design
# + [markdown] id="YmsiILy-mGoT"
# The following are the steps taken in the analysis.
#
# 1. Acquiring relevant data
# 2. Exploring the data
# 3. Data pre-processing
# 4. Training the model
# 5. Evaluating the model
# + [markdown] id="zSGyg6kWsBUl"
# ### e) Data Relevance
# + [markdown] id="Fxo8ZhRTpCM2"
# The relevant data contains images of different breeds of dogs.
#
# The data relevance will be measured against the matrix of success.
# + [markdown] id="iUNbvIvnT7ep"
# ## 2. Loading libraries
# + id="KR6vCguwrXeS"
import warnings
warnings.filterwarnings('ignore')
# + id="M2gTsuTKtFtT"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import scipy.ndimage as spi
import tensorflow as tf
plt.rcParams['figure.figsize'] = [16, 16]
# + id="nAHsq8K6rjOT"
from fastai.vision import *
from fastai.metrics import accuracy, error_rate
# + id="a1MfJ1ylY4T2"
import tensorflow.keras.layers as L
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPool2D, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.applications import Xception
from sklearn.metrics import confusion_matrix, f1_score
# + [markdown] id="OI3P3YnHUEBk"
# ## 3. Loading the Data
# + id="3O1fCB9Ir1Ke"
# The ten dog-breed class labels used throughout the notebook.
dog_breeds = ["chihuahua", "rottweiler", "germanshepherd", "pitbull", "mongrel", "labrador", "doberman", "pomeranian", "ridgeback", "basenji"]
# Root directory under which one sub-folder per breed will hold its images.
path = Path('data/Inputdata')
# Create (if missing) and collect the per-breed image directories.
folder = []
for breed in dog_breeds:
    breed_dir = path / breed
    breed_dir.mkdir(parents=True, exist_ok=True)
    folder.append(breed_dir)
# + id="I7Fldk9cuQ9T"
# One CSV of image URLs per breed, named '<breed>.csv'.
csv = [breed + '.csv' for breed in dog_breeds]
# + colab={"base_uri": "https://localhost:8080/"} id="gLMlMbDauUgy" outputId="502226b7-9d5e-4259-df03-28a5153e31c8"
# Checking path to the folders.
folder
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="kmwbOA04uvTl" outputId="a9a801f3-2db0-4873-d12a-b8a7bbf52747"
# Preview of one of dog breed csv files(basenji)
df = pd.read_csv("basenji.csv")
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="gvjYtcRsOVOh" outputId="14153840-ac16-4db7-b974-d5aebd760d91"
# Download the images and store them in folders created
m = 0 # Initialize variable
# Looping through the csvs to download 400 images using the download_images function
# for each dog breed.
while m < len(csv):
download_images(csv[m], folder[m], max_pics=400)
m += 1
# + [markdown] id="aEY0yEPwG4dG"
# ## 4. Exploratory Data Analysis
# + colab={"base_uri": "https://localhost:8080/", "height": 199} id="NL1-a7zSO1Ac" outputId="cf7a07f6-9269-48ce-8a2b-fd691d03bcf1"
# Using the verify_images function to confirm the absence of corrupt images for the classes.
for c in dog_breeds:
print(c)
verify_images(path/c, delete=True, max_size=500)
# + colab={"base_uri": "https://localhost:8080/"} id="diA8-lyJhcRc" outputId="c8cf8e66-7870-4ad8-b711-720df3040f8c"
# Creating a function that loads the dog breed csv files and outputs their shapes
def load_data(name, printname, filename):
    """Load one breed's CSV file, report its shape and return the DataFrame.

    Parameters
    ----------
    name : object
        Unused; kept only for backward compatibility with existing call
        sites (the original implementation immediately shadowed it).
    printname : str
        Human-readable breed name used in the printed report.
    filename : str
        Path of the CSV file to load.

    Returns
    -------
    pandas.DataFrame
        The loaded table.  The original version discarded it and returned
        None, so callers printing the result always printed ``None``.
    """
    frame = pd.read_csv(filename)
    print('The shape of ' + printname + ' is', frame.shape)
    return frame
print(load_data(df, 'basenji', 'basenji.csv'))
print(load_data(df, 'chihuahua', 'chihuahua.csv'))
print(load_data(df, 'doberman', 'doberman.csv'))
print(load_data(df, 'germanshepherd', 'germanshepherd.csv'))
print(load_data(df, 'labrador', 'labrador.csv'))
print(load_data(df, 'mongrel', 'mongrel.csv'))
print(load_data(df, 'pitbull', 'pitbull.csv'))
print(load_data(df, 'pomeranian', 'pomeranian.csv'))
print(load_data(df, 'ridgeback', 'ridgeback.csv'))
print(load_data(df, 'rottweiler', 'rottweiler.csv'))
# + [markdown] id="vTbdjSrhVIiT"
# ## 5. Implementing the Solution with TensorFlow and Keras
# + colab={"base_uri": "https://localhost:8080/"} id="ledmiWxUA819" outputId="dfeb1fab-7378-4a54-bb4a-4cae3e1e3fb4"
# installing splitfolders
# !pip install split_folders
# + id="XTE3H-XYBDWj"
import splitfolders
# + colab={"base_uri": "https://localhost:8080/"} id="p_qHyBnCC2U5" outputId="9a635ecf-960d-40d5-f7f5-01b9ab90927f"
# Path to the folders
input_folder = "/content/data/Inputdata"
output = "/content/data/Processeddata"
# Splitting the data into train, test and validation sets.
splitfolders.ratio(input_folder, output, seed=42, ratio=(.6, .2, .2))
# + colab={"base_uri": "https://localhost:8080/"} id="SKhNDMhMD3-o" outputId="4d03da68-ac5c-4d38-edcb-6c21b73eb500"
# Ratios information
help(splitfolders.ratio)
# + id="vQz2uD0Qw-z7"
# Specifying height and width of ResNet50's input layer.
img_height, img_width = (224, 224)
# Specifying train and validation set batch size.
batch_size= 32
# Defining the train, test and validation directory paths
train_data_dir = r'/content/data/Processeddata/train'
valid_data_dir = r'/content/data/Processeddata/val'
test_data_dir = r'/content/data/Processeddata/test'
# + colab={"base_uri": "https://localhost:8080/"} id="4ViMQkew1b96" outputId="9bb9f6c0-3e29-4968-fd73-517645fe1a52"
# Pre-processing
# Defining our parameters for the ImageDataGenerator
train_datagen = ImageDataGenerator(preprocessing_function = preprocess_input,
shear_range=0.2, zoom_range=0.2,
horizontal_flip= True, validation_split= 0.2)
# Specifying train features
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size = (img_height, img_width),
batch_size= batch_size,
class_mode= 'categorical',
subset = 'training')
# Specifying validation features
valid_generator = train_datagen.flow_from_directory(
valid_data_dir,
target_size = (img_height, img_width),
batch_size= batch_size,
class_mode= 'categorical',
subset = 'validation')
# + colab={"base_uri": "https://localhost:8080/"} id="Xp_2Z-l26eIc" outputId="a39bd251-826d-4d2a-ce1d-6aec9014ed0e"
# Specifying validation features
test_generator = train_datagen.flow_from_directory(
test_data_dir,
target_size = (img_height, img_width),
batch_size= 1,
class_mode= 'categorical',
subset = 'validation')
# + colab={"base_uri": "https://localhost:8080/"} id="fkl4uX0N7Bn3" outputId="17ba498f-c0c9-40e3-ff2f-27ffae4c49d5"
# Training the model
# Defining the base model by setting it to ResNet50
base_model = ResNet50(include_top=False, weights= 'imagenet')
# Taking the output of the base model
x= base_model.output
# Additional layers after the output of the ResNet50
x= GlobalAveragePooling2D()(x)
x= Dense(1024, activation='relu')(x)
# Utilising our own classes to predict from the pre-trained ResNet50
predictions = Dense(train_generator.num_classes, activation= 'softmax')(x)
# Defining the transfer learn model taking the input from the ResNet50 and
# output is the prediction on the final most layer.
model = Model(inputs= base_model.input, outputs= predictions)
# Keeping the layers of our transfer learning model non-trainable.
# Freezing happens before compile(), so only the newly added
# pooling/dense head is updated during training.
for layer in base_model.layers:
    layer.trainable = False
# Compiling the model by passing adam as the optimizer and categorical_crossentropy as the
# loss function and accuracy as the metric for viewing the accuracy for each training epoch.
model.compile(optimizer= 'adam', loss='categorical_crossentropy', metrics= ['accuracy'])
# Fitting the model
model.fit(train_generator, epochs = 10)
# + colab={"base_uri": "https://localhost:8080/"} id="MPET1zS8rFLl" outputId="3d9804c2-470d-49ba-8aa3-e772059c4b0e"
# Viewing all the layers of the network using the model's summary method.
model.summary()
# + id="_jiqY4JsB_rY"
model.save('/content/data/SavedModel/ResNet50_breeds.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="jJBQZ_P2AAYu" outputId="8cfdff47-d97f-48f7-9f41-606b7b7a65a2"
test_loss, test_acc = model.evaluate(test_generator, verbose=2)
print('\n Test Accuracy: ', test_acc)
# + colab={"base_uri": "https://localhost:8080/", "height": 921} id="5BAbxVrBA5HH" outputId="2c3d61d8-8f2b-4bb2-f02c-1cd7d3208e69"
# Confusion matrix
# Loading the saved model.
model = tf.keras.models.load_model('/content/data/SavedModel/ResNet50_breeds.h5')
# Acquiring the dog breed filenames in the test set
filenames = test_generator.filenames
# Initializing nb_samples to the number of files in test set.
nb_samples = len(test_generator)
# Predicted class-probability vectors, one per test batch (batch_size=1).
y_prob = []
# One-hot ground-truth labels collected alongside the predictions.
y_act = []
test_generator.reset()
for _ in range (nb_samples):
    X_test, Y_test = test_generator.next()
    y_prob.append(model.predict(X_test))
    y_act.append(Y_test)
# Map argmax indices back to class names for predictions and ground truth.
predicted_class = [list(train_generator.class_indices.keys())[i.argmax()] for i in y_prob]
actual_class = [list(train_generator.class_indices.keys())[i.argmax()] for i in y_act]
out_df = pd.DataFrame(np.stack([predicted_class, actual_class]).T, columns=['predicted_class', 'actual_class'])
# Renamed from `confusion_matrix` to avoid shadowing sklearn's
# confusion_matrix imported at the top of the notebook.
cm = pd.crosstab(out_df['actual_class'], out_df['predicted_class'], rownames= ['Actual'], colnames=['Predicted'])
# Plotting the confusion matrix.  Save BEFORE show(): with inline backends
# show() closes the figure, so saving afterwards writes a blank image.
sn.heatmap(cm, cmap='Blues', annot=True, fmt= 'd')
plt.savefig('Confusion_matrix.jpg')
plt.show()
# Printing the accuracy score of the model on the test set
print('Test Accuracy; {}'.format((np.diagonal(cm).sum()/cm.sum().sum()*100)))
# + [markdown] id="lQ2G4ZPDVOXE"
# ## 7. Challenging the solution with Fastai
# + id="N5DzxJfkYmKz"
# Selecting a random seed
np.random.seed(42)
# Splitting the data into train and validation sets
breed = ImageDataBunch.from_folder(path, train='.', valid_pct=0.4, ds_tfms=get_transforms(), size=224).normalize(imagenet_stats)
# + colab={"base_uri": "https://localhost:8080/"} id="bV9JFi7VYl5L" outputId="c8cbdd5a-1d08-4ff2-f375-f01ccbc40445"
# Viewing the classes
breed.classes
# + colab={"base_uri": "https://localhost:8080/", "height": 729} id="e6oVXOAvYlhn" outputId="5f53fd3b-7fb7-4779-fbda-bfd024c83fc7"
# Viewing the data
breed.show_batch(4, figsize=(12,10))
# + colab={"base_uri": "https://localhost:8080/", "height": 105, "referenced_widgets": ["0b204a0a00e84639bc3894075491b16a", "a2bedc7f8636431599f2331ef5f854a5", "fd0fc22686634a878d2bcbc8191d923d", "c098710132ee4ce994616c87b22a2a69", "85c80f773bbd47d997de205ecd0fac9e", "d4645343ba084318898c875707a08d7c", "4f2d0624eed149cdae2868ee38396361", "078891c1c92a463781cdaeb7f16f8c4d"]} id="axh86a-oQkd3" outputId="c83cc1d9-5e97-4c91-f334-04461b10d96e"
# Creating a model and preliminary training
# using create_cnn to create a convolutional neural network.
learn = create_cnn(breed, models.resnet34, metrics=[accuracy, error_rate])
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="-4xmu2xyQnqt" outputId="b346b4f2-455a-431f-a289-ecd6ede311d7"
defaults.device = torch.device('cuda')
# Fitting the model
learn.fit_one_cycle(5)
# + colab={"base_uri": "https://localhost:8080/"} id="bl1ONKWjQpuP" outputId="3981fd4b-cab7-4be4-db60-84faaef50039"
# Model Architecture
learn.model
# + [markdown] id="xrmHVMVsVS--"
# ## 8. Follow up questions
# + [markdown] id="HPQviDmNtta8"
# ### a). Did we have the right data?
# + [markdown] id="F5EAf4uyXgMU"
# The images scraped were relevant to the project, but some had other unwanted objects and captions.
# + [markdown] id="qjFHK1CKty7o"
# ### b). Do we need other data to answer our question?
# + [markdown] id="usK1GpDoYHMh"
# No, we don't.
# + [markdown] id="HSsicSdvt4Zs"
# ### c). Did we have the right question?
# + [markdown] id="QGPTh4sAYTea"
# We had the right analysis question.
| SuperBrains.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Overview
#
# Plot GW plots of variant density
#
# Obtain summed values of discovered SNPs etc.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import matplotlib.lines as mlines
from matplotlib.ticker import MultipleLocator
# +
total_snps_union_path = "../content/tables/snp_discovery/gambcoluarab_{chrom}_tot.csv"
# nonref = pd.read_csv(, index_col=1)
# segsites = pd.read_csv("../content/tables/fraction_segregating_windows.csv", index_col=1)
# -
from ag3 import release_data, GenomeFigure
v3 = release_data()
autosomes = '2R', '2L', '3R', '3L'
chromosomes = autosomes + ('X',)
d = {c: pd.read_csv(
total_snps_union_path.format(chrom=c), index_col=0) for c in chromosomes}
r = pd.concat(d).groupby(level=1).agg(np.sum).T
r.T
r["is_multiallelic_gambcoluarab"] / r["is_segregating_gambcoluarab"]
# ## gamb_colu only
total_snps_byspecies_path = "../content/tables/snp_discovery/{species}_{chrom}_tot.csv"
d = {c: pd.read_csv(
total_snps_byspecies_path.format(
species="gamb_colu", chrom=c), index_col=0) for c in chromosomes}
gc = pd.concat(d).groupby(level=1).agg(np.sum).T
gc.T
gc["is_multiallelic"] / gc["is_segregating"]
1 / (gc["is_segregating"] / gc["total_accessible_bases"])
# ## Arabiensis
d = {c: pd.read_csv(
total_snps_byspecies_path.format(
species="arab", chrom=c), index_col=0) for c in chromosomes}
ab = pd.concat(d).groupby(level=1).agg(np.sum).T
ab.T
ab["is_multiallelic"] / ab["is_segregating"]
1 / (ab["is_segregating"] / ab["total_accessible_bases"])
# ## GW plotting
#
# tbc
genome = {
"2R": 61545105,
"3R": 53200684,
"2L": 49364325,
"UNKN": 42389979,
"3L": 41963435,
"X": 24393108,
# "Y_unplaced": 237045,
# "Mt": 15363
}
def plot_genome_line(chrom, ax, df=None, species="gamb_colu", minv=0, maxv=1.0, **kwargs):
    """Draw one chromosome panel of a genome-wide line plot.

    Parameters
    ----------
    chrom : str
        Chromosome name.  The "UNKN" panel is blanked, or reused as the
        shared legend when *species* is a list.
    ax : matplotlib.axes.Axes
        Axes of the panel to draw on.
    df : pandas.DataFrame, optional
        Table indexed by chromosome with a ``start`` column and one value
        column per species (assumed from usage here -- TODO confirm).
    species : str or list of str, default "gamb_colu"
        Column(s) of *df* to plot; a list draws one coloured line each.
    minv, maxv : float
        y-axis limits; tick spacing is derived from the range midpoint.
    **kwargs
        Accepted for GenomeFigure.apply compatibility; currently unused.
    """
    if isinstance(species, list) and chrom == 'UNKN':
        # Repurpose the unplaced-contig panel to hold the shared legend.
        pal = sns.color_palette('muted', len(species))
        ax.axis("off")
        ax.set_title("")
        l = [mlines.Line2D([], [], color=pal[i], label=sp) for i, sp in enumerate(species)]
        ax.legend(handles=l, loc='center')
        return None
    elif isinstance(species, list):
        # One line per species, coloured consistently with the legend panel.
        pal = sns.color_palette('muted', len(species))
        for i, sp in enumerate(species):
            y = df.loc[chrom][sp]
            ax.plot(df.loc[chrom].start.values, y, color=pal[i], label=sp)
    elif chrom == "UNKN":
        # Single-species mode: nothing to show for the unplaced contig.
        ax.axis("off")
        ax.set_title("")
        return None
    else:
        x = df.loc[chrom].start.values
        y = df.loc[chrom][species]
        ax.plot(x, y)
    # Shared y-axis formatting: major ticks at multiples of the range
    # midpoint, minor ticks at half that spacing.
    ax.set_ylim([minv, maxv])
    mid = (minv + maxv) / 2
    ax.yaxis.set_major_locator(MultipleLocator(mid))
    ax.yaxis.set_minor_locator(MultipleLocator(mid/2))
# ## Plot fraction nonref alleles
# NOTE(review): the cells that load `nonref` and `segsites` are commented
# out above (see the total_snps_union_path cell) -- as written, the two
# g.apply(...) calls below will raise NameError; confirm and restore the
# pd.read_csv loads before running this section.
g = GenomeFigure(genome, figsize=(8, 4))
g.apply(plot_genome_line, df=nonref, species=["gamb_colu", "arab"], maxv=0.08)
# ## Plot fraction of sites that are segregating
g = GenomeFigure(genome, figsize=(8, 4))
g.apply(plot_genome_line, df=segsites, species=["gamb_colu", "arab"], maxv=1.0)
| notebooks/snpdiscovery-gw-plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <div align="center">TF-IDF</div>
# ---------------------------------------------------------------------
#
# you can Find me on Github:
# > ###### [ GitHub](https://github.com/lev1khachatryan)
#
# <img src="pics/main.jpg" />
# #### TF-IDF stands for term frequency-inverse document frequency.
# The TFIDF weight is used in text mining and IR. The weight is a measure used to evaluate how important a word is to a document in a collection of documents.
#
# When using a simple technique like a frequency table of the terms in the document, we remove stop words, punctuation and stem the word to its root. And then, the importance of the word is measured in terms of its frequency; higher the frequency, more important the word.
#
# In case of TF-IDF, the only text pre-processing is removing punctuation and lower casing the words. We do not have to worry about the stop words.
#
# TF-IDF is the product of the TF and IDF scores of the term.
#
# TF = number of times the term appears in the doc/total number of words in the doc
#
# IDF = ln(number of docs/number docs the term appears in)
#
# Higher the TFIDF score, the rarer the term is and vice-versa.
#
# TFIDF is successfully used by search engines like Google, as a ranking factor for content.
#
# The whole idea is to weigh down the frequent terms while scaling up the rare ones.
# ### We’ll now implement TFIDF, manually, over a piece of text.
# We are going to calculate the TFIDF score of each term in a piece of text. The text will be tokenized into sentences and each sentence is then considered a document.
#
# Starting off with importing the required libraries.
from sklearn.feature_extraction.text import TfidfVectorizer
# Toy corpus: each sentence is treated as one document.
corpus = [
    'This is the first document.',
    'This document is the second document.',
    'And this is the third one.',
    'Is this the first document?',
]
# Learn the vocabulary and compute the TF-IDF matrix in one pass.
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names())
# ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']
# One row per document, one column per vocabulary term.
print(X.shape)
# (4, 9)
corpus
# The non-zero TF-IDF weights stored in the sparse matrix.
X.data
# <img src="pics/tf-idf.png" />
| Lectures/Levon/TF-IDF/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
output_notebook()
# -
class Star(object):
    """A point of light with a position and a constant velocity."""

    def __init__(self, x, y, vx, vy) -> None:
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy

    def move(self, time: int = 1) -> None:
        """Advance the position by *time* steps of the velocity."""
        self.x += self.vx * time
        self.y += self.vy * time

    def move_back(self, time: int = 1) -> None:
        """Undo *time* steps: moving back is moving forward in negative time."""
        self.move(-time)
# Read the raw puzzle input: one "position=<x, y> velocity=<vx, vy>" record per line.
with open('input') as reader:
    raw = reader.read()
pattern = r'^position=< *([-\d]+), *([-\d]+)> *velocity=<\s*([-\d]+), *([-\d]+)>$'
matches = re.finditer(pattern, raw, re.MULTILINE)
stars = []
for match in matches:
    x, y, vx, vy = match.groups()
    # y and vy are negated -- presumably because the puzzle's y axis points
    # down while Bokeh's points up, so the message renders upright (confirm).
    stars.append(Star(int(x),-int(y), int(vx), -int(vy)))
# Scatter plot of the initial star positions; the title tracks elapsed time.
p = figure(title='0', x_axis_label='x', y_axis_label='y', width=990)
x = [star.x for star in stars]
y = [star.y for star in stars]
r = p.circle(x, y, size=10, fill_color='blue', line_color=None)
show(p, notebook_handle=True)
def update(steps, move):
    """Step every star via the method named *move*, then refresh plot and title."""
    for s in stars:
        getattr(s, move)(steps)
    data = r.data_source.data
    data['x'] = [s.x for s in stars]
    data['y'] = [s.y for s in stars]
    # The title tracks net elapsed time, so moving back counts negatively.
    delta = steps if move == 'move' else -steps
    p.title.text = str(int(p.title.text) + delta)
    push_notebook()
from ipywidgets import interact_manual
interact_manual(update, steps=[1, 10, 100, 1000, 10000], move=['move', 'move_back'])
| Day10/Stars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Docking Featurizer
#
# This shows how we can dock a small molecule in a protein using the Hybrid docking protocol implemented in the OpenEye toolkit.
from kinoml.core.ligands import Ligand
from kinoml.core.proteins import BaseProtein
from kinoml.core.systems import ProteinLigandComplex
from kinoml.features.complexes import OEHybridDockingFeaturizer, OEKLIFSKinaseHybridDockingFeaturizer
# ## OEHybridDockingFeaturizer
# Let's perform a Hybrid docking into [4YNE](https://www.rcsb.org/structure/4YNE) retrieved from PDB with larotrectinib read from a smiles (reading from file is possible with the FileLigand class). First we need to generate the `System`, which is a `ProteinLigandComplex`.
ligand = Ligand.from_smiles(smiles="C1CC(N(C1)C2=NC3=C(C=NN3C=C2)NC(=O)N4CCC(C4)O)C5=C(C=CC(=C5)F)F",
name="larotrectinib")
base_protein = BaseProtein(name="4yne")
base_protein.pdb_id = "4yne"
protein_ligand_complex = ProteinLigandComplex(components=[base_protein, ligand])
# Next perform the docking. The current implementation will detect the co-crystallized ligand in 4YNE and perform a hybrid docking.
#
# > Note: The OE Spruce loop database is needed for this step. The location is specified by the `loop_db` argument. You can request a copy [here](https://www.eyesopen.com/database-downloads).
docking_featurizer = OEHybridDockingFeaturizer(loop_db="~/.OpenEye/rcsb_spruce.loop_db")
system = docking_featurizer.featurize([protein_ligand_complex])
system
# This docking featurizer is quite general, so it could be applied to any protein family and can be seen as a template for more advanced and protein family specific docking featurizers.
# ## OEKLIFSKinaseHybridDockingFeaturizer
# Next, we will do the same but via the kinase specific `OEKLIFSKinaseHybridDockingFeaturizer`. This featurizer only needs a SMILES and a KLIFS kinase ID as input. It will automatically select pdb structures for generating the complex of interest.
ligand = Ligand.from_smiles(smiles='O=C(Nc1cnn2ccc(N3CCC[C@@H]3c3cc(F)ccc3F)nc12)N1CC[C@H](O)C1', name='larotrectinib')
base_protein = BaseProtein(name='NTRK1')
base_protein.klifs_kinase_id = 480
kinase_ligand_complex = ProteinLigandComplex(components=[base_protein, ligand])
docking_featurizer = OEKLIFSKinaseHybridDockingFeaturizer(loop_db="~/.OpenEye/rcsb_spruce.loop_db")
system = docking_featurizer.featurize([kinase_ligand_complex])
system
| examples/docking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="AVgFQmmeHpkW"
# Machine Learning envolve uma mudança de paradigma: Enviamos dados + respostas para o modelo e ele nos retorna a regra que leva os dados às respostas.
# Neste primeiro exemplo, vamos explorar a funcionalidade para buscar a regra (função) que leva os valores X em Y / f(x) = y
#
# + id="jhxrNMcLHizd"
import math
# + colab={"base_uri": "https://localhost:8080/"} id="lDOso0FQHkWG" outputId="fe19163a-e217-43c6-af9a-cffd68be572b"
# Training pairs: the hidden rule mapping x to y is what we want to recover.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
# Assume a linear model y = a * x + b and try a pair of trial coefficients.
a = 3
b = -2
# Predictions for every input, kept so we can compare them with the targets.
Y_estimado = [a * valor + b for valor in x]
print('Y real:', y)
print('Y estimado:', Y_estimado)
# Error measure: per-pair difference between desired and predicted Y.
# Squaring each difference keeps positive and negative errors from
# cancelling each other when we add them up.
erro_quadratico_total = sum(
    (real - estimado) ** 2 for real, estimado in zip(y, Y_estimado)
)
# Note: what is printed is the square root of the summed squared error.
print('Erro quadratico total ', str(math.sqrt(erro_quadratico_total)))
# + id="c6sGE8icLKbt"
| hello-world/linear_regression_loss_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Distributed Estimation
#
# This notebook goes through a couple of examples to show how to use `distributed_estimation`. We import the `DistributedModel` class and make the exog and endog generators.
# +
import numpy as np
from scipy.stats.distributions import norm
from statsmodels.base.distributed_estimation import DistributedModel
def _exog_gen(exog, partitions):
"""partitions exog data"""
n_exog = exog.shape[0]
n_part = np.ceil(n_exog / partitions)
ii = 0
while ii < n_exog:
jj = int(min(ii + n_part, n_exog))
yield exog[ii:jj, :]
ii += int(n_part)
def _endog_gen(endog, partitions):
"""partitions endog data"""
n_endog = endog.shape[0]
n_part = np.ceil(n_endog / partitions)
ii = 0
while ii < n_endog:
jj = int(min(ii + n_part, n_endog))
yield endog[ii:jj]
ii += int(n_part)
# -
# Next we generate some random data to serve as an example.
# Simulated regression problem: 1000 observations, 25 features, with a
# sparse true coefficient vector (a random 0/1 mask zeroes out roughly half).
X = np.random.normal(size=(1000, 25))
beta = np.random.normal(size=25)
beta *= np.random.randint(0, 2, size=25)
y = norm.rvs(loc=X.dot(beta))  # gaussian noise around the linear signal
m = 5  # number of data partitions ("machines")
# This is the most basic fit, showing all of the defaults, which are to use OLS as the model class, and the debiasing procedure.
debiased_OLS_mod = DistributedModel(m)
debiased_OLS_fit = debiased_OLS_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2}
)
# Then we run through a slightly more complicated example which uses the GLM model class.
# +
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Gaussian
# Same distributed fit, but with GLM (Gaussian family) as the per-partition
# model class instead of the default OLS.
debiased_GLM_mod = DistributedModel(
    m, model_class=GLM, init_kwds={"family": Gaussian()}
)
debiased_GLM_fit = debiased_GLM_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2}
)
# -
# We can also change the `estimation_method` and the `join_method`. The below example show how this works for the standard OLS case. Here we using a naive averaging approach instead of the debiasing procedure.
# +
from statsmodels.base.distributed_estimation import _est_regularized_naive, _join_naive
# Naive approach: regularized per-partition estimates joined by simple
# averaging, skipping the debiasing step.
naive_OLS_reg_mod = DistributedModel(
    m, estimation_method=_est_regularized_naive, join_method=_join_naive
)
naive_OLS_reg_params = naive_OLS_reg_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2}
)
# -
# Finally, we can also change the `results_class` used. The following example shows how this work for a simple case with an unregularized model and naive averaging.
# +
from statsmodels.base.distributed_estimation import (
    _est_unregularized_naive,
    DistributedResults,
)
# Unregularized per-partition estimation, naive averaging, and an explicit
# results wrapper class.
naive_OLS_unreg_mod = DistributedModel(
    m,
    estimation_method=_est_unregularized_naive,
    join_method=_join_naive,
    results_class=DistributedResults,
)
naive_OLS_unreg_params = naive_OLS_unreg_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2}
)
| examples/notebooks/distributed_estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: lambda_venv
# language: python
# name: lambda_venv
# ---
# <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
# <br></br>
# <br></br>
#
# ## *Data Science Unit 4 Sprint 3 Assignment 1*
#
# # Recurrent Neural Networks and Long Short Term Memory (LSTM)
#
# 
#
# It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of Wiliam Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.
#
# This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt
#
# Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.
#
# Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.
#
# Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more!
import os
# Force CPU-only TensorFlow by hiding all CUDA devices.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# + colab={} colab_type="code" id="Ltj1je1fp5rO"
# TODO - Words, words, mere words, no matter from the heart.
from urllib.request import urlopen
# Complete works of Shakespeare from Project Gutenberg.
# NOTE(review): if the request does not return 200, `text` is never bound
# and the following cells raise NameError.
res = urlopen("https://www.gutenberg.org/files/100/100-0.txt")
if res.status == 200:
    text = res.read().decode("utf-8")
# +
# Character vocabulary and both direction lookups (index <-> character).
# NOTE: set() ordering is not stable across runs, so the integer assigned
# to each character differs between sessions.
chars = list(set(text))
i_to_c = {i: c for i, c in enumerate(chars)}
c_to_i = {c: i for i, c in enumerate(chars)}
encoded = [c_to_i[c] for c in text]
sequences = []
next_char = []
# +
maxlen = 30  # window length (characters) fed to the LSTM
step = 5     # stride between consecutive training windows
def preprocess(text):
    """Encode *text* into one-hot training tensors for the character LSTM.

    Strips every character outside ``[A-Za-z0-9 ]``, slices the encoded
    text into overlapping windows of ``maxlen`` characters (stride
    ``step``), and returns:

    X -- bool array, shape (n_windows, maxlen, n_chars): one-hot inputs
    y -- bool array, shape (n_windows, n_chars): one-hot next character

    Relies on the module-level ``maxlen``, ``step``, ``chars`` and the
    ``c_to_i`` lookup built from the full corpus.
    """
    text = re.sub("[^A-Za-z0-9 ]", "", text)
    encoded = [c_to_i[c] for c in text]
    # Bug fixes: the original appended to module-level lists (so repeated
    # calls mixed windows from previous corpora into the new tensors) and
    # ran the identical slicing loop twice, duplicating every window.
    sequences = []
    next_char = []
    for i in range(0, len(encoded) - maxlen, step):
        sequences.append(encoded[i : i + maxlen])
        next_char.append(encoded[i + maxlen])
    # np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    X = np.zeros((len(sequences), maxlen, len(chars)), dtype=bool)
    y = np.zeros((len(sequences), len(chars)), dtype=bool)
    for i, seq in enumerate(sequences):
        for t, char in enumerate(seq):
            X[i, t, char] = 1
        y[i, next_char[i]] = 1
    return X, y
# +
import numpy as np
import re
# NOTE: preprocess() (defined above) needs both np and re as globals, so
# these imports must run before this call even though they appear late.
X, y = preprocess(text)
X.shape
# +
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Embedding
max_features = 20000  # unused below; presumably left over from an embedding variant -- TODO confirm
# Single-LSTM character model: one-hot windows in, softmax over the
# character vocabulary out.
model = Sequential()
model.add(LSTM(len(chars), input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
# +
def sample(preds):
    """Randomly draw an index according to the probability vector *preds*."""
    # Re-normalise through log/exp space (temperature fixed at 1), then take
    # a single multinomial draw and report which index came up.
    weights = np.asarray(preds, dtype='float64')
    weights = np.exp(np.log(weights) / 1)
    weights = weights / np.sum(weights)
    drawn = np.random.multinomial(1, weights, 1)
    return np.argmax(drawn)
def on_epoch_end(epoch, _):
    """Keras LambdaCallback hook: print 400 characters sampled from the model.

    Picks a random `maxlen`-character seed from the raw corpus `text`, then
    repeatedly one-hot encodes the current window, asks the model for the
    next-character distribution, samples an index from it, and slides the
    window forward by one character. Relies on the module-level `model`,
    `text`, `maxlen`, `chars`, `c_to_i`, `i_to_c`, `random` and `sys`.
    """
    # Function invoked at end of each epoch. Prints generated text.
    print()
    print('----- Generating text after Epoch: %d' % epoch)
    start_index = random.randint(0, len(text) - maxlen - 1)
    generated = ''
    sentence = text[start_index: start_index + maxlen]
    generated += sentence
    print('----- Generating with seed: "' + sentence + '"')
    sys.stdout.write(generated)
    for i in range(400):
        # One-hot encode the current rolling window for a single prediction.
        x_pred = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, c_to_i[char]] = 1
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample(preds)
        # NOTE: this local `next_char` shadows the module-level list of the
        # same name, but only inside this function.
        next_char = i_to_c[next_index]
        sentence = sentence[1:] + next_char
        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
# Wrap the hook so Keras invokes it after every epoch of model.fit().
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
# +
import random, sys
model.fit(X, y, batch_size=128, epochs=3, callbacks=[print_callback],)
# +
res = urlopen("https://www.gutenberg.org/files/100/100-0.txt")
if res.status == 200:
text = res.read().decode("utf-8")
section_titles = """
THE SONNETS
ALL’S WELL THAT ENDS WELL
THE TRAGEDY OF ANTONY AND CLEOPATRA
AS YOU LIKE IT
THE COMEDY OF ERRORS
THE TRAGEDY OF CORIOLANUS
CYMBELINE
THE TRAGEDY OF HAMLET, PRINCE OF DENMARK
THE FIRST PART OF KING HENRY THE FOURTH
THE SECOND PART OF KING HENRY THE FOURTH
THE LIFE OF KING HENRY THE FIFTH
THE FIRST PART OF HENRY THE SIXTH
THE SECOND PART OF KING HENRY THE SIXTH
THE THIRD PART OF KING HENRY THE SIXTH
KING HENRY THE EIGHTH
KING JOHN
THE TRAGEDY OF <NAME>
THE TRAGEDY OF KING LEAR
LOVE’S LABOUR’S LOST
THE TRAGEDY OF MACBETH
MEASURE FOR MEASURE
THE MERCHANT OF VENICE
THE MERRY WIVES OF WINDSOR
A MIDSUMMER NIGHT’S DREAM
MUCH ADO ABOUT NOTHING
THE TRAGEDY OF OTHELLO, MOOR OF VENICE
PERICLES, PRINCE OF TYRE
KING RICHARD THE SECOND
KING RICHARD THE THIRD
THE TRAGEDY OF ROMEO AND JULIET
THE TAMING OF THE SHREW
THE TEMPEST
THE LIFE OF TIMON OF ATHENS
THE TRAGEDY OF TITUS ANDRONICUS
THE HISTORY OF TROILUS AND CRESSIDA
TWELFTH NIGHT; OR, WHAT YOU WILL
THE TWO GENTLEMEN OF VERONA
THE TWO NOBLE KINSMEN
THE WINTER’S TALE
A LOVER’S COMPLAINT
THE PASSIONATE PILGRIM
THE PHOENIX AND THE TURTLE
THE RAPE OF LUCRECE
VENUS AND ADONIS
"""
titles = [title.strip() for title in section_titles.split("\n") if title]
# +
# Wrap each title in a pattern that requires a run of 3+ newlines/carriage
# returns on both sides, so titles are only matched as standalone headings.
# NOTE(review): only the prefix string is raw; in the suffix, \n and \r are
# literal newline/CR characters, which still match the same text here.
titles = [
    r"(((\n)|(\r)){3,}" + title + "((\n)|(\r)){3,})"
    for title in titles
]
titles_re = re.compile(
    "|".join(titles)
)
res = titles_re.split(text)
# +
# Duplicate of the compile/split above (notebook scratch work).
import re
titles_re = re.compile(
    "|".join(titles)
)
# -
res = titles_re.split(text)
# Drop empty split groups and the preamble before the first title, then keep
# raising the length threshold until exactly the 44 works remain.
filtered = [x for x in res if x][1:]
thresh = 10
while len(filtered) > 44:
    filtered = [x for x in filtered if x and len(x) > thresh]
    thresh += 1
title_to_contents = {
    title: contents
    for title, contents in zip(titles, filtered)
}
# k[16:-14] strips the 16-char regex prefix and 14-char suffix added above,
# recovering the plain title text as the key.
title_to_contents2 = {}
for k, v in title_to_contents.items():
    title_to_contents2[k[16:-14]] = v
    print(k[16:-14], v.strip()[:100])
    print()
    print()
# +
max_features = 20000
model = Sequential()
model.add(LSTM(len(chars), input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
# +
sonnets = title_to_contents2["THE SONNETS"]
X, y= preprocess(sonnets)
X[0]
# -
model.fit(X, y, batch_size=128, epochs=10, callbacks=[print_callback],)
# +
import random, sys
on_epoch_end(3, "_")
# +
from bs4 import BeautifulSoup
import requests
# Scrape the Dropkick Murphys song index page from AZLyrics.
res = requests.get("https://www.azlyrics.com/d/dropkickmurphys.html")
res.status_code
# +
soup = BeautifulSoup(res.text)
urls = []
for item in soup.find_all("div", "listalbum-item"):
    urls.append(item.find("a")["href"])
# hrefs are relative ("../..."); drop the leading "../" and make absolute.
urls = ["https://www.azlyrics.com/" + url[3:] for url in urls]
urls[:3]
# +
from time import sleep
lyrics = []
for url in urls:
    res = requests.get(url)
    if res.status_code != 200:
        print(f"Failed to get url: {url}")
        continue
    soup = BeautifulSoup(res.text)
    lyrics.append(" ".join(soup.find("div", "main-page").stripped_strings))
    print(url)
    print(lyrics[-1])
    sleep(10)  # be polite to the server between requests
# -
# Cache the scraped lyrics locally (writelines adds no separators).
with open("dkm.txt", "w") as f:
    f.writelines(lyrics)
# +
max_features = 20000
model = Sequential()
model.add(LSTM(len(chars), input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
# -
lyrics[:2]
X, y = preprocess(" ".join(lyrics))
X[0]
import random
# +
model.fit(X, y, batch_size=128, epochs=10, callbacks=[print_callback],)
# + [markdown] colab_type="text" id="zE4a4O7Bp5x1"
# # Resources and Stretch Goals
# + [markdown] colab_type="text" id="uT3UV3gap9H6"
# ## Stretch goals:
# - Refine the training and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. plays versus sonnets)
#
# - Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from
# - Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.)
# - Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier
# - Run on bigger, better data
#
# ## Resources:
# - [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN
# - [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness"
# - [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset
# - [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation
# - [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
# -
| module1-rnn-and-lstm/LS_DS_431_RNN_and_LSTM_Assignment.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Julia 深度學習:類神經網路模型簡介
# 本範例需要使用到的套件有 Flux,請在執行以下範例前先安裝。
#
# ```
# ] add Flux
# ```
#
# 注意:近期 Flux 正在持續更新,請確保您的 Julia 在 v1.3 版以上,以及 Flux 在 v0.10.4 以上或是最新版。
using Flux
using Flux.Data: DataLoader
using Flux: @epochs, onecold, onehotbatch, throttle, logitcrossentropy
using MLDatasets
using Statistics
# ## Load the data
# MNIST digits: flatten each image to a 784-vector and one-hot the labels 0:9.
train_X, train_y = MNIST.traindata(Float32)
test_X, test_y = MNIST.testdata(Float32)
train_X = Flux.flatten(train_X)
test_X = Flux.flatten(test_X)
train_y = onehotbatch(train_y, 0:9)
test_y = onehotbatch(test_y, 0:9)
batchsize = 1024
train = DataLoader(train_X, train_y, batchsize=batchsize, shuffle=true)
test = DataLoader(test_X, test_y, batchsize=batchsize)
# ## FFN model
# Fully-connected classifier: 784 -> 256 -> 128 -> 10.
model = Chain(
    Dense(784, 256, relu),
    Dense(256, 128, relu),
    Dense(128, 10),
    softmax)
# ## Loss function
# NOTE(review): logitcrossentropy expects raw logits, but the Chain above
# already ends in softmax; one of the two should likely be dropped (remove
# the softmax layer, or use crossentropy instead). Confirm before reuse.
loss(x, y) = logitcrossentropy(model(x), y)
# Mean of `loss` over every mini-batch in the global `test` DataLoader.
function test_loss()
    total = 0f0
    for (features, labels) in test
        total += loss(features, labels)
    end
    return total / length(test)
end
# Report the current test loss whenever the (throttled) callback fires.
evalcb() = @show(test_loss())
# ## Model training
epochs = 20
# throttle() limits evalcb to at most one invocation every 10 seconds.
@epochs epochs Flux.train!(loss, params(model), train, ADAM(0.005), cb=throttle(evalcb, 10))
# ## Model evaluation
# Fraction of test samples whose argmax prediction matches the label.
accuracy(x, y) = mean(onecold(model(x)) .== onecold(y))
accuracy(test_X, test_y)
| example/julia_032_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.font_manager
import numpy as np
import seaborn as sns
# %matplotlib inline
from scipy import stats
from pyod.models.abod import ABOD
from pyod.models.cblof import CBLOF
from pyod.models.feature_bagging import FeatureBagging
from pyod.models.hbos import HBOS
from pyod.models.iforest import IForest
from pyod.models.knn import KNN
from pyod.models.lof import LOF
# Career playoff statistics per player; expects the CSV relative to the notebook.
dataset=pd.read_csv('ML_Project/player_playoffs_career.csv')
dataset.head()
dataset.describe()
dataset.info()
# +
# Top-10 players by games played (gp), scattered against their id (ilkid).
dataset.sort_values(by='gp', ascending=False).head(10)[['ilkid','gp']].plot.scatter(x='ilkid',y='gp',figsize=(12,6))
# -
# Distributions of games played and total points.
dataset['gp'].plot.hist(bins=50)
dataset['pts'].plot.hist(bins=50)
# Relationship between games played and points: regression fit + joint plot.
sns.lmplot(x='gp',y='pts', data=dataset)
sns.jointplot(x='gp',y='pts', data=dataset)
| outlier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Aligning Labels
#
#
# Aligning xlabel and ylabel using `.Figure.align_xlabels` and
# `.Figure.align_ylabels`
#
# `.Figure.align_labels` wraps these two functions.
#
# Note that the xlabel "XLabel1 1" would normally be much closer to the
# x-axis, and "YLabel1 0" would be much closer to the y-axis of their
# respective axes.
#
# +
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
fig = plt.figure(tight_layout=True)
gs = gridspec.GridSpec(2, 2)
# Top panel spans both columns; the large y-values make its ylabel sit far
# from the axis, which is what align_labels() will compensate for.
ax = fig.add_subplot(gs[0, :])
ax.plot(np.arange(0, 1e6, 1000))
ax.set_ylabel('YLabel0')
ax.set_xlabel('XLabel0')
# Two bottom panels; rotating the left panel's tick labels pushes its xlabel
# further from the axis than its neighbour's.
for i in range(2):
    ax = fig.add_subplot(gs[1, i])
    ax.plot(np.arange(1., 0., -0.1) * 2000., np.arange(1., 0., -0.1))
    ax.set_ylabel('YLabel1 %d' % i)
    ax.set_xlabel('XLabel1 %d' % i)
    if i == 0:
        for tick in ax.get_xticklabels():
            tick.set_rotation(55)
fig.align_labels()  # same as fig.align_xlabels(); fig.align_ylabels()
plt.show()
| matplotlib/gallery_jupyter/subplots_axes_and_figures/align_labels_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Root folder of the Obsidian vault to scan (Windows path).
Dir = "C:\\Users\\Junyu\\OneDrive\\200-Areas\\BioSoft"
import os
# List every markdown note under the vault, recursively.
for subdirs, dirs, files in os.walk(Dir):
    for file in files:
        if file.endswith(".md"):
            print(os.path.join(subdirs, file))
# +
# Read one note line by line; try/finally guarantees the handle is closed.
file = open('C:\\Users\\Junyu\\OneDrive\\200-Areas\\BioSoft\\BioSoft.md', 'r')
try:
    text_lines = file.readlines()
    print(type(text_lines), text_lines)
    for line in text_lines:
        print(type(line), line)
finally:
    file.close()
# -
# Opening file
file1 = open('C:\\Users\\Junyu\\OneDrive\\200-Areas\\BioSoft\\Conda.md', 'r')
# Using for loop
for line in file1:
print(line.strip())
# Closing files
file1.close()
import re
# A [[wiki-link]] capture group; the non-greedy body stops at the first ]].
regex = r"(\[\[.*?\]\])"
match = re.search(regex, "[[Anaconda]]")
match = re.match(regex, "[[Anaconda]]")
print(match)
match.group()
match.group().replace("[[", "").replace("]]", "")
# should not use sub, just replace `[[`
# NOTE(review): re.sub removes the entire link, so `node` is an empty string
# here; the replace() approach above is what the later cells actually use.
node = re.sub(r"(\[\[.*?\]\])", "", "[[Anaconda]]")
node
import pandas as pd
# Accumulate one graph edge per wiki-link: Source note -> Target note.
Links = pd.DataFrame()
# Fix: the original line dropped the closing `}` of the row dict before the
# `ignore_index=True` keyword, which is a SyntaxError.
# NOTE(review): DataFrame.append was removed in pandas 2.0; on modern pandas
# use pd.concat([Links, pd.DataFrame([row])], ignore_index=True) instead.
Links = Links.append({"Source": str(file), "Target": str(node), "Value": "1"}, ignore_index=True)
# Opening file
file = open('C:\\Users\\Junyu\\OneDrive\\200-Areas\\BioSoft\\Conda.md', 'r')
# Using for loop
Links = pd.DataFrame()
regex = r"(\[\[.*?\]\])"
# Build one edge row per line containing a wiki-link.
# NOTE(review): a line without a link makes re.search return None, so
# match.group() raises AttributeError; also str(file) stores the repr of the
# file object, not the note's name -- verify both before reuse.
for line in file:
    #node = ""
    print(line.strip())
    match = re.search(regex, line.strip())
    node = match.group().replace("[[", "").replace("]]", "")
    Links = Links.append({"Source":str(file), "Target":str(node), "Value":"1"}, ignore_index=True)
    print(node)
# Closing files
file.close()
# `Links` & `Points`
Links
# + jupyter={"outputs_hidden": true}
regex = r"(\[\[.*?\]\])"
Links = pd.DataFrame()
# Walk the whole vault: every [[wiki-link]] found in a note becomes an edge
# note-name -> link-target with unit weight. Only the first link per line is
# captured (re.search), and notes keep their name minus the .md suffix.
for subdirs, dirs, files in os.walk(Dir):
    for file in files:
        if file.endswith(".md"):
            #print(os.path.join(subdirs, file))
            filePath = os.path.join(subdirs, file)
            infile = open(filePath, 'r')
            for line in infile:
                #print(line.strip())
                match = re.search(regex, line.strip())
                if match != None:
                    node = match.group().replace("[[", "").replace("]]", "")
                    Links = Links.append({"Source":str(file.replace(".md", "")), "Target":str(node), "Value":"1"}, ignore_index=True)
                    print(node)
            # Closing files
            infile.close()
# + jupyter={"outputs_hidden": true}
Links
# -
Links.to_csv("Links.csv", index=False)
# + jupyter={"outputs_hidden": true}
Links["Source"]
# -
# Stack sources and targets into one series; every occurrence of a note name
# counts toward that node's size.
Sum = pd.concat([Links["Source"], Links["Target"]], ignore_index=True, sort=False)
Sum
Points = Sum.value_counts()
Points
# NOTE(review): Points is a Series, so assigning .columns has no effect;
# getting ID/Size columns would need reset_index() first.
Points.columns = ["ID", "Size"]
Points
type(Points)
Points.to_csv("Points.csv")
# + jupyter={"outputs_hidden": true}
# NOTE(review): `result` is never defined in this notebook, so this cell
# raises NameError; it looks like leftover scratch work (perhaps meant to be
# Links.drop_duplicates()).
result.drop_duplicates()
# -
# How to add Group?
| Obsidian2Flourish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scikit-Learn
#
# See: http://scikit-learn.org/stable/documentation.html
#
# *Author: <NAME>*
#
# *Copyright © 2017 CATALIT LLC*
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# ## 1. Read data from Files
# Each row is a geolocated sample with a binary `target` class.
df = pd.read_csv('data/geoloc.csv')
df.head()
# Color the scatter by class to see the spatial structure of the labels.
df.plot(kind='scatter', c='target', x='lat', y='lon', cmap='bwr')
# ## 2. Define features (X) and target (y)
X = df[['lat', 'lon']]
y = df['target']
# ## 3. Train/Test split
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
# -
# ## 4. Fit a Decision Tree model
# +
from sklearn.tree import DecisionTreeClassifier
# A shallow tree (depth 3) stays interpretable and limits overfitting.
model = DecisionTreeClassifier(max_depth=3, random_state=0)
model.fit(X_train, y_train)
# -
# ## 5. Accuracy score on benchmark, train and test sets
# Benchmark = always predicting the class labelled 0 (assumed here to be the
# majority class -- TODO confirm for other datasets).
bm_score = y.value_counts()[0] / len(y)
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print("Accuracy | Benchmark: {:0.3}, Train: {:0.3}, Test: {:0.3}".format(bm_score, train_score, test_score))
# ## 6. Confusion Matrix and Classification Report
# +
from sklearn.metrics import confusion_matrix, classification_report
y_pred = model.predict(X_test)
# +
# Rows = true class, columns = predicted class.
cm = confusion_matrix(y_test, y_pred)
pd.DataFrame(cm,
             index=["Miss", "Hit"],
             columns=['pred_Miss', 'pred_Hit'])
# -
print(classification_report(y_test, y_pred))
# ## 7. Display the tree
# +
# Render the fitted tree as a PNG via graphviz.
# Fix: `sklearn.externals.six` was removed in scikit-learn 0.23; the
# StringIO it re-exported is just the standard-library one.
from io import StringIO
import pydotplus
from sklearn.tree import export_graphviz
from IPython.display import Image
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
                feature_names=X.columns,
                class_names=['Miss','Hit'],
                filled=True, rounded=True,
                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# -
# ## 8. Display the decision boundary
# +
# Build a 101x101 grid over the feature square, classify every grid point,
# and shade the resulting decision regions under the data scatter.
hticks = np.linspace(-1.5, 1.5, 101)
vticks = np.linspace(-1.5, 1.5, 101)
aa, bb = np.meshgrid(hticks, vticks)
ab = np.c_[aa.ravel(), bb.ravel()]  # grid points as (n, 2) feature rows
c = model.predict(ab)
cc = c.reshape(aa.shape)
ax = df.plot(kind='scatter', c='target', x='lat', y='lon', cmap='bwr')
ax.contourf(aa, bb, cc, cmap='bwr', alpha=0.2)
# -
# ## Exercise
#
#
# Iterate and improve on the decision tree model. Now you have a basic pipeline example. How can you improve the score? Try some of the following:
#
# 1. change some of the initialization parameters of the decision tree re run the code.
# - Does the score change?
# - Does the decision boundary change?
# 2. try some other model like Logistic Regression, SVM, Naive Bayes or any other model you like from [here](http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html)
# 3. what's the highest score you can get?
| 03_Scikit_Learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example Usage for GeoCluster Package
# +
## Basic stuff
# %load_ext autoreload
# %autoreload
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
display(HTML("""<style>div.output_area{max-height:10000px;overflow:scroll;}</style>"""))
## Python Version
import sys
print("Python: {0}".format(sys.version))
## Install
from timeUtils import clock, elapsed
from ioUtils import saveJoblib
from geocluster import geoClusters
from geoUtils import convertMetersToLat, convertLatToMeters, convertMetersToLong, convertLongToMeters
from geoclusterUtils import genCenters, genCluster, genClusters, genTripsBetweenClusters
import datetime as dt
start = dt.datetime.now()
print("Notebook Last Run Initiated: "+str(start))
# -
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# # Example GPS Data
genMax = 75
distMax = 500
raw = genClusters(20, 250, latRange=[29.8, 30.2], lngRange=[49.8, 50.2], dist="gauss", maxrad=genMax)
# +
def plotMeters(ax1, longMeters, latMeters):
    """Attach meter-scaled twin axes to *ax1*.

    Plots the meter coordinates invisibly (lw=0) on twinx/twiny axes so the
    secondary axes are scaled to the meter ranges of the same data
    (presumably to show meters alongside degrees -- TODO confirm).
    """
    ax2 = ax1.twinx()
    ax2.plot(longMeters, latMeters, color='b', lw=0)
    ax3 = ax1.twiny()
    ax3.plot(longMeters, latMeters, color='b', lw=0)
def plotRawData(rawdata, color='cyan'):
    """Scatter-plot raw (lat, lng) points and return the matplotlib axis.

    Parameters
    ----------
    rawdata : array of shape (n, 2) -- latitude in column 0, longitude in
        column 1 (as produced by the generators above).
    color : marker colour for the points.
    """
    from matplotlib import pyplot as plt
    fig, ax1 = plt.subplots()
    lat = rawdata[:, 0]
    long = rawdata[:, 1]
    # Bug fix: the `color` parameter was previously ignored -- the scatter
    # call hard-coded 'cyan'. The default preserves the old appearance.
    ax1.scatter(long, lat, s=15, linewidth=0, color=color, alpha=1)
    return ax1
def clusterData(rawdata, distMax):
    """Cluster raw (lat, lng) points with geoClusters and return the engine.

    distMax is forwarded as the maximum clustering distance; seedMin=2 is
    passed to findClusters (presumably a minimum seed-point count -- TODO
    confirm against the geocluster docs).
    """
    # The %-magics below are inert inside a function body (plain comments);
    # they only matter when run directly as a notebook cell.
    # %load_ext autoreload
    # %autoreload
    gc = geoClusters(key="test", points=rawdata, distMax=distMax, debug=True)
    gc.findClusters(seedMin=2, debug=True)
    if True:  # debug toggle for the summary line
        print("Found {0} clusters using {1} cells and {2} counts".format(gc.getNClusters(), gc.getNCells(), gc.getNCounts()))
    return gc
def plotClusters(ax1, gc, color='red'):
    """Overlay cluster centres (black crosses) and radius circles on *ax1*.

    Each cluster contributes its centre of mass and a translucent circle
    whose radius is the cluster's outermost quantile, converted from meters
    to degrees of latitude. Returns *ax1* so calls can be chained.
    """
    from matplotlib.patches import Circle
    from matplotlib.collections import PatchCollection
    clusters = gc.getClusters()
    patches = []
    print("Plotting {0} clusters".format(len(clusters)))
    for cl, cluster in clusters.items():
        com = cluster.getCoM()
        # Draw using the top quantile rather than getRadius().
        radius = cluster.getQuantiles()[-1]
        ax1.scatter(com[1], com[0], s=10, marker='x', linewidth=2, c='black', alpha=1)
        latDist = convertMetersToLat(radius)
        patches.append(Circle(xy=(com[1], com[0]), radius=latDist))
    # Bug fix: the `color` parameter was previously ignored (facecolor was
    # hard-coded to 'red'); the default keeps the old appearance.
    p = PatchCollection(patches, facecolor=color, alpha=0.25)
    ax1.add_collection(p)
    # Bug fix: previously returned None, so `ax1 = plotClusters(ax1, gc)` at
    # the call site clobbered the axis reference.
    return ax1
# Cluster the synthetic points, then draw the raw data and overlay the
# fitted cluster centres/radii on the same axis.
gc = clusterData(raw, distMax=distMax)
ax1 = plotRawData(raw)
ax1 = plotClusters(ax1, gc)
# -
# ## Generate Random Data From Clusters
# +
# %load_ext autoreload
# %autoreload
from geoclusterUtils import genCenters, genCluster, genClusters, genTripsBetweenClusters
data = genTripsBetweenClusters(1000, gc, returnLoc=True, returnDF=True)
# -
saveJoblib(data, "../network/trips.p")
x = genTripsBetweenClusters(100, gc, returnDF=True)
| example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: VPython
# language: python
# name: vpython
# ---
# +
from vpython import *
# <NAME>, August 2011; GlowScript VPython version November 2014; Jupyter VPython version Feb. 2016
scene.title = "A display of most VPython 3D objects"
scene.width = 640
scene.height = 400
#scene.range = 5
scene.background = color.gray(0.7)
scene.center = vector(0,0.5,0)
scene.forward = vector(-.3,0,-1)
gslabel = label(pos=vector(1.1,2,0), text='VPython', xoffset=40, height=16, color=color.yellow)
box(pos=vector(-2,0,0), size=vector(.3,2.5,2.5), color=color.red)
box(pos=vector(.25,-1.4,0), size=vector(4.8,.3,2.5), color=color.red)
cylinder(pos=vector(-2,2,1.25), radius=0.7, axis=vector(0,0,-2.5), color=color.blue)
ball = sphere(pos=vector(2,1,0), radius=0.5, color=color.cyan)
ptr = arrow(pos=vector(0,0,2), axis=vector(2,0,0), color=color.yellow)
cone(pos=vector(-2,0,0), radius=1, length=3, color=color.green, opacity=0.5)
ring(pos=vector(.2,0,0), radius=.6, axis=vector(1,0,0), thickness=0.12, color=color.gray(0.4))
ellipsoid(pos=vector(-.3,2,0), color=color.orange, size=vector(.3,1.5,1.5))
pyramid(pos=vector(.3,2,0), color=vector(0,0.5,.25), size=vector(0.8,1.2,1.2))
spring = helix(pos=vector(2,-1.25,0), radius=0.3, axis=vector(0,1.8,0), color=color.orange, thickness=.1)
angle = 0
da = .01
trail = curve(color=color.magenta, radius= .02)
trail.append(vector(1,0,0))
trail.append(vector(1,0,2))
trail.append(vector(2,0,2))
while angle < 3*pi/4:
rate(100)
ptr.rotate(angle=da, axis=vector(0,0,1), origin=ptr.pos)
trail.append(ptr.pos+ptr.axis)
angle += da
sleep(1) # sleep for 1 second
scene.autoscale = False
t = 0
dt = .01
y0 = gslabel.pos.y
ball_yo = ball.pos.y
while t < 4:
rate(1/dt)
ball.pos.y = ball_yo+0.5*sin(-4*t)
spring.length = ball.pos.y-spring.pos.y-ball.radius+0.15
gslabel.yoffset = 28*sin(-4*t)
t += dt
scene.append_to_caption("""Drag the mouse and you'll drag a sphere.
On a touch screen, press and hold, then drag.""")
# Hidden marker sphere that follows the pointer while a drag is active.
s = sphere(color=color.magenta, radius=0.3, visible=False)
drag = False
def down(ev):
    # Mouse pressed: show the sphere at the pick point and start dragging.
    global drag
    s.pos = ev.pos
    s.visible = True
    drag = True
def move(ev):
    # Mouse moved: follow the pointer only while a drag is in progress.
    global drag
    if not drag: return
    s.pos = ev.pos
def up(ev):
    # Mouse released: hide the sphere and stop dragging.
    global drag
    s.visible = False
    drag = False
scene.bind("mousedown", down)
scene.bind("mousemove", move)
scene.bind("mouseup", up)
# -
| Demos/VPythonObjects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Radical Stabilisation Energy Calculation Workflow
#
# This workflow takes a user through the entire process to the calculate the Radical Stabilisation Energy in a ligand-protein complex. It will hold you hand (if you want) throughout the entire workflow. If you would like to do something a little different you will need to alter certain parts but hopefully this is clear! The settings used here are for 'demo' mode so a user can get a flavour of the workflow
#
# It takes a PDB as starting structure parameterises, minimises and equilibrates the structure before running a production run. It then takes the ligand adds point charges and runs QM energy calculations. Finally it calculates the vRSE and produces some nice graphs. Ill work on a nice workflow diagram and paragraph to explain it as well.
#
# <NAME>
#
# <EMAIL>
# +
# Sanity-check every third-party dependency the workflow needs up front, so
# the user sees one clear list of missing packages instead of a mid-run
# ImportError. Fix: the "neeed" typos in two of the error messages.
all_good = True
try:
    import xbowflow
except ImportError:
    print('Error - you need to install xbowflow')
    all_good = False
try:
    import numpy as np
except ImportError:
    print('Error - you need to install numpy')
    all_good = False
try:
    import mdtraj as mdt
except ImportError:
    print('Error - you need to install mdtraj')
    all_good = False
try:
    import matplotlib.pyplot as plt
    # %matplotlib inline
except ImportError:
    print('Error - you need to install matplotlib')
    all_good = False
try:
    import nglview as nv
except ImportError:
    print('Error - you need to install nglview')
    all_good = False
try:
    import pinda
except ImportError:
    print('Error - you need to install pinda')
    all_good = False
# Pinda is a thin wrapper around Docker, so Docker itself is also required.
if all_good:
    print('Success - everything is ready to go.')
# +
from utilities import *
# %matplotlib inline
pdb_code = '4njh' # downloads PDB from https://www.rcsb.org/ it can be changed for a local copy
#pdb_4njh = mdt.load('4njh')
structures = download(pdb_code)
# -
# In the next cell, we use the display() function to view our molecule:
view0 = display(structures)
view0
peptides = select(structures, 'protein') # only keep parts of the structure that are 'protein'
model_number = 0  # PDB entries can hold several models; take the first
clean_structure = clean_up(peptides[model_number]) # cleans structure using pdb4amber
# The below cell parameterises the protein-ligand complex using 'standard' methods
# Amber protein force field + TIP3P water; the leap script solvates in a
# truncated octahedron with a 10 A buffer and neutralises with chloride ions.
parameters = ['leaprc.protein.ff14SB', 'leaprc.water.tip3p']
script = """
solvateoct x TIP3PBOX 10.0
addions x Cl- 0
"""
topology, start_coordinates = run_leap(parameters, script, clean_structure)
view = display(start_coordinates, topology)
view
# Next we run the energy minimisation job. The commands for this in the input script are very simple: a) do a minimization (imin=1), b) run for a maximum of 1000 steps (maxcyc=1000), c) use *periodic boundary conditions* (ntb=1).
#
# The minimize() command produces two outputs: the minimized coordinates, and a log file with information about what happened during rhe run. This is a fairly heavy calculation, the cell may take a minute to run.
# +
# Energy minimisation input for Amber:
#   imin=1      -> run a minimisation
#   maxcyc=1000 -> at most 1000 steps
#   ntb=1       -> constant-volume periodic boundary conditions
minimization_script = '''
&cntrl
imin=1, maxcyc=1000,
ntb=1,
/
'''
# minimize() returns the minimised coordinates plus a log of the run.
minimized_coordinates, logfile = minimize(minimization_script, start_coordinates, topology)
minimized_coordinates.save("em_out.ncrst")
logfile.save("em_out.log")
# -
# Look at the results. Load both the starting and final coordinates into the viewer, so you can see how the structure has changed over the minimization.
view2 = display([start_coordinates, minimized_coordinates], topology)
view2
# Check how complete the energy minimisation was by plotting the total energy of the system over the 1000 steps:
plot_energies(logfile)
# You should see that the energy minimisation seems to have been fairly succesful - the energy of the system is almost stable by the end of the run.
#
# Now we run the main molecular dynamics (MD) simulation. The script is more detailed, you will need to consult the [Amber manual](http://ambermd.org/doc12/Amber16.pdf) to get the details.
#
# The run_md() command produces three outputs: the final coordinates of the system, a *trajectory* file, and another log file. This is a major computation, and will take a couple of minutes, or maybe longer, to complete.
# +
md_script = '''
&cntrl
imin=0, irest=0, ntx=1, dt=0.002,
ntt=3, temp0=300, gamma_ln=5.0,
ntp=1, taup=2.0,
ntb=2, ntc=2, ntf=2,
nstlim=1000, ntwx=500, ntpr=500,
/
'''
final_coordinates, trajectory, md_log = run_md(md_script, minimized_coordinates, topology)
final_coordinates.save("md_out.ncrst")
trajectory.save("md_out.nc")
md_log.save("md_out.log")
# -
# Look at the results. The script above set the total simulation time to be twenty picoseconds, and asked for a snapshot to be saved to the trajectory every picosecond, so there are twenty frames in the animation.
view3 = display(trajectory, topology)
view3
# ## vRSE Calculation using point charges
#
# Extracts ligand from trajectory of MD simulations. Creates inputs for Single Point Energy Calculations.
#
# Assumes a QM package is available
from utilities import *
# %matplotlib inline
# Load a pre-computed trajectory snapshot set of the 4NJI complex.
NJI = mdt.load('data/4NJI-1C3-2Mg--1--150ns-chainA-250.pdb')
view = display(NJI)
view
ligand = select(NJI, 'resname MOL') # pull out just the ligand residue
view1 = display(ligand)
view1
# Write one .xyz file per frame for the regular (closed-shell) ligand.
# NOTE(review): assumes the loaded file holds at least 301 frames - confirm
# (the filename suggests 250).
for i in range(301):
    ligand[i].save('data/REG/lig_reg_{}.xyz'.format(i))
# Radical form: same selection minus one atom (index 3272) - presumably the
# abstracted hydrogen; verify the index against the topology.
ligand_rad = select(NJI , 'resname MOL and not index 3272')
display(ligand_rad)
for i in range(301):
    ligand_rad[i].save('data/RAD/lig_rad_{}.xyz'.format(i))
# ## vRSE Calculation
# | WorkFlow/.ipynb_checkpoints/SweetHunter-checkpoint.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from setup import *
import sys
import csv  # stdlib quoting constants; pd.io.common.csv was a private alias removed in pandas 1.0
# if DATA_PATH not in sys.path: sys.path.append(DATA_PATH)
# %matplotlib inline
# Widen the notebook and keep pandas output compact.
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 4)
pd.set_option('display.max_columns', 200)
# Term-frequency / document-frequency table for the tweet vocabulary,
# indexed by token. QUOTE_NONNUMERIC lets unquoted fields parse as floats.
tfdf = pd.read_csv(os.path.join(DATA_PATH, 'tweet_vocab.csv.gz'), index_col=0, compression='gzip',
                   quotechar='"', quoting=csv.QUOTE_NONNUMERIC, low_memory=False)
tfdf.describe().round().astype(int)
# If you try to allocate a 16k word by 100k document DataFrame of 64-bit integers, you'll get a memory error on a 16 GB laptop.
# Later we'll learn about "constant RAM" tools that can handle an unlimitted stream of documents with a large (1M word) vocabulary. But first let's be frugal and see what we can do with robust, mature tools like Pandas.
# Rather than cutting back on those 100k tweets, lets cut back on the words. What are all those 16k words and how often are they all used (maybe we can ignore infrequent words).
# Memory (GB) a dense 100k-document x vocabulary matrix of 8-byte values would need.
GB = 8 * (100 * 1000 * len(tfdf)) / 1.e9
GB
tfdf
# Fortunately the odd words are at the top and bottom of an alphabetical index!
# And it does look like the less useful tokens aren't used many times or in many documents.
# What do you notice that might help distinguish "natural" words (zoom, zoos, zope, zynga) from URLs and machine-code (000, zzp, zsl107)?
# Successive filters: each line starts from the already-filtered result, so the
# final frame satisfies df > 20 AND the ratio condition.
# NOTE(review): if tf (total term count) >= df (document count) then
# (df - tf) / tf is never positive, making the ratio test vacuous --
# possibly (tf - df) / tf was intended; confirm against the data.
tfdf = tfdf[tfdf.df > 9]
tfdf = tfdf[(tfdf.df > 9) & (((tfdf.df - tfdf.tf) / tfdf.tf) < 0.15)]
tfdf = tfdf[(tfdf.df > 20) & (((tfdf.df - tfdf.tf) / tfdf.tf) < 0.15)]
tfdf
# Numpy arrays (guts of a Pandas DataFrame) require 8 bytes for each double-precision value (float64/int64)
# Recompute the memory estimate for the pruned vocabulary.
GB = 8 * (100 * 1000 * len(tfdf)) / 1.e9
GB
# Memory requirements (4 GB) are doable
# But we've lost important words: **"zoom"**
# And there's still a bit of garbage: **"zh3gs0wbno"**
# These look like keys, slugs, hashes or URLs
# Even though the tweets.json format includes a column for URLs
# The URLs are left within the raw text as well
# Let's use a formal but simple grammar engine:
#
# ## Extended regular expressions
# +
url_scheme_popular = r'(\b(' + '|'.join(uri_schemes_popular) + r')[:][/]{2})'  # e.g. 'http://'
fqdn_popular = r'(\b[a-zA-Z0-9-.]+\b([.]' + r'|'.join(tld_popular) + r'\b)\b)'  # host name ending in a popular TLD
# NOTE(review): inside the character class, '#-_' is a RANGE (0x23-0x5F), not the
# three literals '#', '-', '_' -- it also matches ';', '<', '=', '>', etc.; confirm intent.
url_path = r'(\b[\w/?=+#-_&%~\'"\\.,]*\b)'
pd.set_option('display.max_rows', 14)
pd.Series(uri_schemes_iana)
# -
# Full URL pattern: fixed scheme list + popular-TLD host + optional path.
url_popular = r'(\b' + r'(http|https|svn|git|apt)[:]//' + fqdn_popular + url_path + r'\b)'
tweet = "Play the [postiive sum game](http://totalgood.com/a/b?c=42) of life instead of svn://us.gov."
import re
re.findall(url_popular, tweet)
# +
# Email and URL grammars. The 'obfuscated' variants also match addresses people
# write as 'user at example dot com' to evade scrapers.
# email = re.compile(r'^([\w-]+(?:\.[\w-]+)*)@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)')
fqdn = r'(\b[a-zA-Z0-9-.]+([.]' + r'|'.join(tld_iana) + r')\b)'  # host + any IANA TLD
fqdn_popular = r'(\b[a-zA-Z0-9-.]+\b([.]' + r'|'.join(tld_popular) + r'\b)\b)'  # host + popular TLD only
username = r'(\b[a-zA-Z0-9-.!#$%&*+-/=?^_`{|}~]+\b)'  # RFC-ish local part
email = re.compile(r'(\b' + username + r'\b@\b' + fqdn + r'\b)')
email_popular = re.compile(r'(\b' + username + r'\b@\b' + fqdn_popular + r'\b)')
# TODO: unmatched surrounding symbols are accepted/consumed, likewise for multiple dots/ats
at = r'(([-@="_(\[{\|\s]+(at|At|AT)[-@="_)\]\}\|\s]+)|[@])'  # literal '@' or a delimited ' at '
dot = r'(([-.="_(\[{\|\s]+(dot|dt|Dot|DOT)[-.="_)\]\}\|\s]+)|[.])'  # literal '.' or a delimited ' dot '
fqdn_obfuscated = r'(\b(([a-zA-Z0-9-]+' + dot + r'){1,7})(' + r'|'.join(tld_iana) + r')\b)'
fqdn_popular_obfuscated = r'(\b(([a-zA-Z0-9-]+' + dot + r'){1,7})(' + r'|'.join(tld_popular) + r')\b)'
username_obfuscated = r'(([a-zA-Z0-9!#$%&*+/?^`~]+' + dot + r'?){1,7})'
email_obfuscated = re.compile(r'(\b' + username_obfuscated + at + fqdn_obfuscated + r'\b)')
email_popular_obfuscated = re.compile(r'(\b' + username_obfuscated + at + fqdn_popular_obfuscated + r'\b)')
url_path = r'(\b[^\s]+)'  # NOTE: overwrites the stricter url_path defined in the previous cell
url_scheme = r'(\b(' + '|'.join(uri_schemes_iana) + r')[:][/]{2})'
url_scheme_popular = r'(\b(' + '|'.join(uri_schemes_popular) + r')[:][/]{2})'
url = r'(\b' + url_scheme + fqdn + url_path + r'?\b)'
# -
# | docs/notebooks/03 Data -- Getting Selective.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tMwCQUecq3Ig"
# ###Getting started with For Loop
#
# * For loop is used to iterate over a sequence, starting from the first value to the last. The number of iterations to be performed depends upon the length of the list.
# * You can use range() function to run loop specified number of times.
# * The range() function defaults to 0 as a starting, and increments by 1 by default.
#
# Syntax:
# ```
# for iteratingVariable in sequence:\
# statement 1\
# statement 2\
# . . . .\
# statement n
# ```
#
#
# NOTE: Iteration begins from "start" and ends at "stop-1".
# + colab={"base_uri": "https://localhost:8080/"} id="WCOdGawSr5pJ" outputId="ff8c4274-7226-422e-de19-475d79789dab"
# range(11) counts 0 .. 10 (the stop value is excluded)
for i in range(11): print(i, end=" ")
print("\n")
# range(start, stop): counts 1 .. 10 here
for i in range(1, 11): print(i, end=" ")
print("\n")
# range(start, stop, step): every third number starting from 1
for i in range(1, 11, 3): print(i, end=" ")
# + [markdown] id="lwuTGLEjztrd"
# ###Nested loop
#
# * A loop inside a loop is called nested loop.
#
# Let's take pyramid example to understand this.
#
# * Here, "inner loop" will be executed i times for each iteration of the "outer loop".
# * In 1st iteration of outer loop, inner loop runs 1 time.
# * In 2nd iteration of outer loop, inner loop runs 2 times and so on...
# + colab={"base_uri": "https://localhost:8080/"} id="3GNQNmRpzyBu" outputId="52618cf2-0e06-4b7f-f897-586fbdcf29ec"
# Pyramid: row number i is printed i times on its own line.
for i in range(1, 6):
    row = ""
    for j in range(i):
        row += str(i)
    print(row)
# + [markdown] id="WhZXsI6R10ex"
# ##Else in For Loop
#
# The else part is executed once the loop finishes its iterations (without hitting a break).
# + colab={"base_uri": "https://localhost:8080/"} id="gqt5jsSr2JbM" outputId="4698b7e8-7669-4605-9d66-855f0895e9e7"
# The else clause of a for loop runs exactly once, after the loop
# completes without encountering a break statement.
for i in range(0, 8):
    print(i)
else:
    print("Loop i finished")
# + [markdown] id="oXkXcYCf2YBr"
# ###break and continue statement
#
# break: used to terminate the loop early
#
# continue: used to skip the current iteration and start the next one, so code below the continue statement is not executed for that iteration.
# + id="_nTq__CW4mTz"
# break: the loop ends as soon as i reaches 2, so only 0 and 1 print
for i in range(4):
    if i == 2:
        break
    print(i, end=" ")
print("\n")
# continue: the iteration for i == 2 is skipped, so 0, 1 and 3 print
for i in range(4):
    if i == 2:
        continue
    print(i, end=" ")
# + [markdown] id="KIS503AfSCkp"
# #***Extras***
# + [markdown] id="kmUoowB7T0Xa"
# ###One line For loop
# + colab={"base_uri": "https://localhost:8080/"} id="yOeOCIRpT4wW" outputId="cf5a69af-df59-4ef5-a3df-323180159ec2"
# A single-statement for loop can live on one line
for i in range(3): print(i)
print("-----")
# Nested loops vs. the equivalent nested comprehension
for i in range(4):
    for j in range(2):
        print((i, j))
print([(i, j) for i in range(4) for j in range(2)])  # nesting one-line `for` statements directly is a syntax error
print("-----")
# Building a list with append vs. a list comprehension
cubes = []
for i in range(4):
    cubes.append(i ** 3)
print(cubes)
print([i ** 3 for i in range(4)])
print("-----")
# Filtering while building vs. a comprehension with an if clause
cubes = []
for i in range(13):
    if i % 3 == 0:
        cubes.append(i ** 3)
print(cubes)
print([i ** 3 for i in range(13) if i % 3 == 0])
print("-----")
# Choosing between two values: the conditional expression replaces the if/else block
for i in range(13):
    j = i ** 3 if i < 8 else 0
    print(j)
for i in range(13): print(i ** 3 if i < 8 else 0)
# + [markdown] id="I2fx2MOZr7lI"
# ###For loop to iterate List, Tuple, Set, String
#
#
# * Same syntax to iterate all 4
# * The difference between these 4 during iteration is\
#   List : ordered, mutable \
#   Tuple : ordered, immutable\
#   Set : unordered, mutable \
#   String : ordered, immutable
#
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="GpXVfjATtAYu" outputId="4f92287a-5630-4d03-e718-183d0322fb7a"
# The same for-loop syntax iterates a list, a tuple, a set, and a string.
fruits_list = ["mango", "orange", "cherry"]
fruits_tuple = ("mango", "orange", "cherry")
fruits_set = {"mango", "orange", "cherry"}
fruit_string = "apple"
# List: items come back in insertion order
for fruit in fruits_list:
    print(fruit, end=" ")
print("")
# Tuple: ordered as well, just immutable
for fruit in fruits_tuple:
    print(fruit, end=" ")
print("")
# Set: iteration order is not guaranteed
for fruit in fruits_set:
    print(fruit, end=" ")
print("")
# String: iterates character by character
for fruit in fruit_string:
    print(fruit, end=" ")
# + [markdown] id="Qaw9CuHHtBDj"
# ###For loop to iterate dictionary
# + id="UkNpioC1tIki" colab={"base_uri": "https://localhost:8080/"} outputId="7a541cd6-08c3-4683-e929-ec98ea9be467"
# Iterating a dictionary: keys, values, and key/value pairs.
data_dict = {'color': 'red', 'game': 'cricket','fruit': 'mango'}
# Iterating a dict directly yields its keys.
for data in data_dict:
    print(data,end=" ")
print("")
# Index with the key to reach the value.
for data in data_dict:
    print(data_dict[data],end=" ")
print("")
# .items() yields (key, value) pairs.
# Bug fix: this previously printed the stale loop variable `data`
# (left over from the loop above) instead of `data_key`.
for data_key,data_value in data_dict.items():
    print(data_key+":"+data_value,end=" ")
print("")
# .keys() is the explicit spelling of key iteration.
for data in data_dict.keys():
    print(data,end=" ")
print("")
# .values() iterates the values directly.
for data in data_dict.values():
    print(data,end=" ")
# | Python/Loops/For Loop in Python.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# imports
import pandas as pd
import matplotlib.pyplot as plt
# Accuracy log: header-less CSV whose columns are (epoch, accuracy).
acc = pd.read_csv("plot_acc.csv",header=None)
acc.columns= ["epochs","accuracy"]
acc
# +
# Accuracy vs. epoch for the 3-GPU training run.
plt.title("Accuracy plot for 3 GPU's")
plt.xlabel("epochs")
plt.ylabel("Accuracy")
plt.plot(acc.epochs,acc.accuracy)
plt.show()
# +
# Loss log: same layout as the accuracy file - header-less (epoch, loss).
loss = pd.read_csv("plot_loss.csv",header=None)
loss.columns= ["epochs","Loss"]
plt.title("Loss plot for 3 GPU's")
plt.xlabel("epochs")
plt.ylabel("Loss")
plt.plot(loss.epochs,loss.Loss)
plt.show()
# -
loss
# +
# Per-epoch wall-clock time. NOTE: `time` shadows the stdlib module name here.
time = pd.read_csv("plot_time.csv",header=None)
time.columns= ["epochs","Time"]
time= time.head(10) # keep only the first 10 epochs
plt.title("Time plot for 3 GPU's")
plt.xlabel("epochs")
plt.ylabel("Time(Sec)")
plt.plot(time.epochs+1,time.Time) # +1 so the x-axis starts at epoch 1
plt.show()
# -
# NOTE(review): the mean is over the first 10 epochs only (head(10) above),
# although the message says 'total epochs' - confirm which is intended.
print("Avg time over total epochs : ",time.Time.mean())
# | Code/project code from discovery cluster/gpu3_batch128/Plotting.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# # Introduction:
#
# Une sonde spatiale est un véhicule spatial sans équipage lancé dans l'espace pour étudier à plus ou moins grande distance différents objets célestes et elle est amenée à franchir de grandes distances et à fonctionner loin de la Terre et du Soleil. Le facteur principale qui doit être mis en jeu afin de réussir la mission de la sonde est la précision et la complexité de la navigation.
#
# Dans cette optique, on essayera dans ce projet de simuler la trajectoire d'une sonde spatiale dans le système solaire en intégrant les équations de mouvement de Newton, et pour ce faire on commencera d'abord dans la section 1 et 2 par simuler le système solaire et comprendre sa dynamique, et dans la section 3, on va essayer de simuler la trajectoire de la sonde spatiale New Horizons.
#
# Dans cette première partie, on se fixe comme objectif de simuler la dynamique du système solaire en 2 dimensions.
#
# **Remarques :**
#
# Tout les codes de cette partie vont être dans le dossier **In_2dim**.
#
# # 1. Simulation de système solaire:
#
# ## 1.1 Classe des objets.
#
# Pour simplifier la tâche de manipulation des planètes et des sondes, il vaut mieux assigner à chaque objet une classe qui le caractérise avec ses propres attributs:
#
# 1. Attributs
# 1. Masse
# 2. Position initiale $(x_0,y_0)$
# 3. Vitesse initiale $(vx_0, vy_0)$
# 4. Nom de l'objet
# 5. Liste des positions $(x, y)$
# 7. Liste des vitesses $(vx, vy)$
#
# On peut alors définir cette classe avec le code en bas.
#
# (La classe est créée dans le fichier objet.py)
# -
class objet:
    """Body subject to gravitational interaction (planet, probe, star).

    Attributes:
        nom: name of the object
        masse: mass in kg
        x0, y0: initial position in au (astronomical units)
        vx0, vy0: initial velocity in au/day
        x, y, vx, vy: position/velocity histories, allocated by the caller
            as numpy arrays before running an integrator
    """
    # Class-level defaults; each instance overrides them in __init__.
    nom = "objet"
    masse = None
    x0 = 0
    y0 = 0
    vx0 = 0
    vy0 = 0
    # Position and velocity histories (filled in by the integration loops)
    x = None
    y = None
    vx = None
    vy = None
    # Constructor
    def __init__(self, nom = "objet", masse = None, x0 = 0, y0 = 0, vx0 = 0, vy0 = 0):
        """Initialise the name, the mass and the initial state (position, velocity)."""
        self.nom = nom
        self.masse = masse
        self.x0 = x0
        self.y0 = y0
        self.vx0 = vx0
        self.vy0 = vy0
# **Variables Globales:**
#
# Dans toute cette partie, on va utiliser ces variables globales qui vont servir dans les calculs prochains.
# +
# Global constants (SI units) used throughout the integrators
au = 1.49597870e11 # astronomical unit, in metres
jour = 24*3600 # one day, in seconds
G = 6.67408e-11 # gravitational constant (m^3 kg^-1 s^-2)
# -
# ## 1.2 Equations de Newtons:
#
# Les équations qu'on va utiliser pour décrire le mouvement des planètes et les sondes spatiales sont les équations de Newton, donc, pour décrire le mouvement d'un objet dans le système solaire qui subit une force gravitationnelle de la part du soleil, il suffit d'intégrer les équations de Newton de secondes ordres en unités internationales.
#
#
# \begin{equation}
# \frac{d²x}{dt²} = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} x \\
# \frac{d²y}{dt²} = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} y
# \end{equation}
#
#
# Mais, pour des résultats plus signicatifs, il vaut mieux travailler avec des distance en $au$ (unité astronomique $:=$ distance soleil-terre) et pour le temps en $jour$, d'où les équations suivantes en unités pratiques:
#
# \begin{equation}
# \frac{d²x}{dt²} = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} x \ \frac{(day)²}{(au)³} \\
# \frac{d²y}{dt²} = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} y \ \frac{(day)²}{(au)³}
# \end{equation}
#
#
# **Implémentation :** (dans le fichier objet.py) -> NB: Toutes les fonctions seront stockés dans le fichier "objet.py"
#
# Pour implémenter les équations de Newton, il est préférable de définir des fonctions $fx$ et $fy$ qui prennent en argument la masse de l'objet qu'on gravite autour et les coordonnées de l'objet gravitant et qui donnent comme output l'acceleration gravitationnelle subit par l'objet suivant $\vec{x}$ et $\vec{y}$.
#
# +
# Gravitational acceleration felt by a body at (x, y) due to a mass M at the
# origin, converted from SI to practical units (au for distance, days for time).
def fx(M, x, y):
    """Return the x-component of the gravitational acceleration (au/day^2)
    exerted by a mass M (kg) at the origin on a body located at (x, y) in au."""
    r3 = (x**2+y**2)**(3/2)
    return -((G*M)/r3)*x*(jour**2/au**3)
def fy(M, x, y):
    """Return the y-component of the gravitational acceleration (au/day^2)
    exerted by a mass M (kg) at the origin on a body located at (x, y) in au."""
    r3 = (x**2+y**2)**(3/2)
    return -((G*M)/r3)*y*(jour**2/au**3)
# -
# ## 1.3 Simulation d'interaction entre le soleil et une autre planète.
#
# Puisque la masse de Soleil $ M_{soleil} >> M_{planète} $, on peut consider que le soleil reste fixe au cours de mouvement, donc on peut se servir des fonctions de la partie **1.2** pour intérgrer les équations de Newton, il nous faut juste une certaine condition initiale sur la position et la vitesse qu'on a pris d'ici: http://vo.imcce.fr/webservices/miriade.
#
# Dans un premier temps, on peut faire cette première simulation pour le soleil et la terre.
#
# Dans le code ci-dessous, on définit les conditions initiales de la terre, le pas de temps $dt$ et la période d'intégration $T$ et on rénitialise les attributs $terre.x/y$ et $terre.vx/vy$ qui vont contenir les positions et les vitesses de la terre durant toute la période d'intégration.
# +
# Plotting imports
import numpy as np
import matplotlib.pyplot as plt
# Objects: the Sun is kept fixed at the origin (M_sun >> M_earth)
soleil = objet("Soleil", 1.989*1e30, 0, 0, 0, 0) #(nom, masse, x, y, vx, vy)
# Earth's initial state taken from http://vo.imcce.fr/webservices/miriade
terre = objet ("Terre", 5.972*1e24, -0.7528373239252, 0.6375222355089, -0.0113914294224, -0.0131912591762)
dt = 1 # integration step, in days
T = int(365/dt)*20 # number of steps: one year times 20
# Allocate the position/velocity histories and set the initial state
terre.x = np.zeros(T) ; terre.x[0] = terre.x0
terre.y = np.zeros(T) ; terre.y[0] = terre.y0
terre.vx = np.zeros(T) ; terre.vx[0] = terre.vx0
terre.vy = np.zeros(T) ; terre.vy[0] = terre.vy0
# -
# ### 1.3.1 Comment estimer la précision des algorithmes d'intégration?
#
# **Conservation d'énergie totale de système:**
#
# Au cours de l'intégration des équations de mouvement, l'énergie de système doit rester conservée, donc on va utiliser comme critère de précision, la variation de l'énergie mécanique (massique) de l'objet étudié, si l'énergie mécanique reste la plus proche possible de la valeur initiale, alors la méthode d'intégration choisie est plus précise. On va calculer alors l'énergie mécanique (massique) à l'aide de la fonction $E$.
#
def E(M, x, y, vx, vy):
    """Specific mechanical energy (J/kg) of a body at (x, y) in au moving at
    (vx, vy) in au/day, in the field of a single mass M (kg) at the origin."""
    return 0.5*(vx**2+vy**2)*(au**2/jour**2)-(G*M)/(np.sqrt(x**2+y**2)*au)
# Vectorise so E applies element-wise to the whole position/velocity arrays
E = np.vectorize(E)
# N.B:
#
# * La fonction $E$ calcule l'énergie (massique) d'un objet sous effet d'un autre objet (seul) de masse M. On verra après une fonction qui permettra de calculer l'énergie d'un objet qui subit l'effet de gravitations de plusieurs autres objets.
#
#
# **Comparaison des trajectoires:**
#
# Aussi pour s'assurer des validités des calculs, on va comparer les trajectoires simulées aux données prises par les observations astrométriques.
# ### 1.3.2 Intégration par la méthode d'Euler:
#
# **Méthode:**
#
# A titre d'initiation, on va commencer par intégrer les équations de Newton à l'aide de méthode d'Euler qui consiste à faire les étapes suivantes:
#
# $$\vec{X}_{i+1} = \vec{X_{i}} + \frac{h}{2}.\vec{V}_{i} $$
#
# $$\vec{V}_{i+1} = \vec{V}_{i} + \frac{h}{2}.\vec{F}(\vec{X}_{i}) $$
#
# Avec $\vec{F}$ l'accelération gravitationnelle, pour notre cas particulier de Système Soleil-Terre on a:
#
# \begin{equation}
# F_x = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} x \ \frac{(day)²}{(au)³} \\
# F_y = -G \frac{M_{soleil}}{(x²+y²)^{3/2}} y \ \frac{(day)²}{(au)³}
# \end{equation}
#
# Et $h$ le pas d'intégration qui correspond à la variable $dt$.
# **Implémentation:** (dans le fichier Interaction_Soleil_Planete_Euler.py)
# +
#----------------------------------------------------------------------------------------------------------
# Newton's equations integrated with the explicit Euler method
#-------------------------
for i in range(T-1):
    # Velocities at step i+1 from the acceleration evaluated at step i
    terre.vx[i+1] = terre.vx[i] + dt*fx(soleil.masse, terre.x[i], terre.y[i])
    terre.vy[i+1] = terre.vy[i] + dt*fy(soleil.masse, terre.x[i], terre.y[i])
    # Positions at step i+1 from the OLD velocities (explicit scheme)
    terre.x[i+1] = terre.x[i] + dt*terre.vx[i]
    terre.y[i+1] = terre.y[i] + dt*terre.vy[i]
#----------------------------------------------------------------------------------------------------
# -
# **Plot de trajectoire:**
# +
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Simulated trajectory of the Earth around the (fixed) Sun
ax.plot(terre.x, terre.y)
plt.xlabel("x (Au)") # fixed: the first plot argument (terre.x) is the horizontal axis
plt.ylabel("y (Au)")
plt.gca().set_aspect('equal', adjustable='box') #equal ratios of x and y
plt.show()
# -
# **Estimation précision en regardant l'énergie mécanique:**
# +
Nrg = E(soleil.masse, terre.x, terre.y, terre.vx, terre.vy) # specific mechanical energy at every step
Nrg /= np.abs(Nrg[0]) # normalise by |E(0)| so the plot shows relative drift
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Energy vs. time: a flat curve means the integrator conserves energy
t = np.linspace(1,T,T)*dt
ax.plot(t, Nrg)
ax.set_xlabel("t (jour)")
ax.set_ylabel("E/$|E_0|$")
ax.get_yaxis().get_major_formatter().set_useOffset(False) #Disable scaling of values in plot wrt y-axis
# Report the mean and spread of the normalised energy (small spread = good conservation)
print("Résultats : ")
print("Energie moyenne = " + str(np.mean(Nrg)) + ", Ecart_Type = " + str(np.std(Nrg)))
plt.show()
# -
# On remarque si on augmente le pas d'intégration $dt$ la précision diminue, en plus, la trajectoire simulée n'est pas fermée, par conséquent, il faut utiliser une méthode d'intégration plus précise.
#
# ### 1.3.3 Intégration par la méthode de Runge-Kutta d'ordre 2
#
# La méthode d'Euler est une méthode dite de premier d'ordre où l'erreur d'intergration est de l'ordre $h$, donc pour encore avoir plus de précision, il faut utiliser une méthode plus précise d'où la méthode de Runge-Kutta d'ordre 2.
#
# **Méthode:**
#
# Soit $h$ le pas d'intégration, le méthode de Runge-Kutta d'ordre 2 consiste à faire les étapes suivantes:
#
# $$\vec{X}_{i+1} = \vec{X_{i}} + \frac{h}{2}.\vec{V}_{i+1/2} $$
#
# $$\vec{V}_{i+1} = \vec{V}_{i} + \frac{h}{2}.\vec{F}(\vec{X}_{i+1/2}) $$
#
# telles que :
#
# $$ \vec{X}_{i+1/2} = \vec{X}_{i} + \frac{h}{2}.\vec{V}_{i} $$
#
#
# $$ \vec{V}_{i+1/2} = \vec{V}_{i} + \frac{h}{2}.\vec{F}(\vec{X}_{i}) $$
#
# Avec $\vec{F}$ déjà définie dans la méthode d'Euler (1.3.2)
#
# La particularité de cette méthode consiste à définir des variables au milieu $\vec{X}_{i+1/2}$ et $\vec{V}_{i+1/2}$ qui servent comme intermédiaires dans le calcul.
#
# **Implémentation:** (dans le fichier Interaction_Soleil_Planete_Runge-Kutta2.py)
# +
#----------------------------------------------------------------------------------------------------------
# Newton's equations integrated with second-order Runge-Kutta (midpoint method)
#-------------------------
for i in range(T-1):
    # Half-step (midpoint) velocities and positions
    vx_demi = terre.vx[i] + (dt/2)*fx(soleil.masse, terre.x[i], terre.y[i])
    vy_demi = terre.vy[i] + (dt/2)*fy(soleil.masse, terre.x[i], terre.y[i])
    x_demi = terre.x[i] + (dt/2)*terre.vx[i]
    y_demi = terre.y[i] + (dt/2)*terre.vy[i]
    # Full step using the derivatives evaluated at the midpoint
    terre.vx[i+1] = terre.vx[i] + dt*fx(soleil.masse, x_demi, y_demi)
    terre.vy[i+1] = terre.vy[i] + dt*fy(soleil.masse, x_demi, y_demi)
    terre.x[i+1] = terre.x[i] + dt*vx_demi
    terre.y[i+1] = terre.y[i] + dt*vy_demi
#----------------------------------------------------------------------------------------------------
# -
# **Plot de trajectoire:**
# +
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Simulated trajectory of the Earth (Runge-Kutta 2 integration)
ax.plot(terre.x, terre.y)
plt.xlabel("x (Au)") # fixed: the first plot argument (terre.x) is the horizontal axis
plt.ylabel("y (Au)")
plt.gca().set_aspect('equal', adjustable='box') #equal ratios of x and y
plt.show()
# -
# **Estimation précision en regardant l'énergie mécanique:**
# +
Nrg = E(soleil.masse, terre.x, terre.y, terre.vx, terre.vy) # specific mechanical energy at every step
Nrg /= np.abs(Nrg[0]) # normalise by |E(0)| so the plot shows relative drift
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Energy vs. time: a flat curve means the integrator conserves energy
t = np.linspace(1,T,T)*dt
ax.plot(t, Nrg)
ax.set_xlabel("t (jour)")
ax.set_ylabel("E/$|E_0|$")
ax.get_yaxis().get_major_formatter().set_useOffset(False) #Disable scaling of values in plot wrt y-axis
# Report the mean and spread of the normalised energy (small spread = good conservation)
print("Résultats : ")
print("Energie moyenne = " + str(np.mean(Nrg)) + ", Ecart_Type = " + str(np.std(Nrg)))
plt.show()
# -
# On voit bien que le Runge-Kutta est plus précis que Euler compte tenu du fait que avec le même pas d'intégration $dt=1 \ jour$, on a une trajectoire plus précise et un écart-type de l'énergie plus petit dans le cas d'intégration par Runge-Kutta.
# ### 1.3.4 Intégration par Leapfrog:
#
# Puisque, les méthodes précédentes ne conservent pas l'énérgie mécanique, il faut envisager des méthodes d'intégrations qui conservent l'énergie, ces méthodes sont dites symplectiques.
#
# **Méthode:**
#
# Cette méthode d'intégration comme son nom l'indique, consiste à calculer les positions et les vitesses dans des endroits différents de la manière suivante:
#
# $$\vec{X}_{i+1} = \vec{X_{i}} + h.\vec{V}_{i+1/2} $$
#
# $$\vec{V}_{i+3/2} = \vec{V}_{i+1/2} + h.\vec{F}(\vec{X}_{i+1}) $$
#
# Ici, on aura besoin de $\vec{V}_{1/2}$ pour initier l'algorithme, on fait alors l'approximation suivante:
#
# $$ \vec{V}_{1/2} = \vec{V}_0 + \frac{h}{2}.\vec{F}(\vec{X}_{0}) $$
#
# Cette étape nous coûte un erreur de l'ordre de $h²$, ce qui est tolérable parce qu'il s'agit d'une méthode de second ordre, donc cette étape n'influe pas sur la précision globale de l'intégration.
#
# Pour plus d'informations sur ce schéma voir les liens ci-dessous:
#
# http://physics.ucsc.edu/~peter/242/leapfrog.pdf
#
# https://en.wikipedia.org/wiki/Leapfrog_integration
#
# **Implémentation:** (dans le fichier Interaction_Soleil_Planete_LeapFrog.py)
# +
#----------------------------------------------------------------------------------------------------------
# Newton's equations integrated with the leapfrog scheme
#-------------------------
# Half-step velocities v_{1/2} = v_0 + (dt/2) * a(x_0).
# BUG FIX: vy_demi[0] previously used terre.vx0 instead of terre.vy0.
vx_demi = np.zeros(T); vx_demi[0] = terre.vx0 + (dt/2)*fx(soleil.masse, terre.x0, terre.y0)
vy_demi = np.zeros(T); vy_demi[0] = terre.vy0 + (dt/2)*fy(soleil.masse, terre.x0, terre.y0)
for i in range(T-1):
    # Drift: positions advance a full step using the mid-step velocities
    terre.x[i+1] = terre.x[i] + dt*vx_demi[i]
    terre.y[i+1] = terre.y[i] + dt*vy_demi[i]
    # Kick: mid-step velocities advance using the new positions
    vx_demi[i+1] = vx_demi[i] + dt*fx(soleil.masse, terre.x[i+1], terre.y[i+1])
    vy_demi[i+1] = vy_demi[i] + dt*fy(soleil.masse, terre.x[i+1], terre.y[i+1])
# Store the mid-step velocities on the object (they are offset by dt/2 from the
# positions, which is why the Verlet cell re-initialises the velocities later)
terre.vx = vx_demi; terre.vy = vy_demi
# -
# **Plot de trajectoire:**
# +
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Simulated trajectory of the Earth (leapfrog integration)
ax.plot(terre.x, terre.y)
plt.xlabel("x (Au)") # fixed: the first plot argument (terre.x) is the horizontal axis
plt.ylabel("y (Au)")
plt.gca().set_aspect('equal', adjustable='box') #equal ratios of x and y
plt.show()
# -
# **Estimation précision en regardant l'énergie mécanique:**
# +
Nrg = E(soleil.masse, terre.x, terre.y, terre.vx, terre.vy) # specific mechanical energy at every step
Nrg /= np.abs(Nrg[0]) # normalise by |E(0)| so the plot shows relative drift
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Energy vs. time: a flat curve means the integrator conserves energy
t = np.linspace(1,T,T)*dt
ax.plot(t, Nrg)
ax.set_xlabel("t (jour)")
ax.set_ylabel("E/$|E_0|$")
ax.get_yaxis().get_major_formatter().set_useOffset(False) #Disable scaling of values in plot wrt y-axis
# Report the mean and spread of the normalised energy (small spread = good conservation)
print("Résultats : ")
print("Energie moyenne = " + str(np.mean(Nrg)) + ", Ecart_Type = " + str(np.std(Nrg)))
plt.show()
# -
# Vu les résultats ci-dessus, on voit bien que Leap-Frog est un schéma d'intégration symplectique qui conserve l'énergie, mais l'incovénient de cette méthode se manifeste dans le faite que la position et la vitesse ne sont pas calculées au même instant d'où la méthode suivante:
# ### 1.3.5 Intégration par Verlet:
#
# **Méthode:**
#
# Cette méthode d'intégration est similaire à la méthode de leapfrog, mais elle permet de calculer les positions et les vitesses aux mêmes endroits, ce qui permet par exemple de tracer un portrait de phase, on pourra implémenter cette méthode de la manière suivante:
#
# $$\vec{X}_{i+1} = \vec{X_{i}} + h.\vec{V}_{i+1/2} $$
#
# $$\vec{V}_{i+1} = \vec{V}_{i+1/2} + \frac{h}{2}.\vec{F}(\vec{X}_{i+1}) $$
#
# telle que :
#
# $$ \vec{V}_{i+1/2} = \vec{V}_{i} + \frac{h}{2}.\vec{F}(\vec{X}_{i}) $$
#
# Pour plus d'informations sur ce schéma voir ces liens:
#
# https://en.wikipedia.org/wiki/Verlet_integration
#
# http://www.fisica.uniud.it/~ercolessi/md/md/node21.html
# **Implémentation:** (dans le fichier Interaction_Soleil_Planete_Verlet.py)
# +
#----------------------------------------------------------------------------------------------------------
# Newton's equations integrated with the velocity-Verlet scheme
#-------------------------
# Re-initialise the velocities: the leapfrog cell above replaced them with
# half-step values
terre.vx[0] = terre.vx0; terre.vy[0] = terre.vy0
for i in range(T-1):
    # Half-step velocities (first kick)
    vx_demi = terre.vx[i] + (dt/2)*fx(soleil.masse, terre.x[i], terre.y[i])
    vy_demi = terre.vy[i] + (dt/2)*fy(soleil.masse, terre.x[i], terre.y[i])
    # Full-step positions (drift)
    terre.x[i+1] = terre.x[i] + dt*vx_demi
    terre.y[i+1] = terre.y[i] + dt*vy_demi
    # Second half-step velocities using the new positions (second kick)
    terre.vx[i+1] = vx_demi + (dt/2)*fx(soleil.masse, terre.x[i+1], terre.y[i+1])
    terre.vy[i+1] = vy_demi + (dt/2)*fy(soleil.masse, terre.x[i+1], terre.y[i+1])
#----------------------------------------------------------------------------------------------------
# -
# ** Plot de trajectoire: **
# +
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Plot of the simulated trajectory of the Earth
ax.plot(terre.x, terre.y)
# BUG FIX: the axis labels were swapped. terre.x is plotted on the x-axis and
# terre.y on the y-axis, so the labels must read "x (Au)" / "y (Au)"
# (as in the solar-system trajectory plot further down in this notebook).
plt.xlabel("x (Au)")
plt.ylabel("y (Au)")
plt.gca().set_aspect('equal', adjustable='box') #equal ratios of x and y
plt.show()
# -
# **Estimation précision en regardant l'énergie mécanique:**
# +
Nrg = E(soleil.masse, terre.x, terre.y, terre.vx, terre.vy) # mechanical energy along the orbit
Nrg /= np.abs(Nrg[0]) # normalize by |E(t=0)| to make the plot more meaningful
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# Plot the energy versus time
t = np.linspace(1,T,T)*dt
ax.plot(t, Nrg)
ax.set_xlabel("t (jour)")
ax.set_ylabel("E/$|E_0|$")
ax.get_yaxis().get_major_formatter().set_useOffset(False) #Disable scaling of values in plot wrt y-axis
# Print mean energy: for a symplectic scheme the spread should stay small
print("Résultats : ")
print("Energie moyenne = " + str(np.mean(Nrg)) + ", Ecart_Type = " + str(np.std(Nrg)))
plt.show()
# -
# On peut conclure que la méthode de Verlet est la plus précise, vu les valeurs de l'écart-type de l'énergie, donc d'ici jusqu'à la fin de ce rapport on utilisera le schéma de Verlet comme méthode d'intégration des équations de Newton.
# ## 1.4 Implémentation de système solaire:
#
# Maintenant, puisque on a compris comment évolue une seule planète autour de soleil, on va simuler la dynamique des planètes de système solaire autour de soleil.
#
# Dans cette partie, on doit faire appel à une approche similaire mais un peu différente, parce que pour chaque planète de système solaire on doit tenir compte de la force gravitationnelle des autres planètes, donc on doit intégrer des nouvelles équations qui tiennent en compte du couplage entre les planètes.
#
# ### 1.4.1 Equations de Newton:
#
# On va faire l'hypothèse que le soleil reste fixe à cause de sa grande masse, alors on aura:
#
# $$
# (\forall \ i \in \ [| 1,8 |] )\ ; \ \frac{d²\vec{r}_i}{dt²} = -G\sum_{j = 0 ; \ j \neq i}^{9} \frac{M_j}{||\vec{r_i}-\vec{r_j}||^{3}} (\vec{r_i}-\vec{r_j})$$
#
# Avec:
#
# * Objet 0: Soleil
# * Objet $i$ tel que $i \ \in \ [|1,9|]$: les planètes de système solaire de Mercure à Neptune et la planète naine Pluto.
#
#
# ### 1.4.2 Conditions Initiales et définition de système solaire:
#
# Une façon de stocker les positions initiales des objets consiste à les stocker dans un fichier texte qu'on peut nommer "initial_conditions_solarsystem.txt", pour des raisons techniques de Python, on va stocker les noms des objets dans un autre fichier "names_solarsystem.txt". (A cause des problèmes d'encodage on ne peut qu'importer un seul type de données avec la méthode de **numpy** : $np.genfromtxt$).
#
# On choisit comme date de début le "2017-02-28" à "00:00 GMT".
#
# Maintenant on a tout ce qu'il faut pour définir les objets de notre système solaire. Tout d'abord, on va créer les objets à partir de la classe **objet**.
bodies = np.array([objet() for i in range(10)]) # 10 objects in total: the Sun, the 8 planets and Pluto
# Après, il faut charger les données relatives aux paramètres des objets de système solaire afin d'initialiser leurs attributs.
# +
import os
# NOTE(review): hard-coded, machine-specific path -- change it to the
# directory where 'initial_conditions_solarsystem.txt' is saved before running.
os.chdir("/home/mh541/Desktop/Projet_Numerique/In_2dim")
# genfromtxt cannot mix text and numeric columns here, so the names are loaded separately
data = np.genfromtxt("initial_conditions_solarsystem.txt", usecols=(1,2,3,4,5), skip_header=1)
names = np.loadtxt("names_solarsystem.txt", dtype = str, skiprows=1, usecols=(1,))
# -
# Ici, il ne reste qu'à affecter les valeurs chargées aux attributs des objets. On définit aussi "Nbr_obj" la variable qui contient le nombre total des objets dans notre système.
# +
# Set the attributes of each object from the loaded data
Nbr_obj = len(bodies) # total number of objects
for i in range(Nbr_obj):
    # [2:-1] strips surrounding quoting characters left by the loader
    # (presumably bytes-literal markers like b'...' -- verify against the file)
    bodies[i].nom = names[i][2:-1]
    bodies[i].masse = data[i][0]
    bodies[i].x0 = data[i][1]
    bodies[i].y0 = data[i][2]
    bodies[i].vx0 = data[i][3]
    bodies[i].vy0 = data[i][4]
# -
# ### 1.4.3 Calcul d'accélération et d'énergie totale:
#
# Comme approche naîve, on va tenir compte des couplages entre les autres objets autre que le soleil.
# Pour simplifier la tâche de calcul d'accelération dû à la gravitation subit par un objet, ça serait mieux de définir la fonction **acceleration** qui permet de calculer cette accelération à un instant donné pour un objet donné.
#
# (Cette fonction est dans objet.py)
def acceleration(bodies, i, j):
    """
    Gravitational acceleration undergone by bodies[i] at time step j,
    summed over every other object in `bodies`.

    bodies: sequence of all objects
    i: index of the body which undergoes the gravitation of the others
    j: index of the time step
    Returns (ax, ay), the acceleration components along x and y.
    """
    ax = 0; ay = 0
    target = bodies[i]
    for k in range(len(bodies)):
        if k == i:
            # an object exerts no gravitational force on itself
            continue
        other = bodies[k]
        dx = target.x[j] - other.x[j]
        dy = target.y[j] - other.y[j]
        ax += fx(other.masse, dx, dy)
        ay += fy(other.masse, dx, dy)
    return ax, ay
# Cette fonction permet de retourner les accélérations suivant les axes x et y, en prenant comme paramètres la liste des objets $bodies$, et l'indice de l'objet concerné en plus de l'indice de pas voulu.
#
# Pour évaluer la conservation d'énergie pendant l'intergration des équation de mouvement, c'est préférable de créer la fonction **Energy** qui permet de calculer l'énergie mécanique de chaque objet.
#
# (Ces fonctions seront dans objet.py)
# +
#On calcule d'abord l'energie potentielle
def pot(M, x, y):
    """
    Gravitational potential (per unit mass) due to a body of mass M, for a
    separation whose components are x and y (distance sqrt(x**2 + y**2)).
    """
    distance = np.sqrt(x**2 + y**2)
    return -(G*M)/(distance*au)
def Energy(bodies, i):
    """
    Mechanical energy (per unit mass) of bodies[i] under the influence of all
    the other objects: kinetic term plus the summed pair potentials.
    """
    potential = 0
    for k, other in enumerate(bodies):
        if k != i:
            potential += pot(other.masse, bodies[i].x - other.x, bodies[i].y - other.y)
    kinetic = 0.5*(au**2/jour**2)*(bodies[i].vx**2 + bodies[i].vy**2)
    return kinetic + potential
# -
# ### 1.4.4 Intégrations des équations de mouvement:
#
# Les objets sont maintenant bien définis, il suffit d'intégrer les équations de mouvement définies dans **1.4.1** pour avoir les trajectoires.
#
# Puisque dans la partie précédente **1.3**, on a vu que le schéma d'intergration de Verlet est le plus précis et permet aussi de conserver l'énergie mécanique, alors on va s'en servir pour déduire les trajectoires des planètes.
#
# **Implémentation:**
# +
# Redefine the integration steps
dt = 2 # time step (days)
T = int(365/dt)*165 # number of steps <-> integration period (165 years)
# Half-step velocities, one slot per object
vx_demi = np.zeros(Nbr_obj)
vy_demi = np.zeros(Nbr_obj)
# Initialize the x, y, vx, vy arrays of every object bodies[i]
for i in range(Nbr_obj):
    bodies[i].x = np.zeros(T); bodies[i].x[0] = bodies[i].x0
    bodies[i].y = np.zeros(T); bodies[i].y[0] = bodies[i].y0
    bodies[i].vx = np.zeros(T); bodies[i].vx[0] = bodies[i].vx0
    bodies[i].vy = np.zeros(T); bodies[i].vy[0] = bodies[i].vy0
# Verlet integration. The Sun (index 0) is kept fixed, hence range(1, Nbr_obj).
for j in range(T-1): # at each time step j
    # Phase 1: half-step velocities and positions at step j+1
    for i in range(1,Nbr_obj): # update each object at step j
        fx_j, fy_j = acceleration(bodies, i, j) # acceleration at step j for object i
        # half-step velocities
        vx_demi[i] = bodies[i].vx[j] + (dt/2)*fx_j
        vy_demi[i] = bodies[i].vy[j] + (dt/2)*fy_j
        # positions at index j+1
        bodies[i].x[j+1] = bodies[i].x[j] + dt*vx_demi[i]
        bodies[i].y[j+1] = bodies[i].y[j] + dt*vy_demi[i]
    # Phase 2: velocities at step j+1. This second loop must run AFTER all
    # positions at j+1 are known, because acceleration(..., j+1) reads the
    # updated positions of every body.
    for i in range(1,Nbr_obj):
        # acceleration at step j+1 for object i
        fx_jplus1, fy_jplus1 = acceleration(bodies, i, j+1)
        # velocities at index j+1
        bodies[i].vx[j+1] = vx_demi[i] + (dt/2)*fx_jplus1
        bodies[i].vy[j+1] = vy_demi[i] + (dt/2)*fy_jplus1
# -
# **Plot des trajectoires:**
# +
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
# One curve per object (the Sun, index 0, is excluded since it is kept fixed)
for i in range(1,Nbr_obj):
    ax.plot(bodies[i].x, bodies[i].y, label= bodies[i].nom)
plt.xlabel("x (Au)")
plt.ylabel("y (Au)")
plt.gca().set_aspect('equal', adjustable='box') #equal ratios of x and y
plt.legend()
plt.show()
# -
# **Estimation précision en regardant l'énergie mécanique:**
# +
# Figure setup
fig=plt.figure(figsize=(9, 6), dpi= 100, facecolor='w', edgecolor='k') #To modify the size of the figure
ax = fig.add_subplot(111) # axis definition
Nrg = Energy(bodies, 1) # energy of one object -- change the index to inspect another body
Nrg /= np.abs(Nrg[0]) # normalize by |E(t=0)|
# Plot the energy
t = np.linspace(1,T,T)*dt
ax.plot(t, Nrg)
# ax.plot(t[:365], Nrg[:365])
ax.set_xlabel("t (jour)")
ax.set_ylabel("E/$|E_0|$")
ax.get_yaxis().get_major_formatter().set_useOffset(False) #Disable scaling of values in plot wrt y-axis
# Print the conservation diagnostics
print("Résultats : ")
print("Energie moyenne = " + str(np.mean(Nrg)) + ", Ecart_Type = " + str(np.std(Nrg)))
plt.show()
# -
# Pour une période d'intégration de 165 ans avec un pas de 2 jours, on observe que les planètes décrivent des trajectoires fermées avec une très bonne précision.
#
# On remarque aussi que l'écart-type de l'énergie diminue si on raffine le pas d'intégration $dt$, ce qui montre que l'implémentation du schéma d'intégration marche bien.
#
# Dans la figure ci-dessus, on voit des oscillations rapides en energie de Mercure par rapport à la période totale d'intégration, pour bien voir les oscillations d'énergie mécanique (massique) de chaque planète, remplacez "ax.plot(t, Nrg)" par "ax.plot(t[:365], Nrg[:365])" dans le code au-dessus.
#
# Pour conclure cette partie, on peut dire que notre schéma d'intégration de Verlet permet de simuler les trajectoires des planètes en **2D** avec une très bonne précision.
| Solar System in 2D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Vectorization" data-toc-modified-id="Vectorization-1"><span class="toc-item-num">1 </span>Vectorization</a></div><div class="lev1 toc-item"><a href="#Vectorizing-Logistic-Regression" data-toc-modified-id="Vectorizing-Logistic-Regression-2"><span class="toc-item-num">2 </span>Vectorizing Logistic Regression</a></div><div class="lev1 toc-item"><a href="#Vectorizing-Logistic-Regression's-Gradient-Output" data-toc-modified-id="Vectorizing-Logistic-Regression's-Gradient-Output-3"><span class="toc-item-num">3 </span>Vectorizing Logistic Regression's Gradient Output</a></div><div class="lev1 toc-item"><a href="#A-note-on-python/numpy-vectors" data-toc-modified-id="A-note-on-python/numpy-vectors-4"><span class="toc-item-num">4 </span>A note on python/numpy vectors</a></div>
# -
# # Vectorization
# 
import numpy as np
a = np.random.rand(1000000) # two random vectors with one million entries each
b = np.random.rand(1000000)
# %time c = np.dot(a, b)
def loop():
    """Dot product of the global arrays a and b via an explicit Python loop.

    Used only to time pure-Python iteration against np.dot; the result is
    discarded.
    """
    acc = 0
    idx = 0
    while idx < 1000000:
        acc += a[idx] * b[idx]
        idx += 1
# %time loop()
# 
# 
# 
# # Vectorizing Logistic Regression
#
# 
# # Vectorizing Logistic Regression's Gradient Output
# 
# # A note on python/numpy vectors
a = np.random.randn(5) # rank-1 array: shape (5,), neither a row nor a column vector
a
a.shape
a.T # transposing a rank-1 array is a no-op
np.dot(a, a.T) # scalar (inner product)
a = np.random.randn(5, 1) # proper column vector: shape (5, 1)
a
a.shape
a.T # row vector, shape (1, 5)
np.dot(a, a.T) # (5, 5) outer product
# 
# %load_ext version_information
# %version_information numpy
| 01-Neural-Networks-and-Deep-Learning/week2/02-vectorization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Solution:
    def hasAlternatingBits(self, n: int) -> bool:
        """Return True if the binary representation of n has alternating
        bits, i.e. no two adjacent bits are equal (e.g. 5 = 0b101).

        n is at most 2**31, i.e. at most 32 bits, so the scan is O(32).
        Simplified from the original list-building version: the redundant
        `(n >> 0)` is gone and the scan returns False as soon as two equal
        adjacent bits are found.
        """
        if n == 0:
            # keep the original behaviour: 0 is not considered alternating
            return False
        prev = n & 1
        n >>= 1
        while n:
            bit = n & 1
            if bit == prev:  # two equal adjacent bits -> not alternating
                return False
            prev = bit
            n >>= 1
        return True
solution = Solution()
solution.hasAlternatingBits(11) # 11 = 0b1011 -> False (two adjacent 1-bits)
len(bin(pow(2, 31))[2:]) # 2**31 has 32 binary digits, the upper bound for the scan
| Bit Manipulation/1222/693. Binary Number with Alternating Bits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Schauen wir uns den Flughafen Basel an
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Die Dokumentatiovon BeautifulSoup ist wirklich sehr beautiful. Es lohnt sich [hier einen Blick](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) darauf zu werfen. Beginnen wir damit, die Arrivals Site des Flughafens Basel/Mulhouse zu analysieren.
# ### URL einlesen
url = "https://www.euroairport.com/en/flights/daily-arrivals.html"
response = requests.get(url) # fetch the arrivals page
arrivals_soup = BeautifulSoup(response.text, 'html.parser') # parse the HTML
arrivals_soup
# ### Developer Tools im Browser
# Suchen wir die Daten, der uns wirklich interessiert. Beginnen bei den **Developers Tools**.
# ### Find und Findall
arrivals_soup.find('tbody')
arrivals_soup.find('div', {'class': 'cblock modules-flights-flightlist modules-flights'})
# Definieren wir eine Variable damit.
table = arrivals_soup.find('div', {'class': 'cblock modules-flights-flightlist modules-flights'}) # container of the flight table
type(table)
table.text
# Holen wir alle "row-1" und "row-0" heraus.
row0 = table.find_all('tr', {'class': 'row-0'}) # the table rows are split between
row1 = table.find_all('tr', {'class': 'row-1'}) # the two CSS classes 'row-0' and 'row-1'
allrows= table.find_all('tr') # every <tr>, presumably including a header row (sliced off below)
# # Arbeit mit den Listen & find next sibling
type(row0)
len(row1)
len(row0)
len(allrows)
allrows[0]
allrows = allrows[1:] # drop the first row (presumably a header -- verify in the HTML)
len(allrows)
allrows[0]
allrows[0].find('td', {'class':'first'}).text # first cell: scheduled arrival time
allrows[0]
allrows[0].find('td').find_next_sibling('td').text # second cell: origin
# Walk the cells of a row via find_next_sibling: third cell (airline)
row0[0].find('td').find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .text.replace('\n', '') \
                  .replace('\t','')
# fourth cell: flight number
row0[0].find('td').find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .find_next_sibling('td').text \
                  .replace('\t','').replace('\n', '')
# fifth cell: actual arrival time
row0[0].find('td').find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .find_next_sibling('td').text \
                  .replace('\t','').replace('\n', '')
row0[0].find('td', {'class': 'last'}).text # last cell: passenger/cargo type
# # Packen wir das alles in einen Loop?
# +
fluege = [] # one dict per flight row
# NOTE(review): allrows was already sliced with [1:] above; slicing again here
# skips the first data row as well -- confirm this is intended.
for elem in allrows[1:]:
    ga_zeit = elem.find('td', {'class':'first'}).text # scheduled arrival
    herkunft = elem.find('td', {'class': 'first'}).find_next_sibling('td').text # origin
    # airline (3rd cell); NOTE(review): extracted but never stored in mini_dict
    airline = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .text.replace('\n', '').replace('\t','')
    # flight number (4th cell)
    nummer = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td').text \
                 .replace('\t','').replace('\n', '')
    # actual arrival time (5th cell)
    a_zeit = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td').text \
                 .replace('\t','').replace('\n', '')
    typ = elem.find('td', {'class': 'last'}).text # passenger/cargo
    mini_dict = {'Geplante Ankunft': ga_zeit,
                 'Ankunft': a_zeit,
                 'Herkunft': herkunft,
                 'Flugnummer': nummer,
                 'Passagier/Cargo': typ}
    fluege.append(mini_dict)
# -
# # Gehen wir die zweite Liste durch?
# +
fluege1 = [] # flights collected from the 'row-1' rows only
# NOTE(review): this dict uses the key 'Ankunft aus' where the previous loop
# used 'Herkunft' -- confirm which column name is wanted before concatenating.
for elem in row1:
    ga_zeit = elem.find('td', {'class': 'first'}).text # scheduled arrival
    herkunft = elem.find('td', {'class': 'first'}).find_next_sibling('td').text # origin
    # airline (3rd cell); extracted but not stored in the dict below
    airline = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                  .find_next_sibling('td') \
                  .text.replace('\n', '').replace('\t','')
    # flight number (4th cell)
    nummer = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td').text \
                 .replace('\t','').replace('\n', '')
    # actual arrival time (5th cell)
    a_zeit = elem.find('td', {'class': 'first'}).find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td') \
                 .find_next_sibling('td').text \
                 .replace('\t','').replace('\n', '')
    typ = elem.find('td', {'class': 'last'}).text # passenger/cargo
    mini_dict = {'Geplante Ankunft': ga_zeit,
                 'Ankunft': a_zeit,
                 'Ankunft aus': herkunft,
                 'Flugnummer': nummer,
                 'Passagier/Cargo': typ}
    fluege1.append(mini_dict) # append to the second list
# -
# # Verbinden wir beide Listen
# NOTE(review): despite the heading above, the two lists are not actually
# combined here -- f is just fluege; fluege1 (the row-1 flights) is never used.
f = fluege
pd.DataFrame(f)
df = pd.DataFrame(f)
df.to_csv('fluege_BS.csv') # persist the scraped arrivals
| 05 beautifulsoup/01 Beautifulsoup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The famous *diffusion equation*, also known as the *heat equation*,
# reads
# $$
# \frac{\partial u}{\partial t} =
# \dfc \frac{\partial^2 u}{\partial x^2},
# $$
# where $u(x,t)$ is the unknown function to be solved for, $x$ is a coordinate
# in space, and $t$ is time. The coefficient $\dfc$ is the *diffusion
# coefficient* and determines how fast $u$ changes in time. A quick
# short form for the diffusion equation is $u_t = \dfc u_{xx}$.
#
# Compared to the wave equation, $u_{tt}=c^2u_{xx}$, which looks very similar,
# the diffusion equation features solutions that are very different from
# those of the wave equation. Also, the diffusion equation
# makes quite different demands to the numerical
# methods.
#
#
# Typical diffusion problems may experience rapid change in the very
# beginning, but then the evolution of $u$ becomes slower and slower.
# The solution is usually very smooth, and after some time, one cannot
# recognize the initial shape of $u$. This is in sharp contrast to
# solutions of the wave equation where the initial shape is preserved in
# homogeneous media - the solution is then basically a moving initial
# condition. The standard wave equation $u_{tt}=c^2u_{xx}$ has solutions
# that propagate with speed $c$ forever, without changing shape, while
# the diffusion equation converges to a *stationary solution* $\bar
# u(x)$ as $t\rightarrow\infty$. In this limit, $u_t=0$, and $\bar u$ is
# governed by $\bar u''(x)=0$. This stationary limit of the diffusion
# equation is called the *Laplace* equation and arises in a very wide
# range of applications throughout the sciences.
#
# It is possible to solve for $u(x,t)$ using an explicit scheme, as we
# do in the section [An explicit method for the 1D diffusion equation](#diffu:pde1:FEsec), but the time step restrictions
# soon become much less favorable than for an explicit scheme applied to
# the wave equation. And of more importance, since the solution $u$ of
# the diffusion equation is very smooth and changes slowly, small time
# steps are not convenient and not required by accuracy as the diffusion
# process converges to a stationary state. Therefore, implicit schemes
# (as described in the section [Implicit methods for the 1D diffusion equation](#diffu:pde1:implicit)) are popular, but
# these require solutions of systems of algebraic equations. We shall
# use ready-made software for this purpose, but also program some simple
# iterative methods.
# The exposition is, as usual in this book, very basic and focuses on
# the basic ideas and how to implement. More comprehensive mathematical
# treatments and classical analysis
# of the methods are found in lots of textbooks. A favorite
# of ours in this respect is the one by LeVeque [[LeVeque_2007]](#LeVeque_2007).
# The books by Strikwerda [[Strikwerda_2007]](#Strikwerda_2007) and by
# Lapidus and Pinder [[Lapidus_Pinder_1982]](#Lapidus_Pinder_1982) are also highly recommended
# as additional material on the topic.
#
#
# # An explicit method for the 1D diffusion equation
# <div id="diffu:pde1:FEsec"></div>
#
# Explicit finite difference methods for the wave equation $u_{tt}=c^2u_{xx}$
# can be used, with small modifications, for solving $u_t = \dfc u_{xx}$
# as well.
# % if BOOK == "book":
# The exposition below assumes that the reader is familiar with the
# basic ideas of discretization and implementation of wave
# equations from the chapter [ch:wave](#ch:wave). Readers not familiar with the
# Forward Euler, Backward Euler, and Crank-Nicolson (or centered or
# midpoint) discretization methods in time should consult, e.g., Section 1.1
# in [[Langtangen_decay]](#Langtangen_decay).
# % endif
#
# ## The initial-boundary value problem for 1D diffusion
#
# To obtain a unique solution of the diffusion equation, or equivalently,
# to apply numerical methods, we need initial and boundary conditions.
# The diffusion equation goes with one initial condition $u(x,0)=I(x)$, where
# $I$ is a prescribed function. One boundary condition is required at
# each point on the boundary, which in 1D means that $u$ must be known,
# $u_x$ must be known, or some combination of them.
#
#
# We shall start with the simplest boundary condition: $u=0$. The
# complete initial-boundary value diffusion problem in one space
# dimension can then be specified as
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1"></div>
#
# $$
# \begin{equation}
# \frac{\partial u}{\partial t} =
# \dfc \frac{\partial^2 u}{\partial x^2} + f, \quad x\in (0,L),\ t\in (0,T]
# \label{diffu:pde1} \tag{1}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:ic:u"></div>
#
# $$
# \begin{equation}
# u(x,0) = I(x), \quad x\in [0,L]
# \label{diffu:pde1:ic:u} \tag{2}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:bc:0"></div>
#
# $$
# \begin{equation}
# u(0,t) = 0, \quad t>0,
# \label{diffu:pde1:bc:0} \tag{3}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:bc:L"></div>
#
# $$
# \begin{equation}
# u(L,t) = 0, \quad t>0\thinspace .
# \label{diffu:pde1:bc:L} \tag{4}
# \end{equation}
# $$
# With only a first-order derivative in time,
# only one *initial condition* is needed, while the second-order
# derivative in space leads to a demand for two *boundary conditions*.
# We have added a source term $f=f(x,t)$, which is
# convenient when testing implementations.
#
#
# Diffusion equations like ([1](#diffu:pde1)) have a wide range of
# applications throughout physical, biological, and financial sciences.
# One of the most common applications is propagation of heat, where
# $u(x,t)$ represents the temperature of some substance at point $x$ and
# time $t$. Other applications are listed in the section [diffu:app](#diffu:app).
#
#
# ## Forward Euler scheme
# <div id="diffu:pde1:FE"></div>
#
# The first step in the discretization procedure is to replace the
# domain $[0,L]\times [0,T]$ by a set of mesh points. Here we apply
# equally spaced mesh points
# $$
# x_i=i\Delta x,\quad i=0,\ldots,N_x,
# $$
# and
# $$
# t_n=n\Delta t,\quad n=0,\ldots,N_t \thinspace .
# $$
# Moreover, $u^n_i$ denotes the mesh function that
# approximates $u(x_i,t_n)$ for $i=0,\ldots,N_x$ and $n=0,\ldots,N_t$.
# Requiring the PDE ([1](#diffu:pde1)) to be fulfilled at a mesh point $(x_i,t_n)$
# leads to the equation
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step2"></div>
#
# $$
# \begin{equation}
# \frac{\partial}{\partial t} u(x_i, t_n) =
# \dfc\frac{\partial^2}{\partial x^2} u(x_i, t_n) + f(x_i,t_n),
# \label{diffu:pde1:step2} \tag{5}
# \end{equation}
# $$
# The next step is to replace the derivatives by finite difference approximations.
# The computationally simplest method arises from
# using a forward difference in time and a central difference in
# space:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step3a"></div>
#
# $$
# \begin{equation}
# [D_t^+ u = \dfc D_xD_x u + f]^n_i \thinspace .
# \label{diffu:pde1:step3a} \tag{6}
# \end{equation}
# $$
# Written out,
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step3b"></div>
#
# $$
# \begin{equation}
# \frac{u^{n+1}_i-u^n_i}{\Delta t} = \dfc \frac{u^{n}_{i+1} - 2u^n_i + u^n_{i-1}}{\Delta x^2} + f_i^n\thinspace .
# \label{diffu:pde1:step3b} \tag{7}
# \end{equation}
# $$
# We have turned the PDE into algebraic equations, also often called
# discrete equations. The key property of the equations is that they
# are algebraic, which makes them easy to solve.
# As usual, we anticipate that $u^n_i$ is already computed such that
# $u^{n+1}_i$ is the only unknown in ([7](#diffu:pde1:step3b)).
# Solving with respect to this unknown is easy:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step4"></div>
#
# $$
# \begin{equation}
# u^{n+1}_i = u^n_i + F\left(
# u^{n}_{i+1} - 2u^n_i + u^n_{i-1}\right) + \Delta t f_i^n,
# \label{diffu:pde1:step4} \tag{8}
# \end{equation}
# $$
# where we have introduced the *mesh Fourier number*:
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# F = \dfc\frac{\Delta t}{\Delta x^2}\thinspace .
# \label{_auto1} \tag{9}
# \end{equation}
# $$
# **$F$ is the key parameter in the discrete diffusion equation.**
#
# Note that $F$ is a *dimensionless* number that lumps the key physical
# parameter in the problem, $\dfc$, and the discretization parameters
# $\Delta x$ and $\Delta t$ into a single parameter. Properties
# of the numerical method are critically dependent upon the value of
# $F$ (see the section [diffu:pde1:analysis](#diffu:pde1:analysis) for details).
#
#
#
# The computational algorithm then becomes
#
# 1. compute $u^0_i=I(x_i)$ for $i=0,\ldots,N_x$
#
# 2. for $n=0,1,\ldots,N_t$:
#
# a. apply ([8](#diffu:pde1:step4)) for all the internal
# spatial points $i=1,\ldots,N_x-1$
#
# b. set the boundary values
# $u^{n+1}_i=0$ for $i=0$ and $i=N_x$
#
#
# The algorithm is compactly and fully specified in Python:
# +
import numpy as np
I = lambda x: 1 # initial condition u(x,0) = 1
Nx = 100000 # number of spatial cells; NOTE(review): with pure-Python loops this cell is very slow
a = 2.0 # diffusion coefficient
L = 2.0 # length of the spatial domain
dx = L/Nx
dt = dx**2/(2*a) # chosen so that F = a*dt/dx**2 = 1/2, the stability limit
T = 100*dt
Nt = int(round(T/float(dt)))
x = np.linspace(0, L, Nx+1) # mesh points in space
dx = x[1] - x[0]
t = np.linspace(0, T, Nt+1) # mesh points in time
dt = t[1] - t[0]
F = a*dt/dx**2 # mesh Fourier number; must satisfy F <= 1/2 (see analysis section)
u = np.zeros(Nx+1) # unknown u at new time level
u_n = np.zeros(Nx+1) # u at the previous time level
# Set initial condition u(x,0) = I(x)
for i in range(0, Nx+1):
    u_n[i] = I(x[i])
# NOTE(review): the source term f(x, t) is not defined in this cell; it must
# be defined before this loop can run.
for n in range(0, Nt):
    # Compute u at inner mesh points
    for i in range(1, Nx):
        u[i] = u_n[i] + F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) + \
               dt*f(x[i], t[n])
    # Insert boundary conditions
    u[0] = 0; u[Nx] = 0
    # Update u_n before next step
    u_n[:]= u
# -
# Note that we use `a` for $\dfc$ in the code, motivated by easy visual
# mapping between the variable name and the mathematical symbol in formulas.
#
# We need to state already now that the shown algorithm does not
# produce meaningful results unless $F\leq 1/2$. Why is explained in
# the section [diffu:pde1:analysis](#diffu:pde1:analysis).
#
# ## Implementation
# <div id="diffu:pde1:FE:code"></div>
#
# The file [`diffu1D_u0.py`](${src_diffu}/diffu1D_u0.py)
# contains a complete function `solver_FE_simple`
# for solving the 1D diffusion equation with $u=0$ on the boundary
# as specified in the algorithm above:
# +
import numpy as np
def solver_FE_simple(I, a, f, L, dt, F, T):
    """
    Solve u_t = a*u_xx + f on (0, L) x (0, T] with u = 0 on the boundary,
    using the Forward Euler method and explicit Python loops.
    For this method F <= 0.5 is required for stability.

    Parameters
    ----------
    I : callable
        Initial condition, u(x, 0) = I(x).
    a : float
        Diffusion coefficient.
    f : callable
        Source term f(x, t).
    L : float
        Length of the spatial domain.
    dt : float
        Time step.
    F : float
        Mesh Fourier number a*dt/dx**2; dx is derived from it.
    T : float
        Final time.

    Returns
    -------
    u, x, t, cpu : solution at t[-1], space mesh, time mesh, elapsed time.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for measuring elapsed time.
    import time; t0 = time.perf_counter()
    Nt = int(round(T/float(dt)))
    t = np.linspace(0, Nt*dt, Nt+1)   # Mesh points in time
    dx = np.sqrt(a*dt/F)              # dx follows from the prescribed F
    Nx = int(round(L/dx))
    x = np.linspace(0, L, Nx+1)       # Mesh points in space
    # Make sure dx and dt are compatible with x and t
    dx = x[1] - x[0]
    dt = t[1] - t[0]
    u   = np.zeros(Nx+1)              # unknown u at the new time level
    u_n = np.zeros(Nx+1)              # u at the previous time level
    # Set initial condition u(x,0) = I(x)
    for i in range(0, Nx+1):
        u_n[i] = I(x[i])
    for n in range(0, Nt):
        # Compute u at inner mesh points
        for i in range(1, Nx):
            u[i] = u_n[i] + F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) + \
                   dt*f(x[i], t[n])
        # Insert boundary conditions
        u[0] = 0; u[Nx] = 0
        # Switch variables before next step (swap references, no copy)
        u_n, u = u, u_n
    t1 = time.perf_counter()
    return u_n, x, t, t1-t0  # u_n holds the latest u
# -
# A faster alternative is available in the function `solver_FE`, which
# adds the possibility of solving the finite difference scheme by vectorization.
# The vectorized version replaces the explicit loop
# Illustration only: these fragments reference u, u_n, F, f, x, t, n, Nx from
# the surrounding text; the cell is not meant to run on its own.
for i in range(1, Nx):
    u[i] = u_n[i] + F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) \
           + dt*f(x[i], t[n])
# by arithmetics on displaced slices of the `u` array:
u[1:Nx] = u_n[1:Nx] + F*(u_n[0:Nx-1] - 2*u_n[1:Nx] + u_n[2:Nx+1]) \
          + dt*f(x[1:Nx], t[n])
# or, equivalently, with negative slice indices:
u[1:-1] = u_n[1:-1] + F*(u_n[0:-2] - 2*u_n[1:-1] + u_n[2:]) \
          + dt*f(x[1:-1], t[n])
# For example,
# the vectorized version runs 70 times faster than the scalar version
# in a case with 100 time steps and a spatial mesh of $10^5$ cells.
#
# The `solver_FE` function also features a callback function such that the
# user can process the solution at each time level. The callback
# function looks like `user_action(u, x, t, n)`, where `u` is the array
# containing the solution at time level `n`, `x` holds all the
# spatial mesh points, while `t` holds all the temporal mesh points.
# The `solver_FE` function is very similar to `solver_FE_simple` above:
def solver_FE(I, a, f, L, dt, F, T,
              user_action=None, version='scalar'):
    """
    Vectorized implementation of solver_FE_simple.

    Solve u_t = a*u_xx + f on (0, L) x (0, T] with u = 0 on the boundary.

    Parameters
    ----------
    I, a, f, L, dt, F, T : as in solver_FE_simple.
    user_action : callable or None
        Optional callback user_action(u, x, t, n), invoked after the initial
        condition is set and after every completed time step.
    version : str
        'scalar' (explicit loops) or 'vectorized' (slice arithmetic).

    Returns
    -------
    u, x, t, cpu
        Solution at the final time, space mesh, time mesh and elapsed time.
        CONSISTENCY FIX: previously only the CPU time was returned, which
        contradicted the 4-value unpacking in test_solver_FE below and the
        interface of solver_FE_simple.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
    import time; t0 = time.perf_counter()
    Nt = int(round(T/float(dt)))
    t = np.linspace(0, Nt*dt, Nt+1)   # Mesh points in time
    dx = np.sqrt(a*dt/F)
    Nx = int(round(L/dx))
    x = np.linspace(0, L, Nx+1)       # Mesh points in space
    # Make sure dx and dt are compatible with x and t
    dx = x[1] - x[0]
    dt = t[1] - t[0]
    u   = np.zeros(Nx+1)              # solution array
    u_n = np.zeros(Nx+1)              # solution at t-dt
    # Set initial condition
    for i in range(0, Nx+1):
        u_n[i] = I(x[i])
    if user_action is not None:
        user_action(u_n, x, t, 0)
    for n in range(0, Nt):
        # Update all inner points
        if version == 'scalar':
            for i in range(1, Nx):
                u[i] = u_n[i] +\
                       F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) +\
                       dt*f(x[i], t[n])
        elif version == 'vectorized':
            u[1:Nx] = u_n[1:Nx] +  \
                      F*(u_n[0:Nx-1] - 2*u_n[1:Nx] + u_n[2:Nx+1]) +\
                      dt*f(x[1:Nx], t[n])
        else:
            raise ValueError('version=%s' % version)
        # Insert boundary conditions
        u[0] = 0; u[Nx] = 0
        if user_action is not None:
            user_action(u, x, t, n+1)
        # Switch variables before next step
        u_n, u = u, u_n
    t1 = time.perf_counter()
    return u_n, x, t, t1-t0  # u_n holds the latest u
# ## Verification
# <div id="diffu:pde1:FE:verify"></div>
#
# ### Exact solution of discrete equations
#
# <div id="diffu:pde1:FE:verify:exact"></div>
#
# Before thinking about running the functions in the previous section,
# we need to construct a suitable test example for verification. It
# appears that a manufactured solution that is linear in time and at
# most quadratic in space fulfills the Forward Euler scheme
# exactly. With the restriction that $u=0$ for $x=0,L$, we can try the
# solution
# $$
# u(x,t) = 5tx(L-x)\thinspace .
# $$
# Inserted in the PDE, it requires a source term
# $$
# f(x,t) = 10\dfc t + 5x(L-x)\thinspace .
# $$
# % if BOOK == 'book':
# With the formulas from [sec:form:fdtn](#sec:form:fdtn) we can easily check
# % else:
# Let us check
# % endif
# that the manufactured `u` fulfills the scheme:
# $$
# \begin{align*}
# \lbrack D_t^+ u = \dfc D_x D_x u + f\rbrack^n_i &=
# \lbrack 5x(L-x)D_t^+ t = 5 t\dfc D_x D_x (xL-x^2) +\\
# &\quad\quad 10\dfc t + 5x(L-x)\rbrack^n_i\\
# &=
# \lbrack 5x(L-x) = 5 t\dfc (-2) + 10\dfc t + 5x(L-x) \rbrack^n_i,
# \end{align*}
# $$
# which is a 0=0 expression.
# The computation of the source term, given any $u$,
# is easily automated with `sympy`:
# +
# Symbolic variables for the manufactured-solution machinery:
# x (space), t (time), a (diffusion coefficient), L (domain length).
import sympy as sym
x, t, a, L = sym.symbols('x t a L')
# Manufactured solution u(x,t) = 5 t x (L - x); vanishes at x=0 and x=L.
u = x*(L-x)*5*t
def pde(u):
    """Return the residual u_t - a*u_xx of the diffusion PDE for u."""
    u_t = sym.diff(u, t)
    u_xx = sym.diff(u, x, 2)
    return u_t - a*u_xx
# The source term f that makes the chosen u fulfill the PDE exactly.
f = sym.simplify(pde(u))
f
# -
# Now we can choose any expression for `u` and automatically
# get the suitable source term `f`. However, the manufactured solution
# `u` will in general
# not be exactly reproduced by the scheme: only constant and linear
# functions are differentiated correctly by a forward difference, while only
# constant, linear, and quadratic functions are differentiated exactly by
# a $[D_xD_x u]^n_i$ difference.
#
# The numerical code will need to access the `u` and `f` above
# as Python functions. The exact solution is wanted as a Python
# function `u_exact(x, t)`, while the source term is wanted as
# `f(x, t)`. The parameters `a` and `L` in `u` and `f` above
# are symbols and must be replaced by `float` objects in a Python
# function. This can be done by redefining `a` and `L` as
# `float` objects and performing substitutions of symbols by
# numbers in `u` and `f`. The appropriate code looks like this:
# Numerical values for the physical parameters (the symbols `a` and
# `L` above are now shadowed by floats).
a = 0.5
L = 1.5
# Turn the symbolic expressions into vectorized Python functions of
# (x, t), with the parameter symbols replaced by the numbers above.
u_exact = sym.lambdify(
    [x, t], u.subs('L', L).subs('a', a), modules='numpy')
f = sym.lambdify(
    [x, t], f.subs('L', L).subs('a', a), modules='numpy')
# Initial condition: the exact solution evaluated at t=0.
I = lambda x: u_exact(x, 0)
# Here we also make a function `I` for the initial condition.
#
# The idea now is that our manufactured solution should be
# exactly reproduced by the code (to machine precision).
# For this purpose we make a test function for comparing
# the exact and numerical solutions at the end of the
# time interval:
def test_solver_FE():
    """Verify that the manufactured solution (linear in t, quadratic
    in x) is reproduced to machine precision by solver_FE_simple and
    by solver_FE in both scalar and vectorized versions.

    Relies on u_exact, f, I, a, L defined above.
    """
    dx = L/3  # 3 cells
    F = 0.5
    dt = F*dx**2

    def check(u, x, t, name):
        # Compare numerical and exact solution at the final time level.
        u_e = u_exact(x, t[-1])
        diff = abs(u_e - u).max()
        tol = 1E-14
        assert diff < tol, 'max diff %s: %g' % (name, diff)

    u, x, t, cpu = solver_FE_simple(
        I=I, a=a, f=f, L=L, dt=dt, F=F, T=2)
    check(u, x, t, 'solver_FE_simple')

    u, x, t, cpu = solver_FE(
        I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
        user_action=None, version='scalar')
    check(u, x, t, 'solver_FE, scalar')

    u, x, t, cpu = solver_FE(
        I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
        user_action=None, version='vectorized')
    check(u, x, t, 'solver_FE, vectorized')
# **The critical value $F=0.5$.**
#
# We emphasize that the value `F=0.5` is critical: the tests above
# will fail if `F` has a larger value. This is because the Forward
# Euler scheme is unstable for $F>1/2$.
#
# The reader may wonder if
# $F=1/2$ is safe or if $F<1/2$ should be required. Experiments show
# that $F=1/2$ works fine for $u_t=\dfc u_{xx}$, so
# there is no accumulation of rounding
# errors in this case and hence no need to introduce any safety factor
# to keep $F$ away from the limiting value 0.5.
#
#
#
#
# ### Checking convergence rates
#
# <div id="diffu:pde1:FE:verify:convrates"></div>
#
#
# If our chosen exact solution does not satisfy the discrete equations
# exactly, we are left with checking the convergence rates, just as we did
# previously for the wave equation. However, with the Euler scheme here,
# we have different accuracies in time and space, since we use a second
# order approximation to the spatial derivative and a first order approximation
# to the time derivative. Thus, we must expect different convergence rates in
# time and space. For the numerical error,
# $$
# E = C_t\Delta t^r + C_x\Delta x^p,
# $$
# we should get convergence rates $r=1$ and $p=2$ ($C_t$ and $C_x$ are unknown constants).
# As previously,
# in the section [wave:pde2:fd:MMS](#wave:pde2:fd:MMS),
# we simplify matters by introducing a single discretization parameter $h$:
# $$
# h = \Delta t,\quad \Delta x = Kh^{r/p},
# $$
# where $K$ is any constant. This allows us to factor out only *one*
# discretization parameter $h$ from the formula:
# $$
# E = C_t h + C_x (Kh^{r/p})^p = \tilde C h^r,\quad
# \tilde C = C_t + C_sK^r\thinspace .
# $$
# The computed rate $r$ should approach 1 with increasing resolution.
#
# It is tempting, for simplicity,
# to choose $K=1$, which gives $\Delta x = h^{r/p}$, expected to be
# $\sqrt{\Delta t}$. However,
# we have to control the stability requirement: $F\leq\frac{1}{2}$,
# which means
# $$
# \frac{\dfc\Delta t}{\Delta x^2}\leq\frac{1}{2}\quad\Rightarrow
# \quad \Delta x \geq \sqrt{2\dfc}h^{1/2} ,
# $$
# implying that $K=\sqrt{2\dfc}$ is our choice in experiments where we
# lie on the stability limit $F=1/2$.
#
#
# ## Numerical experiments
# <div id="diffu:pde1:FE:experiments"></div>
#
# When a test function like the one above runs silently without errors,
# we have some evidence for a correct implementation of the numerical
# method. The next step is to do some experiments with more interesting
# solutions.
#
# We target a scaled diffusion problem where $x/L$ is a new spatial
# coordinate and $\dfc t/L^2$ is a new time coordinate. The source term
# $f$ is omitted, and $u$ is scaled by $\max_{x\in [0,L]}|I(x)|$ (see Section 3.2 in
# [[Langtangen_scaling]](#Langtangen_scaling) for details).
# The governing PDE is then
# $$
# \frac{\partial u}{\partial t} = \frac{\partial^2 u}{\partial x^2},
# $$
# in the spatial domain $[0,L]$, with boundary conditions $u(0)=u(1)=0$.
# Two initial conditions will be tested: a discontinuous plug,
# $$
# I(x) = \left\lbrace\begin{array}{ll}
# 0, & |x-L/2| > 0.1\\
# 1, & \hbox{otherwise}
# \end{array}\right.
# $$
# and a smooth Gaussian function,
# $$
# I(x) = e^{-\frac{1}{2\sigma^2}(x-L/2)^2}\thinspace .
# $$
# The functions `plug` and `gaussian` in [`diffu1D_u0.py`](${src_diffu}/diffu1D_u0.py) run the two cases,
# respectively:
# +
def plug(scheme='FE', F=0.5, Nx=50):
    """Simulate the scaled diffusion problem with a discontinuous
    plug profile as initial condition and animate the solution."""
    L = 1.
    a = 1.
    T = 0.1
    # Choose dt from the mesh Fourier number F and the resolution Nx
    dx = L/Nx
    dt = F/a*dx**2

    def I(x):
        """Plug profile as initial condition."""
        return 0 if abs(x - L/2.0) > 0.1 else 1

    cpu = viz(I, a, L, dt, F, T,
              umin=-0.1, umax=1.1,
              scheme=scheme, animate=True, framefiles=True)
    print('CPU time:', cpu)
def gaussian(scheme='FE', F=0.5, Nx=50, sigma=0.05):
    """Simulate the scaled diffusion problem with a smooth Gaussian
    initial condition, centered at x=L/2 with width sigma, and
    animate the solution."""
    L = 1.
    a = 1.
    T = 0.1
    # Compute dt from Nx and F
    dx = L/Nx; dt = F/a*dx**2

    def I(x):
        """Gaussian profile as initial condition."""
        # Use np.exp: a bare `exp` is not defined in this file
        return np.exp(-0.5*((x-L/2.0)**2)/sigma**2)

    # viz returns the CPU time only (not a (u, cpu) pair)
    cpu = viz(I, a, L, dt, F, T,
              umin=-0.1, umax=1.1,
              scheme=scheme, animate=True, framefiles=True)
    print('CPU time:', cpu)
# -
# These functions make use of the function `viz` for running the
# solver and visualizing the solution using a callback function
# with plotting:
def viz(I, a, L, dt, F, T, umin, umax,
        scheme='FE', animate=True, framefiles=True):
    """Run the solver selected by `scheme` ('FE', 'BE', ...) and
    visualize u(x,t) at every time level through a callback.

    Returns the CPU time reported by the solver.
    """
    def plot_u(u, x, t, n):
        # matplotlib's plt.plot does not accept axis/title keyword
        # arguments; set them with separate pyplot calls instead.
        plt.plot(x, u, 'r-')
        plt.axis([0, L, umin, umax])
        plt.title('t=%f' % t[n])
        if framefiles:
            plt.savefig('tmp_frame%04d.png' % n)
        if t[n] == 0:
            time.sleep(2)
        elif not framefiles:
            # It takes time to write files, so a pause is only needed
            # for screen-only animation
            time.sleep(0.2)

    user_action = plot_u if animate else lambda u, x, t, n: None
    # Look up the solver function by name in the module namespace;
    # avoids the string evaluation of eval('solver_'+scheme)
    solver = globals()['solver_' + scheme]
    cpu = solver(I, a, L, dt, F, T, user_action=user_action)
    return cpu
# Notice that this `viz` function does not accumulate the solutions
# in memory: the callback plots each time level and, optionally,
# saves it to a frame file on disk. Storing all solutions in a list
# is feasible for moderate values of $N_x$ in 1D problems, but for
# 2D and 3D problems, solutions are best written to files as done here.
#
# [hpl 1: Better to show the scalable file solution here?]
#
# Our experiments employ a time step $\Delta t = 0.0002$ and
# simulate for $t\in [0,0.1]$. First we try the highest value of
# $F$: $F=0.5$. This resolution corresponds to
# $N_x=50$. A possible terminal command is
#         Terminal> python -c 'from diffu1D_u0 import gaussian;
#                   gaussian(scheme="FE", F=0.5, Nx=50)'
#
# The $u(x,t)$ curve as a function of $x$ is shown in [Figure](#diffu:pde1:FE:fig:F=0.5) at four time levels.
#
# <!-- dom:MOVIE: [https://raw.githubusercontent.com/hplgit/fdm-book/master/doc/pub/book/html/mov-diffu/diffu1D_u0_FE_plug/movie.ogg] -->
# <!-- begin movie -->
from IPython.display import HTML
_s = """
<div>
<video loop controls width='640' height='365' preload='none'>
<source src='https://raw.githubusercontent.com/hplgit/fdm-book/master/doc/pub/book/html/mov-diffu/diffu1D_u0_FE_plug/movie.mp4' type='video/mp4; codecs="avc1.42E01E, mp4a.40.2"'>
<source src='https://raw.githubusercontent.com/hplgit/fdm-book/master/doc/pub/book/html/mov-diffu/diffu1D_u0_FE_plug/movie.webm' type='video/webm; codecs="vp8, vorbis"'>
<source src='https://raw.githubusercontent.com/hplgit/fdm-book/master/doc/pub/book/html/mov-diffu/diffu1D_u0_FE_plug/movie.ogg' type='video/ogg; codecs="theora, vorbis"'>
</video>
</div>
<p><em></em></p>
<!-- Issue warning if in a Safari browser -->
<script language="javascript">
if (!!(window.safari)) {
document.write("<div style=\"width: 95%%; padding: 10px; border: 1px solid #100; border-radius: 4px;\"><p><font color=\"red\">The above movie will not play in Safari - use Chrome, Firefox, or Opera.</font></p></div>")}
</script>
"""
HTML(_s)
# <!-- end movie -->
#
#
# <!-- [movie](${doc_notes}/pub/diffu/html/mov-diffu/diffu1D_u0_FE_plug/movie.ogg) -->
# <!-- Does not work: -->
# <!-- http://tinyurl.com/pu5uyfn/pub/diffu/html/mov-diffu/diffu1D_u0_FE_plug/movie.ogg -->
# <!-- Works: -->
# <!-- https://raw.githubusercontent.com/hplgit/fdm-book/master/doc/.src/book/mov-diffu/diffu1D_u0_FE_plug/movie.ogg -->
#
# We see that the curves have saw-tooth waves in the beginning of the
# simulation. This non-physical noise is smoothed out with time, but
# solutions of the diffusion equations are known to be smooth, and
# this numerical solution is definitely not smooth.
# Lowering $F$ helps: $F\leq 0.25$ gives a smooth solution, see
# % if FORMAT == "pdflatex":
# [Figure](#diffu:pde1:FE:fig:F=0.25) (and a
# [movie](${docraw}/mov-diffu/diffu1D_u0_FE_plug_F025/movie.ogg)).
# % else:
# [Figure](#diffu:pde1:FE:fig:F=0.25).
#
# <!-- dom:MOVIE: [mov-diffu/diffu1D_u0_FE_plug_F025/movie.ogg] -->
# <!-- begin movie -->
_s = """
<div>
<video loop controls width='640' height='365' preload='none'>
<source src='mov-diffu/diffu1D_u0_FE_plug_F025/movie.mp4' type='video/mp4; codecs="avc1.42E01E, mp4a.40.2"'>
<source src='mov-diffu/diffu1D_u0_FE_plug_F025/movie.webm' type='video/webm; codecs="vp8, vorbis"'>
<source src='mov-diffu/diffu1D_u0_FE_plug_F025/movie.ogg' type='video/ogg; codecs="theora, vorbis"'>
</video>
</div>
<p><em></em></p>
<!-- Issue warning if in a Safari browser -->
<script language="javascript">
if (!!(window.safari)) {
document.write("<div style=\"width: 95%%; padding: 10px; border: 1px solid #100; border-radius: 4px;\"><p><font color=\"red\">The above movie will not play in Safari - use Chrome, Firefox, or Opera.</font></p></div>")}
</script>
"""
HTML(_s)
# <!-- end movie -->
#
# % endif
#
# Increasing $F$ slightly beyond the limit 0.5, to $F=0.51$,
# gives growing, non-physical instabilities,
# as seen in [Figure](#diffu:pde1:FE:fig:F=0.51).
#
# <!-- dom:FIGURE: [fig-diffu/plug_FE_F05.png, width=800 frac=1] Forward Euler scheme for $F=0.5$. <div id="diffu:pde1:FE:fig:F=0.5"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:FE:fig:F=0.5"></div>
#
# <p>Forward Euler scheme for $F=0.5$.</p>
# <img src="fig-diffu/plug_FE_F05.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/plug_FE_F025.png, width=800 frac=1] Forward Euler scheme for $F=0.25$. <div id="diffu:pde1:FE:fig:F=0.25"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:FE:fig:F=0.25"></div>
#
# <p>Forward Euler scheme for $F=0.25$.</p>
# <img src="fig-diffu/plug_FE_F025.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/plug_FE_F051.png, width=800 frac=1] Forward Euler scheme for $F=0.51$. <div id="diffu:pde1:FE:fig:F=0.51"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:FE:fig:F=0.51"></div>
#
# <p>Forward Euler scheme for $F=0.51$.</p>
# <img src="fig-diffu/plug_FE_F051.png" width=800>
#
# <!-- end figure -->
#
#
#
# Instead of a discontinuous initial condition we now try the smooth
# Gaussian function for $I(x)$. A simulation for $F=0.5$
# is shown in [Figure](#diffu:pde1:FE:fig:gauss:F=0.5). Now the numerical solution
# is smooth for all times, and this is true for any $F\leq 0.5$.
#
# % if FORMAT != "pdflatex":
# <!-- dom:MOVIE: [mov-diffu/diffu1D_u0_FE_gaussian1/movie.ogg] -->
# <!-- begin movie -->
_s = """
<div>
<video loop controls width='640' height='365' preload='none'>
<source src='mov-diffu/diffu1D_u0_FE_gaussian1/movie.mp4' type='video/mp4; codecs="avc1.42E01E, mp4a.40.2"'>
<source src='mov-diffu/diffu1D_u0_FE_gaussian1/movie.webm' type='video/webm; codecs="vp8, vorbis"'>
<source src='mov-diffu/diffu1D_u0_FE_gaussian1/movie.ogg' type='video/ogg; codecs="theora, vorbis"'>
</video>
</div>
<p><em></em></p>
<!-- Issue warning if in a Safari browser -->
<script language="javascript">
if (!!(window.safari)) {
document.write("<div style=\"width: 95%%; padding: 10px; border: 1px solid #100; border-radius: 4px;\"><p><font color=\"red\">The above movie will not play in Safari - use Chrome, Firefox, or Opera.</font></p></div>")}
</script>
"""
HTML(_s)
# <!-- end movie -->
#
# % endif
#
# <!-- dom:FIGURE: [fig-diffu/gaussian_FE_F05.png, width=800 frac=1] Forward Euler scheme for $F=0.5$. <div id="diffu:pde1:FE:fig:gauss:F=0.5"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:FE:fig:gauss:F=0.5"></div>
#
# <p>Forward Euler scheme for $F=0.5$.</p>
# <img src="fig-diffu/gaussian_FE_F05.png" width=800>
#
# <!-- end figure -->
#
#
# Experiments with these two choices of $I(x)$ reveal some
# important observations:
#
# * The Forward Euler scheme leads to growing solutions if $F>\frac{1}{2}$.
#
# * $I(x)$ as a discontinuous plug leads to a saw tooth-like noise
# for $F=\frac{1}{2}$, which is absent for $F\leq\frac{1}{4}$.
#
# * The smooth Gaussian initial function leads to a smooth solution
# for all relevant $F$ values ($F\leq \frac{1}{2}$).
#
# # Implicit methods for the 1D diffusion equation
# <div id="diffu:pde1:implicit"></div>
#
# Simulations with the Forward Euler scheme show that the time step
# restriction, $F\leq\frac{1}{2}$, which means $\Delta t \leq \Delta x^2/(2\dfc)$,
# may be relevant in the beginning of the diffusion process, when the
# solution changes quite fast, but as time increases, the process slows
# down, and a small $\Delta t$ may be inconvenient. With
# *implicit schemes*, which lead to coupled systems of linear equations
# to be solved at each time level, any size of $\Delta t$ is possible
# (but the accuracy decreases with increasing $\Delta t$).
# The Backward Euler scheme, derived and implemented below, is the
# simplest implicit scheme for the diffusion equation.
#
# ## Backward Euler scheme
# <div id="diffu:pde1:BE"></div>
#
# In ([5](#diffu:pde1:step2)), we now apply a backward difference in time,
# but the same central difference in space:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step3aBE"></div>
#
# $$
# \begin{equation}
# [D_t^- u = D_xD_x u + f]^n_i,
# \label{diffu:pde1:step3aBE} \tag{10}
# \end{equation}
# $$
# which written out reads
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step3bBE"></div>
#
# $$
# \begin{equation}
# \frac{u^{n}_i-u^{n-1}_i}{\Delta t} = \dfc\frac{u^{n}_{i+1} - 2u^n_i + u^n_{i-1}}{\Delta x^2} + f_i^n\thinspace .
# \label{diffu:pde1:step3bBE} \tag{11}
# \end{equation}
# $$
# Now we assume $u^{n-1}_i$ is already computed, but that all quantities at the "new"
# time level $n$ are unknown. This time it is not possible to solve
# with respect to $u_i^{n}$ because this value couples to its neighbors
# in space, $u^n_{i-1}$ and $u^n_{i+1}$, which are also unknown.
# Let us examine this fact for the case when $N_x=3$. Equation ([11](#diffu:pde1:step3bBE)) written for $i=1,\ldots,Nx-1= 1,2$ becomes
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# \frac{u^{n}_1-u^{n-1}_1}{\Delta t} = \dfc\frac{u^{n}_{2} - 2u^n_1 + u^n_{0}}{\Delta x^2} + f_1^n
# \label{_auto2} \tag{12}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# \frac{u^{n}_2-u^{n-1}_2}{\Delta t} = \dfc\frac{u^{n}_{3} - 2u^n_2 + u^n_{1}}{\Delta x^2} + f_2^n
# \label{_auto3} \tag{13}
# \end{equation}
# $$
# The boundary values $u^n_0$ and $u^n_3$ are known as zero. Collecting the
# unknown new values $u^n_1$ and $u^n_2$ on the left-hand side and multiplying
# by $\Delta t$ gives
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# \left(1+ 2F\right) u^{n}_1 - F u^{n}_{2} = u^{n-1}_1 + \Delta t f_1^n,
# \label{_auto4} \tag{14}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# - F u^{n}_{1} + \left(1+ 2F\right) u^{n}_2 = u^{n-1}_2 + \Delta t f_2^n\thinspace .
# \label{_auto5} \tag{15}
# \end{equation}
# $$
# This is a coupled $2\times 2$ system of algebraic equations for
# the unknowns $u^n_1$ and $u^n_2$. The equivalent matrix form is
# $$
# \left(\begin{array}{cc}
# 1+ 2F & - F\\
# - F & 1+ 2F
# \end{array}\right)
# \left(\begin{array}{c}
# u^{n}_1\\
# u^{n}_2
# \end{array}\right)
# =
# \left(\begin{array}{c}
# u^{n-1}_1 + \Delta t f_1^n\\
# u^{n-1}_2 + \Delta t f_2^n
# \end{array}\right)
# $$
# **Terminology: implicit vs. explicit methods.**
#
# Discretization methods that lead to a coupled system of equations
# for the unknown function at a new time level are said to be
# *implicit methods*.
# The counterpart, *explicit methods*, refers to discretization
# methods where there is a simple explicit formula for the values of
# the unknown function at each of the spatial mesh points at the new
# time level. From an implementational point of view, implicit methods
# are more comprehensive to code since they require
# the solution of coupled equations, i.e., a matrix system, at each time level.
# With explicit methods we have a closed-form formula for the value of
# the unknown at each mesh point.
#
# Very often explicit schemes have a restriction on the size of the time
# step that can be relaxed by using implicit schemes. In fact,
# implicit schemes are frequently unconditionally stable, so the size of the
# time step is governed by accuracy and not by stability. This is the great
# advantage of implicit schemes.
#
#
#
#
# In the general case, ([11](#diffu:pde1:step3bBE)) gives rise to
# a coupled $(N_x-1)\times (N_x-1)$ system of algebraic equations for
# all the unknown $u^n_i$ at the interior spatial points $i=1,\ldots,N_x-1$.
# Collecting the unknowns on the left-hand side,
# ([11](#diffu:pde1:step3bBE)) can be written
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step4BE"></div>
#
# $$
# \begin{equation}
# - F u^n_{i-1} + \left(1+ 2F \right) u^{n}_i - F u^n_{i+1} =
# u_{i}^{n-1},
# \label{diffu:pde1:step4BE} \tag{16}
# \end{equation}
# $$
# for $i=1,\ldots,N_x-1$.
# One can either view these equations as a system where the
# $u^{n}_i$ values at the internal mesh points, $i=1,\ldots,N_x-1$, are
# unknown, or we may append the boundary values $u^n_0$ and $u^n_{N_x}$
# to the system. In the latter case, all $u^n_i$ for $i=0,\ldots,N_x$
# are considered unknown, and we must add the boundary equations to
# the $N_x-1$ equations in ([16](#diffu:pde1:step4BE)):
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step4BE:BC:0"></div>
#
# $$
# \begin{equation}
# u_0^n = 0,\label{diffu:pde1:step4BE:BC:0} \tag{17}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:step4BE:BC:L"></div>
#
# $$
# \begin{equation}
# u_{N_x}^n = 0\thinspace .
# \label{diffu:pde1:step4BE:BC:L} \tag{18}
# \end{equation}
# $$
# A coupled system of algebraic equations can be written on matrix form,
# and this is important if we want to call up ready-made software for
# solving the system. The equations ([16](#diffu:pde1:step4BE))
# and ([17](#diffu:pde1:step4BE:BC:0))--([18](#diffu:pde1:step4BE:BC:L))
# correspond to the matrix equation
# $$
# AU = b
# $$
# where $U=(u^n_0,\ldots,u^n_{N_x})$, and
# the matrix $A$ has the following structure:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:pde1:matrix:sparsity"></div>
#
# $$
# \begin{equation}
# A =
# \left(
# \begin{array}{cccccccccc}
# A_{0,0} & A_{0,1} & 0
# &\cdots &
# \cdots & \cdots & \cdots &
# \cdots & 0 \\
# A_{1,0} & A_{1,1} & A_{1,2} & \ddots & & & & & \vdots \\
# 0 & A_{2,1} & A_{2,2} & A_{2,3} &
# \ddots & & & & \vdots \\
# \vdots & \ddots & & \ddots & \ddots & 0 & & & \vdots \\
# \vdots & & \ddots & \ddots & \ddots & \ddots & \ddots & & \vdots \\
# \vdots & & & 0 & A_{i,i-1} & A_{i,i} & A_{i,i+1} & \ddots & \vdots \\
# \vdots & & & & \ddots & \ddots & \ddots &\ddots & 0 \\
# \vdots & & & & &\ddots & \ddots &\ddots & A_{N_x-1,N_x} \\
# 0 &\cdots & \cdots &\cdots & \cdots & \cdots & 0 & A_{N_x,N_x-1} & A_{N_x,N_x}
# \end{array}
# \right)
# \label{diffu:pde1:matrix:sparsity} \tag{19}
# \end{equation}
# $$
# The nonzero elements are given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# A_{i,i-1} = -F
# \label{_auto6} \tag{20}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# A_{i,i} = 1+ 2F
# \label{_auto7} \tag{21}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# A_{i,i+1} = -F
# \label{_auto8} \tag{22}
# \end{equation}
# $$
# in the equations for internal points, $i=1,\ldots,N_x-1$. The first and last
# equation correspond to the boundary condition, where we know the solution,
# and therefore we must have
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# A_{0,0} = 1,
# \label{_auto9} \tag{23}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# A_{0,1} = 0,
# \label{_auto10} \tag{24}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto11"></div>
#
# $$
# \begin{equation}
# A_{N_x,N_x-1} = 0,
# \label{_auto11} \tag{25}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto12"></div>
#
# $$
# \begin{equation}
# A_{N_x,N_x} = 1\thinspace .
# \label{_auto12} \tag{26}
# \end{equation}
# $$
# The right-hand side $b$ is written as
# <!-- Equation labels as ordinary links -->
# <div id="_auto13"></div>
#
# $$
# \begin{equation}
# b = \left(\begin{array}{c}
# b_0\\
# b_1\\
# \vdots\\
# b_i\\
# \vdots\\
# b_{N_x}
# \end{array}\right)
# \label{_auto13} \tag{27}
# \end{equation}
# $$
# with
# <!-- Equation labels as ordinary links -->
# <div id="_auto14"></div>
#
# $$
# \begin{equation}
# b_0 = 0,
# \label{_auto14} \tag{28}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto15"></div>
#
# $$
# \begin{equation}
# b_i = u^{n-1}_i,\quad i=1,\ldots,N_x-1,
# \label{_auto15} \tag{29}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto16"></div>
#
# $$
# \begin{equation}
# b_{N_x} = 0 \thinspace . \label{_auto16} \tag{30}
# \end{equation}
# $$
# We observe that the matrix $A$ contains quantities that do not change
# in time. Therefore, $A$ can be formed once and for all before we enter
# the recursive formulas for the time evolution.
# The right-hand side $b$, however, must be updated at each time step.
# This leads to the following computational algorithm, here sketched
# with Python code:
# +
# Backward Euler time stepping with a dense coefficient matrix.
# Assumes Nx, Nt, L, T, F and the initial-condition function I are
# defined by the surrounding program.
x = np.linspace(0, L, Nx+1)    # mesh points in space
dx = x[1] - x[0]
t = np.linspace(0, T, Nt+1)    # mesh points in time (was N+1: N undefined)
u   = np.zeros(Nx+1)           # unknown u at new time level
u_n = np.zeros(Nx+1)           # u at the previous time level

# Data structures for the linear system
A = np.zeros((Nx+1, Nx+1))
b = np.zeros(Nx+1)

# The tridiagonal matrix A is constant in time, so it is assembled
# once, before entering the time loop
for i in range(1, Nx):
    A[i,i-1] = -F
    A[i,i+1] = -F
    A[i,i]   = 1 + 2*F
A[0,0] = A[Nx,Nx] = 1          # boundary rows: u=0 is known

# Set initial condition u(x,0) = I(x)
for i in range(0, Nx+1):
    u_n[i] = I(x[i])

import scipy.linalg

for n in range(0, Nt):
    # Compute b and solve linear system; b_i = u^{n-1}_i per the
    # derivation above (the original sketch had a spurious minus sign)
    for i in range(1, Nx):
        b[i] = u_n[i]
    b[0] = b[Nx] = 0
    u[:] = scipy.linalg.solve(A, b)

    # Update u_n before next step
    u_n[:] = u
# -
# Regarding verification, the same considerations apply as for the
# Forward Euler method (the section [Verification](#diffu:pde1:FE:verify)).
#
#
#
# ## Sparse matrix implementation
# <div id="diffu:pde1:impl:sparse"></div>
#
# We have seen from ([19](#diffu:pde1:matrix:sparsity)) that the matrix
# $A$ is tridiagonal. The code segment above used a full, dense matrix
# representation of $A$, which stores a lot of values we know are zero
# beforehand, and worse, the solution algorithm computes with all these
# zeros. With $N_x+1$ unknowns, the work by the solution algorithm is
# $\frac{1}{3} (N_x+1)^3$ and the storage requirements $(N_x+1)^2$. By
# utilizing the fact that $A$ is tridiagonal and employing corresponding
# software tools that work with the three diagonals, the work and
# storage demands can be proportional to $N_x$ only. This leads to a
# dramatic improvement: with $N_x=200$, which is a realistic resolution,
# the code runs about 40,000 times faster and reduces the storage to
# just 1.5%! There is no doubt that we should take advantage of the fact
# that $A$ is tridiagonal.
#
# The key idea is to apply a data structure for a tridiagonal or sparse
# matrix. The `scipy.sparse` package has relevant utilities. For
# example, we can store only the nonzero diagonals of a matrix. The
# package also has linear system solvers that operate on sparse matrix
# data structures. The code below illustrates how we can store only the
# main diagonal and the upper and lower diagonals.
# +
# Representation of sparse matrix and right-hand side.
# The sparse modules must be imported explicitly; `import scipy` or
# `import scipy.linalg` alone does not make them available.
import scipy.sparse
import scipy.sparse.linalg

main  = np.zeros(Nx+1)
lower = np.zeros(Nx)
upper = np.zeros(Nx)
b     = np.zeros(Nx+1)

# Precompute sparse matrix (constant in time)
main[:]  = 1 + 2*F
lower[:] = -F
upper[:] = -F
# Insert boundary conditions: the u=0 rows reduce to the identity
main[0] = 1
main[Nx] = 1

A = scipy.sparse.diags(
    diagonals=[main, lower, upper],
    offsets=[0, -1, 1], shape=(Nx+1, Nx+1),
    format='csr')
print(A.todense())  # Check that A is correct

# Set initial condition
for i in range(0, Nx+1):
    u_n[i] = I(x[i])

for n in range(0, Nt):
    # Copy u_n into the preallocated b; `b = u_n` would alias the two
    # arrays so the boundary assignments below would overwrite u_n
    b[:] = u_n
    b[0] = b[-1] = 0.0  # boundary conditions
    u[:] = scipy.sparse.linalg.spsolve(A, b)
    u_n[:] = u
# -
# The `scipy.sparse.linalg.spsolve` function utilizes the sparse storage
# structure of `A` and performs, in this case, a very efficient Gaussian
# elimination solve.
#
# The program [`diffu1D_u0.py`](${src_diffu}/diffu1D_u0.py)
# contains a function `solver_BE`, which implements the Backward Euler scheme
# sketched above.
# As mentioned in the section [Forward Euler scheme](#diffu:pde1:FE),
# the functions `plug` and `gaussian`
# run the case with $I(x)$ as a discontinuous plug or a smooth
# Gaussian function. All experiments point to two characteristic
# features of the Backward Euler scheme: 1) it is always stable, and
# 2) it always gives a smooth, decaying solution.
#
# ## Crank-Nicolson scheme
# <div id="diffu:pde1:CN"></div>
#
# The idea in the Crank-Nicolson scheme is to apply centered
# differences in space and time, combined with an average in time.
# We demand the PDE to be fulfilled at the spatial mesh points, but
# midway between the points in the time mesh:
# $$
# \frac{\partial}{\partial t} u(x_i, t_{n+\frac{1}{2}}) =
# \dfc\frac{\partial^2}{\partial x^2}u(x_i, t_{n+\frac{1}{2}}) + f(x_i,t_{n+\frac{1}{2}}),
# $$
# for $i=1,\ldots,N_x-1$ and $n=0,\ldots, N_t-1$.
#
# With centered differences in space and time, we get
# $$
# [D_t u = \dfc D_xD_x u + f]^{n+\frac{1}{2}}_i\thinspace .
# $$
# On the right-hand side we get an expression
# $$
# \frac{1}{\Delta x^2}\left(u^{n+\frac{1}{2}}_{i-1} - 2u^{n+\frac{1}{2}}_i + u^{n+\frac{1}{2}}_{i+1}\right) + f_i^{n+\frac{1}{2}}\thinspace .
# $$
# This expression is problematic since $u^{n+\frac{1}{2}}_i$ is not one of
# the unknowns we compute. A possibility is to replace $u^{n+\frac{1}{2}}_i$
# by an arithmetic average:
# $$
# u^{n+\frac{1}{2}}_i\approx
# \frac{1}{2}\left(u^{n}_i +u^{n+1}_{i}\right)\thinspace .
# $$
# In the compact notation, we can use the arithmetic average
# notation $\overline{u}^t$:
# $$
# [D_t u = \dfc D_xD_x \overline{u}^t + f]^{n+\frac{1}{2}}_i\thinspace .
# $$
# We can also use an average for $f_i^{n+\frac{1}{2}}$:
# $$
# [D_t u = \dfc D_xD_x \overline{u}^t + \overline{f}^t]^{n+\frac{1}{2}}_i\thinspace .
# $$
# After writing out the differences and average, multiplying by $\Delta t$,
# and collecting all unknown terms on the left-hand side, we get
# $$
# u^{n+1}_i - \frac{1}{2} F(u^{n+1}_{i-1} - 2u^{n+1}_i + u^{n+1}_{i+1})
# = u^{n}_i + \frac{1}{2} F(u^{n}_{i-1} - 2u^{n}_i + u^{n}_{i+1})\nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto17"></div>
#
# $$
# \begin{equation}
# \qquad + \frac{\Delta t}{2} f_i^{n+1} + \frac{\Delta t}{2} f_i^n\thinspace .
# \label{_auto17} \tag{31}
# \end{equation}
# $$
# Also here, as in the Backward Euler scheme, the new unknowns
# $u^{n+1}_{i-1}$, $u^{n+1}_{i}$, and $u^{n+1}_{i+1}$ are coupled
# in a linear system $AU=b$, where $A$ has the same structure
# as in ([19](#diffu:pde1:matrix:sparsity)), but with slightly
# different entries:
# <!-- Equation labels as ordinary links -->
# <div id="_auto18"></div>
#
# $$
# \begin{equation}
# A_{i,i-1} = -\frac{1}{2} F
# \label{_auto18} \tag{32}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto19"></div>
#
# $$
# \begin{equation}
# A_{i,i} = 1 + F
# \label{_auto19} \tag{33}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto20"></div>
#
# $$
# \begin{equation}
# A_{i,i+1} = -\frac{1}{2} F
# \label{_auto20} \tag{34}
# \end{equation}
# $$
# in the equations for internal points, $i=1,\ldots,N_x-1$. The equations
# for the boundary points correspond to
# <!-- Equation labels as ordinary links -->
# <div id="_auto21"></div>
#
# $$
# \begin{equation}
# A_{0,0} = 1,
# \label{_auto21} \tag{35}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto22"></div>
#
# $$
# \begin{equation}
# A_{0,1} = 0,
# \label{_auto22} \tag{36}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto23"></div>
#
# $$
# \begin{equation}
# A_{N_x,N_x-1} = 0,
# \label{_auto23} \tag{37}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto24"></div>
#
# $$
# \begin{equation}
# A_{N_x,N_x} = 1\thinspace .
# \label{_auto24} \tag{38}
# \end{equation}
# $$
# The right-hand side $b$ has entries
# <!-- Equation labels as ordinary links -->
# <div id="_auto25"></div>
#
# $$
# \begin{equation}
# b_0 = 0,
# \label{_auto25} \tag{39}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto26"></div>
#
# $$
# \begin{equation}
# b_i = u^{n}_i + \frac{\Delta t}{2}(f_i^n + f_i^{n+1}),\quad i=1,\ldots,N_x-1,
# \label{_auto26} \tag{40}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto27"></div>
#
# $$
# \begin{equation}
# b_{N_x} = 0 \thinspace . \label{_auto27} \tag{41}
# \end{equation}
# $$
# When verifying some implementation of the Crank-Nicolson scheme by convergence rate testing,
# one should note that the scheme is second order accurate in both space and time. The numerical
# error then reads
# $$
# E = C_t\Delta t^r + C_x\Delta x^r,
# $$
# where $r=2$ ($C_t$ and $C_x$ are unknown constants, as before).
# When introducing a single discretization parameter, we may now simply choose
# $$
# h = \Delta x = \Delta t,
# $$
# which gives
# $$
# E = C_th^r + C_xh^r = (C_t + C_x)h^r,
# $$
# where $r$ should approach 2 as resolution is increased in the convergence rate computations.
#
#
#
# ## The unifying $\theta$ rule
# <div id="diffu:pde1:theta"></div>
#
# For the equation
# $$
# \frac{\partial u}{\partial t} = G(u),
# $$
# where $G(u)$ is some
# spatial differential operator, the $\theta$-rule
# looks like
# $$
# \frac{u^{n+1}_i - u^n_i}{\Delta t} =
# \theta G(u^{n+1}_i) + (1-\theta) G(u^{n}_i)\thinspace .
# $$
# The important feature of this time discretization scheme is that
# we can implement one formula and then generate a family of
# well-known and widely used schemes:
#
# * $\theta=0$ gives the Forward Euler scheme in time
#
# * $\theta=1$ gives the Backward Euler scheme in time
#
# * $\theta=\frac{1}{2}$ gives the Crank-Nicolson scheme in time
#
# In the compact difference notation, we write the $\theta$ rule
# as
# $$
# [\overline{D}_t u = \dfc D_xD_x u]^{n+\theta}\thinspace .
# $$
# We have that $t_{n+\theta} = \theta t_{n+1} + (1-\theta)t_n$.
#
# Applied to the 1D diffusion problem, the $\theta$-rule gives
# $$
# \begin{align*}
# \frac{u^{n+1}_i-u^n_i}{\Delta t} &=
# \dfc\left( \theta \frac{u^{n+1}_{i+1} - 2u^{n+1}_i + u^{n+1}_{i-1}}{\Delta x^2}
# + (1-\theta) \frac{u^{n}_{i+1} - 2u^n_i + u^n_{i-1}}{\Delta x^2}\right)\\
# &\qquad + \theta f_i^{n+1} + (1-\theta)f_i^n
# \thinspace .
# \end{align*}
# $$
# This scheme also leads to a matrix system with entries
# $$
# A_{i,i-1} = -F\theta,\quad A_{i,i} = 1+2F\theta,\quad
# A_{i,i+1} = -F\theta,
# $$
# while right-hand side entry $b_i$ is
# $$
# b_i = u^n_{i} + F(1-\theta)
# \frac{u^{n}_{i+1} - 2u^n_i + u^n_{i-1}}{\Delta x^2} +
# \Delta t\theta f_i^{n+1} + \Delta t(1-\theta)f_i^n\thinspace .
# $$
# The corresponding entries for the boundary points are as in the Backward
# Euler and Crank-Nicolson schemes listed earlier.
#
# Note that convergence rate testing with implementations of the theta rule must
# adjust the error expression according to which of the underlying schemes is actually being run.
# That is, if $\theta=0$ (i.e., Forward Euler) or $\theta=1$ (i.e., Backward Euler), there should
# be first order convergence, whereas with $\theta=0.5$ (i.e., Crank-Nicolson), one should get
# second order convergence (as outlined in previous sections).
#
#
#
# ## Experiments
# <div id="diffu:pde1:theta:experiments"></div>
#
#
# We can repeat the experiments from the section [Numerical experiments](#diffu:pde1:FE:experiments)
# to see if the Backward Euler or Crank-Nicolson schemes have problems
# with sawtooth-like noise when starting with a discontinuous initial
# condition. We can also verify that we can have $F>\frac{1}{2}$,
# which allows larger time steps than in the Forward Euler method.
#
# <!-- dom:FIGURE: [fig-diffu/plug_BE_F05.png, width=800 frac=1] Backward Euler scheme for $F=0.5$. <div id="diffu:pde1:BE:fig:F=0.5"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:BE:fig:F=0.5"></div>
#
# <p>Backward Euler scheme for $F=0.5$.</p>
# <img src="fig-diffu/plug_BE_F05.png" width=800>
#
# <!-- end figure -->
#
#
# The Backward Euler scheme always produces smooth solutions for any $F$.
# [Figure](#diffu:pde1:BE:fig:F=0.5) shows one example.
# Note that the mathematical discontinuity at $t=0$ leads to a linear
# variation on a mesh, but the approximation to a jump becomes better
# as $N_x$ increases. In our simulation, we specify $\Delta t$ and $F$,
# and set $N_x$ to $L/\sqrt{\dfc\Delta t/F}$. Since $N_x\sim\sqrt{F}$,
# the discontinuity looks sharper in the Crank-Nicolson
# simulations with larger $F$.
#
# The Crank-Nicolson method produces smooth solutions for small $F$,
# $F\leq\frac{1}{2}$, but small noise gets more and more evident as $F$
# increases. Figures [diffu:pde1:CN:fig:F=3](#diffu:pde1:CN:fig:F=3) and [diffu:pde1:CN:fig:F=10](#diffu:pde1:CN:fig:F=10)
# demonstrate the effect for $F=3$ and $F=10$, respectively.
# The section [diffu:pde1:analysis](#diffu:pde1:analysis) explains why such noise occurs.
#
# <!-- dom:FIGURE: [fig-diffu/plug_CN_F3.png, width=800 frac=1] Crank-Nicolson scheme for $F=3$. <div id="diffu:pde1:CN:fig:F=3"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:CN:fig:F=3"></div>
#
# <p>Crank-Nicolson scheme for $F=3$.</p>
# <img src="fig-diffu/plug_CN_F3.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/plug_CN_F10.png, width=800 frac=1] Crank-Nicolson scheme for $F=10$. <div id="diffu:pde1:CN:fig:F=10"></div> -->
# <!-- begin figure -->
# <div id="diffu:pde1:CN:fig:F=10"></div>
#
# <p>Crank-Nicolson scheme for $F=10$.</p>
# <img src="fig-diffu/plug_CN_F10.png" width=800>
#
# <!-- end figure -->
#
#
#
#
# ## The Laplace and Poisson equation
#
#
# The Laplace equation, $\nabla^2 u = 0$, and the Poisson equation,
# $-\nabla^2 u = f$, occur in numerous applications throughout science and
# engineering. In 1D these equations read
# $u''(x)=0$ and $-u''(x)=f(x)$, respectively.
# We can solve 1D variants of the Laplace equations with the listed
# software, because we can interpret $u_{xx}=0$ as the limiting solution
# of $u_t = \dfc u_{xx}$ when $u$ reaches a steady state limit where
# $u_t\rightarrow 0$.
# Similarly, Poisson's equation $-u_{xx}=f$ arises from solving
# $u_t = u_{xx} + f$ and letting $t\rightarrow\infty$ so $u_t\rightarrow 0$.
#
# Technically in a program, we can simulate $t\rightarrow\infty$
# by just taking one large time step:
# $\Delta t\rightarrow\infty$. In the limit, the Backward Euler
# scheme gives
# $$
# -\frac{u^{n+1}_{i+1} - 2u^{n+1}_i + u^{n+1}_{i-1}}{\Delta x^2} = f^{n+1}_i,
# $$
# which is nothing but the discretization $[-D_xD_x u = f]^{n+1}_i$ of
# $-u_{xx}=f$.
#
# The result above means that
# the Backward Euler scheme can solve the limit equation directly and
# hence produce a solution of the 1D Laplace equation.
# With the Forward Euler scheme we must do the time stepping since $\Delta t >
# \Delta x^2/\dfc$
# is illegal and leads to instability.
# We may interpret this time stepping
# as solving the equation system from $-u_{xx}=f$ by iterating on a
# pseudo time variable.
#
# [hpl 2: Better to say the last sentence when we treat iterative methods.]
| fdm-devito-notebooks/03_diffu/diffu_fd1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### NLP Sentiment Analysis Exercise
# +
# import numpy
import numpy as np
# import pandas
import pandas as pd
# import regex
import re
# import nltk
import nltk
# -
# load data
# Tweets.csv: US airline tweets with a 'text' column and a labelled sentiment.
data_source_url = "https://raw.githubusercontent.com/kolaveridi/kaggle-Twitter-US-Airline-Sentiment-/master/Tweets.csv"
airline_tweets = pd.read_csv(data_source_url)
# **Task:** Print the top 5 rows.
# NOTE(review): head(2) shows only 2 rows while the task asks for the top 5.
airline_tweets.head(2)
# **Task:** Use the `'text'` column to create an array with the name `'features'`.
#
#
features_array = airline_tweets["text"]  # pandas Series of raw tweet text
# **Task:** Use `'airline_sentiment'` column to create an array with the name `'labels'`.
labels_array = airline_tweets["airline_sentiment"]  # one sentiment label per tweet
# **Task:** Clean the text data in the `'features'` array.
#
# - Remove all the special characters.
# - Remove all single characters.
# - Remove single characters from the start.
# - Substituting multiple spaces with single space.
# - Converting all text to lowercase.
features_array = np.array(features_array)  # Series -> numpy object array (mutable in place)
# +
# split into tokens
from nltk.tokenize import RegexpTokenizer
# \w+ keeps runs of word characters, implicitly dropping punctuation.
tokenizer = RegexpTokenizer(r'\w+')
# In place: lowercase each tweet, then replace the string with its token
# list — after this loop features_array holds lists of words, not strings.
for i in range(len(features_array)):
    features_array[i] = features_array[i].lower()
    features_array[i] = tokenizer.tokenize(features_array[i])
# this splits into words
# --------------------------- some notes under -------
# Remove numbers, but not words that contain numbers.
# docs = [[token for token in doc if not token.isnumeric()] for doc in docs]
# Remove words that are only one character.
# docs = [[token for token in doc if len(token) > 1] for doc in docs]
# -
features_array[:10]
# +
# Remove all the special characters from features_array
# Remove all single characters
# Remove single characters from the start
# Substituting multiple spaces with single space
# Converting to Lowercase
#
# Fix: the original loop had an empty body (a SyntaxError when the cell
# runs). The previous cell replaced each entry with a list of lowercase
# tokens, so re-join to a string first, then apply the regex clean-up
# steps listed in the task.
for i in range(0, len(features_array)):
    tweet = features_array[i]
    if not isinstance(tweet, str):
        tweet = " ".join(tweet)              # token list -> plain string
    tweet = re.sub(r'\W', ' ', tweet)        # special characters -> space
    tweet = re.sub(r'\s+[a-zA-Z]\s+', ' ', tweet)  # drop single characters
    tweet = re.sub(r'^[a-zA-Z]\s+', '', tweet)     # drop single char at start
    tweet = re.sub(r'\s+', ' ', tweet).strip()     # collapse multiple spaces
    features_array[i] = tweet.lower()              # ensure lowercase
# -
# **Task:** Import stopwords from nltk.
# **Task:** Import TfidfVectorizer from sklearn.
# **Task:** Instantiate TfidfVectorizer with the following parameters:
#
# - max_features = 2500
# - min_df = 7
# - max_df = 0.8
# - stop_words = stopwords.words('english')
#
#
#
# **Bonus:** How would you determine optimal parameters for TfidfVectorizer? Discuss with your peers and/or mentors. Write down your answer below.
# **Task:** Transform features with vectorizer.
# **Task:** Import train_test_split from sklearn and split the data.
# **Task:** Import any classifier of your choice from sklearn (e.g. Random Forest, LogReg, Naive Bayes).
# **Task:** Fit your classifier to data.
# **Task:** Predict X_test.
# **Task:** Import confusion matrix and accuracy_score.
# **Task:** Print confusion matrix.
# **Task:** Print accuracy_score.
| w8/w8d4/.ipynb_checkpoints/todo_NLP_sentiment_analysis_exercise-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Question 1:
# + active=""
# Write a function that counts how many concentric layers a rug has.
#
# Examples
#
# count_layers([
# "AAAA",
# "ABBA",
# "AAAA"
# ]) 2
#
# count_layers([
# "AAAAAAAAA",
# "ABBBBBBBA",
# "ABBAAABBA",
# "ABBBBBBBA",
# "AAAAAAAAA"
# ]) 3
#
# count_layers([
# "AAAAAAAAAAA",
# "AABBBBBBBAA",
# "AABCCCCCBAA",
# "AABCAAACBAA",
# "AABCADACBAA",
# "AABCAAACBAA",
# "AABCCCCCBAA",
# "AABBBBBBBAA",
# "AAAAAAAAAAA"
# ]) 5
# -
# Answer :
# +
def count_layers(lst):
    """Return how many concentric layers a rectangular rug has.

    A rug with R rows consists of R // 2 complete rings plus one
    innermost layer, hence R // 2 + 1.
    """
    return len(lst) // 2 + 1

RUGS = (
    ["AAAA",
     "ABBA",
     "AAAA"],
    ["AAAAAAAAA",
     "ABBBBBBBA",
     "ABBAAABBA",
     "ABBBBBBBA",
     "AAAAAAAAA"],
    ["AAAAAAAAAAA",
     "AABBBBBBBAA",
     "AABCCCCCBAA",
     "AABCAAACBAA",
     "AABCADACBAA",
     "AABCAAACBAA",
     "AABCCCCCBAA",
     "AABBBBBBBAA",
     "AAAAAAAAAAA"],
)
for rug in RUGS:
    print(count_layers(rug))
# -
# Question 2:
# + active=""
# There are many different styles of music and many albums exhibit multiple styles.
# Create a function that takes a list of musical styles from albums and returns how many styles are unique.
#
# Examples
#
# unique_styles([
# "Dub,Dancehall",
# "Industrial,Heavy Metal",
# "Techno,Dubstep",
# "Synth-pop,Euro-Disco",
# "Industrial,Techno,Minimal"
# ]) 9
#
# unique_styles([
# "Soul",
# "House,Folk",
# "Trance,Downtempo,Big Beat,House",
# "Deep House",
# "Soul"
# ]) 7
# -
# Answer :
# +
def unique_styles(lst):
    """Return the number of distinct musical styles across all albums.

    Each entry of lst is a comma-separated string of styles; joining
    every entry with commas and splitting again yields the full multiset,
    which a set then deduplicates.
    """
    all_styles = ",".join(lst).split(",")
    return len(set(all_styles))

for albums in (
    ["Dub,Dancehall",
     "Industrial,Heavy Metal",
     "Techno,Dubstep",
     "Synth-pop,Euro-Disco",
     "Industrial,Techno,Minimal"],
    ["Soul",
     "House,Folk",
     "Trance,Downtempo,Big Beat,House",
     "Deep House",
     "Soul"],
):
    print(unique_styles(albums))
# -
# Question 3:
# + active=""
# Create a function that finds a target number in a list of prime numbers.
# Implement a binary search algorithm in your function. The target number will be from 2 through 97.
# If the target is prime then return "yes" else return "no".
#
# Examples
#
# primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
# is_prime(primes, 3) "yes"
# is_prime(primes, 4) "no"
# is_prime(primes, 67) "yes"
# is_prime(primes, 36) "no"
# -
# Answer :
# +
def is_prime(list_, x):
    """Binary-search the sorted list list_ for x.

    Returns "yes" when x is present (i.e. x is one of the listed primes)
    and "no" otherwise.
    """
    lo, hi = 0, len(list_) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = list_[middle]
        if candidate == x:
            return "yes"
        if candidate < x:
            lo = middle + 1
        else:
            hi = middle - 1
    return "no"

primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
for target in (3, 4, 67, 36):
    print(is_prime(primes, target))
# -
# Question :4
# + active=""
# Create a function that takes in n, a, b and returns the number of positive values
# raised to the nth power that lie in the range [a, b], inclusive.
#
# Examples
#
# power_ranger(2, 49, 65) 2
# # 2 squares (n^2) lie between 49 and 65, 49 (7^2) and 64 (8^2)
# power_ranger(3, 1, 27) 3
# # 3 cubes (n^3) lie between 1 and 27, 1 (1^3), 8 (2^3) and 27 (3^3)
# power_ranger(10, 1, 5) 1
# # 1 value raised to the 10th power lies between 1 and 5, 1 (1^10)
# power_ranger(5, 31, 33) 1
# power_ranger(4, 250, 1300) 3
# -
# Answer :
# +
from math import sqrt

def power_ranger(n, a, b):
    """Count positive integers k such that a <= k**n <= b (inclusive).

    Fixed to use exact integer arithmetic: the original float test
    `i**(1/n) - int(i**(1/n)) == 0` misses perfect powers whenever the
    float root lands just above the integer — e.g. 27**(1/3) evaluates
    to 3.0000000000000004, so 27 was not counted as a cube.
    """
    count_ = 0
    k = 1
    # k**n grows monotonically, so stop as soon as it exceeds b.
    while k ** n <= b:
        if a <= k ** n:
            count_ += 1
        k += 1
    return count_

print(power_ranger(2, 49, 65))
print(power_ranger(3, 1, 27))
print(power_ranger(10, 1, 5))
print(power_ranger(5, 31, 33))
print(power_ranger(4, 250, 1300))
# -
# Question 5:
# + active=""
# Given a number, return the difference between the maximum and minimum numbers
# that can be formed when the digits are rearranged.
#
# Examples
#
# rearranged_difference(972882) 760833
# # 988722 - 227889 = 760833
# rearranged_difference(3320707) 7709823
# # 7733200 - 23377 = 7709823
# rearranged_difference(90010) 90981
# # 91000 - 19 = 90981
# -
# Answer :
# +
def rearranged_difference(num):
    """Return (largest arrangement of num's digits) - (smallest arrangement).

    Sorting the digit characters ascending gives the smallest number;
    the reversed order gives the largest.
    """
    digits = sorted(str(num))
    smallest = int("".join(digits))
    largest = int("".join(digits[::-1]))
    return largest - smallest

for value in (972882, 3320707, 90010):
    print(rearranged_difference(value))
# -
| Python Advance Programming Assignment/Assignment_07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Compute cointegration of all ticker pairs.
# The star-imports below pull in the project helpers and, presumably,
# pandas (pd), numpy (np), itertools.product, time and statsmodels'
# stattools as ts used later in this notebook — TODO confirm against
# gz_import.
from gz_import import *
from gz_mainlib import *
from gz_plotlib import *
from gz_dwnllib import *
from gz_rwlib import *
from gz_const import *
# Print the version of each project module for reproducibility.
mainver()
plotver()
rwver()
dwnlver()
constver()
# -
# +
# Commented-out alternatives for loading instrument metadata, kept for reference.
#df_instruments = pd.read_hdf('instruments.h5', 'instruments')
#df_instruments = pd.read_hdf('dat.h5', 'df_instruments')
#dict_yfinfo = hdf5_readjson('dat.h5', 'dict_yfinfo')
#df_instruments.to_hdf('dat.h5', 'df_instruments')
# Load yearly (dfy) and monthly (dfm) price frames and the previously
# computed correlation matrices from the local HDF5 store.
dfy = pd.read_hdf('dat.h5', 'dfy')
dfm = pd.read_hdf('dat.h5', 'dfm')
dfy_corr = pd.read_hdf('dat.h5', 'dfy_corr')
dfm_corr = pd.read_hdf('dat.h5', 'dfm_corr')
# -
# +
#hdf5_contain('dat.h5')
# -
# Pre-allocate square ticker-by-ticker result frames filled with 0.0;
# each off-diagonal cell will receive a cointegration-test p-value.
dfy_coint = pd.DataFrame(data=np.float64(0.0), index = dfy_corr.index, columns=dfy_corr.columns)
dfm_coint = pd.DataFrame(data=np.float64(0.0), index = dfm_corr.index, columns=dfm_corr.columns)
# +
#print(sorted(dfm.columns))
# +
# Monthly data: run the cointegration test for every ordered ticker pair.
# NOTE(review): len(dfm_coint.index) * len(dfm_coint.columns) would avoid
# materialising the full product just to count it.
len_coint = len(list(product(dfm_coint.index, dfm_coint.columns)))
print('Len coint:', len_coint)
for n,x in enumerate(product(dfm_coint.index, dfm_coint.columns)):
    if x[0] != x[1]:  # skip the diagonal; it stays at the 0.0 fill value
        # '\r' overwrites the same console line to act as a progress display.
        print('\r' + str(n) + ' ' + x[0] + ' ' + x[1], end='', flush=True)
        # ts.coint(...)[1] — presumably the p-value of statsmodels'
        # Engle-Granger cointegration test; TODO confirm ts binding.
        dfm_coint.loc[x[0],x[1]] = ts.coint(dfm[x[0]], dfm[x[1]])[1]
# -
dfm_coint.to_hdf('dat.h5', 'dfm_coint')
# +
# Yearly data: same pairwise loop, additionally timed with a monotonic clock.
time1 = time.monotonic()
len_coint = len(list(product(dfy_coint.index, dfy_coint.columns)))
print('Len coint:', len_coint)
for n,x in enumerate(product(dfy_coint.index, dfy_coint.columns)):
    if x[0] != x[1]:
        print('\r' + str(n) + ' ' + x[0] + ' ' + x[1], end='', flush=True)
        dfy_coint.loc[x[0],x[1]] = ts.coint(dfy[x[0]], dfy[x[1]])[1]
time2 = time.monotonic()
print('Calc coint time, sec: {0:.2f}'.format(time2-time1))
# -
dfy_coint.to_hdf('dat.h5', 'dfy_coint')
| pyandjnotebook/coint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# MNIST: 60k training / 10k test grayscale 28x28 digit images, labels 0-9.
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)
print(type(x_train), type(y_train))
#normalize 0-255 --> 0-1
x_train, x_test = x_train/255, x_test/255
# Show the first 12 training digits in a 3x4 grid as a sanity check.
plt.figure(figsize=(8,8))
for i in range(12):
    plt.subplot(3,4,i+1)
    plt.imshow(x_train[i], cmap="gray")
plt.show()
#model
# Flatten 28x28 -> 784, one hidden ReLU layer, and a final Dense(10)
# with no activation — it emits raw logits, which is why the loss below
# is built with from_logits=True.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28,28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10)
])
print(model.summary())
# +
# another way to build the Sequential model (layer-by-layer with .add):
#modelx = keras.models.Sequential()
#modelx.add(keras.layers.Flatten(input_shape=(28,28)))
#modelx.add(keras.layers.Dense(128, activation='relu'))
#modelx.add(keras.layers.Dense(10))
# -
# loss and optimizer
# from_logits=True because the model's last Dense(10) layer outputs raw
# logits (no softmax activation).
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Fix: Keras deprecated (and in TF >= 2.11 removed) the `lr` alias;
# the supported keyword is `learning_rate`.
optim = keras.optimizers.Adam(learning_rate=0.001)
metrics = ["accuracy"]
model.compile(loss=loss, optimizer=optim, metrics=metrics)
# training
batch_size = 64
epochs = 20
# verbose=2 prints one summary line per epoch.
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2)
# evaluate on the held-out test split
model.evaluate(x_test, y_test, batch_size=batch_size, verbose=2)
# +
# predictions
# 1. option: build new model with Softmax layer appended, so the output
#    is a probability distribution over the 10 classes.
probability_model = keras.models.Sequential([
    model,
    keras.layers.Softmax()
])
predictions = probability_model(x_test)
pred0 = predictions[0]
print(pred0)
# -
# use np.argmax to get label with highest probability
label0 = np.argmax(pred0)
print(label0)
# 2. option: original model + nn.softmax, call model(x)
predictions = model(x_test)
predictions = tf.nn.softmax(predictions)
pred0 = predictions[0]
print(pred0)
label0 = np.argmax(pred0)
print(label0)
# 3. option: original model + nn.softmax, call model.predict(x)
#    (predict runs in batches and returns a plain numpy array)
predictions = model.predict(x_test, batch_size=batch_size)
predictions = tf.nn.softmax(predictions)
pred0 = predictions[0]
print(pred0)
label0 = np.argmax(pred0)
print(label0)
# call argmax for multiple labels
# axis=1 takes the argmax per row, i.e. one label per sample.
pred05s = predictions[0:5]
print(pred05s.shape)
label05s = np.argmax(pred05s, axis=1)
print(label05s)
| learn/tf/tf2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ML
# language: python
# name: ml
# ---
# # Lemmatization
#
# Lemmatization is very similiar to stemming in that it reduces a set of inflected words down to a common word. The difference is that lemmatization reduces inflections down to their real root words, which is called a lemma. If we take the words *'amaze'*, *'amazing'*, *'amazingly'*, the lemma of all of these is *'amaze'*. Compared to stemming which would usually return *'amaz'*. Generally lemmatization is seen as more advanced than stemming.
words = ["amaze", "amazed", "amazing"]  # three inflections of one lemma
# We will use NLTK again for our lemmatization. We also need to ensure we have the *WordNet Database* downloaded which will act as the lookup for our lemmatizer to ensure that it has produced a real lemma.
# +
import nltk
nltk.download("wordnet")  # WordNet is the lookup database the lemmatizer uses
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# Without a POS tag the lemmatizer defaults to treating words as nouns,
# so these come back unchanged.
[lemmatizer.lemmatize(word) for word in words]
# -
# Clearly nothing has happened, and that is because lemmatization requires that we also provide the *parts-of-speech* (POS) tag, which is the category of a word based on syntax. For example noun, adjective, or verb. In our case we could place each word as a verb, which we can then implement like so:
# +
from nltk.corpus import wordnet
# Passing wordnet.VERB lets the lemmatizer reduce all three to "amaze".
[lemmatizer.lemmatize(word, wordnet.VERB) for word in words]
| course/preprocessing/03_lemmatization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Map colouring
# Using the GraphILP API to colour the map of all districts in Germany with as few colours as possible such that adjacent districts get different colours.
from download_helper import download_example
import geopandas as gpd
from shapely.geometry import LineString
import networkx as nx
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
from matplotlib import pyplot as plt
# ## Get the data: a map of districts in Germany
#
# We are using an open data source from the German [Bundesamt für Kartographie und Geodäsie](https://gdz.bkg.bund.de/index.php/default/open-data/verwaltungsgebiete-1-250-000-ebenen-stand-01-01-vg250-ebenen-01-01.html)
url = "https://daten.gdz.bkg.bund.de/produkte/vg/vg250_ebenen_0101/aktuell/vg250_01-01.gk3.shape.ebenen.zip"
# NOTE: placeholder path — set to a writable local directory before running.
target_directory = "/set/directory/for/data/here/"
download_example(url, target_directory, unzip=True)
# VG250_KRS.shp is the district-level (Kreis) shapefile inside the archive.
data = gpd.read_file(target_directory + "vg250_01-01.gk3.shape.ebenen/vg250_ebenen_0101/VG250_KRS.shp")
# Let's draw a map of the districts.
# Your job is to colour each district in such a way that
# * you use as few colours as possible,
# * adjacent district are coloured with different colours.
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
data.plot(ax=ax);
# ## Set up a graph
# We will cast this as a problem on graphs by creating a graph in which
# * the vertices correspond to the districts and
# * there is an edge between two vertices if the corresponding districts are adjacent
# to help visualise the graph, we compute the centroid of each district
centroids = data['geometry'].apply(lambda x: x.centroid)
# we use pairwise intersection of districts to find out whether they are adjacent
# (sjoin of a frame with itself pairs every district with each one it touches)
intersect = gpd.sjoin(data, data)
# as a result, we can extract the edges to be used in our graph
# (the x[0] != index_right filter drops each district's self-match)
links = [(row[0], row[1].index_right) for row in intersect.iterrows() if row[0] != row[1].index_right]
# let us create some geometry from the edges, so that we can plot them
lines = [LineString((centroids.loc[a], centroids.loc[b])) for (a, b) in links]
lines_df = gpd.GeoDataFrame(geometry=lines)
# now we can visualise the graph on top of our map
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
data.plot(ax=ax)
centroids.plot(color='pink', ax=ax)
lines_df.plot(color='red', ax=ax);
# create a graph from our data
mygraph = nx.Graph()
mygraph.add_edges_from(links)
# ## Set up and solve the problem using GraphILP API
from graphilp.imports import networkx as imp_nx
from graphilp.partitioning import min_vertex_coloring
from graphilp.partitioning.heuristics import vertex_coloring_greedy
from graphilp.sub_super import max_clique_cover as max_clique
G = imp_nx.read(mygraph)
# A greedy colouring provides a feasible warm start for the exact ILP,
# which can substantially shorten the solve.
col2node, node2col = vertex_coloring_greedy.get_heuristic(G)
model = min_vertex_coloring.create_model(G, warmstart=node2col)
model.optimize()
color_assignment, node_to_col = min_vertex_coloring.extract_solution(G, model)
# ## Visualise the solution
# Map each district (row index = graph node) to its assigned colour class.
data['colors'] = data.apply(lambda row: node_to_col[row.name], axis=1)
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
data.plot(column='colors', cmap='Set1', ax=ax);
# ## Analyse the solution
# check how many colours we need
set(node_to_col.values())
# Wait! Shouldn't four colours suffice for every planar map according to the famous <a href="https://en.wikipedia.org/wiki/Four_color_theorem">Four colour theorem</a>?
# perhaps our graph is not planar?
nx.algorithms.planarity.check_planarity(mygraph)
# Indeed, now let us try to find a region that is to blame for this!
# We will do so by invoking another function of our library to find a <a href="https://en.wikipedia.org/wiki/Clique_problem">maximum size clique</a>.
model = max_clique.create_model(G)
model.optimize()
clique = max_clique.extract_solution(G, model)
data.iloc[clique]
# here are all the existing links between the nodes in our clique
clique_links = [(a, b) for (a, b) in links if a in clique and b in clique]
# let us create some geometry from these edges, too, so that we can plot them
clique_lines = [LineString((centroids.loc[a], centroids.loc[b])) for (a, b) in clique_links]
clique_lines_df = gpd.GeoDataFrame(geometry=clique_lines)
# now we can visualise the clique
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
data.iloc[clique].plot(column='colors', cmap='Set1', ax=ax)
centroids.iloc[clique].plot(color='pink', ax=ax)
clique_lines_df.plot(color='red', ax=ax);
| graphilp/examples/MapColouring.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-nn_analysis]
# language: python
# name: conda-env-.conda-nn_analysis-py
# ---
# %load_ext autoreload
# %autoreload 2
# +
import torch
import numpy as np
import matplotlib.pyplot as plt
from nn_analysis import models as md
from nn_analysis import utils
from nn_analysis.constants import MODEL_CONFIGS_PATH
# Project-level model configuration registry (nn_analysis package).
model_configs = utils.load_config(MODEL_CONFIGS_PATH)
# -
# Prefer the first CUDA device, falling back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def exclude_bias_and_norm(p):
    """Return True for 1-D parameter tensors (biases and norm scale/shift)."""
    return len(p.shape) == 1
# # barlow P
# NOTE(review): epoch=49 and model_name appear unused below (the calls
# hard-code epoch=82) — confirm whether they are stale.
epoch = 49
model_name = 'barlow_P'
# Load the full checkpoint (backbone + projector) via the project helper.
model = md.get_custom_model(arch='equivariant_all_bn', extract_method='dpp', path='checkpoints/barlowtwins/equivariant_all_bn_v2/checkpoint.pth', device=device, state_dict_key='model')
state_dict = model.state_dict()
# Re-save in the {'state_dict': ...} layout expected by later loads.
torch.save({'state_dict': state_dict}, '/mnt/smb/locker/issa-locker/users/hc3190/data/models/checkpoints/barlowtwins/equivariant_all_bn_v2_projector/0082.pth.tar')
model = md.get_custom_model(arch='resnet50', epoch=82, path='checkpoints/barlowtwins/equivariant_all_bn_v2', device=device)
model_projector = md.get_custom_model(arch='equivariant_all_bn', epoch=82, path='checkpoints/barlowtwins/equivariant_all_bn_v2_projector', device=device)
# + jupyter={"outputs_hidden": true} tags=[]
print(model)
# -
# Sanity check: backbone weights must match between the two loads
# (fc.* is excluded because the projector model has no classifier head).
for name, param in model.named_parameters():
    if name not in ['fc.weight', 'fc.bias']:
        assert torch.allclose(param, model_projector.state_dict()[f'backbone.{name}'])
# # barlow control
# Same re-save + consistency check as above, for the baseline Barlow Twins run.
model = md.get_custom_model(arch='barlow_twins', extract_method='dpp', path='checkpoints/barlowtwins/original_v2/checkpoint.pth', device=device, state_dict_key='model')
state_dict = model.state_dict()
torch.save({'state_dict': state_dict}, '/mnt/smb/locker/issa-locker/users/hc3190/data/models/checkpoints/barlowtwins/original_v2_projector/0054.pth.tar')
model = md.get_custom_model(arch='resnet50', epoch=54, path='checkpoints/barlowtwins/original_v2', device=device)
model_projector = md.get_custom_model(arch='barlow_twins', epoch=54, path='checkpoints/barlowtwins/original_v2_projector', device=device)
# Backbone weights must agree between the two loads (classifier head excluded).
for name, param in model.named_parameters():
    if name not in ['fc.weight', 'fc.bias']:
        assert torch.allclose(param, model_projector.state_dict()[f'backbone.{name}'])
# +
# Diff the state-dict key sets of the two projector variants to see which
# parameters only exist in one of the architectures.
model_projector = md.get_custom_model(arch='equivariant_all_bn', epoch=82, path='checkpoints/barlowtwins/equivariant_all_bn_v2_projector', device=device)
model_projector_base = md.get_custom_model(arch='barlow_twins', epoch=54, path='checkpoints/barlowtwins/original_v2_projector', device=device)
for key in list(model_projector.state_dict().keys()):
    if key not in list(model_projector_base.state_dict().keys()):
        print(key)
print("HI")
for key in list(model_projector_base.state_dict().keys()):
    if key not in list(model_projector.state_dict().keys()):
        print(key)
# -
| notebooks/equivariant_all_bn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-qDljY3eTL5l"
# # Exercise 1: eigenvalue problem
#
# Take matrix $A = \begin{pmatrix}
# 0.3 & 0.6 & 0.1\\
# 0.5 & 0.2 & 0.3\\
# 0.4 & 0.1 & 0.5
# \end{pmatrix}$
# and vector $v = \begin{pmatrix}1/3 & 1/3 & 1/3\end{pmatrix}$. You can also initialize $v$ to be random positive numbers, just need to sum to 1. You could even think of it as a probability distribution. And do this loop:
#
# ```sudocode
# do 25 times:
# v' = vA
# v = v'
# ```
# + id="x1UEy3TxTJcg"
import numpy as np
# + id="JSm53MZKVBjT"
A = np.array([[0.3, 0.6, 0.1], [0.5, 0.2, 0.3], [0.4, 0.1, 0.5]])
v = np.array([1/3, 1/3, 1/3])
# + id="MolooW7BVeG9"
for _ in range(25):
v_dash = v.dot(A)
v = v_dash
# + [markdown] id="yY_9HmJJWE0s"
# By the 25th step, you've calculated original $v$ times $A^{25}$.
#
# On each iteration, plot the Euclidean distance between $|v - v'|$ as a function of iteration.
# + id="gjlgLF9_VqN_"
from scipy.spatial import distance
dist = np.zeros(25)
for i in range(25):
v_dash = v.dot(A)
dist[i] = distance.euclidean(v, v_dash)
v = v_dash
# + colab={"base_uri": "https://localhost:8080/", "height": 276} id="_0HI935rXG5C" outputId="53c7fae4-d965-4abb-d416-7d6571380b5c"
import matplotlib.pyplot as plt
plt.plot(dist);
# + [markdown] id="FvbsHr9uXVAG"
# What you should notice is that the distance will converge to zero.
#
# What does it means that $v' = vA$ but distance $|v' - v| = 0$? Well, this is just another way of stating the eigenvalue problem. And what we've done here is we found the eigenvector for $A$, for which the corresponding eigenvalue is 1.
# + [markdown] id="6aE8NNHFYyM9"
# # Exercise 2: central limit theorem
#
# Goal of this exercise is to demonstrate the central limit theorem. Recall that the central limit theorem is that if we set a random variable to be the sum of some other random variables from any distribution, then as the number of random variables in the sum approaches infinity, the distribution of the sum approaches the normal distribution. So the exercise is to demonstrate that numerically and graphically.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="wyD1EDnjfitS" outputId="e18fa503-ad0a-4e1d-a136-8ed0c6c4e36b"
# sample sizes to average over
num = [1, 10, 50, 100]
# each entry of sums will hold 1000 sample means for one sample size
sums = []
# For each sample size j in num, draw 1000 samples of j integers in
# [-40, 40) and record the mean of each sample. (Despite the variable
# names, these are sample *means*, not sums; the CLT applies either way.)
for j in num:
    # Generating seed so that we can get same result
    # every time the loop is run...
    np.random.seed(1)
    x = [np.mean(
        np.random.randint(
            -40, 40, j)) for _i in range(1000)]
    sums.append(x)
k = 0
# One histogram per sample size in a 2x2 grid: the distribution of the
# sample mean visibly approaches a normal shape as the sample size grows.
fig, ax = plt.subplots(2, 2, figsize =(8, 8))
for i in range(0, 2):
    for j in range(0, 2):
        # Histogram for each x stored in sums
        ax[i, j].hist(sums[k], 10, density = True)
        ax[i, j].set_title(label = num[k])
        k = k + 1
plt.show()
# + [markdown] id="W6h1gtqZgo_r"
# # Exercise 3: mean image
#
# Load in the MNIST dataset and plot the mean (average) image for each digit class 0-9. Remember that the mean is just the sum of all the images divided by the number of images.
# + id="1FVaZE8fg-Bp"
from keras.datasets import mnist
(train_X, train_y), (test_X, test_y) = mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="-vTS969phfdS" outputId="f8d0350f-3403-40cb-9a52-05252af73339"
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(train_X[i], cmap='gray')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="Urzp8FTmicAa" outputId="ab7020c0-4a73-49ec-c9c3-686e3d3e9394"
train_y[:9]
# + id="PJXPD33yiiiS"
X = np.concatenate([train_X, test_X])
Y = np.concatenate([train_y, test_y])
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="0slGX89_jMtF" outputId="7c9713a3-16e3-4a00-8244-5664e74458c9"
for digit in range(10):
index = Y == digit
images = X[index]
mean_img = images.mean(axis=0)
plt.subplot(3, 4, digit + 1)
plt.imshow(mean_img, cmap='gray')
plt.show;
# + [markdown] id="Afap9R9apeTQ"
# # Exercise 4: rotation
#
# Write a function that flips an image 90 degrees clockwise. Do it in two ways:
# - using loops: copy one pixel at a time
# - using numpy
#
# Then compare the performance of both methods.
#
#
# + id="0PlrTeK1qbCY"
def loop_rotate(img):
    """Rotate a square 2-D array 90 degrees clockwise, pixel by pixel.

    Returns a new float array (np.zeros default dtype); row r, column c
    of the input lands at row c, column (n - 1 - r) of the output.
    """
    n_rows, n_cols = img.shape
    assert n_rows == n_cols
    rotated = np.zeros(img.shape)
    for r, row in enumerate(img):
        for c, value in enumerate(row):
            rotated[c, n_cols - 1 - r] = value
    return rotated
# + id="J9kYE-8I68TL"
new_img = loop_rotate(X[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="RSwicKsT7TQL" outputId="dc774719-aa6b-4ff7-c80e-9e3a13e978bd"
plt.subplot(1, 2, 1)
plt.imshow(X[0], cmap='gray')
plt.subplot(1, 2, 2)
plt.imshow(new_img, cmap='gray')
# + id="BHbwShrU7tol"
def numpy_rotate(img):
    """Rotate *img* 90 degrees clockwise using NumPy's C implementation."""
    # np.rot90 turns counter-clockwise for positive k; k=-1 is one
    # clockwise quarter turn (equivalent to three counter-clockwise turns).
    return np.rot90(img, k=-1)
# + id="LBqa8yqM8hnu"
# Same rotation as above, but via np.rot90.
new_img = numpy_rotate(X[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="BA-a0HMA8jtl" outputId="1725dfb3-8669-4ff6-c787-779fd7314d8c"
# Original image on the left, NumPy-rotated result on the right.
plt.subplot(1, 2, 1)
plt.imshow(X[0], cmap='gray')
plt.subplot(1, 2, 2)
plt.imshow(new_img, cmap='gray')
# + id="Kz29Ahfj82uV"
from datetime import datetime  # kept: other cells may rely on this import
from time import perf_counter

def compare_rotation(images):
    """Time loop_rotate vs numpy_rotate over every image in *images*.

    Returns (loop_seconds, numpy_seconds). Uses time.perf_counter, the
    monotonic high-resolution clock intended for benchmarking
    (datetime.now can jump with wall-clock adjustments).
    """
    t0 = perf_counter()
    for img in images:
        loop_rotate(img)
    loop_seconds = perf_counter() - t0
    t0 = perf_counter()
    for img in images:
        numpy_rotate(img)
    numpy_seconds = perf_counter() - t0
    return loop_seconds, numpy_seconds
# + colab={"base_uri": "https://localhost:8080/"} id="Q2GJigAJ9Q50" outputId="0176e757-4586-4515-9755-00957f302452"
# Ratio > 1 means the pure-Python loop is that many times slower than NumPy.
loop_time, numpy_time = compare_rotation(X)
loop_time / numpy_time
# + [markdown] id="-hSmKHVK-QJH"
# # Exercise 5: symmetric matrix
#
# Write a function that tests whether or not a matrix is symmetric. Do it in two ways:
# - using loops and definition of symmetric matrix
# - using numpy
# + [markdown] id="yLCzYks_A9WR"
# ## Loop solution
# + id="O7mF5qbZ-_2D"
def is_symmetric(matrix):
    """Return True if *matrix* equals its transpose, using explicit loops.

    A non-square matrix can never be symmetric. Only the strict upper
    triangle is compared against the lower triangle, halving the work of
    the naive full scan (the diagonal is trivially equal to itself, and
    each off-diagonal pair needs checking only once).
    """
    rows, columns = matrix.shape
    if rows != columns:
        return False
    for row_index in range(rows):
        for column_index in range(row_index + 1, columns):
            if matrix[row_index][column_index] != matrix[column_index][row_index]:
                return False
    return True
# + colab={"base_uri": "https://localhost:8080/"} id="DBBGEHVD_psz" outputId="ba25c6ce-1af1-45a0-fbe5-b49b9d83d070"
# Identity matrix: symmetric.
matrix = np.array([[1, 0], [0, 1]])
is_symmetric(matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="0qc5EiQZAYRg" outputId="d2008f84-7335-47ea-def3-7f0c8ad5abda"
# Upper-triangular matrix: not symmetric.
matrix = np.array([[1, 1], [0, 1]])
is_symmetric(matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="xy9WRcZQAZ74" outputId="16c0ae4a-2a52-4fbd-d332-cbe9a5519697"
# Non-square matrix: never symmetric.
matrix = np.array([[1, 0], [0, 1], [1, 1]])
is_symmetric(matrix)
# + [markdown] id="fGrDOL7CA_qL"
# ## NumPy solution
# + colab={"base_uri": "https://localhost:8080/"} id="i_XgSB-0AcwJ" outputId="668b188b-0ae1-4a89-904f-f85cb87ab426"
# Same three checks, vectorized: compare the matrix with its transpose.
matrix = np.array([[1, 0], [0, 1]])
np.all(matrix.T == matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="5D0WU018AzwF" outputId="b97af202-2aba-4f45-91c0-25f255b51e58"
matrix = np.array([[1, 1], [0, 1]])
np.all(matrix.T == matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="dZ5CQVSrA18L" outputId="ba8e160c-e4df-4146-b3c2-5457e8539c91"
matrix = np.array([[1, 0], [0, 1], [1, 1]])
np.all(matrix.T == matrix)
# + [markdown] id="pYj7gUP4BPVL"
# # Exercise 6: XOR dataset
#
# Generate and plot XOR dataset.
# + id="wpukE54yBe8c"
# 1000 points sampled uniformly from the square [-1, 1]^2.
X = np.random.uniform(-1, 1, (1000, 2))
# + id="SMka2QrkB2d_"
# XOR labels: True when both coordinates share the same sign
# (both negative or both positive quadrants).
Y = np.logical_or(
    np.logical_and(X[:, 0] < 0, X[:, 1] < 0),
    np.logical_and(X[:, 0] > 0, X[:, 1] > 0)
)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="pQItV5EvCHM2" outputId="e187234a-d556-4a1a-a728-5cadd6a7e007"
plt.scatter(X[:, 0], X[:, 1], c=Y)
# + [markdown] id="eM-i0M18EHVy"
# # Exercise 7: Donught dataset
#
# Generate and plot the doughnut (or concentric circles with some additive noise) dataset.
# + id="MlOvUNYXGlPL"
def sample_circle(COUNT, R):
    """Rejection-sample COUNT points from a thin ring of radius ~R.

    Candidates are drawn uniformly from the square [-R, R]^2 and kept
    only when their squared distance from the origin lies strictly
    inside (R**2 - R, R**2 + R), producing a narrow annulus.
    """
    samples = np.zeros((COUNT, 2))
    accepted = 0
    while accepted < COUNT:
        candidate = np.random.uniform(-1 * R, R, 2)
        radius_sq = candidate[0] ** 2 + candidate[1] ** 2
        if R ** 2 - R < radius_sq < R ** 2 + R:
            samples[accepted] = candidate
            accepted += 1
    return samples
# + id="DCKi5A8UIHIS"
# Two concentric rings (radii ~10 and ~20), 200 points each.
X1 = sample_circle(200, 10)
X2 = sample_circle(200, 20)
X = np.concatenate((X1, X2), axis=0)
# Label the inner ring 1 and the outer ring 0.
Y = np.zeros(400)
Y[:200] = 1
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="Pf6zws9AJ5GS" outputId="7f27654c-927c-487c-b033-7ddc6339490e"
plt.scatter(X[:,0], X[:,1], c=Y)
# + [markdown] id="aNI6KgJ1SJ5N"
# # Exercise 8: Spiral dataset
#
# Generate and plot the spiral dataset. It has arms that grow outward and both the angle and radius change simultaneously.
# + id="ZDOZfW5TMXq1" outputId="6ff00a9e-6a2f-4384-8ea9-c299efa82ba3" colab={"base_uri": "https://localhost:8080/", "height": 282}
# Generate K spiral arms; radius and angle grow together along each arm,
# with small additive noise so the arms are not perfectly thin curves.
N = 100 # number of points per class
D = 2 # dimensionality
K = 6 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels
for j in range(K):
    ix = range(N*j,N*(j+1))
    r = np.linspace(0.0,1,N) # radius
    t = np.linspace(j,(j+1),N) # theta
    X[ix] = np.c_[r*np.sin(t) + np.random.random(N)*0.1, r*np.cos(t) + np.random.random(N)*0.1]
    # Alternate binary labels between adjacent arms.
    y[ix] = j % 2
# lets visualize the data:
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# | DeepLearningPrerequisites/Exercises.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/krsmith/DS-Sprint-01-Dealing-With-Data/blob/master/module2-loadingdata/LS_DS_112_Loading_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-c0vWATuQ_Dn" colab_type="text"
# # Lambda School Data Science - Loading Data
#
# Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.
#
# Data set sources:
#
# - https://archive.ics.uci.edu/ml/datasets.html
# - https://github.com/awesomedata/awesome-public-datasets
# - https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)
#
# Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).
# + [markdown] id="wxxBTeHUYs5a" colab_type="text"
# ## Lecture example - flag data
# + id="nc-iamjyRWwe" colab_type="code" outputId="14baead2-6be6-43fe-924b-c37c0440a97a" colab={"base_uri": "https://localhost:8080/", "height": 3315}
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
# !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# + id="UKfOq1tlUvbZ" colab_type="code" colab={}
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
# NOTE: flag.data has no header row; this first read wrongly treats the
# first data row as column names (demonstrated and fixed below).
flag_data = pd.read_csv(flag_data_url)
# + id="exKPtcJyUyCX" colab_type="code" outputId="c3daabed-fd05-46c0-cba7-73b6bcc25205" colab={"base_uri": "https://localhost:8080/", "height": 270}
# Step 3 - verify we've got *something*
flag_data.head()
# + id="rNmkv2g8VfAm" colab_type="code" outputId="b73520c3-0b3c-4409-bfb5-dce3d57329c3" colab={"base_uri": "https://localhost:8080/", "height": 544}
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
# + id="iqPEwx3aWBDR" colab_type="code" outputId="a1da8060-6c5e-4fc0-f9d9-d3eaed6e3c45" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Count lines in the raw file to compare with the DataFrame row count.
# !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# + id="5R1d1Ka2WHAY" colab_type="code" outputId="fcdda2a7-10ad-440a-c18c-f0bbeb159681" colab={"base_uri": "https://localhost:8080/", "height": 4712}
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# + id="o-thnccIWTvc" colab_type="code" outputId="27132df1-7147-4253-ef82-95a1b80d91e8" colab={"base_uri": "https://localhost:8080/", "height": 233}
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
# + id="iG9ZOkSMWZ6D" colab_type="code" outputId="296109d9-a4ab-467a-8fad-b0a7189b9f07" colab={"base_uri": "https://localhost:8080/", "height": 544}
# Now all 194 rows are data rows.
flag_data.count()
# + id="gMcxnWbkWla1" colab_type="code" outputId="7bce3c78-a50d-41ad-9cf4-5537805e8618" colab={"base_uri": "https://localhost:8080/", "height": 544}
flag_data.isna().sum()
# + [markdown] id="AihdUkaDT8We" colab_type="text"
# ### Yes, but what does it *mean*?
#
# This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).
#
# ```
# 1. name: Name of the country concerned
# 2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania
# 3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
# 4. area: in thousands of square km
# 5. population: in round millions
# 6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
# 7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
# 8. bars: Number of vertical bars in the flag
# 9. stripes: Number of horizontal stripes in the flag
# 10. colours: Number of different colours in the flag
# 11. red: 0 if red absent, 1 if red present in the flag
# 12. green: same for green
# 13. blue: same for blue
# 14. gold: same for gold (also yellow)
# 15. white: same for white
# 16. black: same for black
# 17. orange: same for orange (also brown)
# 18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
# 19. circles: Number of circles in the flag
# 20. crosses: Number of (upright) crosses
# 21. saltires: Number of diagonal crosses
# 22. quarters: Number of quartered sections
# 23. sunstars: Number of sun or star symbols
# 24. crescent: 1 if a crescent moon symbol present, else 0
# 25. triangle: 1 if any triangles present, 0 otherwise
# 26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
# 27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
# 28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
# 29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
# 30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)
# ```
#
# Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
# + [markdown] id="nPbUK_cLY15U" colab_type="text"
# ## Your assignment - pick a dataset and do something like the above
#
# This is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.
#
# If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).
#
# If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
# + id="NJdISe69ZT7E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 11900} outputId="23d38d9a-043f-4018-bd6a-187e14822226"
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
# UCI Breast Cancer Wisconsin (Original) dataset URL.
b_cancer = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
# !curl https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data
# + id="UnVs1bZL3txj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="90169827-b46a-46aa-b347-98640997f9a2"
import pandas as pd
# The file has no header row.
bc_data = pd.read_csv(b_cancer, header=None)
bc_data.head()
# + id="zGaIbz3l4-5b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="d0293c39-7ba6-4941-8c55-0c3428dcf14a"
bc_data.count()
# + id="UfFMgm8-5Em7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="db1d2c99-d893-452d-fbb4-3b534c6b4183"
# NOTE(review): this dataset reportedly encodes missing values as '?',
# which isna() does not detect — verify before trusting this check.
bc_data.isna().sum() # Data is very clean. Nothing is missing.
# + id="IgBa7_oH5Vhw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="5a756e1e-15dd-4822-a823-0606a78a592c"
# Codebook for the breast-cancer columns (bare string: displayed as the
# cell's output, otherwise a no-op expression statement).
'''
Attribute Information:
1. Sample code number id number
2. Clump Thickness 1 - 10
3. Uniformity of Cell Size 1 - 10
4. Uniformity of Cell Shape 1 - 10
5. Marginal Adhesion 1 - 10
6. Single Epithelial Cell Size 1 - 10
7. Bare Nuclei 1 - 10
8. Bland Chromatin 1 - 10
9. Normal Nucleoli 1 - 10
10. Mitoses 1 - 10
11. Class: 2 for benign, 4 for malignant
'''
# Reload with explicit column names matching the codebook above.
col_names = ['code_number', 'clump_thickness',
             'cell_size_uniformity', 'cell_shape_uniformity',
             'marginal_adhesion', 'single_ep',
             'bare_nuclei', 'bland_chromatin',
             'normal_nucleoli', 'mitoses',
             'class']
bc_data = pd.read_csv(b_cancer, header=None, names=col_names)
bc_data.head()
# + id="Jt95iy4p6mia" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="20d43ada-e17b-4d0e-dc78-782e4949499a"
bc_data.isna().sum().sum() # Verifying that no data is missing.
# + id="5DD2MELA7BIv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 360} outputId="800a2693-e626-4a80-c2fb-659988d9680c"
# UCI Adult (census income) dataset, also headerless.
income_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
income_data = pd.read_csv(income_url, header=None)
income_data.head()
# + id="mT_VQgHM86tB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="ce2cff1b-e250-4955-c0a2-3dc9d24ed4e2"
# Codebook for the Adult dataset columns (bare string, shown as output).
'''
Attribute Information:
age: continuous.
workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
fnlwgt: continuous.
education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
education-num: continuous.
marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
sex: Female, Male.
capital-gain: continuous.
capital-loss: continuous.
hours-per-week: continuous.
native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany,
Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras,
Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France,
Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua,
Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
'''
income_col_names = ['age', 'work_class', 'fnlwgt', 'education', 'education_num',
                    'marital_status', 'occupation', 'relationship', 'race', 'sex',
                    'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
                    'income']
income_data = pd.read_csv(income_url, header=None, names=income_col_names)
income_data.head()
# + id="yFJ8iBqH7u1T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="1dbe00a3-79e8-4b42-d21d-7df0f1ef7257"
# isna() finds nothing here: missing values are encoded as the string ' ?'.
income_data.isna().sum()
# + id="w9P2dS1A8D7f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="5e369ca5-33f0-4e80-99e5-ef61b44bfab8"
# Row 14 contains one of those ' ?' placeholders.
income_data.iloc[14]
# + id="ZkNQKeaFCjtc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9ca06193-3d56-431d-a50d-90758949000c"
# Approach 1: tell read_csv that ' ?' (with the leading space) means NaN.
income_data = pd.read_csv(income_url, header=None, names=income_col_names, na_values=[' ?'])
income_data.isna().sum().sum()
# + id="a9wmIcdRDJgR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 772} outputId="75114791-e407-4a8e-8aef-9c7187609d45"
income_data.head(15)
# + id="ipxqrlffDT4K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b7854e8b-a7e4-4053-a222-3895602f7502"
import numpy as np
# Approach 2: reload raw, then replace ' ?' with NaN after the fact.
income_data = pd.read_csv(income_url, header=None, names=income_col_names)
income_data.replace(' ?', np.nan, inplace=True)
income_data.isna().sum().sum()
# + id="NgzE_bI-EmGm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="a88aafb3-064d-4c47-a6de-e2fa5b8701db"
income_data.isna().sum()
# + id="FwQ7hw_nGA58" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="c4361379-7aa4-44a9-8ef2-0a9192b48468"
income_data.info()
# + id="M59aMAsOGMap" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9bb35810-f9c4-4ba2-c460-6900b577e40c"
income_data.shape
# + id="5K3UjGLDGSqw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c0f50c8e-d543-47fb-d852-ba97f704414d"
# Dropping rows with any NaN shrinks the dataset accordingly.
income_data_dropped = income_data.dropna()
income_data_dropped.shape
# + [markdown] id="MZCxTwKuReV9" colab_type="text"
# ## Stretch Goals - Other types and sources of data
#
# Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.
#
# If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.
#
# Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.
#
# How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.
#
# One last major source of data is APIs: https://github.com/toddmotto/public-apis
#
# API stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.
#
# *Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggset image, text, or (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
# + id="h_rzPBTILBjW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="27950e20-1e5a-4783-ead6-cb6bf528c493"
# Minimal public-API example: fetch a random dog image URL.
import requests
dog_api_response = requests.get('https://dog.ceo/api/breeds/image/random')
# 200 indicates success.
print(dog_api_response.status_code)
# + id="M9r9XLjEL0Il" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="a738e3f6-d3c1-4651-eba4-cd55ce7bc4ac"
# Raw JSON payload as bytes.
print(dog_api_response.content)
# | module2-loadingdata/LS_DS_112_Loading_Data.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MUTRAFF - Congestion Map Drawing
# Congestion tracing based on Mutraff Experiments over Google Maps
# References:
# * Mutraff
# * Jupyter gmaps: https://jupyter-gmaps.readthedocs.io/en/v0.3.3/gmaps.html
# * Blog examples: https://people.revoledu.com/kardi/tutorial/Python/Displaying+Locations+using+Heatmap.html
# ## Imports section
import pandas as pd
import numpy as np
import googlemaps
import gmaps
import yaml
# https://martin-thoma.com/configuration-files-in-python/
def load_config( file ):
    """Read a YAML configuration file and return its contents as a dict.

    Uses yaml.safe_load: calling yaml.load without an explicit Loader is
    deprecated in PyYAML and can execute arbitrary Python tags embedded
    in the file.
    """
    with open(file, 'r') as ymlfile:
        # The with-statement closes the file; the explicit close() in the
        # original was redundant.
        config = yaml.safe_load(ymlfile)
    # print(config)
    return config
# +
# Load the Google Maps API key from the local config file and initialise
# both the googlemaps client (geocoding) and gmaps (map rendering).
cfg = load_config( "config.yaml")
# Import API_KEY from YAML
gm = googlemaps.Client(key=cfg['google_maps']['API_KEY'])
gmaps.configure(api_key=cfg['google_maps']['API_KEY']) # Your Google API key
# -
# ## Definitions
# +
# Paths and global containers for the MuTraff experiment notebooks.
MUTRAFF_HOME="/Users/alvaro/Desktop/workspace/mutraff/uah-gist-mutraff-bastra"
MUTRAFF_EXP_PATH=MUTRAFF_HOME + "/experiments/tmp"
SIMUL_TIME_STATS=None
COLUMNS=[]
# Experiment-id -> directory name, and experiment-id -> display label.
exp={}
label={}
# Font sizes for plot annotations.
SMALL_FONT_SIZE=10
MID_FONT_SIZE=12
BIG_FONT_SIZE=14
# -
# ## Functions Library
# ### Geocode and GeocodeStreetLocationCity functions
# In case of receiving just edge names or address, the following functions will do the geocoding of street, location and the city.
# The algorithm goes as follow:
# * the code will try to search for street, location and city.
# * If it is not succesful, it will try to search for street and the city.
# * If it is still unsuccessful, it will search for location and city.
# * If all of these attempts are not successful, it will produce fail in geocoding.
# * If any of the attempt is successful, it will append the result into the list and return this list of latitude and longitude.
# +
def Geocode(query):
    """Geocode *query* through the Google Maps client.

    Returns a (latitude, longitude) tuple on success, or the sentinel 0
    when the service returns no results (callers test `result == 0`).
    """
    # do geocoding
    try:
        geocode_result = gm.geocode(query)[0]
        latitude = geocode_result['geometry']['location']['lat']
        longitude = geocode_result['geometry']['location']['lng']
        return latitude,longitude
    except IndexError:
        # gm.geocode returned an empty list: no match for the query.
        return 0
def GeocodeStreetLocationCity(data):
    """Geocode every row of *data*, returning parallel lat/lng lists.

    NOTE(review): relies on a global `maxRow` that is not defined in this
    chunk — confirm it is set before calling. The actual Geocode call is
    commented out and `result` is hard-coded to 0, so every row is
    currently reported as failed; this looks like debug leftover.
    """
    lat=[] # initialize latitude list
    lng=[] # initialize longitude list
    start = data.index[0] # start from the first data
    end = data.index[maxRow-1] # end at maximum number of row
    #end = 100
    for i in range(start,end+1,1): # iterate all rows in the data
        isSuccess=True # initial Boolean flag
        #query = data.Street[i] + ',' + data.Location[i]
        query = data.Street[i] + ',' + 'Manila' # data.Location[i]
        # query = data.Location[i]
        # result=Geocode(query)
        result = 0
        if result==0: # if not successful,
            print(i, 'is failed')
            isSuccess = False
        else:
            print(i, result)
        if isSuccess==True: # if geocoding is successful,
            # store the results
            lat.append(result[0]) # latitude
            lng.append(result[1]) # longitude
    return lat,lng
# -
# ### DrawHeatMap function
# Creates a heat map to represent congestion data
def drawHeatMap( center_address, v_lat_long, v_weigths, zoom, intensity, radius):
    """Build and return a gmaps heatmap figure centred on *center_address*.

    Parameters:
        center_address: address string geocoded to obtain the map centre.
        v_lat_long:     (lat, lng) pairs for the heatmap points.
        v_weigths:      per-point weights.
        zoom:           initial zoom level of the figure.
        intensity:      max_intensity of the heatmap layer.
        radius:         point_radius of the heatmap layer.
    """
    # do geocode for the whole mega city
    geocode_result = gm.geocode(center_address)[0] # change the name into your city of interest
    # get the center of the city
    center_lat=geocode_result['geometry']['location']['lat']
    center_lng=geocode_result['geometry']['location']['lng']
    # print('center=',center_lat,center_lng)
    # setting the data and parameters
    heatmap_layer = gmaps.heatmap_layer(v_lat_long, v_weigths, dissipating = True)
    # Fix: the original assigned a misspelled 'disspating' attribute, which
    # silently created a dead attribute instead of configuring the layer
    # (dissipating is already set to True in the constructor above).
    heatmap_layer.max_intensity = intensity
    heatmap_layer.point_radius = radius
    # draw the heatmap into a figure (the original's first gmaps.figure()
    # call was immediately overwritten, so it has been dropped)
    fig = gmaps.figure(center = [center_lat,center_lng], zoom_level=zoom)
    fig.add_layer(heatmap_layer)
    return fig
# ### load_data function
# Import csv data from experiments into valuable data
def load_data(experiment):
    """Load the edges catalog and edge statistics CSVs for *experiment*.

    Returns (edges_df, stats_df).

    NOTE(review): the computed edges path is immediately overridden by the
    hard-coded local file "alcalahenares.edges.csv" — probably debug
    leftover; confirm before running other experiments.
    """
    global MUTRAFF_EXP_PATH
    theFile="edges.csv"
    filename = "{}/{}/{}".format(MUTRAFF_EXP_PATH,experiment,theFile)
    filename="alcalahenares.edges.csv"
    print("Parsing edges catalog "+filename)
    df1 = pd.read_csv(filename)
    theFile="edge_stats.csv"
    filename = "{}/{}/{}".format(MUTRAFF_EXP_PATH,experiment,theFile)
    print("Parsing data traffic file "+filename)
    df2 = pd.read_csv(filename)
    return df1, df2
# ## Experiments
# +
# Catalog of experiment run directories, keyed by an arbitrary integer id.
exp[0]="alcalahenares_M_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit20_11_190508_003728"
exp[2200]="alcalahenares_M3h_nomaps_tele60_timeALL_fulltraffic_190510_235642"
exp[2201]="alcalahenares_M3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190511_163910"
exp[2300]="alcalahenares_L3h_nomaps_tele60_timeALL_fulltraffic_190515_130939"
exp[2301]="alcalahenares_L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190515_133434"
exp[2400]="alcalahenares_2L3h_nomaps_tele60_timeALL_fulltraffic_190515_150939"
exp[2401]="alcalahenares_2L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190515_161514"
exp[2410]="alcalahenares_2L3h_nomaps_tele60_timeALL_fulltraffic_190515_182031"
exp[2411]="alcalahenares_2L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit50_01_190515_191357"
exp[2412]="alcalahenares_2L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190515_200544"
exp[2420]="alcalahenares_2L3h_nomaps_tele60_timeALL_fulltraffic_190515_202233"
exp[2421]="alcalahenares_2L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit50_01_190515_205714"
exp[2422]="alcalahenares_2L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190515_212938"
exp[2430]="alcalahenares_3L3h_nomaps_tele60_timeALL_fulltraffic_190516_084853"
exp[2431]="alcalahenares_3L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit10_01_190521_091537"
exp[2432]="alcalahenares_3L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit20_01_190521_143019"
exp[2433]="alcalahenares_3L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit50_01_190516_091540"
exp[2434]="alcalahenares_3L3h_mutraff_tele60_uni5x8_timeALL_fulltraffic_logit100_01_190521_210550"
exp[2441]="alcalahenares_3L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit10_01_190521_213112"
exp[2442]="alcalahenares_3L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit20_01_190521_220448"
exp[2443]="alcalahenares_3L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit50_01_190516_114311"
exp[2444]="alcalahenares_3L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit100_01_190521_223020"
exp[2500]="alcalahenares_3L3h_nomaps_tele60_timeALL_fulltraffic_190604_073604"
exp[2501]="alcalahenares_3L3h_nomaps_tele60_timeALL_fulltraffic_incident_190604_082709"
exp[2502]="alcalahenares_3L3h_mutraff_tele60_uni5x16_timeALL_fulltraffic_logit10_01_190604_091731"
# + [markdown] id="W6h1gtqZgo_r"
# Human-readable labels for a subset of the experiments above.
label[2430]="Alcala 3L - No MuTraff maps"
label[2431]="Alcala 3L - 8 MuTraff maps - uniform(5) - 10% usage"
label[2432]="Alcala 3L - 8 MuTraff maps - uniform(5) - 20% usage"
label[2433]="Alcala 3L - 8 MuTraff maps - uniform(5) - 50% usage"
label[2434]="Alcala 3L - 8 MuTraff maps - uniform(5) - 100% usage"
label[2441]="Alcala 3L - 16 MuTraff maps - uniform(5) - 10% usage"
label[2442]="Alcala 3L - 16 MuTraff maps - uniform(5) - 20% usage"
label[2443]="Alcala 3L - 16 MuTraff maps - uniform(5) - 50% usage"
label[2444]="Alcala 3L - 16 MuTraff maps - uniform(5) - 100% usage"
label[2500]="Alcala 3L - no TWM"
label[2501]="Alcala 3L - no TWM - Incident"
label[2502]="Alcala 3L - no TWM 16 MuTraff maps - uniform(5) - 10% usage"
# -
# ### Incident management
# Override experiment 2500 with the incident-scenario run.
exp[2500]="alcalahenares_3L3h_nomaps_tele60_timeALL_fulltraffic_incident_190603_000953"
label[2500]="Alcala 3L - No MuTraff maps - Incident 30' main roundabout"
# ### FORESIGHT AND TWM
# +
# Foresight / TWM experiment runs.
exp[2599]="alcalahenares_3L3h_nomaps_timeALL_taz5-taz50_01_190605_225531"
exp[2600]="alcalahenares_3L3h_nomaps_timeALL_taz5-taz50_01_190607_163410"
exp[2601]="alcalahenares_3L3h_nomaps_timeALL_taz5-taz50_incident_01_190605_233312"
exp[2602]="alcalahenares_3L3h_nomaps_timeALL_taz5-taz50_foresight_01_190606_072936"
exp[2603]="alcalahenares_3L3h_nomaps_timeALL_taz5-taz50_foresight_incident_01_190606_082447"
label[2599]="No TWM - Alcala - Taz5 to Taz50 - No foresight - Exp1"
label[2600]="No TWM - Alcala - Taz5 to Taz50 - No foresight - Exp2"
label[2601]="No TWM - Alcala - Taz5 to Taz50 - No foresight - INCIDENT"
label[2602]="No TWM - Alcala - Taz5 to Taz50 - Fore2-3-4-10"
label[2603]="No TWM - Alcala - Taz5 to Taz50 - Fore2-3-4-10 - INCIDENT"
# -
# NOTE: consecutive reassignments — only the last one (2601) takes effect.
experiment=exp[2430]
experiment=exp[2500]
experiment=exp[2601]
exp[1170]="alcalahenares_XL_nomutraff_tele60_nomaps_fulltraffic_170206_085516"
label[1170]="alcalahenares(XL) No TWM - - "
# ## Pollution Measures
# Pollution-measure and emergency-management experiment runs.
exp[3000]="grid16EMISSIONS_noBastra_reference_fulltraffic_01_190817_222335"
label[3000]="GRID16 - FullTraffic - NoTWM"
exp[1183]="alcalahenares_3L3h_TopEdges_timeALL_fulltraffic_01_190929_231834"
label[1183]="xxx"
experiment=exp[1183]
# + [markdown] id="W6h1gtqZgo_r"
exp[3001]="alcalahenares_Emergency_noTWM_L_T60_01_191123_103505"
label[3001]="noTWM/T60"
exp[3002]="alcalahenares_Emergency_TWM50_L_T60_01_191123_103510"
label[3002]="TWM50/T60"
exp[3003]="alcalahenares_Emergency_TWM100_L_T60_penalties_01_191123_185635"
# NOTE(review): this reassigns label[3001]; label[3003] is never set —
# probably a copy-paste slip (intended key 3003?). Confirm before fixing.
label[3001]="noTWM/T60"
# The reassignment below supersedes experiment=exp[1183] above.
experiment=exp[3001]
# ## Part 1 : Load traffic data from measures or experiments
# Load the selected experiment's edges catalog and per-edge statistics.
edges, stats = load_data( experiment )
edges.head()
# * Latitud: 40.4994044
#
# * Longitud: -3.3401732
# Last simulated timestamp and available stat columns.
print("Max time={}".format(stats['time'].max()))
print(stats.columns)
stats.head()
# ### drawTrafficInstantShot
# Prints a heatmap with an spatial and time exposure
def drawInstantShot( city, dataset, timestamp, variable_name ):
    """Render a heatmap of *variable_name* for one simulation instant.

    Aggregates all rows of *dataset* at time *timestamp* per edge and
    returns the gmaps figure. Fix: the original discarded drawHeatMap's
    return value, so the produced figure could never be displayed.
    """
    time_shot = dataset[dataset['time']==timestamp].groupby(['time','edge_id']).sum() # .unstack()['traf_halted_veh_num']
    v_lat_long = time_shot[['center_lat','center_long']]
    v_weight = time_shot[variable_name]
    print( "Lat/Long size={}".format( len(v_lat_long) ))
    print( "Weight size={}".format( len(v_weight) ))
    # set up parameters
    zoom=13
    max_intensity=float(np.max(v_weight))
    radius=10
    # call the function to draw the heatmap and return the figure
    return drawHeatMap(city, v_lat_long, v_weight, zoom, max_intensity, radius)
# Merge information together: join edge geometry with per-edge stats.
dataset = pd.merge(edges, stats, left_on='edge_id', right_on='edge_id')
# +
#time=2520
#drawInstantShot( '<NAME>', dataset, time, 'traf_halted_veh_num' )
# -
# + [markdown] id="W6h1gtqZgo_r"
# NOTE: consecutive reassignments — the effective timestamp is 2520.
timestamp=3000
timestamp=2520
timestamp=2520
stats[stats['time']<=timestamp].head()
# -
# Accumulate stats up to the timestamp, summed per (time, edge).
time_stats_shot = stats[stats['time']<=timestamp].groupby(['time','edge_id']).sum() # .unstack()['traf_halted_veh_num']
#time_stats_shot
dataset = pd.merge(edges, time_stats_shot, left_on='edge_id', right_on='edge_id')
print(dataset.columns)
dataset.head()
# +
# stats['time']
# -
# ## Part 3 : Draw the heatmap
# Metric to visualize; the second assignment supersedes the first.
OBJECTIVE='traf_halted_veh_num'
OBJECTIVE='traf_total_veh_num'
# df=pd.read_csv('locations.csv', low_memory=False, index_col = 'key') # read geocoded location from file
v_lat_long = dataset[['center_lat','center_long']]
v_weight = dataset[OBJECTIVE]
# ### Drawing the map
# To call the function drawHeatMap, set up the parameters and call the function to draw the heatmap. You can experiment with different parameter values. The result map can also be saved as PNG file.
# +
# set up parameters
zoom=13
max_intensity=float(np.max(v_weight))
radius=10
# call the function to draw the heatmap; the returned figure is the
# cell's last expression, so the notebook displays it.
drawHeatMap('<NAME>', v_lat_long, v_weight, zoom, max_intensity, radius)
# -
# # Retocado del mapa para los experimentos
# Dump the merged dataset both as semicolon-separated CSV and as Excel,
# then read the Excel file back to verify the round trip.
filename="{}_out.csv".format(experiment)
dataset.to_csv(filename, sep=';')
print("dumped file {}".format(filename))
filename="{}_out.xls".format(experiment)
dataset.to_excel(filename)
print("dumped file {}".format(filename))
data2 = pd.read_excel (filename, sheet_name='Sheet1')
print(data2.columns)
data2.head()
# +
# Re-draw the heatmap from the re-imported Excel data.
v_lat_long = data2[['center_lat','center_long']]
# Fix: select the column named by the OBJECTIVE variable; the literal
# string 'OBJECTIVE' is not a column of the exported dataset and raised
# a KeyError.
v_weight = data2[OBJECTIVE]
# set up parameters
zoom=13
max_intensity=float(np.max(v_weight))
radius=10
# call the function to draw the heatmap
drawHeatMap('<NAME>', v_lat_long, v_weight, zoom, max_intensity, radius)
# -
# | notebooks/google_maps_traffic_heatmap/mutraff_draw_congestion.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import seaborn as sn
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
# %matplotlib inline
# load data
# Load the customer-churn dataset; expects 9_Exercise.csv next to the notebook.
df = pd.read_csv("9_Exercise.csv")
# preprocessing
# Drop identifier columns that carry no predictive signal.
df.drop("CustomerId", axis="columns", inplace = True)
df.drop("Surname", axis="columns", inplace = True)
df.drop("RowNumber", axis="columns", inplace = True)
# One-hot encode the categorical features.
df = pd.get_dummies(data=df, columns=['Geography','Gender'])
cols_to_scale = ['Balance','CreditScore','Age', "NumOfProducts", "Tenure", "EstimatedSalary"]
# Scale the numeric features to [0, 1] so the network trains evenly.
scaler = MinMaxScaler()
df[cols_to_scale] = scaler.fit_transform(df[cols_to_scale])
df.sample(10)
# +
# create train and test sets (80/20 split, fixed seed for reproducibility)
X = df.drop("Exited", axis="columns")
y = df["Exited"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)
X_train.shape
# -
X_train.head()
# +
# build model
# NOTE(review): input_shape=(13,) assumes exactly 13 feature columns after
# one-hot encoding -- confirm against X_train.shape printed above.
model = keras.Sequential([
    keras.layers.Dense(13, input_shape=(13, ), activation="relu"), # Input Layer / hidden layer
    keras.layers.Dense(1, activation="sigmoid") # Output layer
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# -
# train the model
model.fit(X_train, y_train, epochs=10)
# evaluate model
model.evaluate(X_test, y_test)
# +
# predict test sets
y_pred = model.predict(X_test)
# Threshold the sigmoid outputs at 0.5 to obtain hard 0/1 labels.
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
# print prediction vs actual side by side
np.c_[y_pred[:20], y_test[:20]]
# -
# create classification report
print(classification_report(y_test, y_pred))
# +
# plot it
cm = tf.math.confusion_matrix(labels=y_test, predictions=y_pred)
plt.figure(figsize=(10, 7))
sn.heatmap(cm, annot=True, fmt="d")
plt.xlabel("Predicted")
plt.ylabel("Actual")
| Python Tutorial Tensorflow/9_Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pymc3 as pm
sns.set_context('poster')
sns.set_palette('colorblind')
random_seed = 2021
# -
# ### First, let's simulate the data we're going to fit!
# +
# Ground-truth slope and intercept used to generate the synthetic data.
m_true = 5
c_true = 10
x = np.linspace(0, 10, 100)
y = m_true * x + c_true
# Observational noise level (standard deviation of the added Gaussian noise).
yerr = 6.
np.random.seed(random_seed)
# Perturb the exact line so the fit has realistic scatter to recover from.
y += np.random.randn(len(y))*yerr
# -
fig, ax = plt.subplots(figsize=(10, 6))
ax.scatter(x, y, zorder=1, s=10, label='Observations')
ax.errorbar(x, y, yerr=yerr, alpha=.2, c='k', zorder=0, fmt='|')
ax.plot(x, m_true * x + c_true, ls='--', c='r', lw=3, label='Truth')
ax.legend()
ax.set_xlabel('x')
ax.set_ylabel('y')
# Now, let's set up a PyMC3 script to model this line! We want to measure the parameters $m$ and $c$ that determine the slope and intercept of the line.
# +
model = pm.Model()
with model:
    # Define the parameters and give them priors
    m = pm.Uniform('m', lower = 1, upper = 10, testval = 5.)
    c = pm.Normal('c', mu = 10, sigma = 10, testval = 5.)
    # Gaussian likelihood: the observations y are modelled as m*x + c with
    # known noise level yerr.
    likelihood = pm.Normal('ll', mu = m * x + c, sigma = yerr, observed = y)
# -
# What we've set up here is a simple Bayesian model with two priors and a likelihood function. The priors are set up that $m$ has a uniform probability to be between 1 and 10. The intercept $c$ has a prior that is a normal distribution centered on 10 with a spread of 10 (which I'm guessing based on how the data look). The `testval`s are first guesses you can make based on the data, which improve the efficiency of the sampling.
#
# The `likelihood` line determines how the parameters (which have been defined by your priors) are evaluated against the data! In this case we use a normal distribution, with a mean that is set as your model, and standard deviation equal to the uncertainty on your observations. The `observed = y` argument states that this line will evaluate your model against the data.
# We now sample the model as follows:
with model:
    # Draw posterior samples (default NUTS sampler), seeded for reproducibility.
    trace = pm.sample(random_seed = random_seed)
# And our model has sampled! PyMC3 has some built in methods you can use to evaluate your fit.
print(pm.summary(trace))
# This summary shows that the `r_hat` parameter is equal or close to 1, which indicates a good fit.
pm.traceplot(trace);
# And this `traceplot` shows that the sampled posterior distributions look like nice normal distributions encompassing the true answer. Good!
#
#
#
# Finally, we can look at the results using a `corner` plot, which is a very popular tool for studying the output of MCMC analyses. Essentially, it plots the samples drawn from the model, and so shows the correlations between the different parameters effecitvely.
# +
import corner

# Stack the posterior draws into shape (2, n_samples): row 0 = m, row 1 = c.
chain = np.array([trace['m'], trace['c']])
corner.corner(chain.T, labels=[r'$m$',r'$c$'], show_titles=True,
              quantiles=[0.16, 0.5, 0.84], truths=[m_true, c_true])
plt.show()
# -
# You can see that the normal $68\%$ confidence interval (indicated by the dashed lines) contains the "truth" values--- our model has done a good job!
# Finally, we can plot the draws of our model over the data to see how the data have informed our model.
# FIX: removed a stray `quantiles[0]` expression that stood alone here; it
# referenced `quantiles` before the definition below and raised a NameError.
# +
fig, ax = plt.subplots(figsize=(10, 6))
ax.scatter(x, y, zorder=1, s=10, label='Observations')
ax.errorbar(x, y, yerr=yerr, alpha=.2, c='k', zorder=0, fmt='|')
linrange = np.linspace(0, 10, 100)
# Vandermonde matrix turns each posterior sample (m, c) into the line m*x + c.
A = np.vander(linrange, 2)
lines = np.dot(chain.T, A.T)
# 16th/84th percentiles of the sampled lines give the 1-sigma fit band.
quantiles = np.percentile(lines, [16, 84], axis=0)
plt.fill_between(linrange, quantiles[0], quantiles[1], color="#8d44ad", alpha=0.5, zorder=10, label=r'$1\sigma$ fit')
ax.plot(linrange, m_true*linrange + c_true, c='k', zorder=1, alpha=1., ls='--', lw=5, label=r'$y = mx + b$')
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.legend(loc='upper left')
sns.despine()
fig.tight_layout()
plt.show()
# -
| 4_Bayesian-Tools/pymc3_example.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// <h1 style="text-align: center; font-size: 40px">Generics and Interfaces</h1>
// ## Loading Libraries
//
// Let's import all the necessary packages first!
import java.util.*;
import java.lang.*;
// ## Objectives
//
// The objectives of this worksheet are as follows:
// * Introduce the basics of interfaces and how they're utilized
// * Show the basics of creating an implementing interfaces
// * Show the basics of making generic classes and generic interfaces in Java
//
// #### Using Jupyter
// A few things to remind you with regard to using this Jupyter environment:
// 1. If the platform crashes don't worry. All of this is in the browser so just refresh and all of your changes up to your last save should still be there. For this reason we encourage you to **save often**.
// 2. Be sure to run cells from top to bottom.
// 3. If you're getting strange errors to go: Kernel->Restart & Clear Output. From there, run cells from top to bottom.
//
// Additionally keep an eye out for the badges in each section they give an indication for which sections are inclass activities .
// ## Interfaces
// Before we dive in it is worth understanding the core objective of interfaces. Recall several worksheets ago when I stated the difference between:
//
// * `List<E> lst = new ArrayList<>();`
// * `ArrayList<E> lst = new ArrayList<>();`
//
// Recall that we prefer the former unless we specifically require some functionality or implementation details of `ArrayList`. This is because the `List` interface guarantees that all implementations of `List` are guaranteed to have some core set of methods implemented. We will be covering in this section why that is, how interfaces are created, how classes implement them, and how this impacts the code we write. We will be doing this with the intention of providing a deeper understanding of how ADTs are implemented in Java along with a deeper understanding of why we program to interfaces rather than implementations.
// ### Defining an Interface
//
// Defining an interface is syntactically very similar to defining a class. However, there are three key differences:
// 1. We use the `interface` keyword rather than the `class` keyword.
// 2. There are no attributes
// 3. We provide method headers but **not the implementation of the method**.
//
// For example, the `List` interfaces we've been using in previous classes might look something like this:
// ```java
// interface List{
// public boolean add(Object value);
// public Object get(int index);
// public void remove(int index);
// }
// ```
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
//
// Your task is to create an interface for a `Shape`. This interface should have the following `public` methods:
//
// * getArea() which returns a double
// * getPerimeter() which also returns a double
/* Define your shape interface here */
// ### Implementing Interfaces
//
// Now that our `Shape` interface has been defined, let's create two classes that implement it. Here, we define a class and then use the `implements` keyword followed by the interface we want to implement. For the `ArrayList` implementation of the `List` interface this process might look something like this:
//
// ```java
// class ArrayList implements List{
// public int add(Object value){
// /* rest of definition here */
// }
//
// public Object get(int index){
// /* rest of definition here */
// }
//
// public void remove(int index){
// /* rest of definition here */
// }
//
// }
// ```
//
// Your task will be to create two implementations of the `Shape` interface you created above.
// ##### The Hexagon Class
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
//
// The `Hexagon` class should be implemented to have:
// * Attributes:
// * sideLength (int)
// * Implementations for the following methods from the `Shape` interface:
// * getArea() where the area of a hexagon is defined by (3*sqrt(3))/2 * (sideLength^2)
// * getPerimeter()
/* Implement the interface */
/* Test your code here */
// Program to the interface: the static type is Shape, the runtime type Hexagon.
// NOTE(review): the worksheet text says sideLength is an int, but a double
// (3.43) is passed here -- confirm the intended attribute type.
Shape hex1 = new Hexagon(3.43);
System.out.println(hex1.getArea());
System.out.println(hex1.getPerimeter());
// ###### The Equilateral Triangle Class
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
//
// The `EquilateralTriangle` class should be implemented to have:
//
// * Attributes:
// * sideLength (int)
// * Implementations for the following methods from the `Shape` interface:
// * getArea() where the area of an equilateral triangle is defined by sqrt(3)/4 * (sideLength^2)
// * getPerimeter()
/* Implement the interface here */
/* Test your code here */
// FIX: the variable held an EquilateralTriangle but was named hex1 (copy-paste
// from the Hexagon cell); rename it so the name matches what it references.
Shape tri1 = new EquilateralTriangle(3.43);
System.out.println(tri1.getArea());
System.out.println(tri1.getPerimeter());
// ###### List of Shapes
// Build a heterogeneous list of Shape implementations; the common interface
// lets triangles and hexagons share one list and one iteration loop.
List<Shape> shapeList = new ArrayList<>();
shapeList.add(new EquilateralTriangle(1.4));
shapeList.add(new Hexagon(0.25));
shapeList.add(new Hexagon(7.0));
shapeList.add(new EquilateralTriangle(7.25));
shapeList.add(new Hexagon(100.5));
shapeList.add(new EquilateralTriangle(75.456));
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
// Now, use the list of shapes we made to compute the total area of all the shapes in the list.
// +
// Sum the areas of all shapes in the list, relying only on the Shape interface.
// FIX: the body was empty, which does not compile (a double must be returned).
public double sumAllShapeAreas(List<Shape> shapes){
    double total = 0.0;
    for (Shape shape : shapes) {
        total += shape.getArea();
    }
    return total;
}
// -
/* Tests here */
// Expected: the sum of the six shapes' areas from shapeList.
double totalArea = sumAllShapeAreas(shapeList);
System.out.println(totalArea);
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
// Next, compute the total perimeter of all shapes in the list.
// +
// Sum the perimeters of all shapes in the list via the Shape interface.
// FIX: the body was empty, which does not compile (a double must be returned).
public double sumAllShapePerims(List<Shape> shapes){
    double total = 0.0;
    for (Shape shape : shapes) {
        total += shape.getPerimeter();
    }
    return total;
}
// -
/* Tests here */
// Expected: the sum of the six shapes' perimeters from shapeList.
double totalPerim = sumAllShapePerims(shapeList);
System.out.println(totalPerim);
// ## Generics
//
//
// ### Generic Interfaces
//
// Now, you might recall that the actual syntax for declaring an instance of some implementation of the `List` interface has `<>` in it. In between these characters we put the object type we want this list to contain. For instance, if we wanted a list of integers we would declare it as `List<Integer> intList = ...` or if we wanted a list of strings we would use `List<String> strList = ...`. This allowance of a collection of arbitrary objects is powered by the usage of generic data types. So, in reality, the Java list interface looks more like this.
//
// ```java
// interface List<E>{
// public boolean add(E e);
// public E get(int index);
// public void remove(int index);
// }
// ```
// The `E` is what is known as a generic type. Since the thing we are getting from the list or adding to the list is an element we use the character `E` to stand for "element". The following are the generic type conventions for the character used to represent the generic type being used:
// * T - Type
// * E - Element
// * K - Key
// * V - Value
// * N - Number
//
// The `E` is just a placeholder that allows us to declare the list with any kind of element. This allows us to have collections of elements without having to create a separate `List` or `add` method for each kind of data we want in our collections.
//
// Another example of this that you used in a prior assignment and will use again in future ones is the `Comparable` interface:
// ```java
// public interface Comparable<T>{
// public int compareTo(T o);
// }
// ```
// In this interface we use the `T` since we use `compareTo` to compare types.
//
// *Note:* I've simplified quite a bit just to illustrate the core concepts; however, if you're interest the full source code for the actual List interface can be found [here](http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/tip/src/share/classes/java/util/List.java);
//
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
// Create a generic interface of your choosing. It doesn't need to be anything fancy, just something that we can easily implement in the next phase.
/* Your code here */
// ### Generic Classes
//
// For this class it is good to be aware of how generic interfaces work however, outside of the second mini-assignment we will be primarily focusing on generic classes for the remainder of the semester. These generic classes will allow us to make data structures that can be used to store collections of arbitrary types. The basic template for creating a generic class is as follows:
// ```java
// class ClassName<T>{
// //...
// }
// ```
// Returning to the lists we have relied on in Java, the `ArrayList<E>` class is a generic class that implements the generic interface `List<E>`.
// ```java
// class ArrayList<E> implements List<E>{
// public boolean add(E e){
// /* Code here */
// }
// public E get(int index){
// /* Code here */
// }
// public void remove(int index){
// /* Code here */
// }
// }
// ```
//
// <img alt="Activity - In-Class" src="https://img.shields.io/badge/Activity-In--Class-E84A27" align="left" style="margin-right: 5px"/>
// <br>
// <br>
// For this actvity implement the interface you created and test it by creating an instance and using some of the functions you defined.
/* Generic class here */
/* Test here */
| worksheet-2-generics-and-interfaces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## This notebook explains how to use albumentations for segmentation
# We will use images and data from the [TGS Salt Identification Challenge](https://www.kaggle.com/c/tgs-salt-identification-challenge).
# +
# %matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
from albumentations import (
PadIfNeeded,
HorizontalFlip,
VerticalFlip,
CenterCrop,
Crop,
Compose,
Transpose,
RandomRotate90,
ElasticTransform,
GridDistortion,
OpticalDistortion,
RandomSizedCrop,
OneOf,
CLAHE,
RandomBrightnessContrast,
RandomGamma
)
# +
def visualize(image, mask, original_image=None, original_mask=None):
    """Plot an image/mask pair; when originals are given, show a 2x2
    before/after grid with titled panels."""
    title_size = 18

    if original_image is None and original_mask is None:
        # Only the transformed pair: stack image over mask.
        fig, axes = plt.subplots(2, 1, figsize=(8, 8))
        axes[0].imshow(image)
        axes[1].imshow(mask)
    else:
        # Originals on the left column, transformed versions on the right.
        fig, axes = plt.subplots(2, 2, figsize=(8, 8))
        panels = [
            (axes[0, 0], original_image, 'Original image'),
            (axes[1, 0], original_mask, 'Original mask'),
            (axes[0, 1], image, 'Transformed image'),
            (axes[1, 1], mask, 'Transformed mask'),
        ]
        for axis, picture, caption in panels:
            axis.imshow(picture)
            axis.set_title(caption, fontsize=title_size)
# -
# Load one sample: the RGB(-as-BGR) image and its single-channel (flag 0) mask.
image = cv2.imread('images/kaggle_salt/0fea4b5049_image.png')
mask = cv2.imread('images/kaggle_salt/0fea4b5049.png', 0)
# ## Original image <a class="anchor" id="original-image"></a>
print(image.shape, mask.shape)
# Remember the source size so later crops can restore it exactly.
original_height, original_width = image.shape[:2]
visualize(image, mask)
# # Padding
# UNet-type architectures require the input image size to be divisible by $2^N$, where $N$ is the number of maxpooling layers. In the vanilla UNet $N=5$ $\Longrightarrow$ we need to pad input images to the closest number divisible by $2^5 = 32$, which is 128. This operation may be performed using the [**PadIfNeeded**](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.PadIfNeeded) transformation, which pads both image and mask on all four sides. The padding type (zero, constant, reflection) may be specified. Default padding is reflection padding.
# +
# Reflection-pad image and mask up to 128x128 (p=1 applies it always).
aug = PadIfNeeded(p=1, min_height=128, min_width=128)
augmented = aug(image=image, mask=mask)
image_padded = augmented['image']
mask_padded = augmented['mask']
print(image_padded.shape, mask_padded.shape)
visualize(image_padded, mask_padded, original_image=image, original_mask=mask)
# -
# # CenterCrop and Crop
# To get to the original image and mask we may use [**CenterCrop**](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.CenterCrop) or [**Crop**](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.Crop) transformations.
# +
# Center-cropping the padded pair back to the source size must reproduce
# the originals exactly -- the asserts verify the round trip is lossless.
aug = CenterCrop(p=1, height=original_height, width=original_width)
augmented = aug(image=image_padded, mask=mask_padded)
image_center_cropped = augmented['image']
mask_center_cropped = augmented['mask']
print(image_center_cropped.shape, mask_center_cropped.shape)
assert (image - image_center_cropped).sum() == 0
assert (mask - mask_center_cropped).sum() == 0
visualize(image_padded, mask_padded, original_image=image_center_cropped, original_mask=mask_center_cropped)
# +
# Same round trip with an explicit crop window computed from the pad offsets.
x_min = (128 - original_width) // 2
y_min = (128 - original_height) // 2
x_max = x_min + original_width
y_max = y_min + original_height
aug = Crop(p=1, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max)
augmented = aug(image=image_padded, mask=mask_padded)
image_cropped = augmented['image']
mask_cropped = augmented['mask']
print(image_cropped.shape, mask_cropped.shape)
assert (image - image_cropped).sum() == 0
assert (mask - mask_cropped).sum() == 0
visualize(image_cropped, mask_cropped, original_image=image_padded, original_mask=mask_padded)
# -
# # Non destructive transformations. [Dihedral group D4](https://en.wikipedia.org/wiki/Dihedral_group)
# For images for which there is no clear notion of top like this one, satellite and aerial imagery or medical imagery it is typically a good idea to add transformations that do not add or lose information.
#
# There are eight distinct ways to represent the same square on the plane.
#
# 
#
# Combinations of the transformations [HorizontalFlip](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.HorizontalFlip), [VerticalFlip](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.VerticalFlip), [Transpose](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.Transpose), [RandomRotate90](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomRotate90) will be able to get the original image to all eight states.
# ## HorizontalFlip
# +
# Each D4 transform below is applied with p=1 so the effect is deterministic
# for visualisation; image and mask receive the identical transform.
aug = HorizontalFlip(p=1)
augmented = aug(image=image, mask=mask)
image_h_flipped = augmented['image']
mask_h_flipped = augmented['mask']
visualize(image_h_flipped, mask_h_flipped, original_image=image, original_mask=mask)
# -
# ## VerticalFlip
# +
aug = VerticalFlip(p=1)
augmented = aug(image=image, mask=mask)
image_v_flipped = augmented['image']
mask_v_flipped = augmented['mask']
visualize(image_v_flipped, mask_v_flipped, original_image=image, original_mask=mask)
# -
# ## RandomRotate90 (Randomly rotates by 0, 90, 180, 270 degrees)
# +
aug = RandomRotate90(p=1)
augmented = aug(image=image, mask=mask)
image_rot90 = augmented['image']
mask_rot90 = augmented['mask']
visualize(image_rot90, mask_rot90, original_image=image, original_mask=mask)
# -
# ## Transpose (switch X and Y axis)
# +
aug = Transpose(p=1)
augmented = aug(image=image, mask=mask)
image_transposed = augmented['image']
mask_transposed = augmented['mask']
visualize(image_transposed, mask_transposed, original_image=image, original_mask=mask)
# -
# # Non-rigid transformations: ElasticTransform, GridDistortion, OpticalDistortion
# In medical imaging problems non-rigid transformations help to augment the data. It is unclear if they will help in this problem, but let's look at them. We will consider [ElasticTransform](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.ElasticTransform), [GridDistortion](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.GridDistortion), [OpticalDistortion](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.OpticalDistortion).
# ## ElasticTransform
# +
# Elastic deformation; alpha/sigma control intensity and smoothness of the
# displacement field, alpha_affine the magnitude of the affine component.
aug = ElasticTransform(p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03)
augmented = aug(image=image, mask=mask)
image_elastic = augmented['image']
mask_elastic = augmented['mask']
visualize(image_elastic, mask_elastic, original_image=image, original_mask=mask)
# -
# ## GridDistortion
# +
aug = GridDistortion(p=1)
augmented = aug(image=image, mask=mask)
image_grid = augmented['image']
mask_grid = augmented['mask']
visualize(image_grid, mask_grid, original_image=image, original_mask=mask)
# -
# ## OpticalDistortion
# +
# Lens-like distortion; large distort_limit/shift_limit exaggerate the effect.
aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
augmented = aug(image=image, mask=mask)
image_optical = augmented['image']
mask_optical = augmented['mask']
visualize(image_optical, mask_optical, original_image=image, original_mask=mask)
# -
# # RandomSizedCrop
# One may combine [RandomCrop](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomCrop) and [RandomScale](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomScale) but there is a transformation [RandomSizedCrop](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomSizedCrop) that allows to combine them into one transformation.
# +
# Crop a random 50-101 px tall window, then resize back to the source size.
aug = RandomSizedCrop(p=1, min_max_height=(50, 101), height=original_height, width=original_width)
augmented = aug(image=image, mask=mask)
image_scaled = augmented['image']
mask_scaled = augmented['mask']
visualize(image_scaled, mask_scaled, original_image=image, original_mask=mask)
# -
# # Let's try to combine different transformations
# Light non destructive augmentations.
# +
# Light pipeline: only lossless D4 operations, each applied half the time.
aug = Compose([VerticalFlip(p=0.5),
               RandomRotate90(p=0.5)])
augmented = aug(image=image, mask=mask)
image_light = augmented['image']
mask_light = augmented['mask']
visualize(image_light, mask_light, original_image=image, original_mask=mask)
# -
# # Let's add non rigid transformations and RandomSizedCrop
# ## Medium augmentations
# +
# Medium pipeline: a size jitter (crop-resize OR pad), the D4 subset, and
# exactly one non-rigid warp chosen by the inner OneOf with probability 0.8.
aug = Compose([
    OneOf([RandomSizedCrop(min_max_height=(50, 101), height=original_height, width=original_width, p=0.5),
           PadIfNeeded(min_height=original_height, min_width=original_width, p=0.5)], p=1),
    VerticalFlip(p=0.5),
    RandomRotate90(p=0.5),
    OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5)
    ], p=0.8)])
augmented = aug(image=image, mask=mask)
image_medium = augmented['image']
mask_medium = augmented['mask']
visualize(image_medium, mask_medium, original_image=image, original_mask=mask)
# -
# # Let's add non-spatial transformations.
# Many non spatial transformations like [CLAHE](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.CLAHE), [RandomBrightness](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomBrightness), [RandomContrast](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomContrast), [RandomGamma](https://albumentations.readthedocs.io/en/latest/api/augmentations.html#albumentations.augmentations.transforms.RandomGamma) can be also added. They will be applied only to the image and not the mask.
# +
# Heavy pipeline: medium pipeline plus pixel-level transforms (CLAHE,
# brightness/contrast, gamma) which albumentations applies to the image only.
aug = Compose([
    OneOf([RandomSizedCrop(min_max_height=(50, 101), height=original_height, width=original_width, p=0.5),
           PadIfNeeded(min_height=original_height, min_width=original_width, p=0.5)], p=1),
    VerticalFlip(p=0.5),
    RandomRotate90(p=0.5),
    OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
    ], p=0.8),
    CLAHE(p=0.8),
    RandomBrightnessContrast(p=0.8),
    RandomGamma(p=0.8)])
augmented = aug(image=image, mask=mask)
image_heavy = augmented['image']
mask_heavy = augmented['mask']
visualize(image_heavy, mask_heavy, original_image=image, original_mask=mask)
# -
| notebooks/example_kaggle_salt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
sys.path.append('/Users/iwatobipen/develop/chemoenv/psikit/psikit/')
from psikit import Psikit
pk = Psikit()
pk.read_from_smiles('OC(=O)c1cnccc1')
pk.optimize()
pk.mol
#caculate chages
print(pk.calc_resp_charges())
print(pk.calc_mulliken_charges())
print(pk.calc_lowdin_charges())
mol = pk.mol
atoms = mol.GetAtoms()
data = {'Symbol':[atom.GetSymbol() for atom in atoms],
'RESP': [float(atom.GetProp('RESP')) for atom in atoms],
'MULLIKEN': [float(atom.GetProp('MULLIKEN')) for atom in atoms],
'LOWDIN': [float(atom.GetProp('LOWDIN')) for atom in atoms]}
df = pd.DataFrame(data)
df.round(3)
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# RESP charge and MULLIKEN charge shows high correlation
plt.scatter(df.RESP, df.MULLIKEN)
plt.xlabel('RESP')
plt.ylabel('MULLIKEN')
plt.plot(np.arange(-1.0, 1.0, 0.01), np.arange(-1.0, 1.0, 0.01), c='b')
plt.scatter(df.RESP, df.LOWDIN)
plt.xlabel('RESP')
plt.ylabel('LOWDIN')
plt.plot(np.arange(-1.0, 1.0, 0.01), np.arange(-1.0, 1.0, 0.01), c='b')
plt.scatter(df.MULLIKEN, df.LOWDIN)
plt.xlabel('MULLIKEN')
plt.ylabel('LOWDIN')
plt.plot(np.arange(-1.0, 1.0, 0.01), np.arange(-1.0, 1.0, 0.01), c='b')
| examples/CHARGE_COMPARISON/charge_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import sys

# Location of the SpaceNet utility modules inside the container image.
spaceNetUtilitiesPath = '/opt/utilities/python/'
# Extend sys.path only once so repeated cell executions stay idempotent.
# FIX: use the idiomatic "x not in y" membership test instead of "not x in y".
if spaceNetUtilitiesPath not in sys.path:
    print('adding spaceNetUtilitiesPath')
    sys.path.extend([spaceNetUtilitiesPath])
else:
    print('SpaceNet Utilities Path Already Exists')
from spaceNetUtilities import geoTools as gT
from spaceNetUtilities import labelTools as lT
# -
sys.path
# +
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
./create_pascal_tf_record --data_dir=/home/user/VOCdevkit \
--year=VOC2012 \
--output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
from tqdm import tqdm
def dict_to_tf_example(data, label_map_dict, img_path=None, dataDir=''):
    """Convert an XML-derived dict to a tf.Example proto.

    Bounding-box coordinates are normalized to [0, 1] by the image size.

    Args:
      data: dict holding PASCAL XML fields for a single image (obtained by
        running dataset_util.recursive_parse_xml_to_dict).
      label_map_dict: a map from string label names to integer ids.
      img_path: optional path to the JPEG image; falls back to
        data['filename'] when empty/None.
      dataDir: unused; kept for backward compatibility with existing callers.

    Returns:
      The converted tf.Example, or [] when the annotation has no objects.

    Raises:
      ValueError: if the image pointed to by img_path is not a valid JPEG.
    """
    # FIX: the original default was a mutable list (img_path=[]); None is
    # equally falsy in the check below, so behavior is preserved without the
    # mutable-default pitfall.
    if not img_path:
        img_path = data['filename']
    with tf.gfile.GFile(img_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = PIL.Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    # SHA-256 of the raw bytes is stored as a stable image key.
    key = hashlib.sha256(encoded_jpg).hexdigest()

    width = int(data['size']['width'])
    height = int(data['size']['height'])

    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    if 'object' in data:
        for obj in data['object']:
            difficult = bool(int(obj['difficult']))
            difficult_obj.append(int(difficult))
            # Normalize pixel coordinates to the [0, 1] range TF expects.
            xmin.append(float(obj['bndbox']['xmin']) / width)
            ymin.append(float(obj['bndbox']['ymin']) / height)
            xmax.append(float(obj['bndbox']['xmax']) / width)
            ymax.append(float(obj['bndbox']['ymax']) / height)
            classes_text.append(obj['name'].encode('utf8'))
            classes.append(label_map_dict[obj['name']])
            truncated.append(int(obj['truncated']))
            poses.append(obj['pose'].encode('utf8'))

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': dataset_util.int64_feature(height),
            'image/width': dataset_util.int64_feature(width),
            'image/filename': dataset_util.bytes_feature(
                data['filename'].encode('utf8')),
            'image/source_id': dataset_util.bytes_feature(
                data['filename'].encode('utf8')),
            'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
            'image/encoded': dataset_util.bytes_feature(encoded_jpg),
            'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
            'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
            'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
            'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
            'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
            'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
            'image/object/class/label': dataset_util.int64_list_feature(classes),
            'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
            'image/object/truncated': dataset_util.int64_list_feature(truncated),
            'image/object/view': dataset_util.bytes_list_feature(poses),
        }))
        return example
    else:
        # No annotated objects: the caller treats [] as "skip this image".
        return []
def createTFRecord(txtList, label_map_path, output_dir, dataDir='', maxSamples=-1):
    """Write a TFRecord file from a list of image/annotation path pairs.

    Args:
        txtList: Path to a text file where each line holds
            '<image_path> <pascal_voc_xml_path>' separated by a space.
        label_map_path: Path to the label-map .pbtxt mapping names to ids.
        output_dir: Path of the TFRecord FILE to create (despite the name,
            this is a file path, not a directory).
        dataDir: When non-empty, both paths are re-rooted under this
            directory using only their basenames.
        maxSamples: Maximum number of rows to convert; -1 means all rows.

    Returns:
        The literal string 'test' (kept for backward compatibility with
        any caller inspecting the return value).
    """
    writer = tf.python_io.TFRecordWriter(output_dir)
    label_map_dict = label_map_util.get_label_map_dict(label_map_path)
    with open(txtList, 'r') as f:
        rowList = [row.rstrip().split(' ') for row in f]
    # BUG FIX: the original used '==' (a no-op comparison) instead of '=',
    # leaving maxSamples at -1 so rowList[0:-1] silently dropped the final
    # sample from every generated TFRecord.
    if maxSamples == -1:
        maxSamples = len(rowList)
    maxLength = min(len(rowList), maxSamples)
    for row in tqdm(rowList[:maxLength]):
        imgPath = row[0]
        labelPath = row[1]
        if dataDir != "":
            labelPath = os.path.join(dataDir, os.path.basename(labelPath))
            imgPath = os.path.join(dataDir, os.path.basename(imgPath))
        with tf.gfile.GFile(labelPath, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        tf_example = dict_to_tf_example(data, label_map_dict, img_path=imgPath)
        # dict_to_tf_example yields a falsy value for images without
        # annotations; skip those.
        if tf_example:
            writer.write(tf_example.SerializeToString())
    writer.close()
    return 'test'
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
# +
# Build TFRecords for the SpaceNet V2 Las Vegas (AOI_2) training area,
# previously converted to PASCAL VOC format.
# Train split: trainval.txt lists '<image> <xml>' pairs; both are re-rooted
# under the annotations directory via dataDir.
filePath = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/trainval.txt'
pathpbTxt = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/data/spacenet_label_map.pbtxt'
# NOTE: despite the name, output_dir is the output FILE path, not a directory.
output_dir = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/tf_train1.record'
dataDir = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/annotations'
createTFRecord(filePath, pathpbTxt, output_dir, dataDir=dataDir)
# Test split: same label map and data root, different list and output file.
filePath = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/test.txt'
output_dir = '/data/spacenetV2_TrainData/AOI_2_Vegas_Train_PASCALVOC/tf_test1.record'
createTFRecord(filePath, pathpbTxt, output_dir, dataDir=dataDir)
#with tf.gfile.GFile(path, 'r') as fid:
# xml_str = fid.read()
#xml = etree.fromstring(xml_str)
#data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
# -
| spacenetTensorFlowAPI.ipynb |