# (dataset artifact, commented out to keep the file parseable)
# code stringlengths 38 801k | repo_path stringlengths 6 263
# |---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="8709d358" executionInfo={"status": "ok", "timestamp": 1645808394927, "user_tz": -420, "elapsed": 1352, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}}
# import libraries
import pandas as pd
import numpy as np
# import dataset: DQLab statistics course data, semicolon-delimited CSV
raw_data = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/dataset_statistic.csv', sep=';')
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="025ac6c1" executionInfo={"status": "ok", "timestamp": 1645808395433, "user_tz": -420, "elapsed": 26, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="1196faad-a556-4ef5-db10-b5c5fccbf4b4"
# data inspection
# first 5 rows
raw_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="d401f7de" executionInfo={"status": "ok", "timestamp": 1645808395435, "user_tz": -420, "elapsed": 23, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="2762e28d-79d4-4678-d391-3fcfd9665127"
# last 5 rows
raw_data.tail()
# + colab={"base_uri": "https://localhost:8080/"} id="f04d7bc0" executionInfo={"status": "ok", "timestamp": 1645808395436, "user_tz": -420, "elapsed": 21, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="e8729a39-dfdf-414c-880d-0d02b91ba5dc"
# show the data dimensions (rows, columns)
print(raw_data.shape)
# show the number of rows
print(raw_data.shape[0])
# + colab={"base_uri": "https://localhost:8080/"} id="c91709d0" executionInfo={"status": "ok", "timestamp": 1645808396445, "user_tz": -420, "elapsed": 1026, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="a22b9a9b-cf79-4667-8560-def2260a9087"
# list the column names
raw_data.columns
# + colab={"base_uri": "https://localhost:8080/"} id="dd00476a" executionInfo={"status": "ok", "timestamp": 1645808396446, "user_tz": -420, "elapsed": 95, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="c499b32f-2e47-4686-ff88-ed2a37eb12e5"
# How many values are missing per column?
raw_data.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="c29638e9" executionInfo={"status": "ok", "timestamp": 1645808396447, "user_tz": -420, "elapsed": 87, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="94584641-6117-43f3-f55c-1f81794da274"
# summary statistics for the numeric columns
print(raw_data.describe())
# maximum of the 'Harga' (price) column
print(raw_data['Harga'].max())
# minimum of the 'Harga' (price) column
print(raw_data['Harga'].min())
# + colab={"base_uri": "https://localhost:8080/"} id="117f1b51" executionInfo={"status": "ok", "timestamp": 1645808396448, "user_tz": -420, "elapsed": 81, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="6126e05c-464e-4d88-a87a-25ed963adaa9"
# column-wise sum of every column
# NOTE(review): summing non-numeric columns concatenates strings on older
# pandas and may raise on newer versions -- the numeric_only call below is
# the safe variant; confirm against the pandas version in use
print(raw_data.sum())
# sum of the numeric columns only
print(raw_data.sum(numeric_only=True))
# sum of just the 'Harga' and 'Pendapatan' columns
print(raw_data[['Harga', 'Pendapatan']].sum())
# + colab={"base_uri": "https://localhost:8080/"} id="64817946" executionInfo={"status": "ok", "timestamp": 1645808396449, "user_tz": -420, "elapsed": 74, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="f753a8ef-b761-4a75-aa66-1da14963779e"
# DataFrame manipulation - selecting columns and rows
# select only the 'Pendapatan' (income) column
print(raw_data['Pendapatan'])
# select the 'Jenis Kelamin' (gender) and 'Pendapatan' columns
print(raw_data[['Jenis Kelamin', 'Pendapatan']])
# + colab={"base_uri": "https://localhost:8080/"} id="b93d4714" executionInfo={"status": "ok", "timestamp": 1645808396450, "user_tz": -420, "elapsed": 67, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="14d0d807-f43d-4945-ff33-d933acb01826"
# the .loc method and positional slicing
# rows 0 through 9 (slice end is exclusive)
print(raw_data[:10])
# rows 3 through 4
print(raw_data[3:5])
# rows labelled 1, 3 and 10 (.loc is label-based)
print(raw_data.loc[[1,3,10]])
# columns 'Jenis Kelamin' and 'Pendapatan', rows 1 through 9
print(raw_data[['Jenis Kelamin', 'Pendapatan']][1:10])
# columns 'Harga' and 'Tingkat Kepuasan', rows labelled 1, 10 and 15
print(raw_data[['Harga', 'Tingkat Kepuasan']].loc[[1,10,15]])
# + colab={"base_uri": "https://localhost:8080/"} id="a176e0c4" executionInfo={"status": "ok", "timestamp": 1645808396451, "user_tz": -420, "elapsed": 60, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="b3a512e0-b9a9-41af-8cd5-79f1c7a763a4"
raw_data['Jenis Kelamin'][2:10]
# + colab={"base_uri": "https://localhost:8080/"} id="78a82b27" executionInfo={"status": "ok", "timestamp": 1645808396452, "user_tz": -420, "elapsed": 55, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="396a26fa-91d6-467a-9b4a-58e8d68c769f"
# MEAN (average)
# keep only the rows where the product is 'A'
subset_a = raw_data.loc[raw_data['Produk'] == 'A']
# pandas Series.mean()
print(subset_a['Pendapatan'].mean())
# numpy's np.mean on the same column
print(np.mean(subset_a['Pendapatan']))
# + colab={"base_uri": "https://localhost:8080/"} id="4adee8ed" executionInfo={"status": "ok", "timestamp": 1645808396453, "user_tz": -420, "elapsed": 51, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="b00750bb-b4de-4eee-dfcd-225acae98795"
# MEDIAN
# restrict to rows for product 'A'
rows_a = raw_data.loc[raw_data['Produk'] == 'A', :]
# pandas Series.median()
print(rows_a['Pendapatan'].median())
# numpy equivalent
print(np.median(rows_a['Pendapatan']))
# + colab={"base_uri": "https://localhost:8080/"} id="b45d54cf" executionInfo={"status": "ok", "timestamp": 1645808396454, "user_tz": -420, "elapsed": 48, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="d6b0ab70-f120-4ca5-a7d2-0873183ceb9e"
# mode: frequency of each product category (first entry is the mode)
print(raw_data['Produk'].value_counts())
# + colab={"base_uri": "https://localhost:8080/"} id="80a4dac4" executionInfo={"status": "ok", "timestamp": 1645808396455, "user_tz": -420, "elapsed": 45, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="c63c1689-5a41-4a77-fabd-69dbf41c374c"
# Quantiles
# the median, i.e. the 50% quantile, via pandas
print(raw_data['Pendapatan'].quantile(q=0.5))
# the same via numpy
print(np.quantile(raw_data['Pendapatan'], q=0.5))
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="077ccf4f" executionInfo={"status": "ok", "timestamp": 1645808396457, "user_tz": -420, "elapsed": 42, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="eb20685a-2e85-4472-e10c-7d5a48898ddd"
# Data aggregation with the .agg() method
# mean and median of 'Pendapatan' and 'Harga'.
# String aggregation names instead of np.mean/np.median: passing numpy
# callables to .agg() is deprecated since pandas 2.0 and emits FutureWarning.
raw_data[['Pendapatan', 'Harga']].agg(['mean', 'median'])
# + colab={"base_uri": "https://localhost:8080/", "height": 333} id="ee2a9579" executionInfo={"status": "ok", "timestamp": 1645808396458, "user_tz": -420, "elapsed": 39, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="5ae3d673-6d17-4609-a907-e46c075af721"
# mean and median of income and price per product group.
# String names replace np.mean/np.median, whose use in .agg() is deprecated
# since pandas 2.0.
raw_data[['Pendapatan', 'Harga', 'Produk']].groupby('Produk').agg(['mean', 'median'])
# + colab={"base_uri": "https://localhost:8080/"} id="d5ae9f0c" executionInfo={"status": "ok", "timestamp": 1645808396459, "user_tz": -420, "elapsed": 37, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="f7566416-e51a-4075-c040-e717f3a9dce5"
# first (25%) and third (75%) quartile of 'Pendapatan'
raw_data['Pendapatan'].quantile([0.25, 0.75])
# + id="4cb0ac3c" executionInfo={"status": "ok", "timestamp": 1645808396814, "user_tz": -420, "elapsed": 388, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="14d94068-0149-45fe-ceb0-d9ba591e2fcf" colab={"base_uri": "https://localhost:8080/"}
# proportion (relative frequency) of each product
raw_data['Produk'].value_counts()/raw_data.shape[0]
# + colab={"base_uri": "https://localhost:8080/"} id="u7LcIZoCnnCN" executionInfo={"status": "ok", "timestamp": 1645808396816, "user_tz": -420, "elapsed": 27, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="5086aa71-91be-40c2-c1fb-0795911bebf0"
# range (max - min) of the 'Pendapatan' column
raw_data['Pendapatan'].max() - raw_data['Pendapatan'].min()
# + colab={"base_uri": "https://localhost:8080/"} id="YvehXnoDoPaH" executionInfo={"status": "ok", "timestamp": 1645808396817, "user_tz": -420, "elapsed": 24, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="cc2f7449-8a0c-482b-d2d0-d503dd5fa939"
# variance
# pandas default: sample variance (ddof=1)
print(raw_data['Pendapatan'].var())
# numpy default: population variance (ddof=0), hence a different value
print(np.var(raw_data['Pendapatan']))
# pandas with ddof=0 matches the numpy default above
print(raw_data['Pendapatan'].var(ddof=0))
# + colab={"base_uri": "https://localhost:8080/"} id="QOitRt9SpCJN" executionInfo={"status": "ok", "timestamp": 1645808396819, "user_tz": -420, "elapsed": 21, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="911c7420-3ddd-482b-882f-ae53d8b7bb6f"
# standard deviation
# pandas default is the sample standard deviation (ddof=1)
print(raw_data['Pendapatan'].std())
# numpy needs ddof=1 explicitly to match pandas
print(np.std(raw_data['Pendapatan'], ddof=1))
# + colab={"base_uri": "https://localhost:8080/"} id="ilEhRt1jpgAi" executionInfo={"status": "ok", "timestamp": 1645808397691, "user_tz": -420, "elapsed": 887, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="632ecbb8-54ee-4635-94e6-4df9f92a0106"
# correlation matrices (Pearson, Spearman, Kendall).
# Restrict to numeric columns first: DataFrame.corr() on a frame that also
# holds string columns ('Produk', 'Jenis Kelamin', ...) raises a TypeError
# on pandas >= 2.0 (older versions silently dropped them).
numeric_data = raw_data.select_dtypes(include='number')
print(numeric_data.corr())
print(numeric_data.corr(method='spearman'))
print(numeric_data.corr(method='kendall'))
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="wxxaN_97q_I1" executionInfo={"status": "ok", "timestamp": 1645808398860, "user_tz": -420, "elapsed": 1174, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="bb91570f-5f9c-4b9f-bf6c-03950c113fd5"
# data visualization
# scatterplot
import matplotlib.pyplot as plt
plt.figure()
# scatter plot of 'Pendapatan' vs 'Total' using pandas plot.scatter
raw_data.plot.scatter(x='Pendapatan', y='Total')
plt.title('plot.scatter dari pandas', size=14)
plt.tight_layout()
plt.show()
# the same scatter plot using matplotlib's plt.scatter
plt.scatter(x='Pendapatan', y='Total', data=raw_data)
plt.title('plt.scatter dari matplotlib', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 239} id="iCkxc3UAuQBS" executionInfo={"status": "ok", "timestamp": 1645808399237, "user_tz": -420, "elapsed": 382, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="09987a1d-5e18-4cad-e9fa-30735e45a1a9"
# scatter plot again, now with explicit axis labels
plt.scatter(x='Pendapatan', y='Total', data=raw_data)
plt.xlabel('Pendapatan')
plt.ylabel('Total')
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="SbbMYTOcvM_R" executionInfo={"status": "ok", "timestamp": 1645808400543, "user_tz": -420, "elapsed": 1314, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="f1025681-dd47-41bf-e6b6-271a5a22f282"
# histogram
plt.figure()
# distribution of the 'Pendapatan' column using pandas .hist
raw_data.hist(column='Pendapatan')
plt.title('.hist dari pandas', size=14)
plt.tight_layout()
plt.show()
# distribution of 'Pendapatan' using matplotlib's pyplot.hist
plt.hist(x='Pendapatan', data=raw_data)
plt.title('pyplot.hist dari matplotlib.pyplot', size=14)
plt.xlabel('Pendapatan')
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 462} id="mt8m0rQk1hVl" executionInfo={"status": "ok", "timestamp": 1645808401176, "user_tz": -420, "elapsed": 637, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="177706ec-6b3f-4eb7-8aa6-bf2a7a2961ae"
# boxplot
plt.figure()
# boxplot of 'Pendapatan' using the pandas .boxplot method
raw_data.boxplot(column='Pendapatan')
plt.title('.boxplot dari pandas', size=14)
plt.tight_layout()
plt.show()
# boxplot of 'Pendapatan' using matplotlib's plt.boxplot
plt.boxplot(x='Pendapatan', data=raw_data)
plt.title('.boxplot dari matplotlib', size=14)
plt.xlabel('Pendapatan')
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="IAdFRvCA2jry" executionInfo={"status": "ok", "timestamp": 1645808401993, "user_tz": -420, "elapsed": 833, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="d9d1b59e-6c14-4f09-8b9f-4764b8fca71c"
# barplot
# frequency of each value in the 'Produk' column
class_freq = raw_data.Produk.value_counts()
# inspect class_freq
print(class_freq)
plt.figure()
class_freq.plot.bar()
plt.title('.bar() dari pandas', size=14)
plt.tight_layout()
# NOTE(review): there is no plt.show()/plt.figure() here, so the matplotlib
# bars below are drawn on the same axes and this title is overwritten --
# confirm whether that overlay is intended
plt.bar(x=class_freq.index, height=class_freq.values)
plt.title('plt.bar() dari matplotlib', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 577} id="Rzjwyqe4teDj" executionInfo={"status": "ok", "timestamp": 1645808402636, "user_tz": -420, "elapsed": 664, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="a708bc65-3efa-466f-c81a-16ae121e896f"
# pie chart
plt.figure()
class_freq = raw_data['Produk'].value_counts()
# pie chart with matplotlib.pyplot's plt.pie
plt.pie(class_freq.values, labels=class_freq.index)
plt.title('plt.pie dengan matplotlib.pyplot')
plt.tight_layout()
plt.show()
# pie chart with pandas plot.pie
class_freq.plot.pie()
plt.title('plot.pie dari pandas', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="D4t0HUhpvSHi" executionInfo={"status": "ok", "timestamp": 1645808406706, "user_tz": -420, "elapsed": 4076, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="2b1c55e4-3bc8-487b-a561-becf07eac422"
# data transformation
from scipy import stats
plt.figure()
# histograms of every numeric column
raw_data.hist()
plt.title('histogram untuk seluruh kolom', size=14)
plt.tight_layout()
plt.show()
plt.figure()
plt.hist(raw_data['Pendapatan'])
plt.title('histogram pendapatan')
plt.show()
plt.figure()
# fifth-root transformation to reduce right skew
np.power(raw_data['Pendapatan'], 1/5).hist()
plt.title('histogram pendapatan - transformasi menggunakan akar lima', size=14)
plt.tight_layout()
plt.show()
# keep the transformed series for the qqplot below
pendapatan_akar_lima = np.power(raw_data['Pendapatan'], 1/5)
plt.figure()
# qqplot of the fifth-root-transformed income
stats.probplot(pendapatan_akar_lima, plot=plt)
plt.title('qqplot pendapatan - transformasi akar lima', size=14)
plt.tight_layout()
plt.show()
# qqplot of the untransformed income for comparison
stats.probplot(raw_data['Pendapatan'], plot=plt)
plt.title('qqplot pendapatan', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 462} id="A3roXJLM0P-D" executionInfo={"status": "ok", "timestamp": 1645808408175, "user_tz": -420, "elapsed": 1496, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="d26a61e6-ec28-4968-b6a1-02f74593b601"
# Box-Cox transformation
# stats.boxcox returns (transformed data, fitted lambda); lambda is discarded
hasil, _ = stats.boxcox(raw_data['Pendapatan'])
plt.figure()
# histogram of the transformed data
plt.hist(hasil)
plt.title('histogram', size=14)
plt.tight_layout()
plt.show()
plt.figure()
# qqplot of the transformed data
stats.probplot(hasil, plot=plt)
plt.title('qqplot', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="X8yxT34O6bKP" executionInfo={"status": "ok", "timestamp": 1645808408783, "user_tz": -420, "elapsed": 617, "user": {"displayName": "Dev7", "photoUrl": "<KEY>", "userId": "10334392920424487579"}} outputId="30058042-fbd1-4c00-a9d7-391eb21d02a4"
# encode the categorical column as one-hot/dummy indicator columns
print(raw_data['Produk'])
data_dummy_produk = pd.get_dummies(raw_data['Produk'])
print(data_dummy_produk)
# + colab={"base_uri": "https://localhost:8080/", "height": 831} id="O_-6Wg09Aofh" executionInfo={"status": "ok", "timestamp": 1645808410815, "user_tz": -420, "elapsed": 2047, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="4cc42ebe-876e-43e1-9ee2-6d8902292839"
# correlation matrix plots
# set the figure resolution
plt.rcParams['figure.dpi'] = 100
# Compute the correlations once, on numeric columns only: .corr() on a frame
# with string columns raises a TypeError on pandas >= 2.0, and hoisting the
# computation avoids running it twice for the two plots below.
corr_matrix = raw_data.select_dtypes(include='number').corr()
plt.figure()
plt.matshow(corr_matrix)
plt.title('Plot correlation dengan .matshow', size=14)
plt.tight_layout()
plt.show()
import seaborn as sns
plt.figure()
sns.heatmap(corr_matrix, annot=True)
plt.title('Plot correlation matrix dengan sns.heatmap', size=14)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 933} id="ty18p0o2036R" executionInfo={"status": "ok", "timestamp": 1645808593213, "user_tz": -420, "elapsed": 4463, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="0c222f63-3b3d-4ec8-ad80-a63b7d1f21c1"
# grouped box plot
plt.clf()
plt.figure()
# plain boxplot without grouping; rotate tick labels 90 degrees
raw_data.boxplot(rot=90)
plt.title('Boxplot tanpa pengelompokkan', size=14)
plt.tight_layout()
plt.show()
plt.figure()
# boxplots grouped by the 'Produk' column
raw_data.boxplot(by='Produk')
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="vn8fuKKP2JAa" executionInfo={"status": "ok", "timestamp": 1645809214340, "user_tz": -420, "elapsed": 5835, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="827d794d-170d-4699-e763-e3fb51dc391b"
# grouped histogram: one figure of histograms per product category.
# The loop body lost its indentation in the source (IndentationError as-is);
# restored here.
produk_list = ['A', 'B', 'C', 'D', 'E']
for produk in produk_list:
    plt.figure()
    raw_data[raw_data['Produk'] == produk].hist()
    plt.tight_layout()
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="RRNdGJrZ37Ik" executionInfo={"status": "ok", "timestamp": 1645809361152, "user_tz": -420, "elapsed": 1636, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="06679564-40d5-4acd-890e-1061716c6ddf"
# hex bin plot of 'Pendapatan' vs 'Total'
plt.figure()
raw_data.plot.hexbin(x='Pendapatan', y='Total', gridsize=25, rot=90)
plt.tight_layout()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 918} id="pYm60QeS4iKg" executionInfo={"status": "ok", "timestamp": 1645809537565, "user_tz": -420, "elapsed": 4476, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="bc11ac46-155a-462f-c057-9c684f80fee4"
# scatter matrix plot of every pair of columns
from pandas.plotting import scatter_matrix
_, ax = plt.subplots(1, 1, figsize=(10, 10))
scatter_matrix(raw_data, ax=ax)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 399} id="qsXy11GN5f8d" executionInfo={"status": "ok", "timestamp": 1645809652514, "user_tz": -420, "elapsed": 5005, "user": {"displayName": "Dev7", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjan_MNSqpfqm97026jX2Ym4qcnbVmBG8DPxR3jOw=s64", "userId": "10334392920424487579"}} outputId="24d2790e-4a2d-4a61-adb4-c6e34f7b393b"
# scatter matrix with KDE curves on the diagonal instead of histograms
_, ax = plt.subplots(1, 1, figsize=(10, 10))
scatter_matrix(raw_data, diagonal='kde', ax=ax)
plt.show()
# source notebook: Learn/Fundamental/Statistic using Python for Data Science.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ml4t] *
# language: python
# name: conda-env-ml4t-py
# ---
# # How to generate long-short trading signals with a Random Forest
# ## Imports & Settings
# silence library deprecation/future warnings for cleaner notebook output
import warnings
warnings.filterwarnings('ignore')
# +
# %matplotlib inline
from time import time
from io import StringIO
import sys, os
from itertools import product
from pathlib import Path
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn.linear_model import LinearRegression
from scipy.stats import spearmanr
# -
# make the parent directory importable so the project-local `utils` is found
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from utils import MultipleTimeSeriesCV, format_time
sns.set_style('whitegrid')
np.random.seed(42)  # reproducible random sampling below
# trading days per year
YEAR = 252
idx = pd.IndexSlice
DATA_DIR = Path('..', 'data')
results_path = Path('results', 'return_predictions')
# exist_ok=True is race-free and replaces the if-exists guard, whose body had
# lost its indentation in the source
results_path.mkdir(parents=True, exist_ok=True)
# ## Get Data
# See the notebook [japanese_equity_features](03_japanese_equity_features.ipynb) in this directory for data preparation.
# load the prepared Japanese equity features
# (see the japanese_equity_features notebook for the data preparation)
data = pd.read_hdf('data.h5', 'stooq/japan/equities')
# show_counts replaces null_counts, which was removed in pandas 2.0
data.info(show_counts=True)
# We start with 941 tickers.
len(data.index.unique('ticker'))
# ### Select universe of 250 most-liquid stocks
# We rank the stocks by their daily average dollar volume and select those with the 250 lowest average ranks and thus highest average volumes for the 2010-2017 period.
# +
# load TSE stock prices for 2010-2017
prices = (pd.read_hdf(DATA_DIR / 'assets.h5', 'stooq/jp/tse/stocks/prices')
.loc[idx[:, '2010': '2017'], :])
# daily dollar volume per ticker through 2017
dollar_vol = prices.close.mul(prices.volume).loc[idx[:, :'2017'],:]
# cross-sectional rank by dollar volume each day (1 = most liquid)
dollar_vol_rank = dollar_vol.groupby(level='date').rank(ascending=False)
# 250 tickers with the lowest average rank = most liquid universe
universe = dollar_vol_rank.groupby(level='ticker').mean().nsmallest(250).index
# -
# ## MultipleTimeSeriesCV
# See [Chapter 7 - Linear Models](../07_linear_models) for details.
# 36 folds of 21-day test periods, 2-year training windows,
# separated by a 5-day lookahead gap
cv = MultipleTimeSeriesCV(n_splits=36,
test_period_length=21,
lookahead=5,
train_period_length=2 * 252)
# For each fold, the train and test periods are separated by a `lookahead` number of periods and thus do not overlap:
# Sanity-check the first four CV folds: train and test periods must not
# overlap, and each fold should span the expected number of days per ticker.
# The loop body lost its indentation in the source; restored here.
for i, (train_idx, test_idx) in enumerate(cv.split(X=data)):
    train = data.iloc[train_idx]
    train_dates = train.index.get_level_values('date')
    test = data.iloc[test_idx]
    test_dates = test.index.get_level_values('date')
    # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
    df = pd.concat([train.reset_index(), test.reset_index()])
    n = len(df)
    # no observation may appear in both train and test
    assert n == len(df.drop_duplicates())
    msg = f'Training: {train_dates.min().date()}-{train_dates.max().date()} '
    msg += f' ({train.groupby(level="ticker").size().value_counts().index[0]:,.0f} days) | '
    msg += f'Test: {test_dates.min().date()}-{test_dates.max().date()} '
    msg += f'({test.groupby(level="ticker").size().value_counts().index[0]:,.0f} days)'
    print(msg)
    # only inspect the first four folds
    if i == 3:
        break
# ## Model Selection: Time Period and Horizon
# For the model selection step, we restrict training and validation sets to the 2010-2017 period.
# restrict model selection to the liquid universe and the 2010-2017 period
cv_data = data.loc[idx[universe, :'2017'], :]
tickers = cv_data.index.unique('ticker')
# Persist the data to save some time when running another experiment:
cv_data.to_hdf('data.h5', 'stooq/japan/equities/cv_data')
# the with-body lost its indentation in the source; restored here
with pd.HDFStore('data.h5') as store:
    print(store.info())
# We're picking prediction horizons of 1, 5, 10 and 21 days:
lookaheads = [1, 5, 10, 21]
# ## Baseline: Linear Regression
# Since it's quick to run and quite informative, we generate linear regression baseline predictions. See [Chapter 7 - Linear Models](../07_linear_models) for details.
# linear regression baseline model
lr = LinearRegression()
# forward-return ('fwd') columns are labels; every other column is a feature
labels = sorted(cv_data.filter(like='fwd').columns)
features = cv_data.columns.difference(labels).tolist()
# ### CV Parameters
# We set five different training lengths from 3 months to 5 years, and two test periods as follows:
# training window lengths: 5y, 3y, 1y, ~6m, ~3m (in trading days)
train_lengths = [5 * YEAR, 3 * YEAR, YEAR, 126, 63]
test_lengths = [5, 21]
# OLS has no hyperparameters, so the CV grid is just the cartesian product of
# horizon and train/test period lengths
test_params = list(product(lookaheads, train_lengths, test_lengths))
# Now we iterate over these parameters and train/validate the linear regression model while capturing the information coefficient of the model predictions, measure both on a daily basis and for each complete fold:
# +
# Train/validate the linear regression for every CV configuration, capturing
# the information coefficient (rank correlation of prediction vs outcome)
# both averaged per day and over each complete fold.
# The nested loop bodies lost their indentation in the source; restored here.
lr_metrics = []
for lookahead, train_length, test_length in test_params:
    label = f'fwd_ret_{lookahead:02}'
    df = cv_data.loc[:, features + [label]].dropna()
    X, y = df.drop(label, axis=1), df[label]
    # two years of validation, split into test_length-day folds
    n_splits = int(2 * YEAR / test_length)
    cv = MultipleTimeSeriesCV(n_splits=n_splits,
                              test_period_length=test_length,
                              lookahead=lookahead,
                              train_period_length=train_length)
    ic, preds = [], []
    for i, (train_idx, test_idx) in enumerate(cv.split(X=X)):
        X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
        X_test, y_test = X.iloc[test_idx], y.iloc[test_idx]
        lr.fit(X_train, y_train)
        y_pred = lr.predict(X_test)
        preds.append(y_test.to_frame('y_true').assign(y_pred=y_pred))
        # per-fold information coefficient
        ic.append(spearmanr(y_test, y_pred)[0])
    preds = pd.concat(preds)
    lr_metrics.append([
        lookahead, train_length, test_length,
        np.mean(ic),
        spearmanr(preds.y_true, preds.y_pred)[0]
    ])
columns = ['lookahead', 'train_length', 'test_length', 'ic_by_day', 'ic']
lr_metrics = pd.DataFrame(lr_metrics, columns=columns)
# -
lr_metrics.info()
# ### Information Coefficient distribution by Lookahead
# Convert the data to long `seaborn`-friendly format:
# stack the by-day and overall IC measures into seaborn-friendly long format
lr_metrics_long = pd.concat([(lr_metrics.drop('ic', axis=1)
.rename(columns={'ic_by_day': 'ic'})
.assign(Measured='By Day')),
lr_metrics.drop('ic_by_day', axis=1)
.assign(Measured='Overall')])
lr_metrics_long.columns=['Lookahead', 'Train Length', 'Test Length', 'IC', 'Measure']
lr_metrics_long.info()
# Plot both IC measures for the various CV parameters:
# bar plots of both IC measures for every CV parameter combination
sns.catplot(x='Train Length',
y='IC',
hue='Test Length',
col='Lookahead',
row='Measure',
data=lr_metrics_long,
kind='bar')
# Compare the distributions of each IC metric for the different prediction horizons:
# Side-by-side boxplots comparing the two IC metrics across horizons.
fig, axes = plt.subplots(ncols=2, figsize=(14, 5), sharey=True)
sns.boxplot(x='lookahead', y='ic_by_day', data=lr_metrics, ax=axes[0])
axes[0].set_title('IC by Day')
sns.boxplot(x='lookahead', y='ic', data=lr_metrics, ax=axes[1])
axes[1].set_title('IC Overall')
axes[0].set_ylabel('Information Coefficient')
axes[1].set_ylabel('')
sns.despine()
fig.tight_layout()
# ### Best Train/Test Period Lengths
# Show the best train/test period settings for the four prediction horizons:
# top-3 train/test period settings per horizon, ranked by overall IC
(lr_metrics.groupby('lookahead', group_keys=False)
.apply(lambda x: x.nlargest(3, 'ic')))
# persist the baseline results
lr_metrics.to_csv(results_path / 'lin_reg_performance.csv', index=False)
# ## LightGBM Random Forest Model Tuning
# Helper function to obtain the LightGBM feature importance metrics:
def get_fi(model):
    """Return LightGBM gain-based feature importances normalized to sum to 1.

    Parameters
    ----------
    model : trained LightGBM Booster exposing ``feature_importance`` and
        ``feature_name``.

    Returns
    -------
    pd.Series
        Each feature's share of total gain, indexed by feature name.
    """
    gains = model.feature_importance(importance_type='gain')
    total_gain = gains.sum()
    return pd.Series(gains / total_gain, index=model.feature_name())
# LightGBM base parameter settings that are independent of hyperparameter tuning:
# boosting_type='rf' with bagging_freq=1 makes LightGBM behave as a random forest.
base_params = dict(boosting_type='rf',
                   objective='regression',
                   bagging_freq=1,
                   verbose=-1)
# ### Hyperparameter Options
# We run this experiment with different parameters for the bagging and feature fractions that determine the degree of randomization as well as the minimum number of samples for a split to control overfitting:
bagging_fraction_opts = [.5, .75, .95]
feature_fraction_opts = [.75, .95]
min_data_in_leaf_opts = [250, 500, 1000]
# This gives us 3x2x3=18 parameter combinations:
cv_params = list(product(bagging_fraction_opts,
                         feature_fraction_opts,
                         min_data_in_leaf_opts))
n_cv_params = len(cv_params)
n_cv_params
# #### Random Sample
# To limit the running time, we can randomly sample a subset of the parameter combinations (here: 50%):
# +
# Randomly sample half of the hyperparameter combinations to cut runtime.
sample_proportion = .5
sample_size = int(sample_proportion * n_cv_params)
cv_param_sample = np.random.choice(list(range(n_cv_params)),
                                   size=int(sample_size),
                                   replace=False)
cv_params_ = [cv_params[i] for i in cv_param_sample]
print('# CV parameters:', len(cv_params_))
# -
# We tune the number of trees by evaluating a fully grown forest for various smaller sizes:
# evaluation points: 25, 50, 75, ..., 500 trees; train once with the maximum.
num_iterations = [25] + list(range(50, 501, 25))
num_boost_round = num_iterations[-1]
# ### Train/Test Period Lengths
# As above for linear regression, we define a range of train/test period length:
# #### Define parameters
# Candidate training-window lengths (in trading days) and test-window lengths.
train_lengths = [5 * YEAR, 3 * YEAR, YEAR, 126, 63]
test_lengths = [5, 21]
test_params = list(product(train_lengths, test_lengths))
n_test_params = len(test_params)
# #### Random sample
# Just as for the model parameters, we can randomly sample from the 5 x 2 = 8 training configurations (here: 50%):
# +
# NOTE(review): sample_proportion is 1.0 here, so all configurations are kept
# despite the comment above mentioning 50%.
sample_proportion = 1.0
sample_size = int(sample_proportion * n_test_params)
test_param_sample = np.random.choice(list(range(n_test_params)),
                                     size=int(sample_size),
                                     replace=False)
test_params_ = [test_params[i] for i in test_param_sample]
print('Train configs:', len(test_params_))
print('CV Iterations:', len(cv_params_) * len(test_params_))
# -
# ### Categorical Variables
# To leverage LightGBM's ability to handle categorical variables, we need to define them; we'll also `factorize` them so they are both integer-encoded and start at zero (optional, but otherwise throws a warning) as expected by LightGBM:
# Integer-encode the categorical features starting at zero, as LightGBM expects.
categoricals = ['year', 'weekday', 'month']
for feature in categoricals:
    data[feature] = pd.factorize(data[feature], sort=True)[0]
# ### Run Cross-Validation
# Set up some helper variables and storage locations to facilitate the CV process and result storage:
# Forward-return columns serve as labels; everything else is a feature.
labels = sorted(cv_data.filter(like='fwd').columns)
features = cv_data.columns.difference(labels).tolist()
label_dict = dict(zip(lookaheads, labels))
# HDF5 store that collects IC, daily IC, feature importance and predictions.
cv_store = Path(results_path / 'parameter_tuning.h5')
# Column layout of the IC result frame: hyperparameters, lookahead t, then
# one column per evaluated tree count.
ic_cols = ['bagging_fraction',
           'feature_fraction',
           'min_data_in_leaf',
           't'] + [str(n) for n in num_iterations]
# Now we take the following steps:
# - we iterate over the prediction horizons and train/test period length,
# - set up the `MultipleTimeSeriesCV` accordingly
# - create the binary LightGBM dataset with the appropriate target, and
# - iterate over the model hyperparamters to train and validate the model while capturing the relevant performance metrics:
# Full cross-validation sweep: for each lookahead and train/test length,
# iterate over the sampled hyperparameter combinations, train a LightGBM
# random forest per CV fold, and record predictions, daily/overall IC and
# feature importance to the HDF5 store.
for lookahead in lookaheads:
    for train_length, test_length in test_params_:
        # cover roughly two years of out-of-sample data per configuration
        n_splits = int(2 * YEAR / test_length)
        print(f'Lookahead: {lookahead:2.0f} | Train: {train_length:3.0f} | '
              f'Test: {test_length:2.0f} | Params: {len(cv_params_):3.0f}')
        cv = MultipleTimeSeriesCV(n_splits=n_splits,
                                  test_period_length=test_length,
                                  train_period_length=train_length,
                                  lookahead=lookahead)
        label = label_dict[lookahead]
        outcome_data = data.loc[:, features + [label]].dropna()
        # binary dataset reused (via .subset) across folds and parameter combos
        lgb_data = lgb.Dataset(data=outcome_data.drop(label, axis=1),
                               label=outcome_data[label],
                               categorical_feature=categoricals,
                               free_raw_data=False)
        predictions, daily_ic, ic, feature_importance = [], [], [], []
        key = f'{lookahead}/{train_length}/{test_length}'
        T = 0
        for p, (bagging_fraction, feature_fraction, min_data_in_leaf) in enumerate(cv_params_):
            params = base_params.copy()
            params.update(dict(bagging_fraction=bagging_fraction,
                               feature_fraction=feature_fraction,
                               min_data_in_leaf=min_data_in_leaf))
            start = time()
            cv_preds, nrounds = [], []
            for i, (train_idx, test_idx) in enumerate(cv.split(X=outcome_data)):
                lgb_train = lgb_data.subset(train_idx.tolist()).construct()
                lgb_test = lgb_data.subset(test_idx.tolist()).construct()
                model = lgb.train(params=params,
                                  train_set=lgb_train,
                                  num_boost_round=num_boost_round,
                                  verbose_eval=False)
                # collect per-fold feature importance as columns of one frame
                if i == 0:
                    fi = get_fi(model).to_frame()
                else:
                    fi[i] = get_fi(model)
                test_set = outcome_data.iloc[test_idx, :]
                X_test = test_set.loc[:, model.feature_name()]
                y_test = test_set.loc[:, label]
                # predict with the forest truncated to each candidate size n
                y_pred = {str(n): model.predict(X_test, num_iteration=n)
                          for n in num_iterations}
                cv_preds.append(y_test.to_frame(
                    'y_test').assign(**y_pred).assign(i=i))
                nrounds.append(model.best_iteration)
            feature_importance.append(fi.T.describe().T.assign(bagging_fraction=bagging_fraction,
                                                               feature_fraction=feature_fraction,
                                                               min_data_in_leaf=min_data_in_leaf))
            cv_preds = pd.concat(cv_preds).assign(bagging_fraction=bagging_fraction,
                                                  feature_fraction=feature_fraction,
                                                  min_data_in_leaf=min_data_in_leaf)
            predictions.append(cv_preds)
            # daily IC: rank correlation of predictions vs outcomes per date
            by_day = cv_preds.groupby(level='date')
            ic_by_day = pd.concat([by_day.apply(lambda x: spearmanr(x.y_test,
                                                                    x[str(n)])[0]).to_frame(n)
                                   for n in num_iterations], axis=1)
            daily_ic.append(ic_by_day.assign(bagging_fraction=bagging_fraction,
                                             feature_fraction=feature_fraction,
                                             min_data_in_leaf=min_data_in_leaf))
            # overall IC over the pooled test predictions, per forest size
            cv_ic = [spearmanr(cv_preds.y_test, cv_preds[str(n)])[0]
                     for n in num_iterations]
            T += time() - start
            ic.append([bagging_fraction, feature_fraction,
                       min_data_in_leaf, lookahead] + cv_ic)
            msg = f'{p:3.0f} | {format_time(T)} | '
            msg += f'{bagging_fraction:3.0%} | {feature_fraction:3.0%} | {min_data_in_leaf:5,.0f} | '
            msg += f'{max(cv_ic):6.2%} | {ic_by_day.mean().max(): 6.2%} | {ic_by_day.median().max(): 6.2%}'
            print(msg)
        # persist results for this lookahead/train/test configuration
        m = pd.DataFrame(ic, columns=ic_cols)
        m.to_hdf(cv_store, 'ic/' + key)
        pd.concat(daily_ic).to_hdf(cv_store, 'daily_ic/' + key)
        pd.concat(feature_importance).to_hdf(cv_store, 'fi/' + key)
        pd.concat(predictions).to_hdf(cv_store, 'predictions/' + key)
# ## Analyse Cross-Validation Results
# ### Collect Data
# We'll now combine the CV results that we stored separately for each fold (to avoid losing results in case something goes wrong along the way):
# Identifier columns used when melting the wide per-tree-count IC frames
# into long format ('date' only applies to the daily IC frames).
id_vars = ['train_length',
           'test_length',
           'bagging_fraction',
           'feature_fraction',
           'min_data_in_leaf',
           't', 'date']
# We'll look at the financial performance in the notebook `alphalens_signal_quality`.
# Re-assemble the per-configuration results from the HDF5 store into two
# long-format frames: overall IC and daily IC.
daily_ic, ic = [], []
for t in lookaheads:
    print(t)
    with pd.HDFStore(cv_store) as store:
        # the 'fi/' keys enumerate the stored train/test configurations
        keys = [k[1:] for k in store.keys() if k.startswith(f'/fi/{t}')]
        for key in keys:
            train_length, test_length = key.split('/')[2:]
            print(train_length, test_length)
            k = f'{t}/{train_length}/{test_length}'
            cols = {'t': t,
                    'train_length': int(train_length),
                    'test_length': int(test_length)}
            ic.append(pd.melt(store['ic/' + k]
                              .assign(**cols),
                              id_vars=id_vars[:-1],
                              value_name='ic',
                              var_name='rounds')
                      .apply(pd.to_numeric))
            df = store['daily_ic/' + k].assign(**cols).reset_index()
            daily_ic.append(pd.melt(df,
                                    id_vars=id_vars,
                                    value_name='daily_ic',
                                    var_name='rounds')
                            .set_index('date')
                            .apply(pd.to_numeric)
                            .reset_index())
ic = pd.concat(ic, ignore_index=True)
daily_ic = pd.concat(daily_ic, ignore_index=True)
# ### Predictive Performance: CV Information Coefficient by Day
# We first look at the daily IC, the metric we ultimately care about for a daily trading strategy. The best results for all prediction horizons are typically achieved with three years of training; the shorter horizons work better with 21 day testing period length. More regularization often improves the result but the impact of the bagging and feature fraction parameters are a little less clear cut and likely depend on other parameters.
# Average the daily IC per configuration, then inspect the best settings.
group_cols = ['t','train_length', 'test_length',
              'bagging_fraction', 'feature_fraction', 'min_data_in_leaf']
daily_ic_avg = daily_ic.groupby(group_cols + ['rounds']).daily_ic.mean().to_frame('ic').reset_index()
daily_ic_avg.groupby('t', group_keys=False).apply(lambda x: x.nlargest(3, 'ic'))
# NOTE(review): null_counts was deprecated in newer pandas; show_counts replaces it.
daily_ic_avg.info(null_counts=True)
# For a 1-day forecast horizon, over 75% of the predictions yield a positive daily IC; the same is true for 21 days which, unsurprisingly, also shows a wider range.
ax = sns.boxenplot(x='t', y='ic', data=daily_ic_avg)
ax.axhline(0, ls='--', lw=1, c='k');
g = sns.catplot(x='t',
                y='ic',
                col='train_length',
                row='test_length',
                data=daily_ic_avg[(daily_ic_avg.test_length == 21)],
                kind='boxen')
g.savefig(results_path / 'daily_ic_test_21', dpi=300);
# ### HyperParameter Impact: Linear Regression
# To get a better idea of how the various CV parameters impact the forecast quality, we can run a linear regression with the daily IC as outcome and the one-hot encoded hyperparameters as inputs:
# Regress the average daily IC on one-hot-encoded CV parameters to quantify
# each hyperparameter's contribution, for the 1- and 5-day horizons.
lin_reg = {}
for t in [1, 5]:
    df_ = daily_ic_avg[(daily_ic_avg.t==t)&(daily_ic_avg.rounds<=250)].dropna()
    y, X = df_.ic, df_.drop(['ic', 't'], axis=1)
    # drop_first avoids the dummy trap; the constant absorbs the base levels
    X = sm.add_constant(pd.get_dummies(X, columns=X.columns, drop_first=True))
    model = sm.OLS(endog=y, exog=X)
    lin_reg[t] = model.fit()
    s = lin_reg[t].summary()
    # parse the coefficient table out of the statsmodels summary CSV
    coefs = pd.read_csv(StringIO(s.tables[1].as_csv())).rename(
        columns=lambda x: x.strip())
    coefs.columns = ['variable', 'coef', 'std_err',
                     't', 'p_value', 'ci_low', 'ci_high']
    coefs.to_csv(results_path / f'lr_result_{t:02}.csv', index=False)
def visualize_lr_result(model, ax):
    """Plot OLS coefficient estimates with confidence-interval error bars.

    Parameters
    ----------
    model : fitted statsmodels regression results (uses .params, .conf_int()).
    ax : matplotlib Axes to draw on.

    Drops the constant and any date dummies, shortens each dummy name to its
    last underscore-separated token, and annotates the x-axis with brackets
    that group the dummies by hyperparameter.
    """
    # half the CI width serves as the symmetric error bar
    ci = model.conf_int()
    errors = ci[1].sub(ci[0]).div(2)
    coefs = (model.params.to_frame('coef').assign(error=errors)
             .reset_index().rename(columns={'index': 'variable'}))
    coefs = coefs[~coefs['variable'].str.startswith(
        'date') & (coefs.variable != 'const')]
    coefs.variable = coefs.variable.str.split('_').str[-1]
    # invisible bars carry the yerr; the scatter below draws the point markers
    coefs.plot(x='variable', y='coef', kind='bar', ax=ax,
               color='none', capsize=3, yerr='error', legend=False, rot=0)
    ax.set_ylabel('IC')
    ax.set_xlabel('')
    # BUG FIX: pd.np (removed in pandas >= 1.0) replaced with numpy directly
    ax.scatter(x=np.arange(len(coefs)), marker='_', s=120, y=coefs['coef'], color='black')
    ax.axhline(y=0, linestyle='--', color='black', linewidth=1)
    ax.xaxis.set_ticks_position('none')
    # bracket annotations below the axis grouping dummies by hyperparameter;
    # positions are axes-fraction coordinates tuned to this figure layout
    ax.annotate('Train\nLength', xy=(.09, -0.1), xytext=(.09, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=5, lengthB=0.8', lw=1.0, color='black'))
    ax.annotate('Test\nLength', xy=(.23, -0.1), xytext=(.23, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=2, lengthB=0.8', lw=1.0, color='black'))
    ax.annotate('Bagging\nFraction', xy=(.32, -0.1), xytext=(.32, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=2.7, lengthB=0.8', lw=1.0, color='black'))
    ax.annotate('Feature\nFraction', xy=(.44, -0.1), xytext=(.44, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=3.4, lengthB=1.0', lw=1.0, color='black'))
    ax.annotate('Min.\nSamples', xy=(.55, -0.1), xytext=(.55, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=2.5, lengthB=1.0', lw=1.0, color='black'))
    ax.annotate('Number of\nRounds', xy=(.8, -0.1), xytext=(.8, -0.2),
                xycoords='axes fraction',
                textcoords='axes fraction',
                fontsize=11, ha='center', va='bottom',
                bbox=dict(boxstyle='square', fc='white', ec='black'),
                arrowprops=dict(arrowstyle='-[, widthB=11.2, lengthB=1.0', lw=1.0, color='black'))
# The below plot shows the regression coefficient values and their confidence intervals. The intercept (not shown) has a small positive value and is statistically significant; it captures the impact of the dropped categories (the smallest value for each parameter).
#
# For 1-day forecasts, some but not all results are insightful: 21-day testing is better, and so is `min_samples_leaf` of 500 or 1,000. 100-200 trees seem to work best, but both shorter and longer training periods are better than intermediate values.
# Render the coefficient plots for the 1- and 5-day horizons side by side.
with sns.axes_style('white'):
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 6))
    axes = axes.flatten()
    for i, t in enumerate([1, 5]):
        visualize_lr_result(lin_reg[t], axes[i])
        axes[i].set_title(f'Lookahead: {t} Day(s)')
    fig.suptitle('OLS Coefficients & Confidence Intervals', fontsize=20)
    fig.tight_layout()
    fig.subplots_adjust(top=.92)
# ### Information Coefficient: Overall
# We'll also take a look at the overall IC value, which is often reported but does not necessarily match the goal of a daily trading strategy that uses the model return predictions as well as the daily IC.
ic.info()
# #### Best Parameters
# Directionally, and for shorter periods, similar hyperparameter settings work best (while the IC values are higher):
ic.groupby('t').apply(lambda x: x.nlargest(3, 'ic'))
# #### Visualization
# Overall IC across horizons for 21-day test windows.
g = sns.catplot(x='t',
                y='ic',
                col='train_length',
                row='test_length',
                data=ic[(ic.test_length == 21) & (ic.t < 21)],
                kind='box')
# Drill into one configuration: IC by tree count for a fixed horizon and
# train/test length, faceted by the randomization hyperparameters.
t = 1
train_length = 756
test_length = 21
g = sns.catplot(x='rounds',
                y='ic',
                col='feature_fraction',
                hue='bagging_fraction',
                row='min_data_in_leaf',
                data=ic[(ic.t == t) &
                        (ic.train_length == train_length) &
                        (ic.test_length == test_length)],
                kind='swarm');
# ### Random Forest vs Linear Regression
# Let's compare the best-performing (in-sample) random forest models to our linear regression baseline:
# Reload the linear-regression baseline metrics for comparison.
# NOTE(review): saved above under results_path but read here from model_path —
# verify both point to the same directory.
lr_metrics = pd.read_csv(model_path / 'lin_reg_performance.csv')
lr_metrics.info()
daily_ic_avg.info()
# The results are mixed: for the shortest and longest horizons, the random forest outperforms (slightly for 1 day), while linear regression is competitive for the intermediate horizons:
with sns.axes_style("white"):
    ax = (ic.groupby('t').ic.max().to_frame('Random Forest')
          .join(lr_metrics.groupby('lookahead').ic.max().to_frame('Linear Regression')).plot.barh())
    ax.set_ylabel('Lookahead')
    ax.set_xlabel('Information Coefficient')
    sns.despine()
    plt.tight_layout();
# ## Generate predictions
# To build and evaluate a trading strategy, we create predictions for the 2018-19 period using the 10 best models that we then ensemble:
# Hyperparameter columns that define one CV configuration.
param_cols = ['train_length', 'test_length', 'bagging_fraction',
              'feature_fraction', 'min_data_in_leaf', 'rounds']

def get_params(data, t=5, best=0):
    """Return the ``best``-th ranked hyperparameter configuration for horizon ``t``.

    Parameters
    ----------
    data : DataFrame with columns 't', 'ic' and all of ``param_cols``.
    t : prediction horizon to filter on.
    best : rank (0 = highest IC) of the configuration to return.

    Returns
    -------
    (params, rounds) : numeric Series of hyperparameters (without 'rounds')
        and the number of boosting rounds as an int.
    """
    ranked = data[data.t == t].sort_values('ic', ascending=False)
    row = ranked.iloc[best].loc[param_cols]
    n_rounds = int(row.rounds)
    settings = pd.to_numeric(row.drop('rounds'))
    return settings, n_rounds
# +
# Re-declare the fixed random-forest parameters and the predictions store.
base_params = dict(boosting_type='rf',
                   objective='regression',
                   bagging_freq=1,
                   verbose=-1)
store = Path(results_path / 'predictions.h5')
# -
# Generate out-of-sample predictions with the 10 best configurations per
# horizon and record the daily IC of each.
for lookahead in [1, 5, 10, 21]:
    # NOTE(review): this guard restricts the run to the 1-day horizon only;
    # remove it to produce predictions for all four horizons.
    if lookahead > 1: continue
    print(f'\nLookahead: {lookahead:02}')
    data = (pd.read_hdf('data.h5', 'stooq/japan/equities'))
    labels = sorted(data.filter(like='fwd').columns)
    features = data.columns.difference(labels).tolist()
    label = f'fwd_ret_{lookahead:02}'
    data = data.loc[:, features + [label]].dropna()
    # integer-encode categoricals from zero, as LightGBM expects
    categoricals = ['year', 'weekday', 'month']
    for feature in categoricals:
        data[feature] = pd.factorize(data[feature], sort=True)[0]
    lgb_data = lgb.Dataset(data=data[features],
                           label=data[label],
                           categorical_feature=categoricals,
                           free_raw_data=False)
    # train one model per top-10 configuration (ranked by avg daily IC)
    for position in range(10):
        params, num_boost_round = get_params(daily_ic_avg,
                                             t=lookahead,
                                             best=position)
        params = params.to_dict()
        params['min_data_in_leaf'] = int(params['min_data_in_leaf'])
        train_length = int(params.pop('train_length'))
        test_length = int(params.pop('test_length'))
        params.update(base_params)
        print(f'\tPosition: {position:02}')
        n_splits = int(2 * YEAR / test_length)
        cv = MultipleTimeSeriesCV(n_splits=n_splits,
                                  test_period_length=test_length,
                                  lookahead=lookahead,
                                  train_period_length=train_length)
        predictions = []
        start = time()
        for i, (train_idx, test_idx) in enumerate(cv.split(X=data), 1):
            lgb_train = lgb_data.subset(train_idx.tolist()).construct()
            model = lgb.train(params=params,
                              train_set=lgb_train,
                              num_boost_round=num_boost_round,
                              verbose_eval=False)
            test_set = data.iloc[test_idx, :]
            y_test = test_set.loc[:, label].to_frame('y_test')
            y_pred = model.predict(test_set.loc[:, model.feature_name()])
            predictions.append(y_test.assign(prediction=y_pred))
        # one prediction column per model position
        if position == 0:
            test_predictions = (pd.concat(predictions)
                                .rename(columns={'prediction': position}))
        else:
            test_predictions[position] = pd.concat(predictions).prediction
    # daily IC per model position over the pooled test predictions
    by_day = test_predictions.groupby(level='date')
    for position in range(10):
        if position == 0:
            ic_by_day = by_day.apply(lambda x: spearmanr(x.y_test, x[position])[0]).to_frame()
        else:
            ic_by_day[position] = by_day.apply(lambda x: spearmanr(x.y_test, x[position])[0])
    test_predictions.to_hdf(store, f'test/{lookahead:02}')
| 11_decision_trees_random_forests/05_random_forest_return_signals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# we assume that we have the pycnn module in your path.
# we also assume that LD_LIBRARY_PATH includes a pointer to where libcnn_shared.so is.
from pycnn import *
# ## An LSTM/RNN overview:
#
# An (1-layer) RNN can be thought of as a sequence of cells, $h_1,...,h_k$, where $h_i$ indicates the time dimension.
#
# Each cell $h_i$ has an input $x_i$ and an output $r_i$. In addition to $x_i$, cell $h_i$ receives as input also $r_{i-1}$.
#
# In a deep (multi-layer) RNN, we don't have a sequence, but a grid. That is we have several layers of sequences:
#
# * $h_1^3,...,h_k^3$
# * $h_1^2,...,h_k^2$
# * $h_1^1,...h_k^1$,
#
# Let $r_i^j$ be the output of cell $h_i^j$. Then:
#
# The input to $h_i^1$ is $x_i$ and $r_{i-1}^1$.
#
# The input to $h_i^2$ is $r_i^1$ and $r_{i-1}^2$,
# and so on.
#
#
#
#
#
#
# ## The LSTM (RNN) Interface
#
# RNN / LSTM / GRU follow the same interface. We have a "builder" which is in charge of defining the parameters for the sequence.
# NOTE: this tutorial section is Python 2 code (print statements below).
# Create a model and a 2-layer LSTM builder; the builder registers the
# internal RNN parameters with the model.
model = Model()
NUM_LAYERS=2
INPUT_DIM=50
HIDDEN_DIM=10
builder = LSTMBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# or:
# builder = SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# Note that when we create the builder, it adds the internal RNN parameters to the `model`.
# We do not need to care about them, but they will be optimized together with the rest of the network's parameters.
s0 = builder.initial_state()
x1 = vecInput(INPUT_DIM)
s1=s0.add_input(x1)
y1 = s1.output()
# here, we add x1 to the RNN, and the output we get from the top is y (a HIDDEN_DIM-dim vector)
y1.npvalue().shape
s2=s1.add_input(x1) # we can add another input
y2=s2.output()
# If our LSTM/RNN was one layer deep, y2 would be equal to the hidden state. However, since it is 2 layers deep, y2 is only the hidden state (= output) of the last layer.
# If we were to want access to the all the hidden state (the output of both the first and the last layers), we could use the `.h()` method, which returns a list of expressions, one for each layer:
print s2.h()
# The same interface that we saw until now for the LSTM, holds also for the Simple RNN:
# +
# create a simple rnn builder (same interface as the LSTM builder)
rnnbuilder=SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# initialize a new graph, and a new sequence
rs0 = rnnbuilder.initial_state()
# add inputs
rs1 = rs0.add_input(x1)
ry1 = rs1.output()
print "all layers:", s1.h()
# -
print s1.s()
# To summarize, when calling `.add_input(x)` on an `RNNState` what happens is that the state creates a new RNN/LSTM column, passing it:
# 1. the state of the current RNN column
# 2. the input `x`
#
# The state is then returned, and we can call its `output()` method to get the output `y`, which is the output at the top of the column. We can access the outputs of all the layers (not only the last one) using the `.h()` method of the state.
#
# **`.s()`** The internal state of the RNN may be more involved than just the outputs $h$. This is the case for the LSTM, that keeps an extra "memory" cell, that is used when calculating $h$, and which is also passed to the next column. To access the entire hidden state, we use the `.s()` method.
#
# The output of `.s()` differs by the type of RNN being used. For the simple-RNN, it is the same as `.h()`. For the LSTM, it is more involved.
#
# +
# Compare the hidden outputs (.h) and full internal state (.s) of the
# simple RNN vs the LSTM (which carries extra memory-cell expressions).
rnn_h = rs1.h()
rnn_s = rs1.s()
print "RNN h:", rnn_h
print "RNN s:", rnn_s
lstm_h = s1.h()
lstm_s = s1.s()
print "LSTM h:", lstm_h
print "LSTM s:", lstm_s
# -
# As we can see, the LSTM has two extra state expressions (one for each hidden layer) before the outputs h.
# ## Extra options in the RNN/LSTM interface
#
# **Stack LSTM** The RNN's are shaped as a stack: we can remove the top and continue from the previous state.
# This is done either by remembering the previous state and continuing it with a new `.add_input()`, or using
# we can access the previous state of a given state using the `.prev()` method of state.
#
# **Initializing a new sequence with a given state** When we call `builder.initial_state()`, we are assuming the state has random /0 initialization. If we want, we can specify a list of expressions that will serve as the initial state. The expected format is the same as the results of a call to `.final_s()`. TODO: this is not supported yet.
# +
# Demonstrate the stack behavior: branch a sequence from an earlier state.
s2=s1.add_input(x1)
s3=s2.add_input(x1)
s4=s3.add_input(x1)
# let's continue s3 with a new input.
s5=s3.add_input(x1)
# we now have two different sequences:
# s0,s1,s2,s3,s4
# s0,s1,s2,s3,s5
# the two sequences share parameters.
assert(s5.prev() == s3)
assert(s4.prev() == s3)
s6=s3.prev().add_input(x1)
# we now have an additional sequence:
# s0,s1,s2,s6
# -
s6.h()
s6.s()
# ## Aside: memory efficient transduction
# The `RNNState` interface is convenient, and allows for incremental input construction.
# However, sometimes we know the sequence of inputs in advance, and care only about the sequence of
# output expressions. In this case, we can use the `add_inputs(xs)` method, where `xs` is a list of Expression.
# Feed a whole input sequence at once and collect per-step states/outputs.
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
states = state.add_inputs(xs)
outputs = [s.output() for s in states]
hs = [s.h() for s in states]
print outputs, hs
# This is convenient.
#
# What if we do not care about `.s()` and `.h()`, and do not need to access the previous vectors? In such cases
# we can use the `transduce(xs)` method instead of `add_inputs(xs)`.
# `transduce` takes in a sequence of `Expression`s, and returns a sequence of `Expression`s.
# As a consequence of not returning `RNNState`s, `transduce` is much more memory efficient than `add_inputs` or a series of calls to `add_input`.
# transduce maps a list of expressions to output expressions without
# keeping intermediate RNNStates (more memory efficient).
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
outputs = state.transduce(xs)
print outputs
# ## Character-level LSTM
#
# Now that we know the basics of RNNs, let's build a character-level LSTM language-model.
# We have a sequence LSTM that, at each step, gets as input a character, and needs to predict the next character.
# +
# Character-level language model setup: vocabulary, builders and the
# softmax projection parameters (R, bias) plus the character lookup table.
import random
from collections import defaultdict
from itertools import count
import sys
LAYERS = 2
INPUT_DIM = 50
HIDDEN_DIM = 50
characters = list("abcdefghijklmnopqrstuvwxyz ")
characters.append("<EOS>")
int2char = list(characters)
char2int = {c:i for i,c in enumerate(characters)}
VOCAB_SIZE = len(characters)
# +
model = Model()
srnn = SimpleRNNBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
lstm = LSTMBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
model.add_lookup_parameters("lookup", (VOCAB_SIZE, INPUT_DIM))
model.add_parameters("R", (VOCAB_SIZE, HIDDEN_DIM))
model.add_parameters("bias", (VOCAB_SIZE))
def do_one_sentence(rnn, sentence):
    """Build and return the loss expression of *rnn* on one sentence.

    Wraps the sentence in <EOS> markers, feeds each character through the
    network, and sums the negative log-likelihood of each next character.
    Starts a fresh computation graph via renew_cg().
    """
    renew_cg()
    state = rnn.initial_state()
    R = parameter(model["R"])
    bias = parameter(model["bias"])
    lookup = model["lookup"]
    chars = ["<EOS>"] + list(sentence) + ["<EOS>"]
    char_ids = [char2int[c] for c in chars]
    losses = []
    for current, following in zip(char_ids, char_ids[1:]):
        state = state.add_input(lookup[current])
        probs = softmax(R * state.output() + bias)
        losses.append(-log(pick(probs, following)))
    return esum(losses)
# generate from model:
def generate(rnn):
    """Sample one sentence from the trained character model *rnn*.

    Starts from the <EOS> symbol and repeatedly samples the next character
    from the softmax distribution until <EOS> is produced again; the final
    <EOS> is stripped from the returned string.
    """
    def sample(probs):
        # inverse-CDF sampling over a list of probabilities
        remaining = random.random()
        for idx, p in enumerate(probs):
            remaining -= p
            if remaining <= 0:
                break
        return idx
    # setup the sentence
    renew_cg()
    R = parameter(model["R"])
    bias = parameter(model["bias"])
    lookup = model["lookup"]
    state = rnn.initial_state().add_input(lookup[char2int["<EOS>"]])
    generated = []
    while True:
        probs = softmax(R * state.output() + bias).vec_value()
        next_char = sample(probs)
        generated.append(int2char[next_char])
        if generated[-1] == "<EOS>":
            break
        state = state.add_input(lookup[next_char])
    return "".join(generated[:-1])  # strip the <EOS>
# train, and generate every 5 samples
# NOTE: Python 2 code (print statement, xrange). Fits the model to a single
# sentence with plain SGD for 200 iterations; every 5th iteration prints the
# current loss and a sentence sampled from the model.
def train(rnn, sentence):
    trainer = SimpleSGDTrainer(model)
    for i in xrange(200):
        loss = do_one_sentence(rnn, sentence)
        loss_value = loss.value()
        loss.backward()
        trainer.update()
        if i % 5 == 0:
            print loss_value,
            print generate(rnn)
# -
# Notice that:
# 1. We pass the same rnn-builder to `do_one_sentence` over and over again.
# We must re-use the same rnn-builder, as this is where the shared parameters are kept.
# 2. We `renew_cg()` before each sentence -- because we want to have a new graph (new network) for this sentence.
# The parameters will be shared through the model and the shared rnn-builder.
# Fit each architecture to the same sentence and watch the samples improve.
sentence = "a quick brown fox jumped over the lazy dog"
train(srnn, sentence)
sentence = "a quick brown fox jumped over the lazy dog"
train(lstm, sentence)
# The model seem to learn the sentence quite well.
#
# Somewhat surprisingly, the Simple-RNN model learn quicker than the LSTM!
#
# How can that be?
#
# The answer is that we are cheating a bit. The sentence we are trying to learn
# has each letter-bigram exactly once. This means a simple trigram model can memorize
# it very well.
#
# Try it out with more complex sequences.
#
#
# A sentence with repeated bigrams is harder for a trigram-style memorizer.
train(srnn, "these pretzels are making me thirsty")
| cnn/pyexamples/tutorials/.ipynb_checkpoints/RNNs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import os
import numpy as np
import json
from PIL import Image
from numpy import unravel_index
# Crop a candidate red-light template from RL-269.jpg and display it.
datapath = '../CS148/RedLights2011_Medium/'
rl_file = 'RL-269.jpg'
rl_img = np.array(Image.open(datapath+rl_file))
init_redlightkernel = rl_img[190:200, 310:320, :]
plt.imshow(rl_img)
plt.show()
# second crop overwrites the first candidate kernel
init_redlightkernel = rl_img[172:180, 325:333, :]
plt.imshow(init_redlightkernel)
plt.show()
# +
# Same exploration on RL-334.jpg: crop and display a 20x20 kernel candidate.
datapath = '../CS148/RedLights2011_Medium/'
rl_file = 'RL-334.jpg'
rl_img = np.array(Image.open(datapath+rl_file))
plt.imshow(rl_img)
plt.show()
init_redlightkernel = rl_img[217:237, 307:327, :]
plt.imshow(init_redlightkernel)
plt.show()
# -
# Experiment with normalizing the red channel of a kernel from RL-012.jpg
# and check its self-correlation values.
datapath = '../CS148/RedLights2011_Medium/'
rl_file = 'RL-012.jpg'
rl_img = np.array(Image.open(datapath+rl_file))
init_redlightkernel = rl_img[38:54, 301:316, :]
plt.imshow(init_redlightkernel)
plt.show()
red_init_redlightkernel = init_redlightkernel[:,:,0]
# +
# z-score normalization of the red channel
red_init_redlightkernel = (red_init_redlightkernel - np.mean(red_init_redlightkernel))/np.std(red_init_redlightkernel)
# -
cross = np.multiply(red_init_redlightkernel, red_init_redlightkernel)
np.max(cross.flatten())
np.sum(red_init_redlightkernel*red_init_redlightkernel)
# alternative: mean-center then min-max scale to [0, 1]
red_init_redlightkernel = red_init_redlightkernel - np.mean(red_init_redlightkernel)
red_init_redlightkernel = (red_init_redlightkernel-np.min(red_init_redlightkernel))/(np.max(red_init_redlightkernel)-np.min(red_init_redlightkernel))
cross = np.multiply(red_init_redlightkernel, red_init_redlightkernel)
np.max(cross.flatten())
(cross+cross+cross)/3
# Split the ground-truth annotations into train/test according to the
# saved list of training file names, then persist both splits as JSON.
file_names_train = np.load('../data/hw02_splits/file_names_train.npy')
gts_path = '../'
with open(os.path.join(gts_path, 'formatted_annotations_students.json'),'r') as f:
    gts = json.load(f)
gts_train = {}
gts_test = {}
gts_keys = list(gts.keys())
for key in gts_keys:
    if key in file_names_train:
        gts_train[key] = gts[key]
    else:
        gts_test[key] = gts[key]
# +
gts_path = '../data/hw02_annotations'
with open(os.path.join(gts_path, 'annotations_train.json'),'w') as f:
    json.dump(gts_train,f)
with open(os.path.join(gts_path, 'annotations_test.json'),'w') as f:
    json.dump(gts_test,f)
# +
import os
import json
import numpy as np
def compute_iou(box_1, box_2):
    '''
    Return the intersection-over-union (IoU) of two bounding boxes.

    Each box is given as [x1, x2, y1, y2]; assumes x1 <= x2 and y1 <= y2.
    The result is in [0, 1].
    '''
    A_x1 = box_1[0]
    A_x2 = box_1[1]
    A_y1 = box_1[2]
    A_y2 = box_1[3]
    B_x1 = box_2[0]
    B_x2 = box_2[1]
    B_y1 = box_2[2]
    B_y2 = box_2[3]
    # The overlap starts at the larger of the two minima and ends at the
    # smaller of the two maxima on each axis.
    inter_x1 = max(A_x1, B_x1)
    inter_x2 = min(A_x2, B_x2)
    # BUG FIX: the y extent previously used min of the y1s and max of the
    # y2s, i.e. the union extent — overestimating the intersection and
    # allowing IoU > 1 (tripping the assert below).
    inter_y1 = max(A_y1, B_y1)
    inter_y2 = min(A_y2, B_y2)
    intersect_area = max(0, inter_y2 - inter_y1) * max(0, inter_x2 - inter_x1)
    box1_area = (A_y2-A_y1) * (A_x2-A_x1)
    box2_area = (B_y2-B_y1) * (B_x2-B_x1)
    # debug prints removed; union = area1 + area2 - intersection
    iou = intersect_area/(box1_area+box2_area-intersect_area)
    assert (iou >= 0) and (iou <= 1.0)
    return iou
def compute_center_dist(box_1, box_2):
    '''
    Return the Euclidean distance between the centers of two bounding
    boxes, each given as [x1, x2, y1, y2].
    '''
    A_x1 = box_1[0]
    A_x2 = box_1[1]
    A_y1 = box_1[2]
    A_y2 = box_1[3]
    B_x1 = box_2[0]
    B_x2 = box_2[1]
    B_y1 = box_2[2]
    B_y2 = box_2[3]
    # BUG FIX: centers are midpoints (x1 + x2) / 2, not half-extents
    # (x2 - x1) / 2 as previously computed; int() truncation also dropped
    # to keep sub-pixel precision.
    A_center_x = (A_x1 + A_x2) / 2
    A_center_y = (A_y1 + A_y2) / 2
    B_center_x = (B_x1 + B_x2) / 2
    B_center_y = (B_y1 + B_y2) / 2
    return np.sqrt((B_center_y-A_center_y)**2 + (B_center_x-A_center_x)**2)
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
    '''
    Return the number of true positives, false positives, and false
    negatives over a collection of images.

    <preds> is a dictionary mapping file name to a list of predicted
    bounding boxes [x1, x2, y1, y2, confidence].
    <gts> is a dictionary mapping file name to a list of ground truth
    bounding boxes [x1, x2, y1, y2].
    <iou_thr> is the minimum IoU for a prediction to match a ground truth;
    <conf_thr> is the minimum confidence used by the center-distance
    fallback match.
    '''
    TP = 0
    FP = 0
    FN = 0
    for pred_file in preds.keys():
        gt = gts[pred_file]
        pred = preds[pred_file]
        N = len(gt)
        M = len(pred)
        correct_detections = 0
        # BUG FIX: the matched-prediction bookkeeping must restart for each
        # image; previously skip_idx was created once before the loop, so
        # prediction indices matched in one image wrongly suppressed the
        # predictions at the same positions in every later image.
        skip_idx = []
        for i in range(len(gt)):
            # greedily match this ground truth to the unused prediction
            # with the highest IoU above the threshold
            max_iou = 0
            max_iou_idx = -1
            for j in range(len(pred)):
                if j in skip_idx:
                    continue
                iou = compute_iou(pred[j][:4], gt[i])
                if iou > max_iou and iou >= iou_thr:
                    max_iou = iou
                    max_iou_idx = j
            if max_iou > 0:
                skip_idx.append(max_iou_idx)
                correct_detections += 1
            else:
                # fallback: accept a confident prediction whose center is
                # within 5 pixels of the ground truth
                min_dist = 10000
                min_dist_idx = -1
                for j in range(len(pred)):
                    if j in skip_idx:
                        continue
                    d = compute_center_dist(pred[j][:4], gt[i])
                    if d <= min(5, min_dist) and pred[j][4] >= conf_thr:
                        min_dist = d
                        min_dist_idx = j
                if min_dist != 10000:
                    skip_idx.append(min_dist_idx)
                    correct_detections += 1
        TP += correct_detections
        FP += M - correct_detections
        FN += N - correct_detections
    return TP, FP, FN
# +
# set a path for predictions and annotations:
preds_path = '../data/hw02_preds'
gts_path = '../data/hw02_annotations'
# load splits:
split_path = '../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
'''
Load training data.
'''
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
    preds_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
    gts_train = json.load(f)
# -
# Peek at one prediction array, then gather all confidence scores
# (column 4 of each prediction) to use as candidate thresholds.
test = None
for key in preds_train:
    test = np.array(preds_train[key])
    break
iou_threshold = 0.5
confidence_thrs = []
for fname in preds_train:
    confidence_thrs.extend(np.array(preds_train[fname])[:,4])
# np.sort(np.array([np.array(preds_train[fname])[4] for fname in preds_train],dtype=float)) # using (ascending) list of confidence scores as threshold
confidence_thrs = np.sort(confidence_thrs)
# keep every 30th threshold to limit the number of PR-curve points
confidence_thrs_downsampled = []
for i in range(30, len(confidence_thrs), 30):
    confidence_thrs_downsampled.append(confidence_thrs[i])
# +
# Evaluate TP/FP/FN at each confidence threshold for the PR curve.
# NOTE(review): this iterates the full threshold list, not the
# downsampled one built above — confirm which is intended.
tp_train = np.zeros(len(confidence_thrs))
fp_train = np.zeros(len(confidence_thrs))
fn_train = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
    tp_train[i], fp_train[i], fn_train[i] = compute_counts(preds_train, gts_train, iou_thr=0.5, conf_thr=conf_thr)
# Plot training set PR curves
# -
tp_train
| .ipynb_checkpoints/DataExploration_visualization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# `pip.main` was removed from pip's public API in pip 10; the supported way
# to install from a program is to run pip as a subprocess of the current
# interpreter (guarantees the install lands in this environment).
import subprocess
import sys
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'xlrd'])
# Read the actions workbook and keep only the rows whose TypeAction is "Like".
input_file = "input/input.xlsx"
df = pd.read_excel(input_file)
likes = df[df.TypeAction == "Like"]
likes.head()
# -
# Sanity check: which users / comment texts / sentiment values remain.
print(likes.User_name.unique())
print(likes.CommentsForItem_message.unique())
print(likes.Sentiment.unique())
# +
# let's drop the commentsForItem and Sentiment columns, as they are empty for the likes...
# (positional `axis` was removed from DataFrame.drop in pandas 2.0; the
# explicit `columns=` keyword is the supported, clearer spelling)
likes.drop(columns=['CommentsForItem_message', 'Sentiment'], inplace=True)
# also drop the columns with no information
likes.drop(columns=['TypeAction', 'Action_id'], inplace=True)
# NOTE(review): `likes` is a slice of `df`, so in-place drops may trigger
# SettingWithCopyWarning; consider `likes = df[...].copy()` upstream.
likes.head()
# -
likes.describe()
| Likes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python tensor
# language: python
# name: myenv
# ---
import numpy as np
import pandas as pd
import tensorflow as tf
from data_process import build_vocab_morphs, batch_iter, sentence_to_onehot_morphs, sentence_to_index_morphs
from models import logistic_regression, LSTM, CNN
# Tab-separated files with `document` (text) and `label` columns.
train = pd.read_csv('./data/train-5T.txt', delimiter='\t')
test = pd.read_csv('./data/test-1T.txt', delimiter='\t')
X_train = train.document
Y_train = train.label
X_test = test.document
Y_test = test.label
# Cap the vocabulary size at 50k; build_vocab_morphs presumably returns
# (vocab mapping, _, vocab size) -- verify against data_process.py.
max_vocab = 50000
vocab, _, vocab_size = build_vocab_morphs(X_train, max_vocab)
# # Sentiment Analysis with logistic regression using morphs
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
# +
# TF1-style session setup; allow_growth avoids grabbing all GPU memory up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.reset_default_graph()
sess = tf.Session(config=config)
model = logistic_regression(sess=sess, vocab_size=vocab_size, lr=1e-1)
train_acc = []
avgLoss = []
# One-hot encode the fixed test set once, outside the training loop.
x_test = sentence_to_onehot_morphs(X_test, vocab)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_onehot_morphs(x_train, vocab)
    # accuracy is measured before this batch's weight update
    acc = model.get_accuracy(x_train, y_train)
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        # periodic report of running train averages; accumulators reset after
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
# -
# # Sentiment Analysis with LSTM using morphs
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
# +
# Same training harness as the logistic-regression cell above, but the LSTM
# consumes index sequences (sentence_to_index_morphs) rather than one-hot vectors.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.reset_default_graph()
sess = tf.Session(config=config)
model = LSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)
train_acc = []
avgLoss = []
x_test = sentence_to_index_morphs(X_test, vocab)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_index_morphs(x_train, vocab)
    # accuracy measured before this batch's weight update
    acc = model.get_accuracy(x_train, y_train)
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        # periodic report; running averages reset afterwards
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
# -
# # Sentiment Analysis with CNN using morphs
batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)
# +
# Same harness again; the CNN additionally needs a fixed sequence length
# (max_length) so sentences are padded/truncated to 30 indices.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.reset_default_graph()
sess = tf.Session(config=config)
max_length = 30
model = CNN(sess=sess, vocab_size=vocab_size, sequence_length=max_length, lr=1e-2)
train_acc = []
avgLoss = []
x_test = sentence_to_index_morphs(X_test, vocab, max_length)
for step, batch in enumerate(batches):
    x_train, y_train = zip(*batch)
    x_train = sentence_to_index_morphs(x_train, vocab, max_length)
    # accuracy measured before this batch's weight update
    acc = model.get_accuracy(x_train, y_train)
    l, _ = model.train(x_train, y_train)
    train_acc.append(acc)
    avgLoss.append(l)
    if step % 100 == 0:
        # periodic report; running averages reset afterwards
        test_loss = model.get_loss(x_test, Y_test)
        print('batch:', '%04d' % step, '\ntrain loss:', '%.5f' % np.mean(avgLoss), '\ttest loss:', '%.5f' % test_loss)
        test_acc = model.get_accuracy(x_test, Y_test)
        print('train accuracy:', '%.3f' % np.mean(train_acc), '\ttest accuracy:', '%.3f' % test_acc, '\n')
        avgLoss = []
        train_acc = []
| 01-sentiment_analysis/08-Morphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Adagrad
# :label:`sec_adagrad`
#
# Let us begin by considering learning problems with features that occur infrequently.
#
# ## Sparse Features and Learning Rates
#
# Imagine that we are training a language model. To get good accuracy we typically want to decrease the learning rate as we keep on training, usually at a rate of $\mathcal{O}(t^{-\frac{1}{2}})$ or slower. Now consider a model training on sparse features, i.e., features that occur only infrequently. This is common for natural language, e.g., it is a lot less likely that we will see the word *preconditioning* than *learning*. However, it is also common in other areas such as computational advertising and personalized collaborative filtering. After all, there are many things that are of interest only for a small number of people.
#
# Parameters associated with infrequent features only receive meaningful updates whenever these features occur. Given a decreasing learning rate we might end up in a situation where the parameters for common features converge rather quickly to their optimal values, whereas for infrequent features we are still short of observing them sufficiently frequently before their optimal values can be determined. In other words, the learning rate either decreases too slowly for frequent features or too quickly for infrequent ones.
#
# A possible hack to redress this issue would be to count the number of times we see a particular feature and to use this as a clock for adjusting learning rates. That is, rather than choosing a learning rate of the form $\eta = \frac{\eta_0}{\sqrt{t + c}}$ we could use $\eta_i = \frac{\eta_0}{\sqrt{s(i, t) + c}}$. Here $s(i, t)$ counts the number of nonzeros for feature $i$ that we have observed up to time $t$. This is actually quite easy to implement at no meaningful overhead. However, it fails whenever we do not quite have sparsity but rather just data where the gradients are often very small and only rarely large. After all, it is unclear where one would draw the line between something that qualifies as an observed feature or not.
#
# Adagrad by :cite:`Duchi.Hazan.Singer.2011` addresses this by replacing the rather crude counter $s(i, t)$ by an aggregate of the squares of previously observed gradients. In particular, it uses $s(i, t+1) = s(i, t) + \left(\partial_i f(\mathbf{x})\right)^2$ as a means to adjust the learning rate. This has two benefits: first, we no longer need to decide just when a gradient is large enough. Second, it scales automatically with the magnitude of the gradients. Coordinates that routinely correspond to large gradients are scaled down significantly, whereas others with small gradients receive a much more gentle treatment. In practice this leads to a very effective optimization procedure for computational advertising and related problems. But this hides some of the additional benefits inherent in Adagrad that are best understood in the context of preconditioning.
#
#
# ## Preconditioning
#
# Convex optimization problems are good for analyzing the characteristics of algorithms. After all, for most nonconvex problems it is difficult to derive meaningful theoretical guarantees, but *intuition* and *insight* often carry over. Let us look at the problem of minimizing $f(\mathbf{x}) = \frac{1}{2} \mathbf{x}^\top \mathbf{Q} \mathbf{x} + \mathbf{c}^\top \mathbf{x} + b$.
#
# As we saw in :numref:`sec_momentum`, it is possible to rewrite this problem in terms of its eigendecomposition $\mathbf{Q} = \mathbf{U}^\top \boldsymbol{\Lambda} \mathbf{U}$ to arrive at a much simplified problem where each coordinate can be solved individually:
#
# $$f(\mathbf{x}) = \bar{f}(\bar{\mathbf{x}}) = \frac{1}{2} \bar{\mathbf{x}}^\top \boldsymbol{\Lambda} \bar{\mathbf{x}} + \bar{\mathbf{c}}^\top \bar{\mathbf{x}} + b.$$
#
# Here we used $\bar{\mathbf{x}} = \mathbf{U} \mathbf{x}$ and consequently $\bar{\mathbf{c}} = \mathbf{U} \mathbf{c}$. The modified problem has as its minimizer $\bar{\mathbf{x}} = -\boldsymbol{\Lambda}^{-1} \bar{\mathbf{c}}$ and minimum value $-\frac{1}{2} \bar{\mathbf{c}}^\top \boldsymbol{\Lambda}^{-1} \bar{\mathbf{c}} + b$. This is much easier to compute since $\boldsymbol{\Lambda}$ is a diagonal matrix containing the eigenvalues of $\mathbf{Q}$.
#
# If we perturb $\mathbf{c}$ slightly we would hope to find only slight changes in the minimizer of $f$. Unfortunately this is not the case. While slight changes in $\mathbf{c}$ lead to equally slight changes in $\bar{\mathbf{c}}$, this is not the case for the minimizer of $f$ (and of $\bar{f}$ respectively). Whenever the eigenvalues $\boldsymbol{\Lambda}_i$ are large we will see only small changes in $\bar{x}_i$ and in the minimum of $\bar{f}$. Conversely, for small $\boldsymbol{\Lambda}_i$ changes in $\bar{x}_i$ can be dramatic. The ratio between the largest and the smallest eigenvalue is called the condition number of an optimization problem.
#
# $$\kappa = \frac{\boldsymbol{\Lambda}_1}{\boldsymbol{\Lambda}_d}.$$
#
# If the condition number $\kappa$ is large, it is difficult to solve the optimization problem accurately. We need to ensure that we are careful in getting a large dynamic range of values right. Our analysis leads to an obvious, albeit somewhat naive question: couldn't we simply "fix" the problem by distorting the space such that all eigenvalues are $1$. In theory this is quite easy: we only need the eigenvalues and eigenvectors of $\mathbf{Q}$ to rescale the problem from $\mathbf{x}$ to one in $\mathbf{z} := \boldsymbol{\Lambda}^{\frac{1}{2}} \mathbf{U} \mathbf{x}$. In the new coordinate system $\mathbf{x}^\top \mathbf{Q} \mathbf{x}$ could be simplified to $\|\mathbf{z}\|^2$. Alas, this is a rather impractical suggestion. Computing eigenvalues and eigenvectors is in general *much more* expensive than solving the actual problem.
#
# While computing eigenvalues exactly might be expensive, guessing them and computing them even somewhat approximately may already be a lot better than not doing anything at all. In particular, we could use the diagonal entries of $\mathbf{Q}$ and rescale it accordingly. This is *much* cheaper than computing eigenvalues.
#
# $$\tilde{\mathbf{Q}} = \mathrm{diag}^{-\frac{1}{2}}(\mathbf{Q}) \mathbf{Q} \mathrm{diag}^{-\frac{1}{2}}(\mathbf{Q}).$$
#
# In this case we have $\tilde{\mathbf{Q}}_{ij} = \mathbf{Q}_{ij} / \sqrt{\mathbf{Q}_{ii} \mathbf{Q}_{jj}}$ and specifically $\tilde{\mathbf{Q}}_{ii} = 1$ for all $i$. In most cases this simplifies the condition number considerably. For instance, in the cases we discussed previously, this would entirely eliminate the problem at hand since the problem is axis aligned.
#
# Unfortunately we face yet another problem: in deep learning we typically do not even have access to the second derivative of the objective function: for $\mathbf{x} \in \mathbb{R}^d$ the second derivative even on a minibatch may require $\mathcal{O}(d^2)$ space and work to compute, thus making it practically infeasible. The ingenious idea of Adagrad is to use a proxy for that elusive diagonal of the Hessian that is both relatively cheap to compute and effective---the magnitude of the gradient itself.
#
# In order to see why this works, let us look at $\bar{f}(\bar{\mathbf{x}})$. We have that
#
# $$\partial_{\bar{\mathbf{x}}} \bar{f}(\bar{\mathbf{x}}) = \boldsymbol{\Lambda} \bar{\mathbf{x}} + \bar{\mathbf{c}} = \boldsymbol{\Lambda} \left(\bar{\mathbf{x}} - \bar{\mathbf{x}}_0\right),$$
#
# where $\bar{\mathbf{x}}_0$ is the minimizer of $\bar{f}$. Hence the magnitude of the gradient depends both on $\boldsymbol{\Lambda}$ and the distance from optimality. If $\bar{\mathbf{x}} - \bar{\mathbf{x}}_0$ didn't change, this would be all that's needed. After all, in this case the magnitude of the gradient $\partial_{\bar{\mathbf{x}}} \bar{f}(\bar{\mathbf{x}})$ suffices. Since AdaGrad is a stochastic gradient descent algorithm, we will see gradients with nonzero variance even at optimality. As a result we can safely use the variance of the gradients as a cheap proxy for the scale of the Hessian. A thorough analysis is beyond the scope of this section (it would be several pages). We refer the reader to :cite:`Duchi.Hazan.Singer.2011` for details.
#
# ## The Algorithm
#
# Let us formalize the discussion from above. We use the variable $\mathbf{s}_t$ to accumulate past gradient variance as follows.
#
# $$\begin{aligned}
# \mathbf{g}_t & = \partial_{\mathbf{w}} l(y_t, f(\mathbf{x}_t, \mathbf{w})), \\
# \mathbf{s}_t & = \mathbf{s}_{t-1} + \mathbf{g}_t^2, \\
# \mathbf{w}_t & = \mathbf{w}_{t-1} - \frac{\eta}{\sqrt{\mathbf{s}_t + \epsilon}} \cdot \mathbf{g}_t.
# \end{aligned}$$
#
# Here the operations are applied coordinate-wise. That is, $\mathbf{v}^2$ has entries $v_i^2$. Likewise $\frac{1}{\sqrt{v}}$ has entries $\frac{1}{\sqrt{v_i}}$ and $\mathbf{u} \cdot \mathbf{v}$ has entries $u_i v_i$. As before $\eta$ is the learning rate and $\epsilon$ is an additive constant that ensures that we do not divide by $0$. Last, we initialize $\mathbf{s}_0 = \mathbf{0}$.
#
# Just like in the case of momentum we need to keep track of an auxiliary variable, in this case to allow for an individual learning rate per coordinate. This does not increase the cost of Adagrad significantly relative to SGD, simply since the main cost is typically to compute $l(y_t, f(\mathbf{x}_t, \mathbf{w}))$ and its derivative.
#
# Note that accumulating squared gradients in $\mathbf{s}_t$ means that $\mathbf{s}_t$ grows essentially at linear rate (somewhat slower than linearly in practice, since the gradients initially diminish). This leads to an $\mathcal{O}(t^{-\frac{1}{2}})$ learning rate, albeit adjusted on a per coordinate basis. For convex problems this is perfectly adequate. In deep learning, though, we might want to decrease the learning rate rather more slowly. This led to a number of Adagrad variants that we will discuss in the subsequent chapters. For now let us see how it behaves in a quadratic convex problem. We use the same problem as before:
#
# $$f(\mathbf{x}) = 0.1 x_1^2 + 2 x_2^2.$$
#
# We are going to implement Adagrad using the same learning rate previously, i.e., $\eta = 0.4$. As we can see, the iterative trajectory of the independent variable is smoother. However, due to the cumulative effect of $\boldsymbol{s}_t$, the learning rate continuously decays, so the independent variable does not move as much during later stages of iteration.
#
# + origin_pos=3 tab=["tensorflow"]
# %matplotlib inline
from d2l import tensorflow as d2l
import math
import tensorflow as tf
# + origin_pos=4 tab=["tensorflow"]
def adagrad_2d(x1, x2, s1, s2):
    """One Adagrad step on f(x1, x2) = 0.1*x1^2 + 2*x2^2.

    Squared gradients accumulate into (s1, s2), giving each coordinate its
    own effective learning rate. Uses the module-level learning rate ``eta``.
    """
    eps = 1e-6  # keeps the denominator away from zero
    grad1, grad2 = 0.2 * x1, 4 * x2
    s1 = s1 + grad1 ** 2
    s2 = s2 + grad2 ** 2
    x1 = x1 - eta / math.sqrt(s1 + eps) * grad1
    x2 = x2 - eta / math.sqrt(s2 + eps) * grad2
    return x1, x2, s1, s2
def f_2d(x1, x2):
    """Elliptic-bowl objective f(x1, x2) = 0.1 * x1^2 + 2 * x2^2."""
    return 0.1 * x1 * x1 + 2 * x2 * x2
# Same learning rate as the earlier examples (see text above).
eta = 0.4
# Trace the optimizer's path over the objective's contour plot.
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
# + [markdown] origin_pos=5
# As we increase the learning rate to $2$ we see much better behavior. This already indicates that the decrease in learning rate might be rather aggressive, even in the noise-free case and we need to ensure that parameters converge appropriately.
#
# + origin_pos=6 tab=["tensorflow"]
# A larger rate compensates for Adagrad's aggressive per-coordinate decay.
eta = 2
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
# + [markdown] origin_pos=7
# ## Implementation from Scratch
#
# Just like the momentum method, Adagrad needs to maintain a state variable of the same shape as the parameters.
#
# + origin_pos=10 tab=["tensorflow"]
def init_adagrad_states(feature_dim):
    """Return zero-initialized Adagrad accumulators: one (feature_dim, 1)
    variable for the weights and a length-1 variable for the bias."""
    state_w = tf.Variable(tf.zeros((feature_dim, 1)))
    state_b = tf.Variable(tf.zeros(1))
    return (state_w, state_b)
def adagrad(params, grads, states, hyperparams):
    """Apply one Adagrad update in place to each parameter.

    Each state accumulates the elementwise squared gradients; the step is
    lr * g / sqrt(state + eps), i.e. a per-coordinate learning rate.
    """
    eps = 1e-6  # numerical safety for the square root / division
    lr = hyperparams['lr']
    for param, state, grad in zip(params, states, grads):
        state[:].assign(state + tf.math.square(grad))
        param[:].assign(param - lr * grad / tf.math.sqrt(state + eps))
# + [markdown] origin_pos=11
# Compared to the experiment in :numref:`sec_minibatch_sgd` we use a
# larger learning rate to train the model.
#
# + origin_pos=12 tab=["tensorflow"]
# Train with the from-scratch Adagrad at lr=0.1 (larger than in
# :numref:`sec_minibatch_sgd`, as noted above).
data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
d2l.train_ch11(adagrad, init_adagrad_states(feature_dim),
               {'lr': 0.1}, data_iter, feature_dim);
# + [markdown] origin_pos=13
# ## Concise Implementation
#
# Using the `Trainer` instance of the algorithm `adagrad`, we can invoke the Adagrad algorithm in Gluon.
#
# + origin_pos=16 tab=["tensorflow"]
trainer = tf.keras.optimizers.Adagrad
d2l.train_concise_ch11(trainer, {'learning_rate' : 0.1}, data_iter)
# + [markdown] origin_pos=17
# ## Summary
#
# * Adagrad decreases the learning rate dynamically on a per-coordinate basis.
# * It uses the magnitude of the gradient as a means of adjusting how quickly progress is achieved - coordinates with large gradients are compensated with a smaller learning rate.
# * Computing the exact second derivative is typically infeasible in deep learning problems due to memory and computational constraints. The gradient can be a useful proxy.
# * If the optimization problem has a rather uneven structure Adagrad can help mitigate the distortion.
# * Adagrad is particularly effective for sparse features where the learning rate needs to decrease more slowly for infrequently occurring terms.
# * On deep learning problems Adagrad can sometimes be too aggressive in reducing learning rates. We will discuss strategies for mitigating this in the context of :numref:`sec_adam`.
#
# ## Exercises
#
# 1. Prove that for an orthogonal matrix $\mathbf{U}$ and a vector $\mathbf{c}$ the following holds: $\|\mathbf{c} - \mathbf{\delta}\|_2 = \|\mathbf{U} \mathbf{c} - \mathbf{U} \mathbf{\delta}\|_2$. Why does this mean that the magnitude of perturbations does not change after an orthogonal change of variables?
# 1. Try out Adagrad for $f(\mathbf{x}) = 0.1 x_1^2 + 2 x_2^2$ and also for the case where the objective function is rotated by 45 degrees, i.e., $f(\mathbf{x}) = 0.1 (x_1 + x_2)^2 + 2 (x_1 - x_2)^2$. Does it behave differently?
# 1. Prove [Gerschgorin's circle theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem) which states that eigenvalues $\lambda_i$ of a matrix $\mathbf{M}$ satisfy $|\lambda_i - \mathbf{M}_{jj}| \leq \sum_{k \neq j} |\mathbf{M}_{jk}|$ for at least one choice of $j$.
# 1. What does Gerschgorin's theorem tell us about the eigenvalues of the diagonally preconditioned matrix $\mathrm{diag}^{-\frac{1}{2}}(\mathbf{M}) \mathbf{M} \mathrm{diag}^{-\frac{1}{2}}(\mathbf{M})$?
# 1. Try out Adagrad for a proper deep network, such as :numref:`sec_lenet` when applied to Fashion MNIST.
# 1. How would you need to modify Adagrad to achieve a less aggressive decay in learning rate?
#
# + [markdown] origin_pos=20 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/1073)
#
| d2l-en/tensorflow/chapter_optimization/adagrad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# # Pandas Built-in Data Visualization
#
# In this lecture we will learn about pandas built-in capabilities for data visualization! It's built-off of matplotlib, but it baked into pandas for easier usage!
#
# Let's take a look!
# ## Imports
import numpy as np
import pandas as pd
# %matplotlib inline
# per mostrare i grafici in maniera un pò più decente
import seaborn as sns
sns.set()
# ## The Data
#
# There are some fake data csv files you can read in as dataframes:
# Load the two demo datasets; df1 uses its first column as the index.
df1 = pd.read_csv('df1',index_col=0)
df2 = pd.read_csv('df2')
df1.head()
df2.head()
# ## Style Sheets
#
# Matplotlib has [style sheets](http://matplotlib.org/gallery.html#style_sheets) you can use to make your plots look a little nicer. These style sheets include 'bmh', 'fivethirtyeight', 'ggplot' and more. They basically create a set of style rules that your plots follow. I recommend using them, they make all your plots have the same look and feel more professional. You can even create your own if you want your company's plots to all have the same look (it is a bit tedious to create one though).
#
# Here is how to use them.
#
# **Before plt.style.use() your plots look like this:**
#
#
# #### In general it is better to use:
#
# `df1['A'].plot.hist()` (as seen below)
# the histogram shows how many times each bin occurs in the series df1['A'];
# e.g. the bin roughly spanning 0.1 to 0.2 occurs about 115 times,
# i.e., the column holds 115 values between 0.1 and 0.2
df1['A'].hist(bins=30)
# Call the style:
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Now your plots look like this:
df1['A'].hist()
# Cycle through a few built-in styles to compare their look.
plt.style.use('bmh')
df1['A'].hist()
plt.style.use('dark_background')
df1['A'].hist()
plt.style.use('fivethirtyeight')
df1['A'].hist()
# Settle on ggplot for the rest of the notebook.
plt.style.use('ggplot')
# Let's stick with the ggplot style and actually show you how to utilize pandas built-in plotting capabilities!
# # Plot Types
#
# There are several plot types built-in to pandas, most of them statistical plots by nature:
#
# * df.plot.area
# * df.plot.barh
# * df.plot.density
# * df.plot.hist
# * df.plot.line
# * df.plot.scatter
# * df.plot.bar
# * df.plot.box
# * df.plot.hexbin
# * df.plot.kde
# * df.plot.pie
#
# You can also just call df.plot(kind='hist') or replace that kind argument with any of the key terms shown in the list above (e.g. 'box','barh', etc..)
# ___
# Let's start going through them!
#
# ## Area
df2.plot.area(alpha=0.4)
# ## Barplots
df2.head()
# Works when the index is categorical, i.e. it does not grow without bound
# but takes a fixed set of values; here the df simply has only 9 rows.
df2.plot.bar(figsize=(10,10))
df2.plot.bar(stacked=True)
# ## Histograms
df1['A'].plot.hist(bins=50)
# ## Line Plots
df1.plot.line(x=df1.index,y='B',figsize=(12,3),lw=1)
# ## Scatter Plots
df1.plot.scatter(x='A',y='B')
# You can use c to color based off another column value
# Use cmap to indicate colormap to use.
# For all the colormaps, check out: http://matplotlib.org/users/colormaps.html
df1.plot.scatter(x='A',y='B',c='C')
df1.plot.scatter(x='A',y='B',c='C',cmap='coolwarm')
# Or use s to indicate size based off another column. s parameter needs to be an array, not just the name of a column:
df1.plot.scatter(x='A',y='B',s=df1['C']*100)
# ## BoxPlots
df2.plot.box(figsize=(10,10)) # Can also pass a by= argument for groupby
# ## Hexagonal Bin Plot
#
# Useful for Bivariate Data, alternative to scatterplot:
df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
df.plot.hexbin(x='a',y='b',gridsize=25,cmap='Oranges')
# the more points fall inside a hexagon, the darker it is drawn
# ____
# ## Kernel Density Estimation plot (KDE)
df2['a'].plot.kde()
df2.plot.density()
# That's it! Hopefully you can see why this method of plotting will be a lot easier to use than full-on matplotlib, it balances ease of use with control over the figure. A lot of the plot calls also accept additional arguments of their parent matplotlib plt. call.
#
#
# # Great Job!
| 04-Visualization-Matplotlib-Pandas/04-02-Pandas Visualization/Pandas Built-in Data Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Integrated gradients for text classification on the IMDB dataset
# In this example, we apply the integrated gradients method to a sentiment analysis model trained on the IMDB dataset. In text classification models, integrated gradients define an attribution value for each word in the input sentence. The attributions are calculated considering the integral of the model gradients with respect to the word embedding layer along a straight path from a baseline instance $x^\prime$ to the input instance $x.$ A description of the method can be found [here](https://docs.seldon.io/projects/alibi/en/latest/methods/IntegratedGradients.html). Integrated gradients was originally proposed in Sundararajan et al., ["Axiomatic Attribution for Deep Networks"](https://arxiv.org/abs/1703.01365)
#
# The IMDB data set contains 50K movie reviews labelled as positive or negative.
# We train a convolutional neural network classifier with a single 1-d convolutional layer followed by a fully connected layer. The reviews in the dataset are truncated at 100 words and each word is represented by a 50-dimensional word embedding vector. We calculate attributions for the elements of the embedding layer.
import tensorflow as tf
import numpy as np
import os
import pandas as pd
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Embedding, Conv1D, GlobalMaxPooling1D, Dropout
from tensorflow.keras.utils import to_categorical
from alibi.explainers import IntegratedGradients
import matplotlib.pyplot as plt
print('TF version: ', tf.__version__)
print('Eager execution enabled: ', tf.executing_eagerly()) # True
# ## Load data
# Loading the imdb dataset.
# Keep the 10k most frequent words; pad/truncate reviews to 100 tokens.
max_features = 10000
maxlen = 100
# +
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# keep the integer labels around before one-hot encoding them below
test_labels = y_test.copy()
train_labels = y_train.copy()
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# word -> index mapping from keras, inverted to index -> word for decoding
index = imdb.get_word_index()
reverse_index = {value: key for (key, value) in index.items()}
# -
# A sample review from the test set. Note that unknown words are replaced with 'UNK'
def decode_sentence(x, reverse_index):
    """Map a sequence of Keras IMDB word indices back to a space-joined
    sentence; indices missing from the vocabulary become 'UNK'.

    The `-3` offset accounts for Keras's reserved special tokens; see
    https://stackoverflow.com/questions/42821330/restore-original-text-from-keras-s-imdb-dataset
    """
    words = (reverse_index.get(idx - 3, 'UNK') for idx in x)
    return " ".join(words)
print(decode_sentence(x_test[1], reverse_index))
# ## Train Model
# The model includes one convolutional layer and reaches a test accuracy of 0.85. If `save_model = True`, a local folder `../model_imdb` will be created and the trained model will be saved in that folder. If the model was previously saved, it can be loaded by setting `load_model = True`.
# Model/training hyperparameters.
# NOTE(review): batch_size here is unused; fit() below passes batch_size=256.
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
load_model = False
save_model = True
filepath = './model_imdb/' # change to directory where model is downloaded
# Either load a previously trained model from disk or build and train a
# fresh one: embedding -> 1-D conv -> global max pool -> dense -> 2-way softmax.
if load_model:
    model = tf.keras.models.load_model(os.path.join(filepath, 'model.h5'))
else:
    print('Build model...')
    inputs = Input(shape=(maxlen,), dtype=tf.int32)
    embedded_sequences = Embedding(max_features,
                                   embedding_dims)(inputs)
    out = Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu',
                 strides=1)(embedded_sequences)
    out = Dropout(0.4)(out)
    out = GlobalMaxPooling1D()(out)
    out = Dense(hidden_dims,
                activation='relu')(out)
    out = Dropout(0.4)(out)
    outputs = Dense(2, activation='softmax')(out)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    model.fit(x_train, y_train,
              batch_size=256,
              epochs=3,
              validation_data=(x_test, y_test))
    # persist the trained model so later runs can set load_model = True
    if save_model:
        if not os.path.exists(filepath):
            os.makedirs(filepath)
        model.save(os.path.join(filepath, 'model.h5'))
# ## Calculate integrated gradients
# The integrated gradients attributions are calculated with respect to the embedding layer for 10 samples from the test set. Since the model uses a word to vector embedding with vector dimensionality of 50 and sequence length of 100 words, the dimensionality of the attributions is (10, 100, 50). In order to obtain a single attribution value for each word, we sum all the attribution values for the 50 elements of each word's vector representation.
#
# The default baseline is used in this example which is internally defined as a sequence of zeros. In this case, this corresponds to a sequence of padding characters (**NB:** in general the numerical value corresponding to a "non-informative" baseline such as the PAD token will depend on the tokenizer used, make sure that the numerical value of the baseline used corresponds to your desired token value to avoid surprises). The path integral is defined as a straight line from the baseline to the input image. The path is approximated by choosing 50 discrete steps according to the Gauss-Legendre method.
# Attribute with respect to the embedding layer (second layer of the model).
layer = model.layers[1]
layer
# Path integral approximated with 50 Gauss-Legendre steps (see text above).
n_steps = 50
method = "gausslegendre"
internal_batch_size = 100
nb_samples = 10
ig = IntegratedGradients(model,
                         layer=layer,
                         n_steps=n_steps,
                         method=method,
                         internal_batch_size=internal_batch_size)
# Explain the model's own predicted classes for 10 test reviews.
x_test_sample = x_test[:nb_samples]
predictions = model(x_test_sample).numpy().argmax(axis=1)
explanation = ig.explain(x_test_sample,
                         baselines=None,
                         target=predictions,
                         attribute_to_layer_inputs=False)
# Metadata from the explanation object
explanation.meta
# Data fields from the explanation object
explanation.data.keys()
# Get attributions values from the explanation object
attrs = explanation.attributions[0]
print('Attributions shape:', attrs.shape)
# ## Sum attributions
# Collapse the embedding axis so each word gets a single attribution value.
attrs = attrs.sum(axis=2)
print('Attributions shape:', attrs.shape)
# ## Visualize attributions
# Pick one sample (index 1) for visualization below.
i = 1
x_i = x_test_sample[i]
attrs_i = attrs[i]
pred = predictions[i]
pred_dict = {1: 'Positive review', 0: 'Negative review'}
print('Predicted label = {}: {}'.format(pred, pred_dict[pred]))
# We can visualize the attributions for the text instance by mapping the values of the attributions onto a matplotlib colormap. Below we define some utility functions for doing this.
from IPython.display import HTML
def hlstr(string, color='white'):
    """
    Return HTML markup highlighting text with the desired color.
    """
    return "<mark style=background-color:{}>{} </mark>".format(color, string)
def colorize(attrs, cmap='PiYG'):
    """
    Compute hex colors based on the attributions for a single instance.
    Uses a diverging colorscale by default and normalizes and scales
    the colormap so that colors are consistent with the attributions.
    """
    import matplotlib as mpl
    cmap_bound = np.abs(attrs).max()
    # symmetric normalization around 0 so positive/negative attributions
    # map to opposite ends of the diverging colormap
    norm = mpl.colors.Normalize(vmin=-cmap_bound, vmax=cmap_bound)
    # mpl.cm.get_cmap was deprecated in matplotlib 3.7 and removed in 3.9;
    # use the colormap registry when available, falling back for old versions
    try:
        cmap = mpl.colormaps[cmap]
    except AttributeError:
        cmap = mpl.cm.get_cmap(cmap)
    # now compute hex values of colors
    colors = list(map(lambda x: mpl.colors.rgb2hex(cmap(norm(x))), attrs))
    return colors
# Below we visualize the attribution values (highlighted in the text) having the highest positive attributions. Words with high positive attribution are highlighted in shades of green and words with negative attribution in shades of pink. Stronger shading corresponds to higher attribution values. Positive attributions can be interpreted as increase in probability of the predicted class ("Positive sentiment") while negative attributions correspond to decrease in probability of the predicted class.
# Render each word highlighted by its attribution color as inline HTML.
words = decode_sentence(x_i, reverse_index).split()
colors = colorize(attrs_i)
HTML("".join(list(map(hlstr, words, colors))))
| examples/integrated_gradients_imdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv("flight.csv")
df.head()
from vega.widget import VegaWidget
from ipytablewidgets.progressivis_adapter import ProgressivisAdapter
from progressivis.table import Table
# Wrap the DataFrame in a progressivis Table, then in an adapter the
# Vega widget can consume as a data source.
table = Table(name='vega_table', data=df)
source = ProgressivisAdapter(table)
# Vega-Lite spec: mean arrival delay per carrier (point layer) overlaid
# with a confidence-interval ("ci") errorbar; data is injected at runtime
# through the named "data" source.
spec_no_data = {
    "$schema": "https://vega.github.io/schema/vega-lite/v5.json",
    "data": {"name": "data"},
    "encoding": {"y": {"field": "OP_UNIQUE_CARRIER", "type": "ordinal", "title": "Company"}},
    "layer": [
        {
            "mark": {"type": "point", "filled": True},
            "encoding": {
                "x": {
                    "aggregate": "mean",
                    "field": "ARR_DELAY",
                    "type": "quantitative",
                    "scale": {"zero": False},
                    "title": "Delay"
                },
                "color": {"value": "black"}
            }
        },
        {
            "mark": {"type": "errorbar", "extent": "ci"},
            "encoding": {
                "x": {"field": "ARR_DELAY", "type": "quantitative", "title": "Delay"}
            }
        }
    ]
}
widget = VegaWidget(spec=spec_no_data)
display(widget)
# %time widget.update('data', insert=source)
# Same plot again, but with lz4 compression on the widget's data channel
# to compare update timings.
widget = VegaWidget(spec=spec_no_data)
widget.compression = 'lz4'
display(widget)
# %time widget.update('data', insert=source)
| notebooks/VegaProgressivis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Physionet 2017 | ECG Rhythm Classification
# ## 4. Train Model
# ### <NAME>, Ph.D.
# # Setup Noteboook
# +
# Import 3rd party libraries
import os
import sys
import numpy as np
import pickle
# Deep learning libraries
import tensorflow as tf
# Import local Libraries
sys.path.insert(0, r'C:\Users\sebastian goodfellow\Documents\code\deeP_ecg')
from deepecg.training.utils.plotting.time_series import plot_time_series_widget
from deepecg.training.utils.devices.device_check import print_device_counts
# from deepecg.training.train.memory.train import train
# from deepecg.training.model.memory.model import Model
from deepecg.config.config import DATA_DIR
# Configure Notebook
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# # Resources
# +
# Objective Function
# https://stackoverflow.com/questions/44560549/unbalanced-data-and-weighted-cross-entropy
# Global Average Pooling
# https://alexisbcook.github.io/2017/global-average-pooling-layers-for-object-localization/
# https://github.com/philipperemy/tensorflow-class-activation-mapping/blob/master/class_activation_map.py
# https://github.com/AndersonJo/global-average-pooling
# -
# # 1. Load ECG Dataset
# +
# Set path
path = os.path.join(DATA_DIR, 'training', 'memory')
# Set sample rate
fs = 300
print(os.path.join(path, 'training_60s.pickle'))
# Unpickle
with open(os.path.join(path, 'training_60s.pickle'), "rb") as input_file:
data = pickle.load(input_file)
# +
# Get training data
x_train = data['data_train'].values.reshape(data['data_train'].shape[0], data['data_train'].shape[1], 1)
y_train = data['labels_train']['label_int'].values.reshape(data['labels_train'].shape[0], 1).astype(int)
# Get validation data
x_val = data['data_val'].values.reshape(data['data_val'].shape[0], data['data_val'].shape[1], 1)
y_val = data['labels_val']['label_int'].values.reshape(data['labels_val'].shape[0], 1).astype(int)
# Print dimensions
print('x_train dimensions: ' + str(x_train.shape))
print('y_train dimensions: ' + str(y_train.shape))
print('x_val dimensions: ' + str(x_val.shape))
print('y_val dimensions: ' + str(y_val.shape))
# +
# One hot encoding array dimensions
# NOTE(review): `one_hot_encoding` is not defined or imported anywhere in
# this notebook -- presumably it comes from the deepecg training utilities
# (the deepecg imports in the setup cell are commented out); confirm the
# import before running this cell.
y_train_1hot = one_hot_encoding(labels=y_train.ravel(), classes=len(np.unique(y_train.ravel())))
y_val_1hot = one_hot_encoding(labels=y_val.ravel(), classes=len(np.unique(y_val.ravel())))
# Print dimensions
print('x_train dimensions: ' + str(x_train.shape))
print('y_train dimensions: ' + str(y_train.shape))
print('y_train_1hot dimensions: ' + str(y_train_1hot.shape))
print('x_val dimensions: ' + str(x_val.shape))
print('y_val dimensions: ' + str(y_val.shape))
print('y_val_1hot dimensions: ' + str(y_val_1hot.shape))
# +
# Label lookup
label_lookup = {'N': 0, 'A': 1, 'O': 2, '~': 3}
# Label dimensions
print('Train: Classes: ' + str(np.unique(y_train.ravel())))
print('Train: Count: ' + str(np.bincount(y_train.ravel())))
print('Val: Classes: ' + str(np.unique(y_val.ravel())))
print('Val: Count: ' + str(np.bincount(y_val.ravel())))
# +
# Label dictionary
label_list = ['Normal Sinus Rhythm', 'Atrial Fibrillation', 'Other Rhythm']
# Plot time series
plot_time_series_widget(time_series=x_train, labels=y_train, fs=fs, label_list=label_list)
# -
# # 2. Device Check
# Get GPU count
print_device_counts()
# # 3. Initialize Model
# +
# Set save path for graphs, summaries, and checkpoints
save_path = r'C:\Users\<NAME>\Desktop\tensorboard\deep_ecg'
# Set model name
model_name = 'test_1'
# Maximum number of checkpoints to keep
max_to_keep = 20
# Set random states
seed = 0
tf.set_random_seed(seed)
# Get training dataset dimensions
(m, length, channels) = x_train.shape
# Get number of label classes
classes = y_train_1hot.shape[1]
# Choose network
network_name = 'DeepECG'
# Set network inputs
network_parameters = dict(
length=length,
channels=channels,
classes=classes,
seed=seed,
)
# Create model
model = Model(
model_name=model_name,
network_name=network_name,
network_parameters=network_parameters,
save_path=save_path,
max_to_keep=max_to_keep
)
# -
# # 7. Train Model
# +
# Set hyper-parameters
epochs = 100
minibatch_size = 10
learning_rate = 0.001
# Train model
train(
model=model,
x_train=x_train, y_train=y_train_1hot,
x_val=x_val, y_val=y_val_1hot,
learning_rate=learning_rate,
epochs=epochs, mini_batch_size=minibatch_size,
)
| deepecg/training/notebooks/training/memory/1_train_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercice 1 : Les erreurs d’arrondi
#
#
#
# Le but de cet exercice est d'évaluer l'importance des erreurs d'arrondi.
#
#
#
# #### 1- a- Calculer à la main l’écriture binaire des nombres suivants : 0.5, 0.25 et 0.1
#
# Comment faire ? Pour un entier c’est chose facile : $3 = 2¹ + 2⁰$ donc son écriture binaire est 11. Pour un nombre à virgule, on procède de la même manière : $0.625 = 2⁻¹ + 2⁻³$ donc son écriture binaire est 0.101.
#
# #### b- Quelle est la particularité de 0.1 par rapport à 0.5 ou 0.25 ?
#
#
#
# #### 2- Calculez et affichez maintenant le booléen suivant : 0.3 - 0.2 == 0.1
print(0.3 - 0.2 == 0.1 )
#
#
# #### Comment peut-on expliquer ce phénomène ?
#
#
#
# #### 3- Reprenons l’exemple de l’algorithme de <NAME> exposé dans la vidéo :
#
# B = 4095.1
#
# A = B+1
#
# x0 = 1
#
# x1 = A * x0 - B
#
# x2 = A * x1 - B
#
# x3 = A * x2 - B
#
# x4 = A * x3 - B
#
# x5 = A * x4 - B
#
# x6 = A * x5 - B
#
# x7 = A * x6 - B
#
# …
#
# Si lorsque le calcul est fait pour des nombres codés sur 32 bits, tout se passe bien, pour un calcul fait avec des nombres codés en 64 bits, les valeurs trouvées sont les suivantes :
#
# 1.0
#
# 1.0000000000004547
#
# 1.0000000018631
#
# 1.0000076314440776
#
# 1.0312591580864137
#
# 129.04063743775941
#
# 524468.2550088064
#
# 2148270324.241572
#
# …
#
#
# #### D’où viennent les erreurs ? Pourquoi celles-ci deviennent-elles si importantes au bout de quelques itérations seulement ?
#
#
| Math/Fractale_section2_exercice1_Les_erreurs_d_arrondis_VF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: UNSEEN-open
# language: python
# name: unseen-open
# ---
# ## February and April 2020 precipitation anomalies
#
# In this notebook, we will analyze precipitation anomalies of February and April 2020, which seemed to be very contrasting in weather. We use the EOBS dataset.
# ### Import packages
##This is so variables get printed within jupyter
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
##import packages
import os
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import matplotlib.ticker as mticker
##We want the working directory to be the UNSEEN-open directory
pwd = os.getcwd() ##current working directory is UNSEEN-open/Notebooks/1.Download
pwd #print the present working directory
os.chdir(pwd+'/../') # Change the working directory to UNSEEN-open
os.getcwd() #print the working directory
# ### Load EOBS
#
# I downloaded EOBS (from 1950 - 2019) and the most recent EOBS data (2020) [here](https://surfobs.climate.copernicus.eu/dataaccess/access_eobs.php). Note, you have to register as E-OBS user.
#
# The data has a daily timestep. I resample the data into monthly average mm/day. I chose not to use the total monthly precipitation because of leap days.
EOBS = xr.open_dataset('../UK_example/EOBS/rr_ens_mean_0.25deg_reg_v20.0e.nc') ## open the data
EOBS = EOBS.resample(time='1m').mean() ## Monthly averages
# EOBS = EOBS.sel(time=EOBS['time.month'] == 2) ## Select only February
EOBS
# Here I define the attributes, that xarray uses when plotting
EOBS['rr'].attrs = {'long_name': 'rainfall', ##Define the name
'units': 'mm/day', ## unit
'standard_name': 'thickness_of_rainfall_amount'} ## original name, not used
EOBS['rr'].mean('time').plot() ## and show the 1950-2019 average February precipitation
# The 2020 data file is separate and needs the same preprocessing:
EOBS2020 = xr.open_dataset('../UK_example/EOBS/rr_0.25deg_day_2020_grid_ensmean.nc.1') #open
EOBS2020 = EOBS2020.resample(time='1m').mean() #Monthly mean
EOBS2020['rr'].sel(time='2020-04').plot() #show map
EOBS2020 ## display dataset
# ### Plot the 2020 event
#
# I calculate the anomaly (deviation from the mean in mm/d) and divide this by the standard deviation to obtain the standardized anomalies.
# +
EOBS2020_anomaly = EOBS2020['rr'].groupby('time.month') - EOBS['rr'].groupby('time.month').mean('time')
EOBS2020_anomaly
EOBS2020_sd_anomaly = EOBS2020_anomaly.groupby('time.month') / EOBS['rr'].groupby('time.month').std('time')
EOBS2020_sd_anomaly.attrs = {
'long_name': 'Monthly precipitation standardized anomaly',
'units': '-'
}
EOBS2020_sd_anomaly
# -
# I select February and April (tips on how to select this are appreciated)
EOBS2020_sd_anomaly
# EOBS2020_sd_anomaly.sel(time = ['2020-02','2020-04']) ## Dont know how to select this by label?
EOBS2020_sd_anomaly[[1,3],:,:] ## Dont know how to select this by label?
# And plot using cartopy!
# +
EOBS_plots = EOBS2020_sd_anomaly[[1, 3], :, :].plot(
transform=ccrs.PlateCarree(),
robust=True,
col='time',
cmap=plt.cm.twilight_shifted_r,
subplot_kws={'projection': ccrs.EuroPP()})
for ax in EOBS_plots.axes.flat:
ax.add_feature(cartopy.feature.BORDERS, linestyle=':')
ax.coastlines(resolution='50m')
gl = ax.gridlines(crs=ccrs.PlateCarree(),
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--')
# plt.savefig('graphs/February_April_2020_precipAnomaly.png', dpi=300)
| doc/_build/.doctrees/nbsphinx/Notebooks/2020_contrasting_weather.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# default_exp utils.sampling
# -
# # Sampling
# > Data sampling methods.
#hide
from nbdev.showdoc import *
# ## Negative Sampling
#export
from abc import *
from pathlib import Path
import pickle
import os
from tqdm import trange
from collections import Counter
import numpy as np
import pandas as pd
#export
def simple_negative_sampling(data,
                             num_negatives=4,
                             binarization=False,
                             feedback_column='RATING'):
    """Augment an interaction dataframe with randomly sampled negatives.

    For every distinct observed (user, item, feedback) interaction,
    `num_negatives` items the user has *not* interacted with are drawn
    uniformly at random and appended with a feedback value of 0.

    Args:
        data: DataFrame with 'USERID', 'ITEMID' and `feedback_column` columns.
        num_negatives: negatives sampled per positive interaction.
        binarization: if True, overwrite the feedback column with 1
            (implicit-feedback style). NOTE: mutates `data` in place.
        feedback_column: name of the feedback/rating column.

    Returns:
        A new DataFrame with columns ['USERID', 'ITEMID', feedback_column]
        containing both the positives and the sampled negatives.
    """
    # Pool of candidate items for negative sampling.
    all_itemsIds = data['ITEMID'].unique()
    # Placeholders that will hold the data
    users, items, labels = [], [], []
    if binarization:
        data.loc[:, feedback_column] = 1
    user_item_set = set(zip(data['USERID'], data['ITEMID'], data[feedback_column]))
    # BUGFIX: membership must be tested against (user, item) pairs. The
    # original tested a 2-tuple against a set of 3-tuples, which never
    # matched, so "negatives" could in fact be observed interactions.
    interacted = set(zip(data['USERID'], data['ITEMID']))
    for (u, i, r) in user_item_set:
        users.append(u)
        items.append(i)
        labels.append(r)
        for _ in range(num_negatives):
            # Redraw until we hit an item the user has not interacted with.
            negative_item = np.random.choice(all_itemsIds)
            while (u, negative_item) in interacted:
                negative_item = np.random.choice(all_itemsIds)
            users.append(u)
            items.append(negative_item)
            labels.append(0)  # items not interacted with are negative
    ns_data = pd.DataFrame(list(zip(users, items, labels)),
                           columns=['USERID', 'ITEMID', feedback_column])
    return ns_data
#export
class AbstractNegativeSampler(metaclass=ABCMeta):
    """Base class for negative-item samplers.

    Subclasses implement `generate_negative_samples`; this base class
    handles persisting the (seen, negative) sample dictionaries to disk.
    """
    def __init__(self, train, val, test, user_count, item_count, sample_size, seed, flag, save_folder):
        self.train = train              # {user: [items]} training interactions
        self.val = val                  # {user: [items]} validation interactions
        self.test = test                # {user: [items]} test interactions
        self.user_count = user_count
        self.item_count = item_count
        self.sample_size = sample_size  # number of negatives per user
        self.seed = seed
        self.flag = flag                # tag used in the output filename
        # BUGFIX: the original assigned the undefined name `save_path`
        # (NameError on construction); the constructor parameter is
        # `save_folder`.
        self.save_path = save_folder
    @classmethod
    @abstractmethod
    def code(cls):
        """Short string identifying the sampling strategy."""
        pass
    @abstractmethod
    def generate_negative_samples(self):
        """Return (seen_samples, negative_samples) dicts keyed by user."""
        pass
    def get_negative_samples(self):
        """Generate negative samples and pickle them under the save folder."""
        savefile_path = self._get_save_path()
        print("Negative samples don't exist. Generating.")
        seen_samples, negative_samples = self.generate_negative_samples()
        with savefile_path.open('wb') as f:
            pickle.dump([seen_samples, negative_samples], f)
        return seen_samples, negative_samples
    def _get_save_path(self):
        """Path of the pickle output file, creating the folder if needed."""
        folder = Path(self.save_path)
        if not folder.is_dir():
            folder.mkdir(parents=True)
        filename = 'negative_samples_{}.pkl'.format(self.flag)
        return folder.joinpath(filename)
#export
class RandomNegativeSampler(AbstractNegativeSampler):
    """Negative sampler that draws candidate items uniformly at random."""
    @classmethod
    def code(cls):
        return 'random'
    def generate_negative_samples(self):
        """Draw `sample_size` unseen, distinct items for every user.

        A pool of 2 * user_count * sample_size random item ids is drawn up
        front and consumed cyclically, skipping items the user has already
        interacted with (train/val/test) or already picked.
        """
        assert self.seed is not None, 'Specify seed for random sampling'
        np.random.seed(self.seed)
        pool_size = 2 * self.user_count * self.sample_size
        pool = np.random.choice(self.item_count, pool_size) + 1
        seen_samples = {}
        negative_samples = {}
        print('Sampling negative items randomly...')
        cursor = 0
        for idx in trange(self.user_count):
            user = idx + 1
            # Everything the user interacted with across all splits.
            seen = set(self.train[user])
            seen.update(self.val[user])
            seen.update(self.test[user])
            seen_samples[user] = seen
            picked = []
            while len(picked) < self.sample_size:
                candidate = pool[cursor % pool_size]
                cursor += 1
                if candidate in seen or candidate in picked:
                    continue
                picked.append(candidate)
            negative_samples[user] = picked
        return seen_samples, negative_samples
#export
class PopularNegativeSampler(AbstractNegativeSampler):
    """Negative sampler drawing items proportionally to their popularity."""
    @classmethod
    def code(cls):
        return 'popular'
    def generate_negative_samples(self):
        """Draw `sample_size` unseen, distinct items per user, weighted by popularity."""
        assert self.seed is not None, 'Specify seed for random sampling'
        np.random.seed(self.seed)
        popularity = self.items_by_popularity()
        item_ids = list(popularity.keys())
        # Normalize raw interaction counts into sampling probabilities.
        total = 0
        for item in item_ids:
            total += popularity[item]
        for item in item_ids:
            popularity[item] /= total
        probs = list(popularity.values())
        pool_size = 2 * self.user_count * self.sample_size
        pool = np.random.choice(item_ids, pool_size, p=probs)
        seen_samples = {}
        negative_samples = {}
        print('Sampling negative items by popularity...')
        cursor = 0
        for idx in trange(self.user_count):
            user = idx + 1
            # Everything the user interacted with across all splits.
            seen = set(self.train[user])
            seen.update(self.val[user])
            seen.update(self.test[user])
            seen_samples[user] = seen
            picked = []
            while len(picked) < self.sample_size:
                candidate = pool[cursor % pool_size]
                cursor += 1
                if candidate in seen or candidate in picked:
                    continue
                picked.append(candidate)
            negative_samples[user] = picked
        return seen_samples, negative_samples
    def items_by_popularity(self):
        """Interaction counts per item over all splits, most popular first.

        Side effect: caches the sorted user ids on `self.users`.
        """
        counts = Counter()
        self.users = sorted(self.train.keys())
        for user in self.users:
            counts.update(self.train[user])
            counts.update(self.val[user])
            counts.update(self.test[user])
        ordered = sorted(dict(counts).items(), key=lambda kv: kv[1], reverse=True)
        return {item: count for item, count in ordered}
# ## Graph Sampling
#export
import random
import numpy as np
from collections import deque
#export
class ForestFireSampler:
    """An implementation of forest fire sampling. The procedure is a stochastic
    snowball sampling method where the expansion is proportional to the burning probability.
    `"For details about the algorithm see this paper." <https://cs.stanford.edu/people/jure/pubs/sampling-kdd06.pdf>`_
    Inspiration credit:
        littleballoffur
        https://github.com/benedekrozemberczki/littleballoffur
    Args:
        number_of_nodes (int): Number of sampled nodes. Default is 100.
        p (float): Burning probability. Default is 0.4.
        seed (int): Random seed. Default is 42.
        max_visited_nodes_backlog (int): Size of the backlog of visited (but
            not burned) nodes used to restart a dying fire. Default is 100.
        restart_hop_size (int): Number of backlog nodes re-queued when the
            fire runs out of nodes to burn. Default is 10.
    """
    def __init__(self, number_of_nodes: int=100, p: float=0.4, seed: int=42, max_visited_nodes_backlog: int=100,
                 restart_hop_size: int = 10):
        self.number_of_nodes = number_of_nodes
        self.p = p
        self.seed = seed
        self._set_seed()
        self.restart_hop_size = restart_hop_size
        self.max_visited_nodes_backlog = max_visited_nodes_backlog
    def _set_seed(self):
        # Seed both RNGs used below (random.sample / np.random.geometric).
        random.seed(self.seed)
        np.random.seed(self.seed)
    def _create_node_sets(self, graph):
        """
        Create a starting set of nodes.
        """
        self._sampled_nodes = set()
        self._set_of_nodes = set(range(graph.number_of_nodes()))
        self._visited_nodes = deque(maxlen=self.max_visited_nodes_backlog)
    def get_neighbors(self, graph, node):
        """Neighbors of *node* as a list."""
        return list(graph.neighbors(node))
    def _start_a_fire(self, graph):
        """
        Starting a forest fire from a single node.
        """
        remaining_nodes = list(self._set_of_nodes.difference(self._sampled_nodes))
        seed_node = random.choice(remaining_nodes)
        self._sampled_nodes.add(seed_node)
        node_queue = deque([seed_node])
        while len(self._sampled_nodes) < self.number_of_nodes:
            if len(node_queue) == 0:
                # Fire died out: restart it from recently visited nodes.
                node_queue = deque([self._visited_nodes.popleft()
                                    for k in range(min(self.restart_hop_size, len(self._visited_nodes)))])
                if len(node_queue) == 0:
                    # Could not collect the required number of nodes.
                    break
            top_node = node_queue.popleft()
            self._sampled_nodes.add(top_node)
            neighbors = set(self.get_neighbors(graph, top_node))
            unvisited_neighbors = neighbors.difference(self._sampled_nodes)
            # Geometric draw: expansion proportional to burning probability p.
            score = np.random.geometric(self.p)
            count = min(len(unvisited_neighbors), score)
            # BUGFIX: random.sample() requires a sequence -- passing a set
            # raises TypeError on Python 3.11+. Sorting also makes the draw
            # deterministic for a fixed seed.
            burned_neighbors = random.sample(sorted(unvisited_neighbors), count)
            self._visited_nodes.extendleft(unvisited_neighbors.difference(set(burned_neighbors)))
            for neighbor in burned_neighbors:
                if len(self._sampled_nodes) >= self.number_of_nodes:
                    break
                node_queue.extend([neighbor])
#export
class RandomWalkWithRestartSampler:
    """Node sampling by random walks with restart.

    A discrete random walker on nodes which teleports back to the starting
    node with a fixed probability, yielding a connected subsample of the
    input graph. `"For details about the algorithm see this
    paper." <https://cs.stanford.edu/people/jure/pubs/sampling-kdd06.pdf>`_
    Inspiration credit:
        littleballoffur
        https://github.com/benedekrozemberczki/littleballoffur
    Args:
        number_of_nodes (int): Number of nodes. Default is 100.
        seed (int): Random seed. Default is 42.
        p (float): Restart probability. Default is 0.1.
    """
    def __init__(self, number_of_nodes: int=100, seed: int=42, p: float=0.1):
        self.number_of_nodes = number_of_nodes
        self.seed = seed
        self.p = p
        self._set_seed()
    def _set_seed(self):
        random.seed(self.seed)
    def get_neighbors(self, graph, node):
        """Neighbors of *node* as a list."""
        return list(graph.neighbors(node))
    def get_random_neighbor(self, graph, node):
        """A uniformly random neighbor of *node*."""
        return random.choice(self.get_neighbors(graph, node))
    def get_nodes(self, graph):
        """All node ids as a list."""
        return list(graph.nodes)
    def get_number_of_nodes(self, graph):
        return graph.number_of_nodes()
    def _create_initial_node_set(self, graph, start_node):
        """Choose the walk's starting node (given, or uniformly at random).

        Raises:
            ValueError: if *start_node* is outside the graph's node range.
        """
        self._set_of_nodes = set(self.get_nodes(graph))
        if start_node is None:
            self._current_node = random.choice(range(self.get_number_of_nodes(graph)))
        elif 0 <= start_node < self.get_number_of_nodes(graph):
            self._current_node = start_node
        else:
            raise ValueError("Starting node index is out of range.")
        self._sampled_nodes = set([self._current_node])
        self._initial_node = self._current_node
    def _do_a_step(self, graph):
        """Advance one step: restart with probability p, otherwise move."""
        roll = random.uniform(0, 1)
        if roll < self.p:
            # Teleport back to the starting node.
            self._current_node = self._initial_node
        else:
            next_node = self.get_random_neighbor(graph, self._current_node)
            self._sampled_nodes.add(next_node)
            self._current_node = next_node
#hide
# !pip install -q watermark
# %reload_ext watermark
# %watermark -a "Sparsh A." -m -iv -u -t -d
| nbs/utils/utils.sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from numpy import *
import pandas
# Orbital energies (E), actions (JR, Jphi, Jz), peri/apocentre distances,
# eccentricities and orbital inclinations for seven globular clusters,
# each with lower (_el) and upper (_eu) error bounds where available.
E = array([1.425,1.123,1.85,0.972,2.076,2.223,1.94])*(-1.0e5)
E_el = array([8.4,3.0,0.8,4.4,0.7,3.3,8.9])*(-1000.0)
E_eu = array([8.2,3.2,0.9,4.4,0.8,3.8,8.2])*(-1000.0)
JR = array([620,900,270,1400,140,80,54])*1.0
JR_el = array([130,120,30,210,30,5,7])*1.0
JR_eu = array([160,140,30,240,30,10,10])*1.0
Jphi = array([-1250,-2800,-520,-3210,-350,-250,-590])*1.0
Jphi_el = array([150,90,30,210,30,30,110])*1.0
Jphi_eu = array([150,80,30,200,30,40,110])*1.0
Jz = array([230,310,100,800,66,71,150])*1.0
Jz_el = array([40,20,20,50,7,1.3,30])*(1.0)
Jz_eu = array([50,20,20,60,8,1.4,30])*1.0
pericentre = array([3.8,8.4,1.5,10.7,1.3,0.9,2.5])
pericentre_el = array([0.5,0.1,0.1,0.5,0.1,0.1,0.5])*(1.0)
pericentre_eu = array([0.5,0.1,0.1,0.5,0.1,0.1,0.5])
apocentre = array([16.2,29.3,7.2,41.4,4.5,3.5,4.9])
apocentre_el = array([2.6,2.1,0.2,4.2,0.1,0.5,0.8])*(1.0)
apocentre_eu = array([3.0,2.6,0.1,4.7,0.2,0.2,0.9])
ecc = array([0.62,0.55,0.65,0.59,0.59,0.59,0.34])
orb_incl_deg = array([146.5,152.6,139.7,143.1,159.0,149.2,142.5])
# Stack all quantities into a (7 clusters) x (20 parameters) table.
data = vstack((E,E_el,E_eu,JR,JR_el,JR_eu,Jphi,Jphi_el,Jphi_eu,Jz,Jz_el,Jz_eu,
               pericentre,pericentre_el,pericentre_eu,
               apocentre,apocentre_el,apocentre_eu,ecc,orb_incl_deg)).T
# BUGFIX: `names` must be defined before it is used to build df_names
# (the original referenced it one line too early, raising NameError).
names = array(['FSR1758','NGC 3201','$\omega$ Centauri','NGC 6101','NGC 6535','NGC 6388','NGC 6401'])
df_names = pandas.DataFrame(names,columns=['ID'])
# BUGFIX: the apocentre error columns were all labelled 'apocentre',
# producing duplicate column names; label them _el/_eu like the others
# to match the order of the vstack above.
params = ['E','E_el','E_eu','JR','JR_el','JR_eu','Jphi','Jphi_el','Jphi_eu','Jz','Jz_el','Jz_eu',
          'pericentre','pericentre_el','pericentre_eu',
          'apocentre','apocentre_el','apocentre_eu',
          'ecc','orb_incl_deg']
df = pandas.DataFrame(data,columns=params)
df = df_names.join(df)
# Save basic data
df.to_csv('../data/Sequoia_GCs.csv',float_format='%.3f',index=False)
# -
data
shape(params)
| code/Save_GCs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yyynbone/firststep/blob/master/Dropout_and_Batch_Normalization_in_2D_cnn_dogvscat.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="LeDpDloIlXus" colab_type="text"
# ## sign in kaggle, and download data
# + id="kT5zQgCwlQ8C" colab_type="code" colab={}
# !pip install -q kaggle
# + id="rqIk7bv7l8l1" colab_type="code" colab={}
# !mkdir ~/.kaggle
# + id="KMZyYjufmhE2" colab_type="code" colab={}
# !cp kaggle.json ~/.kaggle/
# + id="4xIXSIhNomdl" colab_type="code" colab={}
# !chmod 600 ~/.kaggle/kaggle.json
# + id="oqDhrWdTpGgI" colab_type="code" outputId="2248b037-b3f1-41ae-d99e-d6f028ed4f35" colab={"base_uri": "https://localhost:8080/", "height": 67}
# !kaggle datasets download -d biaiscience/dogs-vs-cats
# + id="O_OYO3iBrvXs" colab_type="code" outputId="31ab660d-a9f3-4277-a229-a4310a724c56" colab={"base_uri": "https://localhost:8080/", "height": 423}
# !kaggle datasets list
# + id="D9u_qbt-sR0O" colab_type="code" outputId="eca2a52e-8a48-4294-e9f1-69b6b8ae3dc6" colab={"base_uri": "https://localhost:8080/", "height": 70}
# !kaggle competitions download -c covid19-global-forecasting-week-3
# + id="56Mei5vovDPS" colab_type="code" outputId="51129173-a197-4baf-f935-91996b96fa96" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !kaggle --version
# + id="tJDgtXrXvPBe" colab_type="code" outputId="8461b332-f2c1-42b8-f14a-db17b431be99" colab={"base_uri": "https://localhost:8080/", "height": 50}
# !pip install -q kaggle=1.5.6
# + id="HOU1GRwR1DaF" colab_type="code" outputId="ade77368-53a0-4666-a6a9-2ee7bc429037" colab={"base_uri": "https://localhost:8080/", "height": 134}
# !pip uninstall kaggle
# + id="LlkOz_uC1DdE" colab_type="code" outputId="7bee31cc-17c3-4f9f-c817-124e946b7b1c" colab={"base_uri": "https://localhost:8080/", "height": 171}
# !pip install --upgrade pip
# + id="ExC-e1ue1Df0" colab_type="code" outputId="8643cb6a-7d72-4d4e-f19c-d1cd925bbcb6" colab={"base_uri": "https://localhost:8080/", "height": 353}
# !pip install kaggle==1.5.6
# + id="XrWkPOnd1Diq" colab_type="code" outputId="c6b41186-b590-445e-fa12-f47514d3fdc2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !kaggle -v
# + id="_BH6K8TM1DlK" colab_type="code" outputId="c8ff217a-114c-47cb-912c-33cf676acba2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !kaggle competitions download -c covid19-global-forecasting-week-3
# + id="2D-N_-Z41Doi" colab_type="code" outputId="6f3dc22e-895c-4d4d-d76c-8eec96b6f611" colab={"base_uri": "https://localhost:8080/", "height": 67}
# !kaggle datasets download -d allen-institute-for-ai/CORD-19-research-challenge
# + id="7iKU_ZjPWC4K" colab_type="code" outputId="b2b40201-a633-4f74-d8f9-690075c9c8b6" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install tensorflow-gpu
# + id="4czHNQX2WEQp" colab_type="code" colab={}
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten,Dense,Conv2D,MaxPool2D,ZeroPadding2D,Dropout,BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
# + id="0W2sXyXrWEW_" colab_type="code" colab={}
# + id="vClIxD5yWEZx" colab_type="code" colab={}
# + id="E5H69ir4WEcY" colab_type="code" colab={}
# + id="xyQ0uPtTWEfq" colab_type="code" colab={}
| Dropout_and_Batch_Normalization_in_2D_cnn_dogvscat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.1 (''env'': venv)'
# language: python
# name: python3
# ---
# # DnD Monsters: Dice and Data
# As a Dungeon Master, it is very important to understand the strength of the monsters you pit against your players. Too weak, they are bored; too strong, they die... or worse... they don't have fun. The current method, known as Challenge Rating (CR), is a numerical system used to determine how difficult an enemy is based on a party of 4 players. Challenge Ratings range from 0 to 30.
#
# CR uses AC, HP, attack bonus, damage per round and Save DC as a general guideline. Unfortunately, it doesn't fully take into account legendary actions, at will spells, special abilities that cause status ailments, or any other boosting abilities. It's pretty tuned into attack and defensive type behaviors. That makes sense too, because they are the most translatable into an equation, which WotC has done for us (I showed this in the previous notebook).
#
# The goal of this investigation is to learn more about Monster's abilities in relation to the CR system. To understand if there are correlations in any of the stats, abilities, environments, size, etc. To see if we can classify monsters based on any of these traits. Finally, to create a basic app that can predict a monster stat and trait block based on inputs such as party level, difficulty, monster type, environment, and alignment.
#
# In order to stay within WotC terms of service, we will only work on monsters that are part of the SRD.
# ## DnDWiki: html instead of DnDBeyond's javascript
# Although, DnDBeyond has a really well structured website, it requires javascript parsing, which is more advanced than the knowledge than the scope of my original project. I was hoping to stick with requests.
#
# Intead, I will try to work with DnDWiki since it utilizes all html.
# +
# Import Libraries for scraping
import requests as rq
import pandas as pd
import numpy as np
from time import sleep
from collections import defaultdict
#Scraping libraries
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
#Helper functions
from src.data.build_data import monster_stat_gathering
from src.data.selenium_scrape import Request
# -
# ### Get Request for Monster Names
# This will give us a list of monster names to use for stat block parsing
# +
# Fetching HTML
url = "https://www.dandwiki.com/wiki/5e_SRD:Monsters"
Request = rq.get(url).text
soup = bs(Request, 'html.parser')
# -
# ### Collect Names of All Monsters in a List
# Unfortunately, dndwiki is not well crafted, which meant I needed to get creative. There weren't distinguishing classes or names or ids, which is usually what I would use to parse. However, the styles between tables were a bit different, so i used that to gather the information needed.
# +
# Find the main content div and and extract it for processing
# This involves finding the list items that are only housed within the parent table that has a width of 100%.
tables = soup.findAll('table', {'style':"width: 100%;"})
monster_names_dndwiki = []
for table in tables:
li_table = table.findAll('li')
for name in li_table:
monster_names_dndwiki.append(name.text)
# -
# ### Clean up data
# Because DnDWiki doesn't have a great structure, even our creative method had some issues.
# We need to remove duplicates and non-monsters from the list.
# +
# Remove duplicate monsters
monster_names = list(set(monster_names_dndwiki))
# Filter through and replace spaces with dashes to format names for url intergration
monster_list = []
for name in monster_names:
if not(name.strip().isdigit()):
new_name = name.replace(' ','-')
monster_list.append(new_name)
else:
monster_list.append(name)
# -
# ### Dictionary of URLs to parse
# I will iterate through the monster name, knowing that dandwiki has a uniform site for all monsters pages www.dandwiki.com/wiki/5e_SRD:'MonsterName'.
monster_url=[]
for name in monster_list:
monster_url.append('https://www.dndbeyond.com/monsters/'+name)
# ### Website Structure is disgusting
# There are still some things on here that are not monsters (they summon monsters). For example the Deck of Many Things. This will break any analysis or modeling I try to do, so I need to remove them.
#
# We can look at all things monsters have in common that these other objects do not. Unfortunately, the 'DoMT' and the 'figures of power' also contain niche "monster" stats for their monsters.
#
# We will include these in our table, however Zombies and Dinosaurs do not, since they are just a category of many monsters, all of which are included in the list already.
#
#
# +
# Function to make sure each get request is functioning properly and to parse the url
def Run_Soup_If_Status_Ok(url):
    """Fetch *url* and return its parsed soup, or None on an HTTP error.

    Bug fix: despite its name, the original never checked the response
    status, so 404/500 error pages were parsed as if they were monster pages.
    """
    request = rq.get(url)
    if not request.ok:  # any 4xx/5xx response
        return None
    return bs(request.text, 'html.parser')
monster_dict = defaultdict(list)
# Map each monster name to the parsed soup of its page (None on fetch failure).
for monster, page_url in zip(monster_names, monster_url):
    monster_dict[monster].append(Run_Soup_If_Status_Ok(page_url))
# -
# ## DNDBeyond: Testing selenium webdriver on DnDBeyond with a single Monster
# DnDWiki is frankly just very unhelpful in terms of web structure.
#
# There are no defining classes, ids , names, or elements on any of the information, which makes parsing a nightmare. I will move into DnDBeyond using Selenium.
#
# Since I've never used Selenium before, and its quite a bit more involved, I will take a lot of my information from this documentation: https://selenium-python.readthedocs.io/
#
# Additionally, I will develop the process incrementally, first by pulling information from a single monster.
#
# First, I will grab all the information from the Mummy Lord in the 'mon-stat-block' class and the footer information which contains all our tags like source book, environment, and monster tags.
# ### Libraries for Parsing
#
# First I need to gain access to our monster data sheet. As stated above, dndbeyond.com has a great repository of monster data. This will need to be scraped from there site. Unfortuntately, each of the monster pages is hidden behind an accordion dropdown and will need to be extracted. This is something I have not yet done, so I am excited to try.
# +
url = 'https://www.dndbeyond.com/monsters/mummy-lord'
# selenium webdriver: chromedriver has to be updated occasionally to match Chrome.
# Fix: Selenium 4 removed the `executable_path` keyword — pass a Service
# instead (the same API this notebook already uses in the later
# page-iteration cell).
driver = webdriver.Chrome(service=Service('../env/chromedriver.exe'))
driver.get(url)
# Give the JS-rendered page up to 5 s for element lookups before reading the DOM.
driver.implicitly_wait(5)
# javascript parser
soup = bs(driver.page_source, 'lxml')
# search for the div with stats and the footer that contains tags such as environment
stat_block = soup.find('div',{'class':'mon-stat-block'})
Environment = soup.find('footer')
# -
# ### Column Names: Parsing for headings, labels, and tags
# Unfortunately, I don't know any one monster that contains every single type of column we are looking for. The Mummy Lord is a strong enemy that includes a lot of traits and features.
# I added any column names to the start of the list if they weren't included in the Mummy Lord's stat blocks.
#
# Then we create for loops looking for classes that end with 'label' or 'heading'. This should give us all the column names we need.
#
# I will start with just enviromnnt-tags (later I will decide to expand this to all tags)
# +
# Columns the Mummy Lord's page does not exhibit but that exist on DnDBeyond.
column_names = ['Monster Name','Size','Type', 'Alignment','Traits', 'Damage Resistances', 'Monster Tags:', 'Mythic Actions', 'Reactions','Source']
# Stat-block spans whose class ends in 'label', and divs whose class ends in
# 'heading', supply the remaining column names.
for span_header in stat_block.findAll('span', {'class': lambda e: e.endswith('label') if e else False}):
    column_names.append(span_header.text)
for div_header in stat_block.findAll('div', {'class': lambda e: e.endswith('heading') if e else False}):
    column_names.append(div_header.text)
# The environment tags live in the footer; their leading text needs stripping.
for tag_header in Environment.findAll('p', {'class': lambda e: e.startswith('environment-tags') if e else False}):
    column_names.append(tag_header.contents[0].strip())
# -
# ### Create Dictionary to Store Monster Data
# While we only have one monster now, this will become more important later on
# +
# Map every column name to its own (initially empty) value list; each scraped
# monster will append exactly one entry per column it exhibits.
monster_dict = {column: [] for column in column_names}
monster_dict
# -
# ### Add Values of Mummy Data into our Dictionary
# Here is our big show stopper. This will be turned into a function to be used in the main scrape
# +
# Monster Name
monster_name = stat_block.find('div', {'class':'mon-stat-block__name'}).text
# Collapse all runs of whitespace/newlines into single spaces.
monster_dict['Monster Name'].append(' '.join(str(monster_name).split()))
# The subtitle contains: (Size, Alignment, and Type), will split the single meta text using split() and replace() functions
monster_subtitle_info = stat_block.find('div', {'class':'mon-stat-block__meta'})
monster_subtitle_info=monster_subtitle_info.text
# Size (first word)
monster_size = monster_subtitle_info.split()[0]
monster_dict['Size'].append(monster_size)
# Alignment (after comma)
monster_alignment = monster_subtitle_info.split(', ')[-1]
monster_dict['Alignment'].append(monster_alignment)
# Type (remaining words).
'''The sublist will remove the above two variables from the text, as well as the loose comma. It will also create a list for the type, as sometimes there are sub-types associated with monsters (e.g Titan)'''
sub_list = (monster_size, monster_alignment, ', ')
monster_type = monster_subtitle_info
# NOTE(review): str.replace removes EVERY occurrence of the size/alignment
# substrings, so a type string that happened to contain one of them would be
# mangled — appears safe for the meta strings seen here, but worth confirming.
for substring in sub_list:
    monster_type = monster_type.replace(substring,'')
monster_type = monster_type.split()
monster_dict['Type'].append(monster_type)
# Find all attribute metrics; AC, HP and Speed are rendered in this fixed order.
attribute_data = stat_block.findAll('span',{'class':'mon-stat-block__attribute-data-value'})
for position, column in enumerate(('Armor Class', 'Hit Points', 'Speed')):
    # Collapse each span's internal whitespace to single spaces before storing.
    monster_dict[column].append(' '.join(attribute_data[position].text.split()))
# Find all tidbit metrics.
# The tidbit rows (Saving Throws, Skills, Senses, ...) shift per monster, so
# we cannot index them positionally.  Every label's dictionary key is exactly
# the label text, and every value lives in the label's sibling <span>, so a
# single membership test replaces the original ten duplicated if/elif
# branches (each of which did identical work).
tidbit_label = stat_block.findAll('span', {'class':'mon-stat-block__tidbit-label'})
TIDBIT_KEYS = {
    'Saving Throws', 'Skills', 'Damage Vulnerabilities', 'Damage Immunities',
    'Condition Immunities', 'Senses', 'Languages', 'Challenge',
    'Proficiency Bonus', 'Damage Resistances',
}
for label in tidbit_label:
    key = label.text
    # Tidbits absent from this monster simply leave their column untouched.
    if key in TIDBIT_KEYS:
        # Collapse the sibling data span's internal whitespace before storing.
        value = ' '.join(label.find_next_sibling('span').text.split())
        monster_dict[key].append(value)
# Find all ability score metrics; DnDBeyond always renders the six scores
# in STR/DEX/CON/INT/WIS/CHA order.
ability_scores = stat_block.findAll('span',{'class':'ability-block__score'})
for position, ability in enumerate(('STR', 'DEX', 'CON', 'INT', 'WIS', 'CHA')):
    monster_dict[ability].append(ability_scores[position].text)
# Traits: trait blocks carry no heading div (unlike Actions / Legendary
# Actions), so any description block WITHOUT a
# 'mon-stat-block__description-block-heading' child is a trait block.
trait_list = []
description_block = stat_block.findAll('div', {'class':'mon-stat-block__description-block'})
for block in description_block:
    if not block.findAll('div',{'class':'mon-stat-block__description-block-heading'}):
        for p in block.findAll('p'):
            trait_list.append(p.text)
# Bug fix: the collected traits were never stored, leaving the pre-declared
# 'Traits' column permanently empty — record them like every other column.
monster_dict['Traits'].append(trait_list)
# Remaining description blocks that DO have headings (Actions, Legendary
# Actions, Mythic Actions, Reactions).  The headings shift per monster, so
# they cannot be indexed positionally.  Every branch of the original if/elif
# chain did exactly the same work with a dictionary key equal to the heading
# text, so a membership test replaces the chain; this also drops the
# redundant action_list initialisation outside the loop.
description_heading = stat_block.findAll('div', {'class':'mon-stat-block__description-block-heading'})
ACTION_KEYS = {'Actions', 'Legendary Actions', 'Mythic Actions', 'Reactions'}
for heading in description_heading:
    key = heading.text
    if key in ACTION_KEYS:
        # The sibling <div> holds the paragraphs for this heading.
        section = heading.find_next_sibling('div')
        entries = [p.text.strip() for p in section.findAll('p')]
        monster_dict[key].append(entries)
# These final tags are either referring to the environment it lives in (can be multiple), the sub type its classified as, or the source book it came from. All of these or none of these may be represented in the monster sheet.
monster_tags = Environment.findAll('span')
# NOTE(review): monster_tags is indexed positionally ([0] for environment,
# [1] for monster tags), which assumes both spans exist and appear in that
# order — a page with only "Monster Tags:" would mis-assign; confirm on a
# monster that lacks an Environment line.
for tag in Environment.find_all("p"):
    if (tag.contents[0].strip()) == "Environment:":
        monster_dict['Environment:'].append(monster_tags[0].text)
    elif (tag.contents[0].strip()) == "Monster Tags:":
        monster_dict['Monster Tags:'].append(monster_tags[1].text)
    else:
        # Any other footer paragraph is treated as the source-book line.
        monster_dict['Source'].append(tag.contents[0].strip())
# +
# Wrap every column in a pandas Series so unequal column lengths are padded
# with NaN when the DataFrame is assembled.
monster_dict = {key: pd.Series(values) for key, values in monster_dict.items()}
monster_df = pd.DataFrame(monster_dict)
monster_df.to_csv('../data/raw/MummyTest.csv')
# -
# ## Scraping DnDBeyond for all SRD monsters
# 1. We will change out naming database since DnDBeyond is now active for us. We will need to first iterate through each of the pages of monster files.
# 2. Then we will need to read each monster on each of the page and place them into our monster_list
# 3. Next we will remove any spaces in the monster names and replace them with '-' this will be necessary for the urls
# 4. We will append to the monster url and add to the monster_url list, which we will then use to iterate over for our above test.
# ### Parsing Request Class and Selenium Function
# We want our final request clean and clear, so we will create a reusable request class with a get_selenium function.
# This function will randomize our user profile to help protect against throttling/halting the scrape. We will
# also perform this as headless so as not to tax our computer. The function looks for a certain class, and waits a certain
# amount of time. If it sees the class, the function will return the page_source information, otherwise it will close the
# browser.
#
# This class and function were adapted from a medium post by <NAME> on scraping Facebook and Tiktok
# +
# Testing the Request class and get_selenium on a single known monster page:
# wait for the 'mon-stat-block__name' element to render, then parse the HTML.
page_html = Request('https://www.dndbeyond.com/monsters/adult-green-dragon').get_selenium("mon-stat-block__name")
bs(page_html,'lxml')
# -
# ### DnD Monster Page Iteration
# The website has the same formula 'https://www.dndbeyond.com/monsters?page=' so we just need to iterate from 1 to 106 (last page)
# +
url = 'https://www.dndbeyond.com/monsters?page='
monster__name = []
# Walk the paginated monster index and collect every monster's display name.
for page_number in range(1, 3):
    page_html = Request(url + str(page_number)).get_selenium('name')
    soup = bs(page_html, 'lxml')
    for name_span in soup.find_all('span', {'class': 'name'}):
        monster__name.append(name_span.text.strip())
    # Throttle between pages to avoid being rate-limited.
    sleep(60)
# +
# URL-format the names: dash-join words, then strip parentheses; purely
# numeric entries are passed through untouched in both steps.
monster_nospaces = [
    name if name.strip().isdigit() else name.replace(' ', '-')
    for name in monster__name
]
monster_name_url_ending = [
    name if name.strip().isdigit() else name.replace('(', '').replace(')', '')
    for name in monster_nospaces
]
# -
# Save our names and urls in case IDE crashes (add -r to recover)
# %store -r monster_dict
# %store -r monster__name
# %store -r monster_name_url_ending
# ## Use Monster Names & Custom Function to scrap SRD Monster Data
# Using our test function from the Mummy, we will iterate over all the monsters in monster_name_url_ending
# to parse each monster page for their data and slam it into the dictionary!
#
# we saved our previous variable using store magic, so we don't need to rerun the monster names or column names each time.
#
# The function is stored as a helper function in the src folder
# ### Iterate over monster pages
# Don't grab any info that we don't have access to
# +
url = 'https://www.dndbeyond.com/monsters/'
j = 0
# Visit each monster page in turn and harvest its stats into monster_dict.
for ending in monster_name_url_ending[2:107]:
    page_html = Request(url + ending).get_selenium('mon-stat-block__name')
    j += 1
    # Blocked/inaccessible monster pages return None and are simply skipped.
    if page_html is not None:
        soup = bs(page_html, 'lxml')
        monster_stat_gathering(soup, monster_dict)
    # Throttle, then report progress.
    sleep(60)
    print(j)
print(monster_dict)
# Store dictionary in case crash
# %store monster_dict
# +
# Column lengths must agree before the dict can be turned into a DataFrame.
list_length = [len(column_values) for column_values in monster_dict.values()]
print(list_length)
monster_df = pd.DataFrame(monster_dict)
monster_df.to_csv('../data/raw/Partial_Monster_Data8.csv')
# -
# ### Round 2: Reparse
# Apparently, we only got about 1/2 the monsters possible from that run. It's possible that our time to wait is too short, or we have some internet blips.
#
# I will consolidate all the tests we have done and create a list of names from that list. Then we can remove the names we already have from our master list so we don't rerun them on the next parse.
# %store -r monster_dict
# Create a list of monster names that we have already parsed for comparison.
# (list() normalises whatever container "Monster Name" holds — list or pandas
# Series after the earlier conversion — back to a plain list of names.)
monster_name_parsed = list(monster_dict["Monster Name"])
monster_name_parsed
# +
# Using our original list from DnDWiki, see which monsters differ between sources.
# Perf: hoist each membership target into a set once — the originals tested
# `name not in <list>` inside a comprehension, an O(n) scan per lookup.
# DnDWiki Difference
dndwiki_name_set = set(monster_names_dndwiki)
monster_names_not_in_dndwiki = [name for name in monster_name_parsed if name not in dndwiki_name_set]
print(len(monster_names_not_in_dndwiki), "monsters not found on DnDWiki from DnDBeyond")
# DnDBeyond difference
parsed_name_set = set(monster_name_parsed)
monster_names_not_parsed_from_DnDBeyond = [name for name in monster_names_dndwiki if name not in parsed_name_set]
print(len(monster_names_not_parsed_from_DnDBeyond), "on the DnDWiki list we haven't parsed in DnDBeyond")
# -
# ### Interesting Results
# The dndwiki list has 173 monsters that were not parsed, 231 monsters total. We have parsed 101 monsters, which means the dndbeyond site has available monsters not on the dndwiki page.
# 43 monsters according to the difference in the list
#
# This means we shouldn't use the dndwiki list since it will clearly miss cool monsters
# ### Can we make a DnDBeyond friendly list?
# I don't want to parse 1300 or even 1200 websites again. Is there a way to parse only the basic rules info?
#
# Or, what if I iterated through the dropdown on the monster page with Selenium clicks? That is how I got the monster names, I would still only parse what I have access to
# +
# Turn the monster name parsed list into a "url-ending" list
monster_nospace=[]
# Filter through and replace spaces with dashes to format for urls
for name in monster_name_parsed:
if not(name.strip().isdigit()):
new_name = name.replace(' ','-')
monster_nospace.append(new_name)
else:
monster_nospace.append(name)
monster_name_url_ending_parsed = []
# Filter and replace '()' with nothing
for name in monster_nospace:
if not(name.strip().isdigit()):
new_name = name.replace('(','')
final_name = new_name.replace(')','')
monster_name_url_ending_parsed.append(final_name)
else:
monster_name_url_ending_parsed.append(name)
# Remove the allready parsed monsters from the full url-ending list
monster_name_url_ending_second_round = list(name for name in monster_name_url_ending if name not in monster_name_url_ending_parsed)
len(monster_name_url_ending_second_round)
# -
# ### Rerun
# We have removed the monsters we have already parsed (101) and will rerun with 1279 monsters. I will increase the wait time to 20 seconds hopefully, this will improve capture.
#
# ### Rerun 2
# We have removed monsters after the second run (additional 58) and will rerun with 1221 monsters. I will increase the wait time to 30 seconds.
#
# ### Rerun 3
# We have removed monsters after the third run (additional 28) and will rerun with 1193 monsters. I brought the wait time down to 15 seconds again to see if there is any impact.
#
# ### Rerun 4
# We have removed monsters after the third run (additional 17) and will rerun with 1176 monsters.
#
# ### Rerun 5
# We have removed monsters after the 5th run (additional 10) and will rerun. Will spend time thinking about how to remove the known unavailable monsters before proceeding to reduce run time.
# +
url = 'https://www.dndbeyond.com/monsters/'
j=0
# Iterate through monster names as urls and perform scrape and stat function
# (j is only a progress counter printed after each request).
for i in monster_name_url_ending_second_round[0:1381]:
    page_html = None
    page_html = Request(url+i).get_selenium('mon-stat-block__name')
    j+=1
    # A None page means the request/wait failed or the page is inaccessible.
    if page_html is not None:
        soup = bs(page_html, 'lxml')
        monster_stat_gathering(soup, monster_dict)
    # NOTE(review): the markdown above describes tuning this wait per rerun,
    # but the code still sleeps a fixed 60 s between requests.
    sleep(60)
    print(j)
# %store monster_dict
# -
# ### Recreate List
# Before rerunning, we will recreate the list of names from the monster site, while filtering for monsters only in the free source lists. This should limit the number of monsters we need to parse.
# +
# Find the unique source books represented in what we have parsed so far.
source_list=np.array(monster_df['Source'])
print(np.unique(source_list))
# Iterate through all the monster index pages on dndbeyond.
ser = Service('../env/chromedriver.exe')
url = 'https://www.dndbeyond.com/monsters?page='
monster_name_source_material= []
for i in range(1,107):
    # NOTE(review): a brand-new Chrome is launched (and quit) per page —
    # slow, but it keeps each page load isolated.
    browser = webdriver.Chrome(service=ser)
    browser.get(url+str(i))
    page_html = browser.page_source
    soup = bs(page_html, 'lxml')
    monster_names = soup.find_all('div',{'class':"row monster-name"})
    # At each page, keep only monsters whose source book is one of the
    # already-parsed (publicly accessible) sources.
    for monster in monster_names:
        page_name = monster.find('span',{'class':'name'})
        source_name = monster.find('span',{'class':'source'})
        if source_name.text.strip() in source_list:
            monster_name_source_material.append(page_name.text.strip())
    browser.quit()
    sleep(6)
# %store monster_name_source_material
# -
# %store -r monster_name_source_material
len(monster_name_source_material)
# ### Running Source Only
# After creating a list of public source only material, I have come up with 549 monster. We will remove any monster we have already parsed. This leaves us with 335 monsters left. A MUCH shorter list to parse.
# +
monster_name_parsed = list(monster_dict["Monster Name"])
monster_name_url_ending_source_material = list(name for name in monster_name_source_material if name not in monster_name_parsed)
len(monster_name_url_ending_source_material)
# +
# URL-format the remaining source-material names: dashes for spaces, then
# strip parentheses; purely numeric entries pass through unchanged.
monster_nospace = [
    name if name.strip().isdigit() else name.replace(' ', '-')
    for name in monster_name_url_ending_source_material
]
monster_name_url_ending = [
    name if name.strip().isdigit() else name.replace('(', '').replace(')', '')
    for name in monster_nospace
]
# +
url = 'https://www.dndbeyond.com/monsters/'
j = 0
# Scrape each remaining source-material monster page into monster_dict.
for ending in monster_name_url_ending[0:215]:
    page_html = Request(url + ending).get_selenium('mon-stat-block__name')
    j += 1
    # Inaccessible pages come back as None and are skipped.
    if page_html is not None:
        soup = bs(page_html, 'lxml')
        monster_stat_gathering(soup, monster_dict)
    # Throttle, then report progress.
    sleep(60)
    print(j)
# %store monster_dict
# -
# Count and save the monster dictionary.
len(monster_dict['Monster Name'])
# Bug fix: monster_df was last materialised before the later scraping passes,
# so saving it here wrote a stale snapshot.  Rebuild it from the up-to-date
# monster_dict (columns wrapped in pd.Series so unequal lengths pad with NaN).
monster_df = pd.DataFrame({key: pd.Series(values) for key, values in monster_dict.items()})
monster_df.to_csv('../data/raw/Monster_Data_RAW.csv')
| notebooks/1.0_ASI_Data_Scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/alpyesilgul/sunSpot_Prediction/blob/main/Time_Series_and_Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2IGDpKFu30cj"
# **Merhaba**, 1749 yılından 2018 yılına kadar Güneşimizde gözlenen [güneş lekeleri](https://tr.wikipedia.org/wiki/G%C3%BCne%C5%9F_lekeleri)nin bir kaydı ile gelecekte oluşabilecek lekeler için bir tahmin algoritması oluşturucağız
# + [markdown] id="P_sS51ow6mRD"
# Basit bir algoritma olmasına karşın genel amacım elimden geldiğince Türkçe bir kaynak oluşturmak; zaman serileri ile tahmin algoritmalarının nasıl geliştirileceğine dair bir önsezi oluşturmak.
# + [markdown] id="wPyeG5wQ697t"
# Gerekli açıklamalarda da bulunduğumuza göre biraz klavyeyi ısıtalım :)
# + id="x82ZHFwd7FgH"
# Datayı içeri aktaralım.
import pandas as pd
df = pd.read_csv('/content/Sunspots.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="kFgBnbf27YbM" outputId="15937c59-75a1-4634-d869-f1c728cbfd11"
# Dataya bi göz atalım
df.head()
# + [markdown] id="YnMbjtQP77_V"
# Datadan da görebildiğimiz gibi 1749'dan beri her ay olarak kayıt edilmiş şimdi gelin bu veriyi görselleştirelim.
# + id="eG70uOEo71ET"
# Sunspot counts as a float array; 'Unnamed: 0' is the CSV's row index,
# used here as the (monthly) time axis.
series = df['Monthly Mean Total Sunspot Number'].values.astype(float)
time = df['Unnamed: 0'].values.astype(int)
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="1HMSpqIk9L99" outputId="6736663e-f8a8-442c-a5a6-620e8fc950e6"
import matplotlib.pyplot as plt
# Plot the full monthly sunspot series to inspect its overall structure.
plt.figure(figsize=(10,6))
plt.plot(time, series)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
# + [markdown] id="1cRRkeK2AOcV"
# Şimdi görebiliceğimiz gibi bir zaman serisi grafiği var elimizde. Bu grafiği train ve validation olarak bölümlememiz gerekiyor.
# + id="iJGKs0u1AgZe"
# There are 3214 time steps in total: the first 3000 are used for training,
# the remaining 214 for validation.
split_time = 3000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# + [markdown] id="eRRUj_CtBDw2"
# Şimdi modele bir data pipeline oluşturmak gerekiyor. Pipeline veri işleyen bir sisteme veriyi en verimli şekilde kullanabilmesi için oluşturduğumuz bir dizi filtreleme işlemidir tabi kullanılma yerine göre tanımı farklı olabilir ama genel olarak anlamı bir filtreler dizisidir. Geleceğe yönelik bir tahmin yapıyoruz ve bunları geçmişe dayalı olarak ilerletiyoruz burda veriyi en verimli olarak örneğin 5'lik paketlerde(batch) birer adım ilerleyecek şekilde sisteme verebiliriz yani ağımız her aldığı veriyi ;
# >[1,2,3,4], [2,3,4,5], [3,4,5,6] etc.
#
# Bu batchlere karşılık da y değerleri yani doğru değerler olarak da bir sonraki değeri verebiliriz. Yani kısaca x,y haritalamamız şu şekilde olucak:
#
# >{x:[1,2,3,4], y:[5], x:[2,3,4,5], y:[6] etc.}
#
# "Bu bilgiler ışığında" -proje ödevlerinde kullanamayı en sevdiğim paragraf başı cümlesi ile- yardımcı fonksiyonumuzu oluşturalım
# + id="Sq0LeTVoDOU-"
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into shuffled, batched (window, shifted-window) pairs."""
    # Add a trailing feature axis so each time step is a length-1 vector.
    expanded = tf.expand_dims(series, axis=-1)
    dataset = tf.data.Dataset.from_tensor_slices(expanded)
    # Slide a window of window_size + 1 steps, advancing one step at a time.
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    # Shuffle for training efficiency.
    dataset = dataset.shuffle(shuffle_buffer)
    # x = the first window_size steps, y = the same window shifted one step ahead.
    dataset = dataset.map(lambda window: (window[:-1], window[1:]))
    # Batch, and prefetch the next batch while the current one trains
    # (see tf.data.Dataset.prefetch).
    return dataset.batch(batch_size).prefetch(1)
# + [markdown] id="j1yTv3XoFL0g"
# Geleceğe dair tahminlere "forecast" denir. Eğitilen ağa test verisini gönderip tahminleri almak için de ayrı bir yardımcı fonksiyon oluşturalım.
# + id="vsU8gco_FgEy"
def model_forecast(model, series, window_size):
    """Run *model* over every sliding window of *series* and return predictions."""
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size))
    dataset = dataset.batch(32).prefetch(1)
    # A single predict() call consumes the whole windowed dataset.
    return model.predict(dataset)
# + [markdown] id="a8SedVTBL1qB"
# Geldik modeli oluşturmaya :) Şimdi veri setimizdeki x,y haritalanmasını anladık. İlk katman olarak 1 boyutta Conv ağı kullandık. Bilenler elbette ki vardır, konvülasyon görüntülerdeki dikey yatay çizgileri öğrenmek için kullandığımız bir katman. Teorik olarak bir matristeki patternları yakalıyor ve veri setimize baktığımızda bu patternları yakalamak oldukça işimize yarayabilir. Günlük hayatımızda gelecek için deterministik tahminler yürütmek için ilk başvurumuz geçmiş, yani tecrübelerimizdir. Bu bakış açısı ile de Long-Short Term Memory katmanı kullanmak oldukça makul :) Son katmanda ise en nihayetinde bu bir grafik, tahminleri daha duyarlı hale getirmek adına her sonucu 400 ile çarptım. Bir bakıma bu tahmin-zaman fonksiyon eğrisini 400 birim yukarı çektim gibi kabaca bir açıklama ile anlatılabilir. İsterseniz Lambda katmanını silerek tahminlere bakabilirsiniz.
# + id="Q8B8w8vFPQ3g"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="UvC5TS4hOdg1" outputId="d3f6078e-acd6-49ce-9760-2dea361942cf"
shuffle_buffer_size = 1000
window_size = 60   # each training window covers 60 consecutive time steps
batch_size = 100   # batch_size * steps_per_epoch = 3000 training samples
train_set = windowed_dataset(x_train, window_size=window_size, batch_size=batch_size, shuffle_buffer=shuffle_buffer_size)
# Conv1D picks up local patterns, the stacked LSTMs model the temporal
# structure, and the final Lambda rescales the dense output by 400 to bring
# predictions into the sunspot-count range.
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=60, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 400)
])
# Fix: `lr` is the long-deprecated alias (removed in newer Keras releases);
# use the canonical `learning_rate` keyword.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),  # robust to outlier spikes (see Huber loss)
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set,epochs=500)
# + id="jjHTiza4XoEE"
# The model is trained; now generate forecasts.
import numpy as np
# Add a feature axis, forecast every window, then keep only the validation
# span: the last step of each output sequence, first (only) feature.
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
# + colab={"base_uri": "https://localhost:8080/"} id="2M6VFCZKXtlT" outputId="33a77b76-ff80-474d-ec29-fe3cb948f4d6"
rnn_forecast
# + [markdown] id="caOW9akeYoHK"
# Tüm veri için tahmin aldık ve test kısmını bölümledik. Elde edilen tahminlerin doğruluğunu gerçek değerlerle bir grafikte yazdıralım ve tartışalım.
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="7A61Nxd4YxUX" outputId="ad482cc3-359e-4cb4-94bd-111671310bd4"
# Overlay forecasts against ground truth on the validation span.
plt.figure(figsize=(10,6))
plt.plot(time_valid, rnn_forecast, 'r') # red: our predictions
plt.plot(time_valid, x_valid,'g') # green: actual values
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
# + [markdown] id="fLNzgiFwcigx"
# Sezgisel olarak algoritma gerçek zamanlı kullanılması hiç makul değil :) Ki güneş lekeleri gibi hassas bi konu için tartışalamaz bile. Fakat önsezi için oluşturduğumuz ağ fena denemeyecek tahminler ortaya koydu. Doğru optimizasyon ve veri analizi teknikleri ile daha spesifik tahminler de bulunabilicek ağlar üretmemiz mümkün hele ki elimizde bunun gibi oldukça fazla veri varsa :) Ne derler bilirsiniz "Let the data take care ".
# Ben öğrendikçe buraya elimden geldiğince daha çok proje bırakıcam galiba bu benim için bir yeni hobi oldu popüler de olamıyacağım için istediğiniz zaman bana ulaşabilirsiniz :) Esen kalın <3
| Time_Series_and_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
# import load_data function from helper file
# %load_ext autoreload
# %autoreload 2
# fix system path
import sys
sys.path.append("/home/jovyan/work")
# -
# Import the training split of the NBA rookie dataset.
training_data = pd.read_csv('../data/raw/train (1).csv')
# Structural inspection: sample rows, dtypes/null counts, summary statistics,
# and a closer look at the 3-point-percentage column.
training_data.head()
training_data.info()
training_data.describe()
training_data['3P%'].describe()
# Data fields
# Id_old - Previous Player Identifier
# Id - Player Identifier
# GP - Games Played
# MIN - Minutes Played
# PTS - Points Per Game
# FGM - Field Goals Made
# FGA - Field Goals Attempts
# FG% - Field Goals Percent
# 3P Made - 3-Points Made
# 3PA - 3-Points Attempts
# 3P% - 3-Points Percent
# FTM - Free Throw Made
# FTA - Free Throw Attempts
# FT% - Free Throw Percent
# OREB - Offensive Rebounds
# DREB - Defensive Rebounds
# REB - Rebounds
# AST - Assists
# STL - Steals
# BLK - Blocks
# TOV - Turnovers
# TARGET_5Yrs - Outcome: 1 if career length >= 5 years, 0 otherwise
training_data.FTM.describe()
# Since some values are < 0 and contextually that doesn't make sense for
# count statistics, negative values are replaced with zero further below.
training_data.shape
# how many distinct id's are there?
len(training_data.Id.unique())
# check distribution of the target
training_data.TARGET_5Yrs.value_counts()
# All variables are numeric, no missing values. Target is the last column, and use Id at column 2, remove column 1.
#
# 0 class is underrepresented, ratio about 16% 0, 84% 1
#
# This could affect the power of the model
# ## Prepare data for model
# Work on a copy so the raw training frame stays intact.
df_cleaned = training_data.copy()
# remove the legacy identifier (not a predictive feature)
df_cleaned.drop('Id_old', axis=1, inplace=True)
df_cleaned.shape
# remove the current identifier (not a predictive feature)
df_cleaned.drop('Id', axis=1, inplace=True)
df_cleaned.shape
# Clamp impossible negative stat values to zero.
df_cleaned[df_cleaned<0] = 0
# create Y: pop removes the target column from the feature frame in place.
target = df_cleaned.pop('TARGET_5Yrs')
from sklearn.model_selection import train_test_split
# 80/20 train/validation split with a fixed seed for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(df_cleaned, target, test_size=0.2, random_state=8)
# +
# import dataset - test (same raw format as train, but without the target)
test_data = pd.read_csv('../data/raw/test (1).csv')
# -
test_data.info()
test_data.describe()
# remove old Id
test_cleaned = test_data.copy()
test_cleaned
test_cleaned.drop('Id_old', axis=1, inplace=True)
print(test_cleaned)
# create Id column for pasting to output for kaggle
ID = pd.DataFrame(test_cleaned['Id'])
ID
ID.info()
# persist the Id column, then read it back to confirm the round trip
ID.to_csv('../data/interim/test_id_col.csv', index=False)
id_test = pd.read_csv('../data/interim/test_id_col.csv')
print(id_test)
test_cleaned.drop('Id', axis=1, inplace=True)
# clamp negatives to zero, mirroring the training-set cleaning
test_cleaned[test_cleaned < 0] = 0
# +
# save them all out to data/processed folder
# np.save('../data/processed/X_train_new', X_train)
# np.save('../data/processed/X_val_new', X_val)
# np.save('../data/processed/y_train_new', y_train)
# np.save('../data/processed/y_val_new', y_val)
np.save('../data/processed/X_test_new', test_cleaned)
# -
| notebooks/data_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.python.platform import gfile
from google.protobuf import json_format
import json
# Load a frozen TF1 GraphDef from disk and import it into the default graph
with tf.Session() as sess:
    with tf.gfile.GFile("frozen.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def)
# Convert the protobuf graph to a JSON dict for easier inspection
str_json = json_format.MessageToJson(graph_def)
d = json.loads(str_json)
length_nodes = len(d['node'])
# Strip bulky raw tensor payloads from constant nodes so the dict stays small
for i in range(length_nodes):
    if 'value' in (d['node'][i]['attr'].keys()):
        del d['node'][i]['attr']['value']['tensor']['tensorContent']
d['node'][5]
import networkx as nx
# Build an undirected graph: one vertex per op node, one edge per input reference
G = nx.Graph()
for i in range(length_nodes):
    node_name = d['node'][i]['name']
    G.add_node(node_name)
    if 'input' in list(d['node'][i].keys()):
        inputs = d['node'][i]['input']
        for j in inputs:
            G.add_edge(node_name, j)
G.number_of_nodes()
import matplotlib.pyplot as plt
nx.draw(G, with_labels=True)
d['node'][1]
| visualization/codes/drawing_graphs/graphvis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to derive moment equations in Compartor
# $$
# \def\n{\mathbf{n}}
# \def\x{\mathbf{x}}
# \def\N{\mathbb{\mathbb{N}}}
# \def\X{\mathbb{X}}
# \def\NX{\mathbb{\N_0^\X}}
# \def\C{\mathcal{C}}
# \def\Jc{\mathcal{J}_c}
# \def\DM{\Delta M_{c,j}}
# \newcommand\diff{\mathop{}\!\mathrm{d}}
# \def\Xc{\mathbf{X}_c}
# \def\Yc{\mathbf{Y}_c}
# \newcommand{\muset}[1]{\dot{\{}#1\dot{\}}}
# $$
# +
# initialize sympy printing (for latex output)
from sympy import init_printing, Symbol
init_printing()
# import functions and classes for compartment models
from compartor import *
# -
# ## Moment declaration
# The purpose of Compartor is to generate the moment equations: these are a system of ODEs that describes the time evolution of a set of expected population moments.
#
# To denote a particular moment, use the `Moment` class. It takes as constructor arguments the integer exponents in each species. For instance, the following list
moments = [
Moment(0),
Moment(0)**2,
Moment(1),
Moment(1)**2
]
display(moments)
# denotes 4 moment expressions of a system comprising $D=1$ chemical species. Instead, this list
moments = [
Moment(0,0),
Moment(0,0)**2,
Moment(1,0),
Moment(1,0)**2,
Moment(0,1),
Moment(0,1)**2,
Moment(1,0)*Moment(0,1)
]
display(moments)
# collects 7 moment expressions of a system of dimensionality $D=2$.
# Compartor offers two main modalities to handle the derivation of the moment equations for a given compartment population model.
#
# The user can choose a fully automated derivation, where possible further moment dependencies and moment closures are managed internally by the tool. The automated usage is usually recommended, especially when the user seeks for a one-shot solution to create a closed system of moment equations.
#
# Alternatively, more experienced users might opt for a step-by-step analysis of the moment equations, which permits a more gradual control on the included moment statistics and on the application of moment closures.
#
# In this notebook, we illustrate both automated and manual derivation for the case study shown in the paper.
#
# First, we specify the model (see (1) HOWTO - define the model).
# +
# Content variables for compartment states (x, y are vectors of species counts)
x = Content('x')
y = Content('y')
# Intake Distribution
# NOTE: '\p' and '\l' are not recognized escape sequences, so Python keeps
# them literal -- which is what the LaTeX symbol names here rely on.
pi_I = OutcomeDistribution.Poisson(Symbol('\pi_{I}(y; \lambda)'),y[0],Symbol('\lambda'))
# Transition classes: intake of new compartments, pairwise fusion,
# conversion of species 0 into species 1 (rate ~ x[0]), and
# degradation of species 1 (rate ~ x[1])
Intake = TransitionClass( {} -to> [(y[0],0)], 'k_I', pi=pi_I, name='I')
Fusion = TransitionClass( [x] + [y] -to> [x+y], 'k_F', name='F')
Conversion = TransitionClass( [x] -to> [x + (-1,1)], 'k_c', x[0], name='c')
Degradation = TransitionClass( [x] -to> [x + (0,-1)], 'k_d', x[1], name='d')
transitions = [ Intake, Fusion, Conversion, Degradation]
display_transition_classes(transitions)
# -
# ## Automated derivation of moment equations
# The function `automated_moment_equations()` handles automatically the process of moment identification, equation generation and moment closure for a provided model.
#
# `automated_moment_equations()` takes the following arguments:
# * the number `D` of chemical species of the system
# * the set of transition classes defining the model
# * optionally, a list of `Moments` that must be included in the system of moment equations.
#
# When the optional set of moments is not given, `automated_moment_equations()` outputs the smallest closed system of moment equations containing at least the total number and the total amount of chemical species. Let's call `automated_moment_equations()` on the `transitions` of the case study:
equations = automated_moment_equations(2, transitions)
# The function outputs a summary of the operations executed in the derivation and returns the final system of moment equations. To display the derived equations, use the `display_moment_equations()` function.
display_moment_equations(equations)
# Optionally, a list of moments can be passed to `automated_moment_equations()` to require them to be included in the system of moment equations.
desired_moments = [Moment(0,0), Moment(0,1)**2]
equations = automated_moment_equations(2, transitions, desired_moments)
display_moment_equations(equations)
# In the latter case, further moments and more closures were required to converge to a closed system of equations.
# Internally, the function `automated_moment_equations()` works by applying in iterative fashion the different steps that would occur in the manual derivation. For advanced users, it is then useful to get an idea of the step-by-step derivation of the moment equations as reported in the following.
# ## Step-by-step derivation of moment equations
# In the manual derivation, the user needs to specify a set of moment of interest. Considering again the case study, we define a set of moments which we aim to characterize:
moments = [
Moment(0,0),
Moment(0,0)**2,
Moment(1,0),
Moment(1,0)**2,
Moment(0,1),
Moment(0,1)**2,
Moment(1,0)*Moment(0,1)
]
display(moments)
# Then, call the function `compute_moment_equations()` to generate the moment ODES for the given model and the desired set of moments
equations = compute_moment_equations(transitions, moments)
display_moment_equations(equations)
# The `display_moment_equations()` function outputs a warning when the system is not closed, and which moments are missing. (This output can be suppressed by passing the optional second argument `display_moment_equations(equations, print_missing=False)`).
#
# These "missing" moments are moment expressions whose expectation occurs on the right-hand side of an equation but which are not governed by the system. They can also be computed using the `get_missing_moments()` function.
missing = get_missing_moments(equations)
missing
# At this point, the user can decide to add the missing moments to the current list (e.g. `moments += list(missing)`) and reiterate the derivation by calling `compute_moment_equations()`, or to apply a moment closure to replace the missing moments with functions of those already available.
# ### Moment closures
# The moment equations are usually not in closed form for a large class of models. In those cases, expanding the moment dynamics does not help to converge to a closed system of equations. Thus, it is necessary to resort to moment closure schemes.
#
# A moment closure is an expression that replaces moments of higher order with functions of moments of lower order. Thus, an open system of equations can be converted into an approximated closed system of equations. In Compartor, the following closure functions can be applied to a list of `missing` moments:
# * `gamma_closures()` applies a third-order multivariate Gamma closure,
# * `meanfield_closures()` applies a meanfield closure,
# * `hybrid_closures()` applies first the Gamma closure and then the meanfield closure to those moments not treatable by the Gamma closure.
# In the case above, the Gamma closure is suited to close the third-order missing moment. Thus, we can compute the closure
closures = gamma_closures(missing)
display_closures(closures)
# and substitute it in `equations` with the function `substitute_closures()`
equations = substitute_closures(equations, closures)
display_moment_equations(equations)
# The system of equations is now closed and ready to be solved.
#
# The usage of the meanfield and the hybrid closure is analogous, but typically is associated with a greater approximation error. This is why the function `automated_moment_equations()`, which was introduced in the previous section, prioritizes the Gamma closure and only resorts to meanfield (thus, the hybrid scheme) when the Gamma closure is not applicable.
| (2) HOWTO - derive moment equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Auto AI pieces
# - Profiler: https://www.geeksforgeeks.org/data-profiling-in-pandas-using-python/
# - AutoAI: https://www.kdnuggets.com/2019/01/automated-machine-learning-python.html
# - lale: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/docs_guide_for_sklearn_users.ipynb
# - auto-sklearn: https://automl.github.io/auto-sklearn/master/examples/20_basic/example_classification.html#sphx-glr-examples-20-basic-example-classification-py
# -
# ## Data Profiling
# +
## Install as needed
# #!pip install -U pandas-profiling
# -
import pandas as pd
import pandas_profiling
# Local data cache
datadir = '../common-data/covid/'
datafile = datadir + "data.csv"
# Read data from local file
data = pd.read_csv(datafile, parse_dates=['date'])
data.head()
# Run the profile report
profile = data.profile_report(title='Pandas Profiling Report')
# Saving the report as html file
profile.to_file(output_file="data/coviddata_profiling.html")
# ## Lale - select method and tune paramaters
# +
## Install if needed
# #! pip install lale
# -
# Import the datasets
import lale.datasets
# Load a data and see inside
(train_X, train_y), (test_X, test_y) = lale.datasets.california_housing_df()
data = pd.concat([train_X.head(), train_y.head()], axis=1)
data.describe()
# +
import numpy as np
import sklearn.metrics
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor as Tree
from sklearn.pipeline import Pipeline
from lale.lib.lale import Hyperopt
# -
lale.wrap_imported_operators()
pca_tree_planned = Pipeline(steps=[('tfm', PCA), ('estim', Tree)])
# %%time
pca_tree_trained = pca_tree_planned.auto_configure(
train_X, train_y, optimizer=Hyperopt, cv=3, max_evals=10, verbose=True)
# Predictions
predicted = pca_tree_trained.predict(test_X)
print(f'R2 score {sklearn.metrics.r2_score(test_y, predicted):.2f}')
# ## Automated Algo Selection
# +
## Install and other issues
# # !pip install auto-sklearn
## Note: If you face issue with swig, install with brew install swig
# +
# #!pip install -U scikit-learn
# #!pip install auto-sklearn
# +
import sklearn.datasets
import sklearn.metrics
import autosklearn.classification
# -
X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = \
sklearn.model_selection.train_test_split(X, y, random_state=1)
automl = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=120,
per_run_time_limit=30,
tmp_folder='data/autosklearn_classification_example_tmp',
output_folder='data/autosklearn_classification_example_out',
)
automl.fit(X_train, y_train, dataset_name='breast_cancer')
# Print the model
print(automl.show_models())
# Performance metric
predictions = automl.predict(X_test)
print("Accuracy score:", sklearn.metrics.accuracy_score(y_test, predictions))
| sample-code/l12-explanability-autoai/auto ai exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.linear_model import LinearRegression # Model
from sklearn.metrics import r2_score # Goodness of fit
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
# data transformation, data augmentation
import matplotlib.pyplot as plt # plotting
import pandas as pd # data manipulation and analysis
import numpy as np # numerical computation
# -
df = pd.read_csv('CAE_dataset.csv')
df.shape
df.head(n=10)
# plotting the first feature as a function of time
# +
# want all the features so that i chunk each test run
def get_features_by_test_run(df):
    """Return a list with one (run_length, 12) feature array per test run.

    BUG FIX: the original loop body was missing entirely (an
    IndentationError when the cell runs) and the module-level call fed the
    function into itself; the loop now slices the feature matrix at each
    run's start index, using the [start, pilot_id] pairs from
    list_of_indexes.
    """
    # columns 1..12 stacked and transposed to shape (n_rows, 12)
    features = np.transpose([np.array(df.iloc[:, n]) for n in range(1, 13)])
    indexes = list_of_indexes(df)
    features_by_run = []
    for k, (start, _pilot) in enumerate(indexes):
        # the last run extends to the end of the data
        end = indexes[k + 1][0] if k + 1 < len(indexes) else len(features)
        features_by_run.append(features[start:end])
    return features_by_run

# display the first run's features (transposed back to one row per feature)
np.transpose(get_features_by_test_run(df)[0])
# +
# all of the features
def get_features(df):
    """Return columns 1 through 11 of *df* as a list of numpy arrays."""
    extracted = []
    for col in range(1, 12):
        extracted.append(np.array(df.iloc[:, col]))
    return extracted
features = get_features(df)
#for i in range(len(features)): print(len(features[i]),end="\t")
# +
# x is a list of start indexes of each person, [starting point index,pilot_id]
# returns x
def list_of_indexes(df):
    """Return [[start_row, pilot_id], ...] marking where each test run begins.

    The pilot id lives in the last column of *df*; a new run starts whenever
    the id changes from the previous row. Pilot ids that start more than one
    run are reported on stdout, followed by the number of such duplicates.
    """
    ids = np.array(df.iloc[:, -1])
    current = ids[0]
    runs = [[0, ids[0]]]
    for pos in range(len(ids)):
        if ids[pos] != current:
            runs.append([pos, ids[pos]])
            current = ids[pos]
    # debug check: count pilot ids that appear as the start of several runs
    dup = 0
    for a in range(len(runs)):
        for b in range(a + 1, len(runs)):
            if runs[a][1] == runs[b][1]:
                print(a, ":", runs[a], "\n", b, ":", runs[b])
                dup += 1
    print(dup)
    return runs
x = list_of_indexes(df)
print(len(x))
print(count)
# +
# plot the features of a single test run
i = 27 # e.g. the 5'th pilot's data, the index into x
def disp_features(i):
    """Plot each feature over time for the i-th test run.

    Relies on the module globals ``df`` (raw data) and ``x`` (run start
    indexes from ``list_of_indexes``) plus the ``get_features`` helper.
    """
    plt.figure(figsize=(16, 16))
    # BUG FIX: get_features requires the dataframe argument (it was called
    # with none, which raises TypeError)
    features = get_features(df)
    for n in range(len(features)):
        feature = features[n]
        plt.subplot(4, 3, n + 1)
        # NOTE(review): the time axis assumes 10 samples/second; the stop
        # value x[i][0]/10 looks like it should be the run duration -- confirm.
        try:
            plt.plot(np.linspace(0, x[i][0] / 10, x[i + 1][0] - x[i][0]),
                     feature[x[i][0]:x[i + 1][0]])
        except IndexError:
            # last run: no successor start index, so slice to the end.
            # BUG FIX: the point count must match the slice length (the
            # original passed x[i][0] points for a differently-sized slice).
            tail = feature[x[i][0]:]
            plt.plot(np.linspace(0, x[i][0] / 10, len(tail)), tail)
        if n == 10:
            plt.title("0 or 1, defective pilot label")
        else:
            plt.title("feature n=" + str(n))
        plt.xlabel("time in seconds")
    plt.show()
def disp_feat_start_index(start_index):
    """Plot every feature for the run whose first data row is *start_index*.

    Relies on the module globals ``df`` (raw data) and ``x`` (run start
    indexes from ``list_of_indexes``) plus the ``get_features`` helper.
    """
    plt.figure(figsize=(16, 16))
    # BUG FIX: get_features requires the dataframe argument
    features = get_features(df)
    # BUG FIX: x holds [start, pilot_id] pairs, so look the bare start index
    # up in a list of starts (the original searched x for the int directly,
    # and the computed list of starts was never used)
    starts = [pair[0] for pair in x]
    index_i = starts.index(start_index)
    # BUG FIX: removed an accidentally duplicated nested loop over n
    for n in range(len(features)):
        feature = features[n]
        plt.subplot(4, 3, n + 1)
        try:
            # BUG FIX: plot the run's slice, not a single scalar sample
            plt.plot(np.linspace(0, x[index_i][0] / 10,
                                 x[index_i + 1][0] - x[index_i][0]),
                     feature[x[index_i][0]:x[index_i + 1][0]])
        except IndexError:
            # last run has no successor start index; keep the original
            # best-effort behaviour of skipping the plot
            print()
    plt.show()
disp_feat_start_index(647)
# -
#x.index([0, 327])
x
disp_features(111)
| .ipynb_checkpoints/Features-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .robot
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: RobotFramework (XRobot)
# language: robotframework
# name: xrobot
# ---
# # Robotic Process Automation
# ## Libraries
# * Install xeus-robot - https://github.com/jupyter-xeus/xeus-robot
# * Install selenium library and web driver - https://robotframework.org/SeleniumLibrary/
# ## Settings
# +
*** Settings ***
Documentation Settings with reusable keywords and variables.
...
... The system specific keywords created here form our own
... domain specific language. They utilize keywords provided
... by the imported SeleniumLibrary.
Library SeleniumLibrary
Library OperatingSystem
Library String
*** Variables ***
${SERVER} https://www.investing.com
${BROWSER} Chrome
${STOCKS URL} ${SERVER}/equities/americas
${stocks_filter} xpath=//*[@id="stocksFilter"]
${stocks_to_grab} S&P 500
${stock_link} //tr[starts-with(@id,'pair')]/td/a
${link_count} 0
*** Keywords ***
Open Browser To Stocks Page
Open Browser ${STOCKS URL} ${BROWSER}
Save to File
[Arguments] ${value1} ${value2}
Append To File path=${EXECDIR}/stocks.txt content=${value1},${value2}\r\n
# -
# ## Test Cases
*** Test Cases ***
Get All Stocks
    # Open the equities page and narrow the table to the configured index
    Open Browser to Stocks Page
    Maximize Browser Window
    Wait Until Element Is Visible    ${stocks_filter}
    Select From List By Label    ${stocks_filter}    ${stocks_to_grab}
    # Wait for the stock rows to render, then count the link elements
    Wait Until Element Is Visible    xpath:${stock_link}
    ${link_count}=    Get Element Count    xpath:${stock_link}
    Log Many    link_count ${link_count}
    Should Be True    ${link_count} > 0
    # Scrape name and URL of every stock row and append them to stocks.txt
    FOR    ${index}    IN RANGE    1    ${link_count}+1
        ${link_text}=    Get Text    xpath:(${stock_link})[${index}]
        ${link_url}=    Get Element Attribute    xpath=(${stock_link})[${index}]    href
        Log Many    link_text ${link_text}
        Log Many    link_url ${link_url}
        Save to File    ${link_text}    ${link_url}
    END
    Close All Browsers
| nbs/06_robotic_process_automation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Dataset python package
# En este notebook se muestran las funcionalidades implementadas con una base de datos creada como ejemplo, probando primero con 2 atributos sueltos y luego con una base de datos creada a partir de esos dos atributos.
#
# # Atributos
#
# En este primer apartado vamos a usar los objetos atributos y algunas de sus funcionalidades, con las que trabajaremos luego con el objeto Dataset. Lo primero, creamos un atributo numérico y otro nominal para ver las diferentes opciones
# + pycharm={"name": "#%%\n"}
import pandas as pd
from dataset.Attribute import Attribute
from dataset.Dataset import Dataset
from dataset.utils import roc, read_csv, create_log, write_log
a1 = Attribute([1,2,3,2,1,2,3,3,2,1,1], 'categoria')
a2 = Attribute(["dog", "dog", "cat", "wolve", "cat", "dog", "dog", "dog", "cat", "wolve", "dog"], "animal")
# -
# Una vez creado el atributo, vamos a probar algunas funcionalidades.
#
# + pycharm={"name": "#%%\n"}
# Este método trata los datos como si fuesen categóricos
a1.entropy()
# + pycharm={"name": "#%%\n"}
a2.entropy()
# + pycharm={"name": "#%%\n"}
# Normalizar y estandarizar
a11 = a1.normalize()
a12 = a1.estandarize()
print(a11)
# -
#
# + pycharm={"name": "#%%\n"}
print(a12)
# -
#
# + pycharm={"name": "#%%\n"}
a1.variance()
# + pycharm={"name": "#%%\n"}
a21 = a1.discretizeEF(3)
a22 = a1.discretizeEW(3)
print(a21[0])
# + pycharm={"name": "#%%\n"}
print(a22[0])
# -
# # Dataset
#
# Ahora, vamos a pasar a trabajar con el objeto Dataset. Reutilizamos los atributos a1 y a2 y les añadimos una clase, como si de clasificación supervisada se tratase.
# + pycharm={"name": "#%%\n"}
# Define the class labels for the dataset (as in supervised classification)
clase = ["a", "a", "b", "a", "b", "a", "b", "c","a","b", "a"]
d = Dataset()
d = d.add_column(a1.name, None)
d = d.add_column(a2.name, None)
print(len(d.columns))
# Append one row per observation: both attribute values plus the class label
for i in range(a1.size):
    d.append([a1.v[i], a2.v[i], clase[i]])
print(d)
# + pycharm={"name": "#%%\n"}
d.col_var()
# + pycharm={"name": "#%%\n"}
d.col_entropy()
# -
# Vemos que hemos calculado la varianza y la entropía por cada columna. Para la varianza, no se puede calcular la varianza de una variable categórica, pero en el caso de la entropía, la variable numérica la trata como categórica. Ahora, vamos a discretizar, normalizar y estandarizar el dataset como antes.
# + pycharm={"name": "#%%\n"}
print(d.estandarize_dataset())
# -
# Vemos que solo se ha modificado el atributo numérico.
# + pycharm={"name": "#%%\n"}
print(d.normalize_dataset())
# + [markdown] pycharm={"name": "#%% md\n"}
#
# -
# Vemos que en este caso, solo ha discretizado la columna numérica. También podemos sacar dos gráficas.
# + pycharm={"name": "#%%\n"}
d.normalized_entropy_plot()
# + pycharm={"name": "#%%\n"}
# Añadimos otro atributo numérico para que se pueda ver algo en la siguiente gráfica
c3 = [1,2,3,3,2,2,3,2,2,1,1]
d.add_column("categoria2", c3)
d.correlation_plot()
# -
# Como resultado del último plot, podemos ver un heatmap de correlación entre variables.
#
# ## Funciones extras
#
# También se proveen algunas funciones extras para trabajar con logs o dibujar la curva roc. Para la curva roc, primero crearemos un dataframe en el que la primera columna será la probabilidad y la segunda la clase real.
# + pycharm={"name": "#%%\n"}
df = pd.DataFrame({"PROBS": [0.6,0.8,0.1,0.7,0.5,0.4,0.6,0.6,0.9,1], "CLASE": [True,True,True,False,False,True,True,False,True,False]})
roc(df)
# -
# Y por último, leer un csv y algunas funciones de log.
# + pycharm={"name": "#%%\n"}
print(read_csv('iris.csv'))
# + pycharm={"name": "#%%\n"}
# Creamos archivo miLog.log
create_log("miLog.log")
write_log("miLog.log", "Nuevo mensaje")
# Vemos que efectivamente sea escrito en el archivo
with open('miLog.log', "r") as f:
text = f.read()
print(text)
# + [markdown] pycharm={"name": "#%% md\n"}
#
| dataset/vignette.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
#|hide
#|skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
# # Collaborative filtering tutorial
#
# > Using the fastai library for collaborative filtering.
from fastai.tabular.all import *
from fastai.collab import *
# +
#|all_slow
# -
# This tutorial highlights on how to quickly build a `Learner` and train a model on collaborative filtering tasks.
# ## Training a model
# For this tutorial, we will use the [Movielens 100k data dataset](https://grouplens.org/datasets/movielens/100k/). We can download it easily and decompress it with the following function:
path = untar_data(URLs.ML_100k)
# The main table is in `u.data`. Since it's not a proper csv, we have to specify a few things while opening it: the tab delimiter, the columns we want to keep and their names.
ratings = pd.read_csv(path/'u.data', delimiter='\t', header=None,
usecols=(0,1,2), names=['user','movie','rating'])
ratings.head()
# Movie ids are not ideal to look at things, so we load the corresponding movie id to the title that is in the table `u.item`:
movies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1',
usecols=(0,1), names=('movie','title'), header=None)
movies.head()
# Next we merge it to our ratings table:
ratings = ratings.merge(movies)
ratings.head()
# We can then build a `DataLoaders` object from this table. By default, it takes the first column for user, the second column for the item (here our movies) and the third column for the ratings. We need to change the value of `item_name` in our case, to use the titles instead of the ids:
dls = CollabDataLoaders.from_df(ratings, item_name='title', bs=64)
# In all applications, when the data has been assembled in a `DataLoaders`, you can have a look at it with the `show_batch` method:
dls.show_batch()
# fastai can create and train a collaborative filtering model by using `collab_learner`:
learn = collab_learner(dls, n_factors=50, y_range=(0, 5.5))
# It uses a simple dot product model with 50 latent factors. To train it using the 1cycle policy, we just run this command:
learn.fit_one_cycle(5, 5e-3, wd=0.1)
# Here's [some benchmarks](https://www.librec.net/release/v1.3/example.html) on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91 (scroll down to the 100k dataset), which corresponds to an MSE of `0.91**2 = 0.83`. So in less than a minute, we got pretty good results!
# ## Interpretation
# Let's analyze the results of our previous model. We will keep the 1000 most rated movies for this:
g = ratings.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_movies[:10]
# ### Movie bias
# Our model has learned one bias per movie, a unique number independent of users that can be interpreted as the intrinsic "value" of the movie. We can grab the bias of each movie in our `top_movies` list with the following command:
movie_bias = learn.model.bias(top_movies, is_item=True)
movie_bias.shape
# Let's compare those biases with the average ratings:
mean_ratings = ratings.groupby('title')['rating'].mean()
movie_ratings = [(b, i, mean_ratings.loc[i]) for i,b in zip(top_movies,movie_bias)]
# Now let's have a look at the movies with the worst bias:
item0 = lambda o:o[0]
sorted(movie_ratings, key=item0)[:15]
# Or the ones with the best bias:
sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]
# There is certainly a strong correlation!
# ### Movie weights
# Now let's try to analyze the latent factors our model has learned. We can grab the weights for each movie in `top_movies` the same way as we did for the bias before.
movie_w = learn.model.weight(top_movies, is_item=True)
movie_w.shape
# Let's try a PCA to reduce the dimensions and see if we can see what the model learned:
movie_pca = movie_w.pca(3)
movie_pca.shape
fac0,fac1,fac2 = movie_pca.t()
movie_comp = [(f, i) for f,i in zip(fac0, top_movies)]
# Here are the highest score on the first dimension:
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
# And the worst:
sorted(movie_comp, key=itemgetter(0))[:10]
# Same thing for our second dimension:
movie_comp = [(f, i) for f,i in zip(fac1, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
# And we can even plot the movies according to their scores on those dimensions:
# BUG FIX: removed a dead `idxs = np.random.choice(...)` assignment that was
# immediately overwritten by the deterministic range below (behaviour is
# unchanged; the random draw was never used)
idxs = list(range(50))
X = fac0[idxs]
Y = fac2[idxs]
plt.figure(figsize=(15,15))
plt.scatter(X, Y)
# label each point with its movie title in a random muted colour
for i, x, y in zip(top_movies[idxs], X, Y):
    plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11)
plt.show()
| nbs/46_tutorial.collab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="rBtZq-U83rnd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="eb875d2b-2943-4cdd-96ad-9a429f0ce56a"
# !pip install jimner==1.2.3
# + id="koYQDeoe3t5A" colab_type="code" colab={}
from jimner import jimner
# + id="8QY7okbY5uKB" colab_type="code" colab={}
a=jimner()
# + id="R0C9eHLI3w7_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a08a0192-e51f-4253-8b4e-dddc2e90e13e"
a.__get_dir__()
# + id="P-D3_HPP_EnQ" colab_type="code" colab={}
# + id="A4EIdHfq5ru5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="4b71a4cd-f9d2-4809-85ce-b76da16717fa"
a.get_banner_from_text('3-D','jimner')
# + id="Ll6YXnN35xtn" colab_type="code" colab={}
| jimner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow2_p36
# language: python
# name: conda_tensorflow2_p36
# ---
# # Amazon SageMaker Notebook for ProcGen Starter Kit with Single Instance
# +
import os
import yaml
import sagemaker
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
import boto3
# -
with open(os.path.join("config", "sagemaker_config.yaml")) as f:
sagemaker_config = yaml.safe_load(f)
# ## Initialize Amazon SageMaker
# +
sm_session = sagemaker.session.Session()
s3_bucket = sagemaker_config["S3_BUCKET"]
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
# +
job_name_prefix = 'sm-ray-procgen'
role = sagemaker.get_execution_role()
print(role)
# -
# ### Configure training instance type and computational resources
#
# By default (`local_mode=False`) launch a separate instance for training and debug using the AWS CloudWatch to monitor the logs for the training instance.
# If you want to train on the same instance as your notebook for quick debugging, then set `local_mode=True`.
#
# The recommended instances include with cost per hour as of September, 1, 2020 are:
# * `ml.c5.4xlarge` $0.952 per hour (16 vCPU)
#
# * `ml.g4dn.4xlarge` $1.686 per hour (1 GPU, 16 vCPU)
#
# * `ml.p3.2xlarge` $4.284 per hour (1 GPU, 8 vCPU)
#
# After you choose your instance type, make sure the edit the resources in `source\train-sagemaker.py`. For example, with `ml.p3.2xlarge`, you have 1 GPU and 8 vCPUs. The corresponding resources in `source\train-sagemaker.py` should be set as for `ray` as `
#
# ```
# def _get_ray_config(self):
# return {
# "ray_num_cpus": 8, # adjust based on selected instance type
# "ray_num_gpus": 1,
# "eager": False,
# "v": True, # requried for CW to catch the progress
# }
# ```
# and for `rllib` you need to use 1 vCPU for the driver ("num_workers": 7) and 1 GPU ("num_gpus": 1) for policy training.
# +
# Change local_mode to True if you want to do local training within this Notebook instance
# Otherwise, we'll spin-up a SageMaker training instance to handle the training
local_mode = False
if local_mode:
instance_type = 'local'
else:
instance_type = sagemaker_config["CPU_TRAINING_INSTANCE"]
# If training locally, do some Docker housekeeping..
if local_mode:
# !/bin/bash source/common/setup.sh
# -
# # Configure the framework you want to use
#
# Set `framework` to `"tf"` or `"torch"` for tensorflow or pytorch respectively.
#
# You will also have to edit your entry point i.e., `train-sagemaker.py` with the configuration parameter `"use_pytorch"` to match the framework that you have selected.
framework = "tf"
# # Train your model here
# ### Edit the training code
#
# The training code is written in the file `train-sagemaker.py` which is uploaded in the /source directory.
#
# #### *Warning: Confirm that the GPU and CPU resources are configured correctly for your instance type as described above.*
# !pygmentize source/train-sagemaker.py
# ### Train the RL model using the Python SDK Script mode
#
# If you are using local mode, the training will run on the notebook instance.
#
# When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
#
# 1. Specify the source directory where the environment, presets and training code is uploaded.
# 2. Specify the entry point as the training code
# 3. Specify the custom image to be used for the training environment.
# 4. Define the training parameters such as the instance count, job name, S3 path for output and job name.
# 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
# *[Choose](https://github.com/aws/sagemaker-rl-container#rl-images-provided-by-sagemaker) which docker image to use based on the instance type.*
# For this notebook, it has to be a container with Ray 0.8.5 and TensorFlow 2.1.0 to be consistent with the AICrowd ProcGen starter kit.
#
# If you prefer to use PyTorch, it is recommended to update your notebook kernel to `conda_pytorch_p36`. You would need to substitute for the corresponding container listed on Amazon SageMaker Reinforcement Learning documentation. In addition, you will need to ensure your starter kit is modified to train using PyTorch.
# +
# Build the ECR URI of the SageMaker RL (Ray 0.8.5) container for this region,
# framework ("tf"/"torch") and hardware flavor.
# ml.p* / ml.g* instance families are GPU-backed; everything else runs on CPU.
# NOTE(review): assumes `instance_type` and `framework` were set in earlier cells.
cpu_or_gpu = 'gpu' if instance_type.startswith(('ml.p', 'ml.g')) else 'cpu'
aws_region = boto3.Session().region_name
# Use Tensorflow 2 by default
custom_image_name = "462105765813.dkr.ecr.{}.amazonaws.com/sagemaker-rl-ray-container:ray-0.8.5-{}-{}-py36".format(aws_region, framework, cpu_or_gpu)
custom_image_name  # echo the image URI as the cell output
# -
# You need to define metrics to be displayed in the logs. The challenge has requirements on the number of steps and uses mean episode reward to rank various solutions. For details, refer to the AICrowd challenge website.
# Metrics scraped from the training logs (visible in CloudWatch and via
# TrainingJobAnalytics). Each regex captures a signed decimal with optional exponent.
# FIX: the original list declared 'training_iteration' twice; the duplicate
# entry has been removed.
metric_definitions = [
    {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episodes_total', 'Regex': 'episodes_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'num_steps_trained', 'Regex': 'num_steps_trained: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'timesteps_total', 'Regex': 'timesteps_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_min', 'Regex': 'episode_reward_min: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
]
# ### Run the RL estimator
#
# There are 16 environments to choose from. You can run the RL estimator on multiple environments by proving a list of environments as well. The RL estimator will start the training job. This will take longer compared to the above cells, be patient. You can monitor the status of your training job from the console as well, go to Amazon SageMaker > Training jobs. The most recent job will be at the top.
# +
# Select which procgen environments to run in `envs_to_run`
# The full set of 16 procgen environments is kept below for reference.
'''
envs_to_run = ["coinrun", "bigfish", "bossfight", "caveflyer",
               "chaser", "climber", "coinrun", "dodgeball",
               "fruitbot", "heist", "jumper", "leaper", "maze",
               "miner", "ninja", "plunder", "starpilot"]
'''
envs_to_run = ["coinrun", "bigfish", "bossfight"]
# -
# Launch one asynchronous training job per environment; fit(wait=False) returns
# immediately and the job name is printed so progress can be tracked in the console.
# NOTE(review): RLEstimator, role, instance_type, s3_output_path and
# job_name_prefix are assumed to come from earlier cells — confirm before running.
for env in envs_to_run:
    estimator = RLEstimator(entry_point="train-sagemaker.py",
                            source_dir='source',
                            dependencies=["source/utils", "source/common/", "neurips2020-procgen-starter-kit/"],
                            image_name=custom_image_name,
                            role=role,
                            train_instance_type=instance_type,
                            train_instance_count=1,
                            output_path=s3_output_path,
                            base_job_name=job_name_prefix + "-" + env,
                            metric_definitions=metric_definitions,
                            debugger_hook_config=False,
                            hyperparameters={
                                # "rl.training.upload_dir": s3_output_path,
                                "rl.training.config.env_config.env_name": env,
                            }
                            )
    estimator.fit(wait=False)
    print(estimator.latest_training_job.job_name)
# #### WAAAITTTTT... not more than 2 hours
# ## Evaluate the model
# ### Visualize algorithm metrics for training
#
# There are several options to visualize algorithm metrics. A detailed blog can be found [here](https://aws.amazon.com/blogs/machine-learning/easily-monitor-and-visualize-metrics-while-training-models-on-amazon-sagemaker/).
#
#
# Option 1 (Amazon CloudWatch): You can go to the [Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) metrics dashboard from your account to monitor and visualize the algorithm metrics as well as track the GPU and CPU usage. The training jobs details page has a direct link to the Amazon CloudWatch metrics dashboard for the metrics emitted by the training algorithm.
#
# Option 2 (Amazon SageMaker Python SDK API): You can also visualize the metrics inline in your Amazon SageMaker Jupyter notebooks using the Amazon SageMaker Python SDK APIs. Please, refer to the section titled *Visualize algorithm metrics for training* in `train.ipynb`.
#
# Option 3 (Tensorboard): You can also use Ray Tune's integrated Tensorboard by specifying the output directory of your results. It is recommended to set `upload_dir` to an Amazon S3 URI and Tune will automatically sync every 5 minutes. You can thus visualize your experiment by running the following command on your local laptop:
#
# `
# $AWS_REGION=your-aws-region tensorboard --logdir s3://destination_s3_path --host localhost --port 6006
# `
#
# Check out `train-homo-distributed-cpu.ipynb` for an example of setting `upload_dir`.
# #### Option 2: Plot metrics using Amazon SageMaker Python SDK API
#
# You need to wait for the training job to allocate computational resources before viewing the logs.
#
# *Note: If you get a warning that the logs do not exist, wait for a few minutes and re-run the cell.*
#
# *Note 2: If you are getting an import error from Tensorflow, open a terminal and type `source activate tensorflow2_p36`*
# +
# For usage, refer to https://sagemaker.readthedocs.io/en/stable/api/training/analytics.html#
from sagemaker.analytics import TrainingJobAnalytics
import matplotlib.pyplot as plt
# %matplotlib inline
from source.utils.inference import get_latest_sagemaker_training_job
# Get the most recent training job name for each environment
eval_training_jobs = [get_latest_sagemaker_training_job(name_contains="{}-{}".format(
    job_name_prefix, env)) for env in envs_to_run]
# One reward-vs-timesteps plot per environment, built from CloudWatch metrics.
for training_job_name, env in zip(eval_training_jobs, envs_to_run):
    metric_names = ['episode_reward_mean', 'timesteps_total']
    # download the metrics from CloudWatch
    metrics_dataframe = TrainingJobAnalytics(training_job_name=training_job_name, metric_names=metric_names).dataframe()
    # pivot so each metric becomes a column indexed by timestamp
    metrics_dataframe = metrics_dataframe.pivot(index='timestamp', columns='metric_name', values='value')
    fig = plt.figure()
    ax = metrics_dataframe.plot(kind='line', figsize=(12, 5), x='timesteps_total', y='episode_reward_mean', style='b.', legend=False)
    ax.set_ylabel('Episode Reward Mean')
    ax.set_xlabel('Timesteps')
    ax.set_title(env)
# -
# ## Rollout the model
#
# ### Note that the following evaluation requires that at least one training job has completed.
# +
import numpy as np
import gym
import matplotlib.pyplot as plt
from IPython import display
import ray
from ray.tune.registry import get_trainable_cls
from ray.rllib.models import ModelCatalog
from source.custom.envs.procgen_env_wrapper import ProcgenEnvWrapper
from source.custom.models.my_vision_network import MyVisionNetwork
from source.utils.inference import get_model_config, get_latest_sagemaker_training_job
from source.utils.inference import download_ray_checkpoint
from source.utils.inference import rollout
# Start a local Ray runtime on the notebook instance for the rollout.
ray.init()
# -
# Rollout configuration: algorithm, environment and number of steps to replay.
run = "PPO"
rollout_env = "coinrun"
num_steps = 1000
# +
# You can choose to use the latest training job or
# input the name of your previously trained job
latest_training_job = get_latest_sagemaker_training_job(name_contains="{}-{}".format(
    job_name_prefix, rollout_env))
# latest_training_job = <name of your training job>
print("Rolling out training job {}".format(latest_training_job))
# Download the last Ray checkpoint produced by the training job into ./checkpoint.
# NOTE(review): assumes `os`, `s3_bucket` and `job_name_prefix` were defined in
# earlier cells — confirm before running.
checkpoint_dir = "checkpoint"
if not os.path.isdir(checkpoint_dir):
    os.mkdir(checkpoint_dir)
last_checkpoint_num = download_ray_checkpoint(checkpoint_dir, s3_bucket, latest_training_job)
# Print the parameters in the model
# !cat $checkpoint_dir/params.json
# -
# You must register all agents, algorithms, models, and preprocessors that you defined in the entry-point.
# For example, a model could be registered like `ModelCatalog.register_custom_model("my_vision_network", MyVisionNetwork)`, custom agents could be registered with `ray.registry.register_trainable`, etc.
cls = get_trainable_cls(run)
ModelCatalog.register_custom_model("my_vision_network", MyVisionNetwork)
config = get_model_config()
agent = cls(config=config)
checkpoint = os.path.join("checkpoint", "checkpoint-{}".format(last_checkpoint_num))
agent.restore(checkpoint)
# Replay the trained agent and collect rendered frames.
rgb_array = rollout(agent, "procgen:procgen-{}-v0".format(rollout_env),
                    num_steps, no_render=False)
# Animate the frames inline by repeatedly replacing the displayed image.
img = plt.imshow(rgb_array[0])
plt.axis('off')
for arr in rgb_array[1:]:
    img.set_data(arr)
    display.display(plt.gcf())
    display.clear_output(wait=True)
ray.shutdown()
| sagemaker/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project:-1 Customer Segmentation.!!
# import modules
import pandas as pd # for dataframes.
import numpy as np
import matplotlib.pyplot as plt # for plotting graphs
import seaborn as sns # for plotting graphs
import datetime as dt
# ## Reading dataset
# NOTE(review): assumes 'Online Retail.xlsx' is present in the working directory.
data = pd.read_excel("Online Retail.xlsx")
data.head()
data.tail()
data.info()
# ## Data Cleaning Removing Duplicates & Negative values
# One row per (Country, CustomerID) pair, for counting customers per country.
filtered_data=data[['Country','CustomerID']].drop_duplicates()
filtered_data.info()
# Top ten countries by customer count
filtered_data.Country.value_counts()[:10].plot(kind='bar') # Bar chart
filtered_data.Country.value_counts()[:10].plot(kind='pie')
# Restrict the analysis to United Kingdom transactions.
uk_data=data[data.Country=='United Kingdom']
uk_data.info()
uk_data.describe()
# Keep only rows with Quantity greater than zero (drops returns/cancellations).
uk_data=uk_data[(uk_data['Quantity']>0)]
uk_data.info()
# ## Performing K-Means Clustering
# +
# Violin plot of the Quantity column.
# NOTE(review): the title says "Ages Frequency" but this dataset has no age
# column — the plot actually shows the distribution of `Quantity`. The section
# heading above ("K-Means Clustering") also does not match this cell, which
# performs no clustering. Confirm intent.
df = pd.read_excel("Online Retail.xlsx")
df.head()
df.drop(["CustomerID"], axis = 1, inplace=True)
plt.figure(figsize=(10,6))
plt.title("Ages Frequency")
sns.axes_style("dark")
sns.violinplot(y=df["Quantity"])
plt.show()
# -
# ## RFM Model Executed
# +
# Here, you can filter the necessary columns for RFM analysis.
#You only need her five columns CustomerID, InvoiceDate, InvoiceNo,
#Quantity, and UnitPrice. CustomerId will uniquely define your customers, invoiceDate help you calculate recency of purchase, InvoiceNo helps you to count the number of time
#transaction performed(frequency). Quantity purchased in each transaction and UnitPrice of each unit purchased by the customer will help you to calculate the total purchased amount.#
# -
# Keep only the columns needed for RFM and derive the line-item total.
uk_data=uk_data[['CustomerID','InvoiceDate','InvoiceNo','Quantity','UnitPrice']]
uk_data['TotalPrice']=uk_data['Quantity'] * uk_data['UnitPrice']
uk_data['InvoiceDate'].min(),uk_data['InvoiceDate'].max()
# Snapshot date used to measure recency (just after the last invoice date).
PRESENT = dt.datetime(2011,12,10)
uk_data['InvoiceDate'] = pd.to_datetime(uk_data['InvoiceDate'])
uk_data.head()
# ## RFM Analysis (Recency, Frequency, Monetary).
rfm = uk_data.groupby('CustomerID').agg({'InvoiceDate': lambda date: (PRESENT - date.max()).days,   # recency
                                         'InvoiceNo': lambda num: len(num),                         # frequency
                                         'TotalPrice': lambda price: price.sum()})                  # monetary
rfm.columns
# BUG FIX: the aggregation produces columns in the order
# InvoiceDate -> recency, InvoiceNo -> frequency, TotalPrice -> monetary.
# The original renamed them ['monetary','frequency','recency'], swapping the
# recency and monetary labels, so every downstream quartile score was computed
# on the wrong metric.
rfm.columns=['recency','frequency','monetary']
rfm['recency']=rfm['recency'].astype(int)
rfm.head()
# ## Computing Quantile of RFM values
# +
# Customers with the lowest recency, highest frequency and monetary amounts considered as top customers.
# qcut splits each metric into quartiles. Recency uses ascending labels
# ('1' = most recent = best); frequency and monetary use reversed labels
# ('1' = highest = best).
rfm['r_quartile'] = pd.qcut(rfm['recency'], 4,['1','2','3','4'])
rfm['f_quartile'] = pd.qcut(rfm['frequency'], 4,['4','3','2','1'])
rfm['m_quartile'] = pd.qcut(rfm['monetary'], 4,['4','3','2','1'])
# -
rfm.head()
# ## RFM Result Interpretation
# Combine all three quartiles (r_quartile, f_quartile, m_quartile) into a single score column used to segment customers into groups.
rfm['RFM_Score']= rfm.r_quartile.astype(str) + rfm.f_quartile.astype(str) + rfm.m_quartile.astype(str)
rfm.head()
# Filter out Top/Best Customers
# NOTE(review): under the label scheme above the best possible score is '111';
# '441' selects customers with the WORST recency but best frequency/monetary —
# confirm which segment is actually intended here.
rfm[rfm['RFM_Score']=='441'].sort_values('monetary',ascending=False).head()
| Customer Segmentation Project 1 (Capstone Project) (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# BUG FIX: the original passed names={"DPT","NAME","ADDRESS"} — a *set*, whose
# iteration order is arbitrary, and supplying `names` makes pandas treat the
# file's header row as data while renaming the columns to only those three
# names. Every later cell (df['SAL-RATE'], df['PC'], stripping df.columns)
# relies on the CSV's own header, so the file must be read with it intact.
df = pd.read_csv("Civil_List_2014.csv")
df.head()
df['SAL-RATE'].describe()  # SAL-RATE holds currency strings; converted below with money_to_float
df.columns
# str.strip demo: the column names in this CSV carry stray whitespace
"DPT ".strip()
for col in df.columns:
    print(col.strip())
# +
# its a list, so use [] — list comprehension of the cleaned names
[col.strip() for col in df.columns]
# -
# Actually replace the column index with the whitespace-stripped names.
df.columns = [col.strip() for col in df.columns]
df.columns
df['PC'].value_counts()
df['SAL-RATE'].describe()
def money_to_float(money_str):
    """Convert a currency string such as '$7,888.00' to a float (7888.0)."""
    # Strip every '$' and ',' in one pass, then parse the remainder.
    return float(money_str.translate(str.maketrans('', '', '$,')))
print(money_to_float("$7888.00"))
#money_to_float(df['SAL-RATE'])
# Apply the converter element-wise; then persist it as a numeric 'salary' column.
df['SAL-RATE'].apply(money_to_float)
df['salary']= df['SAL-RATE'].apply(money_to_float)
df.head()
# %matplotlib inline
df['salary'].hist(bins=50) #bins is 50 bars
df['ADDRESS'].value_counts()
agencies_df = pd.read_csv("cleaned-agencies.csv", dtype='str')
agencies_df.head()
col_types={'code':'str'}  # NOTE(review): defined but never used below
# NOTE(review): pandas documents true_values/false_values as lists
# (e.g. ['Y'] / ['N']); passing bare strings may not behave as intended — verify.
agencies_df = pd.read_csv("cleaned-agencies.csv", dtype='str', true_values='Y', false_values='N')
agencies_df.head()
agencies_df = pd.read_csv("cleaned-agencies.csv", na_values="-999", true_values='Y', false_values='N')
# Inner-join payroll rows with agency metadata on their shared column(s).
df.merge(agencies_df)
| homework10/11-data/classwork 11_june 27_foundations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: imlpython3
# language: python
# name: imlpython3
# ---
# # Batch Job Analytics -- Master Batch Job - Elapsed Time Prediction
#
# 1. Read joined widetable data file for Master Batch Job, which generated from "2_BatchJob_MBJ_DataExploration.ipynb"
# 2. Separate data into train & test parts
# 3. Train model with 3 different combinations
# 4. Ensemble 3 algorithm results as final result
# 5. Visualization and evaluation of test result
#
# ### Define parameters
# Define parameters indicate input path, file, split date and target column for prediction<p>
# P_FILE_PATH: path where input file widetable_MBJ.csv located<br>
# P_JOBNAME: job name, here use "MBJ" for Master Batch Job data<br>
# P_FILENAME: the file with data ready for prediction <br>
# P_SPLIT_DATE: the date to split data into train data and test data, before it for train, the date equal and later than it is for test<br>
# P_TARGET_NAME: the column name in data for predict <br>
#
# _Usage:<br>
# When you try to use this notebook to predict other date or other jobs/applications elapsed time or even other metrics, you could change split date, data file name, jobname, target name here, according to your prepared input data file. _
#
P_FILE_PATH = ''  # directory containing the input CSV ('' = working directory)
P_JOBNAME = 'MBJ'  # Master Batch Job
P_FILENAME = P_FILE_PATH+'widetable_'+P_JOBNAME+'.csv'
P_SPLIT_DATE = '2016-11-01'  # train: before this date; test: this date and later
P_TARGET_NAME = 'ELAPSED_MINUTE'  # column to predict
# ### Import necessary packages and define 3 basic functions
# - Definition of function __*"CalMape(actual, pred)"*__
# - function calculates Mean Absolute Percentage Error between actual value and predicted value
# - Definition of function __*"Eda(dataframe)"*__
# - function prints essential characteristics of a data frame
# - Definition of function __*"ConvertDate(s)"*__
# - function formats data to *'%Y-%m-%d'*
# +
import warnings
warnings.filterwarnings("ignore") #would not show warning message
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
def CalMape(actual, pred):
    """Mean Absolute Percentage Error between `actual` (a pandas Series) and
    `pred` (any sequence convertible to a DataFrame).

    Zero actuals contribute 0 to the mean (APE is undefined there); actuals in
    (0, 1] have their APE capped to [0, 1] via CutoffValue (defined later in
    this file) so tiny denominators cannot dominate the average.
    """
    MAPE = []
    PRED = pd.DataFrame(pred)
    for i in range(len(actual)):
        A = actual.values[i]
        if A == 0:
            # FIX: the original had a nested if/else here whose two branches
            # both assigned 0 — collapsed to a single assignment.
            L_PE = 0
        elif A <= 1:
            L_PE = np.abs(A - PRED.values[i]) / A
            L_PE = CutoffValue(L_PE, 1, 0)
        else:
            L_PE = np.abs(A - PRED.values[i]) / A
        MAPE.append(L_PE)
    return np.mean(MAPE)
# Basic Exploratory Data Analysis helper.
def Eda(dataframe):
    """Print a quick exploratory summary of `dataframe`: head, shape, describe()."""
    print('---------- Running Basic Exploratory Data Analysis ------------')
    sections = (
        ('---------------------- [ Data Head -5 ] ----------------------',
         (dataframe.head(),)),
        ('---------------------- [Data Description] ----------------------',
         (dataframe.shape, dataframe.describe())),
    )
    for banner, items in sections:
        print(banner)
        for item in items:
            print(item)
def ConvertDate(s):
    """Parse date string(s) in '%Y-%m-%d' form into pandas datetime value(s)."""
    ISO_FMT = '%Y-%m-%d'
    return pd.to_datetime(s, format=ISO_FMT)
# -
# ## Step 1: Read input data for Master Batch Job elapsed time prediction
#
# Get project context
# 1. Move text edit caret into begining of below cell
# 2. Click the button in upright menu, with hint of "Create new"
# 3. Select "Insert project context"
# 4. Use new generated code to replace old code in the cell
# +
#!!Notes:# Please prepare the project environment manually: click the pull-down menu (v) of the Create new (+) tool and select "Insert project context".
########## The project context should like this:
########## pc = ProjectContext.ProjectContext('Batch_Job_Analytics', '2_BatchJob_MBJ_DataExploration', '***', 'xx.xx.xx.xx')
# -
#
# - Definition of column names for the data
# - Data import from .csv file
# - Data preprocessing
# Resolve the local dataset path via the DSX project context prepared above.
FILE_PATH = dsx_core_utils.get_local_dataset(pc, 'widetable_MBJ.csv')
P_FILENAME = FILE_PATH
# +
COLS = ['ELAPSED_MINUTE', 'DATE', 'YEAR', 'MONTH', 'DAY',
        'WEEKDAY', 'IS_WEEKEND', 'IS_MONTHEND', 'NEXTDAY', 'LASTDAY',
        'REFER_DATE', 'IS_HOLIDAY', 'IS_SPECIALDAY', 'BT01', 'BT02', 'BT03',
        'BT04', 'BT05', 'BT06', 'BT07', 'BT08', 'BT09', 'BT10', 'BT11', 'BT12',
        'BT13', 'BT14', 'BT15', 'BT16']
# Calendar flags are read as strings so they are one-hot encoded as categoricals later.
WIDETABLE = pd.read_csv(P_FILENAME,encoding="ISO-8859-1",
                        dtype={'YEAR':str,'MONTH':str,'DAY':str,'WEEKDAY':str,
                               'IS_WEEKEND':str,'IS_MONTHEND':str,'IS_HOLIDAY':str,'IS_SPECIALDAY':str},
                        usecols=COLS,
                        parse_dates=['DATE'])
print(WIDETABLE.info())
# -
WIDETABLE.head(5)
# +
#For all columns with continuous values add a new column with logarithmic scaled values each
CONTINUOUS_COLS = [ 'BT01', 'BT02', 'BT03',
                    'BT04', 'BT05', 'BT06', 'BT07', 'BT08', 'BT09', 'BT10', 'BT11', 'BT12',
                    'BT13', 'BT14', 'BT15', 'BT16']
# Iterate over a copy because CONTINUOUS_COLS grows inside the loop.
L_COLS = CONTINUOUS_COLS.copy()
for col in (L_COLS):
    col_add = col+'_ln'
    # log(0) is undefined, so zeros are kept as 0
    WIDETABLE[col_add] = WIDETABLE[col].apply(lambda x: np.log(x) if x!=0 else 0)
    CONTINUOUS_COLS.append(col_add)
# +
# Final feature set: categorical calendar flags + raw and log-scaled BT columns.
FEATURE_LIST = ['YEAR', 'MONTH', 'DAY',
                'WEEKDAY', 'IS_WEEKEND', 'IS_MONTHEND',
                'IS_HOLIDAY', 'IS_SPECIALDAY']+CONTINUOUS_COLS
TARGET_NAME = P_TARGET_NAME
FEATURE_LIST_NUM = CONTINUOUS_COLS
# -
# ## Step 2: Split data set to train and test dataset
# - Split data set in training and test set, [2016-01-01, split_date) as training data, [split_date,next 1 month) as test data
# - Weight data instances according to their age and correlation, based on known experience from domain experts
# +
import datetime as dt
from dateutil.relativedelta import relativedelta
pd.options.mode.chained_assignment = None # default='warn'
def SplitTrainTestWithWeightSplit(widetable,split_date):
    """Split `widetable` into train/test by date, oversampling recent history.

    Rows from selected look-back windows are duplicated (with a small random
    jitter added to the target so duplicates are not exact copies) to weight
    recent and seasonal data more heavily, per domain-expert guidance:
      * 16 extra copies of the month immediately before `split_date`
      *  4 extra copies of the month before that
      *  8 extra copies of the month before that
      *  4 extra copies of the same month one year earlier
    Returns (train, test) where train covers [2016-01-01, split_date) and
    test covers [split_date, split_date + 1 month).
    """
    WIDETABLE_FRQ = widetable.copy()
    L_DATE = pd.to_datetime(split_date)

    def _months_ago(m):
        # 'YYYY-MM-DD' string for m months before the split date
        return (L_DATE + relativedelta(months=-m)).strftime("%Y-%m-%d")

    L_1MONTH_AGO = _months_ago(1)
    L_2MONTH_AGO = _months_ago(2)
    L_3MONTH_AGO = _months_ago(3)
    L_11MONTH_AGO = _months_ago(11)
    L_12MONTH_AGO = _months_ago(12)
    L_1MONTH_LATER = (L_DATE + relativedelta(months=+1)).strftime("%Y-%m-%d")
    # (number of extra copies, window start inclusive, window end exclusive)
    WINDOWS = [(16, L_1MONTH_AGO, split_date),
               (4, L_2MONTH_AGO, L_1MONTH_AGO),
               (8, L_3MONTH_AGO, L_2MONTH_AGO),
               (4, L_12MONTH_AGO, L_11MONTH_AGO)]
    for n_copies, start, end in WINDOWS:
        for _ in range(n_copies):
            DF = widetable[(widetable['DATE'] >= start) & (widetable['DATE'] < end)].copy()
            # BUG FIX: the original wrote `DF.loc[TARGET_NAME] = ...`, which
            # appends a bogus ROW labelled with the target name (all NaN after
            # column alignment) instead of jittering the target column.
            DF[TARGET_NAME] = DF[TARGET_NAME].apply(lambda x: x + np.random.random())
            # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
            WIDETABLE_FRQ = pd.concat([WIDETABLE_FRQ, DF], ignore_index=True)
    L_WT_TRAIN = WIDETABLE_FRQ[(WIDETABLE_FRQ['DATE']<split_date) & (WIDETABLE_FRQ['DATE']>='2016-01-01')]
    L_WT_TEST = WIDETABLE_FRQ[(WIDETABLE_FRQ['DATE']<L_1MONTH_LATER) & (WIDETABLE_FRQ['DATE']>=split_date)]
    return L_WT_TRAIN,L_WT_TEST
# Build the weighted train/test split and separate features from the target.
WT_TRAIN,WT_TEST = SplitTrainTestWithWeightSplit(WIDETABLE,P_SPLIT_DATE)
X_train = WT_TRAIN[FEATURE_LIST]
y_train = WT_TRAIN[TARGET_NAME]
X_test = WT_TEST[FEATURE_LIST]
y_test = WT_TEST[TARGET_NAME]
# -
# ### Transform categorical features to one-hot encoding
# +
from sklearn.feature_extraction import DictVectorizer

# One-hot encode the categorical (string) columns by round-tripping rows
# through dict records; numeric columns pass through unchanged.
VEC = DictVectorizer(sparse=False)
# FIX: orient='records' — the abbreviated 'record' spelling was deprecated in
# pandas 1.x and removed in pandas 2.0.
X_train = VEC.fit_transform(X_train.to_dict(orient='records'))
X_test = VEC.transform(X_test.to_dict(orient='records'))
X_train_d = pd.DataFrame(X_train,columns=VEC.feature_names_).copy()
X_test_d = pd.DataFrame(X_test,columns=VEC.feature_names_).copy()
#Print vectorized result
print(VEC.feature_names_)
print(len(VEC.feature_names_))
# -
# ### Standardize all numeric fields to z-score, and adjust outliner values to outliner bound
#
# +
from sklearn.preprocessing import StandardScaler
# Fit the scaler on train only, then apply the same transform to test
# (prevents information from the test period leaking into the scaling).
X_SCALER = StandardScaler()
X_TRAIN_SS = X_SCALER.fit_transform(X_train_d[FEATURE_LIST_NUM])
X_TEST_SS = X_SCALER.transform(X_test_d[FEATURE_LIST_NUM])
X_TRAIN_SS = pd.DataFrame(X_TRAIN_SS, columns=FEATURE_LIST_NUM)
X_TEST_SS = pd.DataFrame(X_TEST_SS, columns=FEATURE_LIST_NUM)
def CutoffValue(x,up_bound,down_bound):
    """Clamp `x` into the closed interval [down_bound, up_bound]."""
    if x > up_bound:
        return up_bound
    if x < down_bound:
        return down_bound
    return x
# Clip standardized features to +/-3 sigma to tame outliers.
for COL in FEATURE_LIST_NUM:
    X_train_d[COL] = X_TRAIN_SS[COL].apply(lambda x: CutoffValue(x,3,-3))
    X_test_d[COL] = X_TEST_SS[COL].apply(lambda x:CutoffValue(x,3,-3))
# Clip the target to mean +/- 3 std; bounds come from the TRAIN target only.
L_M = y_train.mean()
L_STD = y_train.std()
# Keep unclipped copies for the final evaluation/plots.
ORIGINAL_Y_TRAIN = y_train.copy()
ORIGINAL_Y_TEST = y_test.copy()
y_train_d = y_train.apply(lambda x: CutoffValue(x,L_M+3*L_STD,L_M-3*L_STD))
y_test_d = y_test.apply(lambda x: CutoffValue(x,L_M+3*L_STD,L_M-3*L_STD))
y_train = y_train_d.copy()
y_test = y_test_d.copy()
# -
# ## Start to build model
#
# - training dataset: X_train_d, y_train
# - test dataset: X_test_d, y_test
# - feature selection: percentile selection
#
# Try 3 combinations from below algorithms
#
# 1. Lasso
# 2. Random Forest
# 3. GradientBoostingRegressor
#
# Combine with methods of
# 1. Feature Selection
# 2. cross validation
# 3. grid search for parameters
#
# Ensemble result from 3 models by mean
#
#
# +
# set some calculator used in pipeline for feature selection, cross validation
import shutil
import tempfile
import time
from sklearn import feature_selection
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.externals.joblib import Memory
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
CV = KFold(3) # cross-validation generator for model selection
CACHEDIR = tempfile.mkdtemp()
MEM = Memory(cachedir=CACHEDIR, verbose=False)
VT = feature_selection.VarianceThreshold()
# Anova univariate feature selection
F_REGRESSION = MEM.cache(feature_selection.f_regression)
ANOVA = feature_selection.SelectPercentile(F_REGRESSION)
#RES_LOG is a dataframe to record evaluation results for various algorithm, for train dataset or test dataset
RES_LOG = pd.DataFrame(columns=['Algorithm','Train_Test','RMSE','R2_Score','MAPE'])
#res_train record predicted result for train dataset, in specific algorithm
RES_TRAIN_PRED = pd.DataFrame(index=y_train.index)
#res_test record predicted result for test dataset, in specific algorithm
RES_TEST_PRED = pd.DataFrame(index=y_test.index)
# -
# Definition of function __*"EvalResult(algname,test,actual,predicted)"*__
# - function prints and returns evaluation metrics comparing actual and predicted values
def EvalResult(algname,test,actual,predicted):
    """Print RMSE, R2 and MAPE for one run and return RES_LOG with a row appended.

    `algname` must end in a 5-character train/test tag ('train' or ' test');
    the slicing below splits the algorithm name from the tag, so the downstream
    filters on 'train' and ' test' match exactly what is produced here.
    `test` is kept for interface compatibility: the original built an unused
    DataFrame from test['DATE'] — that dead code has been removed.
    """
    print('\n',algname)
    print('================================================================================================')
    print("Root Mean Squared Error: %.2f" % np.sqrt(mean_squared_error(actual, predicted)))
    # Explained variance score: 1 is perfect prediction
    print('R2 score: %.2f' % r2_score(actual, predicted))
    print(' MAPE=%.3f'% CalMape(actual,predicted))
    ROW = {'Algorithm':algname[:len(algname)-5],
           'Train_Test':algname[len(algname)-5:],
           'RMSE':np.sqrt(mean_squared_error(actual, predicted)),
           'R2_Score':r2_score(actual, predicted),
           'MAPE':CalMape(actual,predicted)}
    # FIX: pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    # RES_LOG is the module-level log; callers reassign it with the return value.
    return pd.concat([RES_LOG, pd.DataFrame([ROW])], ignore_index=True)
# Definition of function __*"PlotFeatureScores(scores,names)"*__
# - function plots the relevanz of features in sorted order
def PlotFeatureScores(scores,names):
    """Horizontal bar chart of the 20 highest feature scores, largest on top."""
    # Keep the 20 best-scoring features, then re-sort ascending so that
    # barh (which draws bottom-up) places the largest bar at the top.
    top20 = sorted(zip(names, scores), key=lambda z: z[1], reverse=True)[0:20]
    ordered = sorted(top20, key=lambda z: z[1], reverse=False)
    labels = [name for name, _ in ordered]
    values = [score for _, score in ordered]
    bar_pos = np.arange(len(labels))
    plt.figure(figsize=(20,12))
    plt.barh(bar_pos, values, color='b', align='center', alpha=0.5)
    plt.yticks(bar_pos, labels)
    plt.xlabel('Scores')
    plt.title('Variable Importance')
# ### Combination 1: Lasso Regression
# Using Grid Search Cross-Validation to get optimal parameters automatically
# +
from sklearn.linear_model import Lasso
# Combination 1: plain Lasso inside a pipeline, alpha chosen by grid-search CV.
LASSO = Lasso(random_state=0)
PL = Pipeline([('lasso', LASSO)])
PL = GridSearchCV(PL, {'lasso__alpha': [0.1,0.5,1,10]}, cv=CV,verbose=False)
PL.fit(X_train_d, y_train) # set the best parameters
Y_TRAIN_PRED = PL.predict(X_train_d)
Y_TEST_PRED = PL.predict( X_test_d)
print(PL.best_params_)
print(PL.best_score_)
# Log metrics for both splits (EvalResult returns the updated RES_LOG).
RES_LOG = EvalResult('Lasso train',WT_TRAIN,y_train,Y_TRAIN_PRED)
RES_LOG = EvalResult('Lasso test',WT_TEST,y_test,Y_TEST_PRED)
#Select this combination's result as part of final result
RES_TEST_PRED['Lasso'] = Y_TEST_PRED
RES_TRAIN_PRED['Lasso'] = Y_TRAIN_PRED
# +
# Plot Feature Importance for LASSO
# Refit a standalone Lasso to read coefficient magnitudes (normalized to max 1).
LASSO = Lasso(random_state=0,alpha=0.5)
LASSO.fit(X_train_d,y_train)
SCORES = np.abs(LASSO.coef_)
SCORES4 = SCORES/np.max(SCORES)
PlotFeatureScores(SCORES4,X_train_d.columns)
# -
# ### Combination 2: Random Forest Regressor
# Using Feature Selection to reduce dimension and Grid Search Cross-Validation to get optimal parameters automatically
# +
# Combination 2: ANOVA feature selection + RandomForestRegressor, with the
# feature percentile and tree depth chosen by grid-search CV.
from sklearn.ensemble import RandomForestRegressor
RF = RandomForestRegressor(max_depth=2, random_state=0,n_estimators=10)
PL = Pipeline([ ('anova', ANOVA), ('rf', RF)])
# Select the optimal percentage of features with grid search
PL = GridSearchCV(PL, {'anova__percentile': [30,40,50,60,70,80,90,100],'rf__max_depth':[4,5,6]}, cv=CV,verbose=False)
PL.fit(X_train_d, y_train) # set the best parameters
print(PL.best_params_)
print(PL.best_score_)
Y_TRAIN_PRED = PL.predict(X_train_d)
Y_TEST_PRED = PL.predict(X_test_d)
RES_LOG = EvalResult('RandomForestRegressor train',WT_TRAIN,y_train,Y_TRAIN_PRED)
RES_LOG = EvalResult('RandomForestRegressor test',WT_TEST,y_test,Y_TEST_PRED)
#Select this combination's result as part of final result
RES_TRAIN_PRED['RandomForest'] = Y_TRAIN_PRED
RES_TEST_PRED['RandomForest'] = Y_TEST_PRED
# -
# Plot feature importance for Random Forest Regressor
# +
# Refit a standalone forest to read feature_importances_ (normalized to max 1).
RF = RandomForestRegressor(max_depth=6, random_state=0,n_estimators=10)
RF.fit(X_train_d, y_train)
SCORES = RF.feature_importances_
SCORES3 = SCORES/np.max(SCORES)
PlotFeatureScores(SCORES3,X_train_d.columns)
# -
# ### Combination 3: Feature Selection + Gradient Boosting Regressor
# Using Feature Select to reduce dimension and Grid Search Cross-Validation to get optimal parameters automatically
#
# +
# Combination 3: ANOVA feature selection + GradientBoostingRegressor.
from sklearn.ensemble import GradientBoostingRegressor
PARAMS = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'ls'}
# NOTE(review): loss='ls' was renamed 'squared_error' in scikit-learn 1.0 —
# verify against the installed sklearn version.
GBR = GradientBoostingRegressor(**PARAMS)
PL = Pipeline([ ('anova', ANOVA), ('gbr', GBR)])
PL = GridSearchCV(PL, {'anova__percentile': [60,70,80],'gbr__max_depth':[2,3,4],'gbr__n_estimators':[300,400]}, cv=CV,verbose=False)
PL.fit(X_train_d, y_train) # set the best parameters
print(PL.best_params_)
print(PL.best_score_)
Y_TEST_PRED = PL.predict(X_test_d)
Y_TRAIN_PRED = PL.predict(X_train_d)
RES_LOG = EvalResult('GradientBoostingRegressor train',WT_TRAIN,y_train,Y_TRAIN_PRED)
RES_LOG = EvalResult('GradientBoostingRegressor test',WT_TEST,y_test,Y_TEST_PRED)
RES_TRAIN_PRED['GradientBoostingRegressor'] = Y_TRAIN_PRED
RES_TEST_PRED['GradientBoostingRegressor'] = Y_TEST_PRED
# -
# ## Step 4: Visualization of prediction result
#
#predicted result of test dataset
RES_TEST_PRED
#result evaluation for all 3 combinations on train and test
RES_LOG
#result evaluation for all 3 combinations on train, sorted by RMSE
RES_LOG[RES_LOG['Train_Test']=='train'].sort_values(by=['RMSE'])
#result evaluation for all 3 combinations on test, sorted by RMSE
# (the leading space in ' test' matches how EvalResult slices the 5-char tag)
RES_LOG[RES_LOG['Train_Test']==' test'].sort_values(by=['RMSE'])
#
# ### Draw predicted results on test data
# +
#Draw prediction result of test dataset for all combinations
def DrawResults(test,actual,dfRes,colors):
    """Plot actual values against each model's predictions over time.

    test   -- DataFrame providing the 'DATE' column used as the x-axis
    actual -- Series of observed target values
    dfRes  -- DataFrame with one prediction column per model
    colors -- matplotlib color codes, one per column of dfRes
    """
    # Assemble one frame indexed by date: Actual + one column per model.
    DF = pd.DataFrame({'Actual':actual,'Date':test['DATE']})
    for i in range(len(dfRes.columns)):
        DF[dfRes.columns[i]]=dfRes[dfRes.columns[i]]
    DF.index = DF['Date']
    DF.drop('Date',axis=1,inplace=True)
    print(DF)
    # Plot outputs
    plt.figure(figsize=(8,8))
    ax1 = plt.subplot(111)
    ax1.plot(DF['Actual'], 'blue')
    for i in range(len(dfRes.columns)):
        ax1.plot(DF[dfRes.columns[i]],colors[i])
    ax1.legend()
    ax1.set_xlabel('Date', fontsize = 16)
    ax1.set_ylabel('Batch Minute', fontsize =16)
    plt.xticks(rotation='vertical')
    plt.show()
    plt.clf()
    plt.cla()
    plt.close()
    return
# Compare all three models against the unclipped test target.
DrawResults(WT_TEST,ORIGINAL_Y_TEST,RES_TEST_PRED,['purple','g','y'])
# -
# ## Step 5: Ensemble predicted results
#
# Apply 3 algorithms to predict the result, considering the linear and nonlinear characteristics of MBJ elapsed time we observed in Data Exploration.
#
# 1. GradientBoosting
# 2. LASSO
# 3. Random Forest
#
# Get the __*mean()*__ of them as result output, use RMSE of training data as __*stderr*__ to calculate upper bound and lower bound
#
#
# +
#Draw ensembled predicted result with upper bound and lower bound
def EvalResultRange(algname,test,actual,predicted,std,times=3):
    """Print metrics and plot the ensembled prediction with +/- times*std bounds.

    Top panel: actual vs. predicted over time with upper/lower bound curves.
    Bottom panel: predicted-vs-actual scatter against the y = x diagonal.
    """
    print(algname)
    print('================================================================================================\n')
    print("Root Mean Squared Error: %.2f" % np.sqrt(mean_squared_error(actual, predicted)))
    # Explained variance score: 1 is perfect prediction
    print('Variance score: %.2f' % r2_score(actual, predicted))
    DF = pd.DataFrame({'Actual':actual,'Predicted':predicted,'Date':test['DATE'],
                       'Upper_Bound':predicted+times*std,'Lower_Bound':predicted-times*std})
    DF.index = DF['Date']
    DF.drop('Date',axis=1,inplace=True)
    # Elapsed time cannot be negative, so floor the lower bound at 0.
    DF['Lower_Bound'][DF['Lower_Bound']<0]=0
    print(DF)
    # Plot outputs
    plt.figure(figsize=(8,18))
    ax1 = plt.subplot(211)
    ax1.plot(DF['Actual'], 'b')
    ax1.plot(DF['Predicted'],'r')
    ax1.plot(DF['Upper_Bound'],'r--')
    ax1.plot(DF['Lower_Bound'],'r--')
    # NOTE(review): axis limits 100-280 are hard-coded for the MBJ data —
    # adjust if this notebook is reused for another job or metric.
    ax1.set_ylim([100,280])
    ax1.legend()
    ax1.set_xlabel('Date', fontsize = 16)
    ax1.set_ylabel('Batch Minute', fontsize =16)
    plt.xticks(rotation='vertical')
    ax2 = plt.subplot(212)
    ax2.set_xlim([0,280])
    ax2.set_ylim([0,280])
    ax2.scatter(actual, predicted, color='black')
    L = max(actual)
    X = np.arange(0.0, L, 1)
    # Perfect-prediction reference line y = x.
    ax2.plot(X,X,'r')
    ax2.set_xlabel('Actual', fontsize = 16)
    ax2.set_ylabel('Predicted', fontsize = 16)
    plt.show()
    plt.clf()
    plt.cla()
    plt.close()
    print(algname,' MAPE=%.3f'% CalMape(actual,predicted))
    return
from sklearn.metrics import mean_squared_error
from math import sqrt
# Ensemble = per-row mean of the three model predictions; the train-set RMSE
# of the ensembled mean is reused as the std for the +/-3*std band on test.
RES_FINAL_TRAIN = pd.DataFrame()
RES_FINAL_TRAIN['mean'] = RES_TRAIN_PRED.mean(axis=1)
RMSE = sqrt(mean_squared_error(y_train, RES_FINAL_TRAIN['mean']))
RES_FINAL_TEST = pd.DataFrame()
RES_FINAL_TEST['mean'] = RES_TEST_PRED.mean(axis=1)
RES_FINAL_TEST['std'] = RMSE
EvalResultRange('Ensambled result',WT_TEST,ORIGINAL_Y_TEST,RES_FINAL_TEST['mean'],RES_FINAL_TEST['std'],3)
| Notebook/3_BatchJob_MBJ_Prediction.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + tags=["remove-cell"]
library(repr) ; options(repr.plot.width = 5, repr.plot.height = 6) # Change plot sizes (in cm)
# -
# # Model Fitting using Maximum Likelihood
# ## Introduction
#
#
# In this Chapter we will work through various examples of model fitting to biological data using Maximum Likelihood. It is recommended that you see the [lecture](https://github.com/mhasoba/TheMulQuaBio/tree/master/content/lectures/ModelFitting) on model fitting in Ecology and Evolution.
#
# [Previously](./20-ModelFitting-NLLS.ipynb), we learned how to fit a mathematical model/equation to data by using the Least Squares method (linear or nonlinear). That is, we choose the parameters of model being fitted (e.g., straight line) to minimize the sum of the squares of the residuals/errors around the fitted model.
#
# An alternative to minimizing the sum of squared errors is to find parameters to the function such that the * likelihood * of the parameters, given the data and the model, is maximized. Please see the [lectures](https://github.com/vectorbite/VBiTraining2/tree/master/lectures) for the theoretical background to the following examples.
#
# We will first implement the (negative log) likelihood for [simple linear regression (SLR)](./14-regress.ipynb) in R. Recall that SLR assumes every observation in the dataset was generated by the model:
#
# $$
# Y_i = \beta_0 + \beta_1 X_i + \varepsilon_i, \;\;\; \varepsilon_i \stackrel{\mathrm{iid}}{\sim} \mathrm{N}(0, \sigma^2)
# $$
#
# That is, this is a model for the * conditional distribution * of $Y$ given $X$. The pdf for the normal distribution is given by
#
# $$
# f(x) = \frac{1}{\sqrt{2\sigma^2 \pi}} \exp\left(-\frac{(x-\mu)^2}{2\sigma^2} \right)
# $$
#
# In the SLR model, the conditional distribution has * this * distribution.
#
# That is, for any single observation, $y_i$
# $$
# f(y_i|\beta_0, \beta_1, x_i) = \frac{1}{\sqrt{2\sigma^2 \pi}} \exp\left(-\frac{(y_i-(\beta_0+\beta_1 x_i))^2}{2\sigma^2} \right)
# $$
#
# Interpreting this function as a function of the parameters $\theta=\{ \beta_0, \beta_1, \sigma \}$, then it gives us the likelihood of the $i^{\mathrm{th}}$ data point.
#
# As we did for the simple binomial distribution (see [lecture](https://github.com/vectorbite/VBiTraining2/tree/master/lectures)), we can use this to estimate the parameters of the model.
# We will use R. For starters, clear all variables and graphic devices and load necessary packages:
rm(list = ls())
graphics.off()
# ## Implementing the Likelihood in R
#
# First, we need to build an R function that returns the (negative log) likelihood for simple linear regression (it is negative log because the log of likelihood is itself negative):
# Negative log-likelihood for simple linear regression.
#
# par : numeric vector; par[1] = intercept (b0), par[2] = slope (b1),
#       par[3] = sigma (used only when sigma is not supplied via `...`,
#       or is supplied as NA so that optim() estimates it).
# dat : data frame with columns X and Y.
# ... : optional extras; `sigma` may be passed here so optim() treats it
#       as fixed rather than estimated.
nll.slr <- function(par, dat, ...){
    args <- list(...)
    b0 <- par[1]
    b1 <- par[2]
    X <- dat$X
    Y <- dat$Y
    # Use the fixed sigma only when one was actually supplied and is not NA.
    # The !is.null() guard fixes calls that omit sigma entirely:
    # is.na(NULL) returns logical(0), which makes a bare if() error out.
    if(!is.null(args$sigma) && !is.na(args$sigma)){
        sigma <- args$sigma
    } else {
        sigma <- par[3]
    }
    mu <- b0 + b1 * X
    # Sum of logged normal densities is the log-likelihood; negate so the
    # value can be minimised by optim().
    return(-sum(dnorm(Y, mean=mu, sd=sigma, log=TRUE)))
}
# Note that we do something a bit different here (the "`...`" bit). We do it this way because we want to be able to use R's `optim()` function later.
#
# The `dnorm()` function calculates the logged (the `log=TRUE` argument) probability of observing Y given mu, sigma and that X.
#
# The negative sign on `sum()` is because the `optim()` function in R will minimize the negative log-likelihood, which is a sum: Recall that The log-likelihood of the parameters $\theta$ being true given data x equals to the sum of the logged probability densities of observing the data x given parameters $\theta$. We want to maximize this (log-) likelihood using `optim()`.
# Let's generate some simulated data, assuming that: $\beta_0=$ `b0`, $\beta_1=$ `b1`, and $\sigma=$ `sigma`. For this, we will generate random deviations to simulate sampling or measurement error around an otherwise perfect line of data values:
# Simulation settings: fix the RNG seed for reproducibility, then draw n
# points from the true line Y = b0 + b1*X plus N(0, sigma^2) noise.
set.seed(123)
n <- 30
b0 <- 10
b1 <- 3
sigma <- 2
X <- rnorm(n, mean=3, sd=7)
Y <- b0 + b1 * X + rnorm(n, mean=0, sd=sigma)
dat <- data.frame(X=X, Y=Y) # convert to a data frame
# In the first line, we `set.seed()` to ensure that we can reproduce the results. The seed number you choose is the starting point used in the generation of a sequence of random numbers. Now plot the "data":
plot(X, Y)
# ### Likelihood profile
#
# For now, let's assume that we know what $\beta_1$ is. Let's build a likelihood profile for the simulated data:
# Profile the negative log-likelihood over a grid of b0 values while holding
# b1 (and sigma) fixed at their true values.
N <- 50
b0s <- seq(5, 15, length=N)
# Pre-allocate with length N (was hard-coded 50) so the vector always
# matches the grid size if N is changed.
mynll <- rep(NA, length=N)
for(i in 1:N){
    mynll[i] <- nll.slr(par=c(b0s[i], b1), dat=dat, sigma=sigma)
}
# That is, we calculate the negative log-likelihood for fixed b1, across a range (5 - 15) of b0.
# Now plot the profile:
plot(b0s, mynll, type="l")
abline(v=b0, col=2)
abline(v=b0s[which.min(mynll)], col=3)
# The true value for b0 (10) is the red line, while the value that minimizes the negative log-likelihood (i.e., maximizes the likelihood) is the green line. These are not the same because maximum likelihood is providing an *estimate* of the true value given the measurement errors (that we ourselves generated in this synthetic dataset).
# (ModelFitting-MLE-LikelihoodSurface)=
# ### Likelihood surface
#
# If we wanted to estimate both $\beta_0$ and $\beta_1$ (two parameters), we need to deal with a two-dimensional maximum likelihood surface. The simplest approach is to do a *grid search* to find this likelihood surface.
# +
# Grid search over (b0, b1): evaluate the NLL at every grid combination,
# with sigma held fixed at its true value.
N0 <- 100
N1 <- 101
b0s <- seq(7,12, length=N0)
b1s <- seq(1,5, length=N1)
mynll <- matrix(NA, nrow=N0, ncol=N1)
for(i in 1:N0){
for(j in 1:N1) mynll[i,j] <- nll.slr(par=c(b0s[i],b1s[j]), dat=dat, sigma=sigma)
}
# Grid cell with the smallest NLL = the maximum-likelihood estimate.
ww <- which(mynll==min(mynll), arr.ind=TRUE)
b0.est <- b0s[ww[1]]
b1.est <- b1s[ww[2]]
# Compare true parameters (top row) with the grid estimates (bottom row).
rbind(c(b0, b1), c(b0.est, b1.est))
# Contour of the NLL surface: circle (pch=21) marks the true (b0, b1);
# star (pch=8) marks the grid-search estimate.
filled.contour(x = b0s, y = b1s, z= mynll, col=heat.colors(21),
plot.axes = {axis(1); axis(2); points(b0,b1, pch=21);
points(b0.est, b1.est, pch=8, cex=1.5); xlab="b0"; ylab="b1"})
# -
# There is a lot going on here. Make sure you ask one of us if some of the code does not make sense!
#
# Again, note that the true parameter combination (circle) and the one that minimizes the negative log-likelihood (asterisk) are different.
# ### Conditional Likelihood
# We can also look at the conditional surfaces (i.e., we look at the slice around whatever the best estimate is for the other parameter):
par(mfrow=c(1,2), bty="n")
plot(b0s, mynll[,ww[2]], type="l", xlab="b0", ylab="NLL")
plot(b1s, mynll[ww[1],], type="l", xlab="b1", ylab="NLL")
# ## Alternatives to Grid Search
#
# There are many alternative methods to grid searches. Since we are seeking to minimize an arbitrary function (the negative log likelihood) we typically use a descent method to perform general optimization.
#
# There are lots of options implemented in the `optim`function in R. We won't go into the details of these methods, due to time constraints. However, typically one would most commonly use:
#
# * Brent's method: for 1-D search within a bounding box, only
# * L-BFGS-B (limited-memory Broyden–Fletcher–Goldfarb–Shanno algorithm with bounding box constraints): a quasi-Newton method, used for higher dimensions, when you want to be able to put simple limits on your search area.
#
#
# ## Maximum Likelihood using `optim()`
#
# We can now do the fitting. This involves optimization (to find the appropriate parameter values that achieve the maximum of the likelihood surface above). For this, we will use R's versatile `optim()` function.
#
# The first argument for `optim()` is the function that you want to minimize, and the second is a vector of starting values for your parameters (as always, do a`?optim`). After the main arguments, you can add what you need to evaluate your function (e.g. `sigma` ). The addtional argument sigma can be "fed" to `nll.slr` because we use the `...` convention when defining it.
# +
fit <- optim(nll.slr, par=c(2, 1), method="L-BFGS-B", ## this is a n-D method
lower=-Inf, upper=Inf, dat=dat, sigma=sigma)
fit
# -
# Easy as pie (once you have the recipe)! We can also fit sigma as the same time if we want:
fit <- optim(nll.slr, par=c(2, 1, 5), method="L-BFGS-B", ## this is a n-D method
lower=c(-Inf, -Inf, 0.1), upper=Inf, dat=dat, sigma=NA)
fit$par
# The starting values (b0 = 2, b1 = 1, sigma = 5) need to be assigned as we would do for NLLS. Also note that much like NLLS, we have bounded the parameters. The exact starting values are not too important in this case (try changing them see what happens).
# Now visualize the fit:
plot(X, Y)
abline(a=fit$par[1], b=fit$par[2], col=2, lwd=2)
# ### Confidence intervals
#
# The joint distribution of the MLEs is asymptotically Normal. Given this, if you are minimizing the negative log likelihood (NLL) then the covariance matrix of the estimates is (asymptotically) the inverse of the Hessian matrix. The Hessian matrix evaluates the second derivatives of the NLL (numerically here), which gives us information about the curvature of the likelihood. Thus we can use the Hessian to estimate confidence intervals:
# +
# Refit with hessian=TRUE so optim() also returns the numerically evaluated
# Hessian of the NLL at the optimum.
fit <- optim(nll.slr, par=c(2, 1), method="L-BFGS-B", hessian=TRUE, lower=-Inf, upper=Inf, dat=dat, sigma=sigma)
# The inverse Hessian of the NLL approximates the covariance matrix of the MLEs.
fisher_info <- solve(fit$hessian)
# Standard errors are the square roots of the diagonal (the variances).
est_sigma <- sqrt(diag(fisher_info))
# Asymptotic 95% Wald interval: estimate +/- 1.96 * SE.
upper <- fit$par+1.96 * est_sigma
lower <- fit$par-1.96 * est_sigma
interval <- data.frame(value=fit$par, upper=upper, lower=lower)
interval
# -
# ### Comparison to fitting with least squares
#
# We can, of course, simply fit the model with least squares using the `lm()` function:
# +
lmfit <- lm(Y~X)
summary(lmfit)$coeff
# -
# The estimates we get using `optim()` are almost identical to the estimates that we obtain here, and the standard errors on the intercept and slope are very similar to those we calculated from the Hessian (est_sigma= `r est_sigma`).
# ## Model Selection
#
# You can use [AIC or BIC as you did in NLLS](#Comparing-models) using the likelihood you have calculated.
#
# You can also use the Likelihood Ratio Test (LRT).
#
# ## Exercises <a id='MLE_Exercises'></a>
#
# Try MLE fitting for the allometric trait data example [above](#Allometric-scaling-of-traits). You will use the same data + functions that you used to practice fitting curves using non-linear least squares methods. You have two options here. The easier one is to convert the power law model to a straight line model by taking a log (explained the Allometry [Exercises](#Allom_Exercises). Specifically,
#
# (a) Using the [`nll.slr`](#Implementing-the-Likelihood-in-R) function as an example, write a function that calculates the negative log likelihood as a function of the parameters describing your trait and any additional parameters you need for an appropriate noise distribution (e.g., $\sigma$ if you have normally distributed errors).
#
# (b) For at least one of your parameters plot a likelihood profile given your data, with the other parameters fixed.
#
# (c) Use the `optim` function to find the MLE of the same parameter and indicate this on your likelihood profile.
#
# (d) Obtain a confidence interval for your estimate.
#
# A more challenging option is to fit the allometry data directly to the power law equation. You would need to assume a log-normal distribution for the errors instead of normal, in this case.
# Readings and Resources <a id='Readings'></a>
# --------------------------------------------
# * <NAME>. Ecological models and data in R. (Princeton University Press, 2008).
| content/notebooks/4-ModelFitting-MLE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NOTE(review): these cells are preserved in their original interactive
# order, which is NOT a valid top-to-bottom execution order: `pd` is used
# before it is imported and `ft` is referenced before it is assigned. The
# notebook only works when cells are run repeatedly / out of order.
df = pd.read_table('sp_abs15_tab_mmsplice_vcf_dbscSNV_chr.results')
import pandas as pd
import numpy as np
# Peek at non-NA rows of the transformed column.
ft.dropna().head()
# Take absolute values, then squash through tanh into [0, 1).
ft = ft.applymap(abslute)
ft = df[['mmsplice_delta_logit_psi']].applymap(tanh)
def tanh(x):
    """Return tanh(|x|) for numeric-like x; pass non-numeric values through.

    Placeholder entries such as the string '-' cannot be coerced to float,
    so they are returned unchanged instead of raising.
    """
    try:
        x = abs(float(x))
    except (TypeError, ValueError):
        # Non-numeric entry (e.g. '-' placeholder): leave untouched.
        # Narrowed from a bare `except`, which also hid real errors
        # like KeyboardInterrupt.
        return x
    return np.tanh(x)
def abslute(x):
    """Return abs(x) when x supports it; otherwise return x unchanged.

    (Name kept as-is — 'abslute' [sic] — because callers reference it.)
    """
    try:
        return abs(x)
    except TypeError:
        # Non-numeric entry (e.g. a '-' placeholder string). Narrowed from
        # a bare `except`, which swallowed every exception type.
        return x
# Attach the normalised score back onto the main table.
df['mmsplice_delta_logit_psi_norm'] = ft
df.dropna().head()
# NOTE(review): `.dropna` below is missing "()"; as written the expression
# displays the bound method object rather than the filtered frame — a call
# was probably intended. Left unchanged to preserve notebook output.
df[df.mmsplice_delta_logit_psi_norm != '-'].dropna
df.ada_score.describe()
df.to_csv('sp_abs15_tab_mmsplice_vcf_dbscSNV_chr.norm.txt',sep='\t', index=False)
| notebooks/mmsplice_tanh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decision Trees
#
# In machine learning gli alberi decisionali sono strutture basati su due concetti chiavi:
#
# - nodi: ovvero oggetti in grado di contenere informazioni
# - link: collegamenti tra nodi che permettono di esprimere una possibile relazione
#
# nel caso dei modelli decisional trees essi sono costituiti da questi tipi di elementi:
#
# - I nodi che applicano una condizione sulle feature
# - Branch o Edges che sono il risultato della condizione nei nodi e collegano i nodi tra di essi
# - Nodi leaf che sono dei nodi terminali in cui l'albero prevede il risultato finale
#
# Per comprendere meglio possiamo rappresentare un albero decisionale usando un modello grafico a nodi.
#
# <div class="alert alert-block alert-warning">
# La libreria che userò è <a href="https://graphviz.org/">graphivz</a> per installarlo digitare nel anaconda prompt: <b>conda install python-graphviz, potrebbe essere necessario anche conda install graphviz</b>.
# </div>
# +
from graphviz import Digraph
# Create a directed graph mirroring a depth-2 decision tree.
gra = Digraph()
# Create the nodes: boxes are internal condition nodes, plain ovals are leaves.
gra.node('first node condition', shape='box')
gra.node('left second node condition', shape='box')
gra.node('right second node condition', shape='box')
gra.node('output 1')
gra.node('output 2')
gra.node('output 3')
gra.node('output 4')
# Create the links: each condition node branches on True/False.
gra.edge('first node condition', 'left second node condition', label='True')
gra.edge('first node condition', 'right second node condition', label='False')
gra.edge('left second node condition', 'output 1', label='True')
gra.edge('left second node condition', 'output 2', label='False')
gra.edge('right second node condition', 'output 3', label='True')
gra.edge('right second node condition', 'output 4', label='False')
# Render the graph to ../img/tree_example (cleanup removes the DOT source).
gra.render(filename="tree_example", directory="../img", cleanup= True)
# Show graph: evaluating the object displays it inline in the notebook.
gra
# -
#
# Come possiamo vedere il modello ad albero applica **delle condizioni sulle feature che vengono scelte in base alla metrica per ottenere con il minor numero di condizioni l'output desiderato riducendone quanto più possibile l'inacuratezza**.<br>
# Un particolare proprietà da sottolineare è il fatto che ***l'albero può essere utilizzato per fare classificazione o regressione*** per distiguenrli si definisce il:
#
# - Decision Tree : albero decisionale in grado di classificare
# - Decision Regression Tree: albero decisionale in grado di fare una regressione
#
# Dei parametri che di solito si usano:
#
# - max depth : ovvero la profondità dell'albero per nodi impiegati
# - criterion : il criterio usato per scegliere la feature su cui applicare la condizione con il rispettivo threshold
#
# Sono presenti molti altri parametri oltre ad essi, potete consutarli sia per __[decision tree regression](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html#sklearn.tree.DecisionTreeRegressor)__ che per __[decision tree classifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier)__.
#
# Prima di vedere le applicazioni di questi metodi, vediamo alcuni vantaggi e svantaggi.<br>
#
# **Vantaggi**:
#
# - facili da comprendere e interpretare nelle loro decisioni
# - computazionalmente non troppo complesso
# - utile anche in casi di più output
#
# **Svantaggi:**
#
# - Prono all'overfitting
# - sono instabili poiché se i dati hanno piccole variazioni dal modello originale l'albero sarà diverso
# - se ci sono classi dominanti l'albero può esserne fortemente influenzato
#
# Ci sono molti altri punti che possono essere consultati da __[scikit](https://scikit-learn.org/stable/modules/tree.html)__ tra cui anche i possibili algoritmi di implementazione.
# ## Applicazione nei dataset già visti
#
# Ora che abbiamo visto come sono fatti applichiamoli ai problemi di regressione e classificazione.
# <div class="alert alert-block alert-warning">
# Purtroppo per esportare il pdf i grafici dei tree verranno messi con latex, per farveli venire dovete decommentare le righe dopo #plot the tree.
# </div>
# +
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree, export_graphviz, DecisionTreeClassifier, DecisionTreeRegressor
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2; this cell only runs on older scikit-learn versions.
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2
# (ConfusionMatrixDisplay.from_estimator is the replacement).
from sklearn.metrics import plot_confusion_matrix
#regression data
boston = load_boston()
X_boston, y_boston = boston.data, boston.target
#classification data
diabetes = pd.read_csv('../data/diabetes2.csv')
X_diabetes, y_diabetes = diabetes.drop('Outcome', axis = 1).values, diabetes.Outcome.values
# All columns except the last ('Outcome') are features.
feature_diabetes = diabetes.columns.values[:-1]
target_names = ["Not Diabetes", "Diabetes"]
#divide the data in training and testing (80/20, fixed seed)
X_boston_train, X_boston_test, y_boston_train, y_boston_test = train_test_split(
    X_boston, y_boston, random_state=0, test_size = 0.2)
X_diabetes_train, X_diabetes_test, y_diabetes_train, y_diabetes_test = train_test_split(
    X_diabetes, y_diabetes, random_state=0, test_size = 0.2)
#create the decision tree regressor (depth capped at 3 to limit overfitting)
regr = DecisionTreeRegressor(max_depth=3)
regr.fit(X_boston_train, y_boston_train)
#plot the tree, decomment later this to block to show it in cell
plt.figure(figsize=(16,10))
plot_tree(regr, feature_names = boston.feature_names)
plt.title("Decision Tree Regression on boston house prices")
#export the tree
#plt.savefig("../img/boston_tree.pdf")
plt.show()
print("-"*80)
print(f'R^2 score on training : {regr.score(X_boston_train, y_boston_train)}')
print(f'R^2 score on testing : {regr.score(X_boston_test, y_boston_test)}')
print("-"*80)
# Bar plot of per-feature importances learned by the regressor.
plt.figure(figsize=(16,10))
plt.xticks(range(X_boston.shape[1]), boston.feature_names)
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.title('Feature importance')
plt.bar(range(X_boston.shape[1]), regr.feature_importances_)
plt.show()
#create the decision tree classifier
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X_diabetes_train, y_diabetes_train)
#plot the tree
plt.figure(figsize=(16,10))
plot_tree(clf, feature_names = feature_diabetes)
plt.title("Decision Tree Classifier on diabetes")
#plt.savefig("../img/daibetes_tree.pdf")
plt.show()
plot_confusion_matrix(clf, X_diabetes_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
# Bar plot of per-feature importances learned by the classifier.
plt.figure(figsize=(16,10))
plt.xticks(range(X_diabetes.shape[1]), feature_diabetes)
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.title('Feature importance')
plt.bar(range(X_diabetes.shape[1]), clf.feature_importances_)
plt.show()
# -
#
# Per sapere tutto di cosa interpretare consultate questa __[documentazione](https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html#sphx-glr-auto-examples-tree-plot-unveil-tree-structure-py)__.
#
# ## Metodi ensemble
#
# I problemi prima citati dei decision trees possono essere in qualche modo risolti utilizzando delle tecniche che sono usate anche per altri tipi di modelli sono i __[metodi ensemble](https://scikit-learn.org/stable/modules/ensemble.html)__, in cui la d'idea può essere riassunta come segue: **se un singolo modello fallisce, è possibile creare più modelli anche non dello stesso tipo e poi unire le loro previsioni** il punto è ***come combinare i modelli e su quale principio si basano?***<br>
# Qualora consultaste il link appena mostrato potreste capire che i metodi ensemble si dividono in due tipi:
#
# - averaging method: si creano diversi modelli indipendenti e si applica un tipo di media sulle previsioni di essi (questo in genere riduce la varianza)
# - boosting method: partendo da un modello di base se ne creano di altri in modo da ridurne l'errore complessivo
#
# Vediamolo per alcuni modelli di decision tree.
#
# ### Averaging methods on decision tree
#
# #### Random Forest
#
# Il termine __[Random Forest](https://en.wikipedia.org/wiki/Random_forest)__ si riferisce a un insieme di alberi decisionali in cui le regole di splitting sono ottenute facendo in prima battuta un __[bootstrap sample](https://www.analyticsvidhya.com/blog/2020/02/what-is-bootstrap-sampling-in-statistics-and-machine-learning/)__, ovvero noi prendiamo una piccola quantità del dataset randomicamente applichiamo il modello e ritorniamo il sample all'interno del dataset(questo tecnica di bootstrap con replacement è detta __[bagging](https://en.wikipedia.org/wiki/Bootstrap_aggregating)__) da cui poi ne estraiamo un altro fino ad avere un numero di sample sufficiente e su ognuno di esse applichiamo il modello, ***per il nome random forest quello che succede è che da ogni sample noi estraiamo una feature randomica e poi otteniamo un threshold in grado di massimizzare la capacità di classificazione***, quindi **l'effetto randomico è apllicato sulla selezione delle feature**.<br>
# Nella __[implementazione scikit del classificatore](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier)__ e la sua __[documentazione](https://scikit-learn.org/stable/modules/ensemble.html#forests-of-randomized-trees)__ potete trovare tutti i parametri e i dettagli, quello che a noi interessa è ***n_estimator che è il numero di alberi decisionali, bootstrap che dice di applicare il bagging e il __[oob_score](https://en.wikipedia.org/wiki/Out-of-bag_error)__ (out-of-bag score) in cui una parte dei sample viene usata per valutare la capacità di generalizzazione del modello***.
#
# #### Extra Trees
#
# Il termine __[Extra Trees](https://scikit-learn.org/stable/modules/ensemble.html#forest)__ si riferisce sempre ad un altro metodo di ensemble averaging in cui la randomicità dell'albero è più accentuata rispetto alle Random forest, come spiegato __[qui](https://quantdare.com/what-is-the-difference-between-extra-trees-and-random-forest/)__, in sintesi quello che succede è che **gli extra trees non solo scelgono una feature randomica per ogni nodo, ma anche il threshold è scelto da una serie di valori randomica per il threshold tennedo conto solo del migliore a differenza delle Random Forest che scelgono il threshold migliore dal dataset**.
#
# Sono presenti altri algoritmi come sempre consiltate la __[guida](https://scikit-learn.org/stable/modules/ensemble.html#forest)__.
#
# ### Boosting methods on decision trees
#
# #### Adaboost
#
# Con __[Adaboost](https://scikit-learn.org/stable/modules/ensemble.html#adaboost)__ si intende un algoritmo chiamato Adaptive Boosting in cui sono inizializzato un numero definito di modelli come decision trees, ma potrebbero anche essere altri, in cui dei i modelli "deboli" ovvero modelli superiori solo leggermente al random guessing nella loro capacità di predire sono combinati. ***Ad ogni iterazione dell'algoritmo si associa un peso agli esempi per capire quali sono significativi e si usa un peso per la capacità di predizione del singolo e verrà aggiornato come segue, se i modelli sono incapaci di predire un particolare dato il peso associato al dato sarà aumentato o nel caso opposto abbassato, mentre i modelli in grado di predire correttamente avranno il loro peso nella predizione aumentato.***<br>
# Per una spiegazione completa rimando al __[link di <NAME>](http://www.andreaminini.com/ai/machine-learning/adaboost)__.
#
# #### Gradient Tree Boosting
#
# Detto anche gradient boosted decision tree(GBDT) si basa su **una metrica di loss function che deve essere minimizzata attraverso la creazione di nuovi modelli**.
# Al fine di essere precisi potete immaginarvi di definire un primo DecisionTree in grado di classificare con una certo errore ovvero, ipotizziamo che abbiamo un primo modello detto $F_0$ in grado di classificare con un errore $\epsilon_0$, l'obiettivo dell'algoritmo sarà di creare un nuovo albero $F_1$ in grado di combinare la predizione dell'albero precedemnte minimizzando la loss function ovvero $F_1 = F_0 + h_1(x)$ con $h_1 = argmin_{w} L_1$ in maniera tale da avere un errore $\epsilon_1 < \epsilon_0$ e ripetere fino ad ottenere l'errore desiderata, per info sull' implementazione matematica guardate __[qui](https://scikit-learn.org/stable/modules/ensemble.html#mathematical-formulation)__.
#
# <div class="alert alert-block alert-success">
# Di recente scikit ha aggiunto anche un altro tipo di ensemble boosting detto <a href="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html#sklearn.ensemble.HistGradientBoostingClassifier">histgradientboosting</a> basato su <a href = "https://papers.nips.cc/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html"> LightGBM </a> molto più veloce del classico Gradient boosting.
# </div>
# +
import time
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier
#wherever is required put that classes must be balanced to avoid biases on one class
clfs = [AdaBoostClassifier(), ExtraTreesClassifier(class_weight="balanced"),
        RandomForestClassifier(class_weight="balanced"), GradientBoostingClassifier()]
#classifiers fitting (wall-clock timing per model)
for clf in clfs:
    start = time.time()
    clf.fit(X_diabetes_train, y_diabetes_train)
    end = time.time()
    print(f"Time taken to train {str(clf).replace('()','')}: {end - start}s \t")
#confusion matrixes plots, one 2x2 panel per classifier
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16,10))
for clf, ax in zip(clfs, axes.flatten()):
    plot_confusion_matrix(clf,
                          X_diabetes_test,
                          y_diabetes_test,
                          ax=ax,
                          display_labels=target_names,
                          colorbar= False)
    ax.title.set_text(str(clf).replace('()',''))
plt.tight_layout()
plt.show()
#feature importance bar plots, one panel per classifier
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16,10))
#let's abbreviate the feature names so the tick labels fit
feature_diabetes[2] = "BP" #BloodPressure
feature_diabetes[3] = "ST" #SkinThickness
feature_diabetes[6] = "DBF" #DiabetesPedigreeFunction
for clf, ax in zip(clfs, axes.flatten()):
    ax.bar(range(X_diabetes.shape[1]), clf.feature_importances_)
    ax.set_xlabel('Feature')
    ax.set_ylabel('Importance')
    ax.title.set_text(str(clf).replace('()',''))
    ax.set_xticks(range(X_diabetes.shape[1]))
    ax.set_xticklabels(feature_diabetes)
plt.tight_layout()
plt.show()
#lets use a classification report (precision/recall/F1 per class)
for clf in clfs:
    print(f"Classification report of {str(clf).replace('()','')}\n")
    print(classification_report(y_diabetes_test, clf.predict(X_diabetes_test),
                                target_names=target_names))
# -
#
# <div class="alert alert-block alert-success">
# I metodi ensemble e i decision tree possiedono anche funzioni come <b> predict_proba()</b> in cui il modello fornisce anche la probabilità di appartenenza alle classi
# </div>
#
# ***
#
# COMPLIMENTI AVETE FINITO LA LEZIONE SUI DECISION TREE!
| 3.machine learning/5-Decision_Trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Redirect stderr to the Snakemake log file so tracebacks are captured.
# Fix: `sys` was used without being imported.
import sys
sys.stderr = open(snakemake.log[0], "w")
import pandas as pd
from pathlib import Path
import pysam
# Minimum abundance fraction below which targets are lumped into "other".
min_fraction = snakemake.params.get("min_fraction", 0.01)
quant = pd.read_csv(Path(snakemake.input.quant) / "abundance.tsv", sep="\t")
# calculate total number of reads (one FASTQ record per read)
with pysam.FastxFile(snakemake.input.fq1) as fq1:
    total_counts = sum(1 for _ in fq1)
# calculate fraction of reads assigned to each target
quant["fraction"] = quant["est_counts"] / total_counts
# clean up dataframe
quant = quant[["target_id", "fraction", "est_counts"]]
# summarize noise: sum all low-abundance targets into a single row
other = quant.loc[quant["fraction"] < min_fraction, ["fraction", "est_counts"]].sum()
other["target_id"] = "other"
other.name = "other"
# filter dataframe and add the aggregated noise row.
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with the Series transposed to a one-row frame is equivalent.
quant = pd.concat([quant.loc[quant["fraction"] >= min_fraction], other.to_frame().T])
# add a row accounting for reads not assigned to any target
unmapped = pd.DataFrame([{
    "target_id": "unmapped",
    "fraction": 1.0 - quant["fraction"].sum(),
    "est_counts": total_counts - quant["est_counts"].sum(),
}])
quant = pd.concat([quant, unmapped], ignore_index=True)
quant = quant.set_index("target_id", drop=True)
# store results
quant.to_csv(snakemake.output[0], sep="\t")
| workflow/notebooks/call-strains.py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: EnergyDemandForecast local
# language: python
# name: energydemandforecast_local
# ---
# # XGBoost (eXtreme Gradient Boosting)
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBRegressor
import pickle
import os
import scipy.stats as st
from itertools import combinations
from azure.storage.blob import BlockBlobService
# Use the Azure Machine Learning data collector to log various metrics
from azureml.logging import get_azureml_logger
logger = get_azureml_logger()
model_name = "xgboost"
# Shared directory provided by the Azure ML native-share mount.
aml_dir = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY']
ACCOUNT_NAME = "petcgexperimentstorage"
# The account key was redacted to "<KEY" and lost its closing quote, which
# made this line a syntax error. Restore a valid string literal and read
# the secret from the environment instead of committing it to the repo.
# SECURITY: never hard-code storage account keys in source control.
ACCOUNT_KEY = os.environ.get("STORAGE_ACCOUNT_KEY", "<KEY>")
CONTAINER_NAME = "energy-demand-demo"
# Client for the demo storage account (legacy azure-storage SDK).
block_blob_service = BlockBlobService(account_name=ACCOUNT_NAME, account_key=ACCOUNT_KEY)
train = None
list_of_blobs = block_blob_service.list_blobs(CONTAINER_NAME)
# Download only the FIRST Spark partition file ("part-00...") found, then
# stop: the demo trains on a single shard, not the full dataset.
for each_blob in list_of_blobs:
    if ("part-00" in each_blob.name):
        block_blob_service.get_blob_to_path(CONTAINER_NAME, blob_name=each_blob.name, file_path='./nyc_demand.csv')
        train = pd.read_csv('./nyc_demand.csv')
        break
# Use randomised search to find optimal hyperparameters. This model will take about 5 minutes to train. The quality of the solution may be improved by increasing the number of iterations in the randomized search, at the expense of increased training times.
X = train.drop(['demand', 'timeStamp'], axis=1)
regr = XGBRegressor()
# Time-series-aware CV: each fold trains on the past, validates on the future.
tscv = TimeSeriesSplit(n_splits=3)
# Sampling distributions for each XGBoost hyperparameter.
param_dist = {'n_estimators': st.randint(3, 100),
              'learning_rate': st.uniform(0.05, 0.4),
              'max_depth': range(3,40),
              'min_child_weight': st.expon(0, 40),
              'colsample_bytree': st.beta(10, 1),
              'gamma': st.uniform(0, 10),
              'subsample': st.beta(10, 1),
              'reg_alpha': st.expon(0, 40)
              }
regr_cv = RandomizedSearchCV(estimator=regr,
                             param_distributions=param_dist,
                             n_iter=50,
                             cv=tscv,
                             scoring='neg_mean_squared_error',
                             verbose=2,
                             n_jobs=-1)
# Wrap in a Pipeline so the fitted search can be pickled/deployed as one object.
regr_pipe = Pipeline([('regr_cv', regr_cv)])
regr_pipe.fit(X, y=train['demand'])
# Cross validation results
cv_results = pd.DataFrame(regr_pipe.named_steps['regr_cv'].cv_results_)
# Best (rank 1) configurations first.
cv_results.sort_values(by='rank_test_score', inplace=True)
cv_results.head()
# Inspect the pairwise distribution of cross validation scores. Darker shades of blue indicate superior performance while the red star represents the optimal solution found.
params = ['param_n_estimators', 'param_max_depth', 'param_min_child_weight', 'param_colsample_bytree', 'param_subsample']
gs = gridspec.GridSpec(4,3)
fig = plt.figure(figsize=(15, 20), tight_layout=True)
plt_best = cv_results.head(1)
# One scatter panel per pair of hyperparameters.
# NOTE(review): the loop variable rebinds `params` (the list above) to a
# 2-tuple on each iteration — it works because combinations() consumed the
# list first, but the shadowing is confusing.
for idx, params in enumerate(combinations(params, 2)):
    ax = fig.add_subplot(gs[idx])
    plt.scatter(cv_results[params[0]], cv_results[params[1]], c=-np.log(-cv_results['mean_test_score']), s=100, cmap="Blues")
    plt.scatter(plt_best[params[0]], plt_best[params[1]], s=500, marker="*", c="r")
    plt.xlabel(params[0])
    plt.ylabel(params[1])
# Inspect feature importance
feature_importance = pd.DataFrame.from_dict({'feature':X.columns, 'importance':regr_pipe.named_steps['regr_cv'].best_estimator_.feature_importances_})
feature_importance.plot.bar('feature', 'importance', figsize=(15,5), logy=False, title='Feature importance', legend=False)
plt.show()
# NOTE(review): hard-coded, user-specific absolute path — breaks on any other
# machine; consider writing under aml_dir or a configurable output directory.
model_dir = "C:/Users/nelgoh/Desktop/Resources/Petronas/energy_demand_forecast/EnergyDemandForecast/outputs/models/"
# Persist the whole fitted pipeline (search + best estimator) for later scoring.
with open(os.path.join(model_dir, model_name + '.pkl'), 'wb') as f:
    pickle.dump(regr_pipe, f)
| 8-xgboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
# Voxelize a Surface Mesh
# =======================
#
# Create a voxel model (like legos) of a closed surface or volumetric
# mesh.
#
# This example also demonstrates how to compute an implicit distance from
# a bounding `pyvista.PolyData`{.interpreted-text role="class"} surface.
#
# +
# sphinx_gallery_thumbnail_number = 2
from pyvista import examples
import pyvista as pv
import numpy as np
# Load a surface to voxelize
surface = examples.download_foot_bones()
surface
# +
cpos = [(7.656346967151718, -9.802071079151158, -11.021236183314311),
(0.2224512272564101, -0.4594554282112895, 0.5549738359311297),
(-0.6279216753504941, -0.7513057097368635, 0.20311105371647392)]
surface.plot(cpos=cpos, opacity=0.75)
# -
# Create a voxel model of the bounding surface
#
# +
voxels = pv.voxelize(surface, density=surface.length/200)
p = pv.Plotter()
p.add_mesh(voxels, color=True, show_edges=True, opacity=0.5)
p.add_mesh(surface, color="lightblue", opacity=0.5)
p.show(cpos=cpos)
# -
# We could even add a scalar field to that new voxel model in case we
# wanted to create grids for modelling. In this case, let\'s add a scalar
# field for bone density noting:
#
voxels["density"] = np.full(voxels.n_cells, 3.65) # g/cc
voxels
voxels.plot(scalars="density", cpos=cpos)
# A constant scalar field is kind of boring, so let\'s get a little
# fancier by added a scalar field that varies by the distance from the
# bounding surface.
#
voxels.compute_implicit_distance(surface, inplace=True)
voxels
# +
contours = voxels.contour(6, scalars="implicit_distance")
p = pv.Plotter()
p.add_mesh(voxels, opacity=0.25, scalars="implicit_distance")
p.add_mesh(contours, opacity=0.5, scalars="implicit_distance")
p.show(cpos=cpos)
| examples/01-filter/voxelize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import logging
import math
import numpy as np
import os
import random
import tensorflow as tf
from data.tiny_imagenet.read_tiny_imagenet import TinyImageNet
# +
import torch
from torch.utils import data
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from torchsummary import summary
torch.cuda.empty_cache()
# -
path ="E:/code/adapted_deep_embeddings/data/tiny_imagenet/tiny-imagenet-200"
dataset = TinyImageNet(path)
# +
# dataset.load(path)
# for i in os.walk('data/tiny_imagenet/tiny-imagenet-200'):
# print(i)
# -
(x_train, y_train), (x_test, y_test) = dataset.kntl_data_form(350, 5, 10, 5)
# +
np.random.seed(1)
shuffle = np.random.permutation(len(y_train))
batch_size = 32
learning_rate = 0.005
IMG_SIZE = 224 # All images will be resized to 160x160
def format_example(image):
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image
x_train=format_example(x_train)
x_test=format_example(x_test)
# -
# Inspect the container types before converting to numpy / torch.
print(type(y_test))
print(type(y_train))
print(type(x_test))
print(type(x_train))
# +
# Materialise the resized image tensors as numpy arrays (TF1 graph-mode API).
# Reuse one shared Session instead of constructing a new one per tensor.
sess = tf.Session()
x_train = sess.run(x_train)
x_test = sess.run(x_test)
sess.close()
# -
# Fixed typo: `transpore` -> `transpose`.
# NOTE(review): `format_example` output is NHWC (4-D); transpose(2, 0, 1)
# only fits a 3-D array -- confirm the intended axis order (e.g. (0, 3, 1, 2)
# for NCHW) before using `x`.
x = x_train.transpose(2, 0, 1)
x.shape
# +
x_train = torch.Tensor(x_train) # transform to torch tensor
y_train = torch.Tensor(y_train)
x_test = torch.Tensor(x_test) # transform to torch tensor
y_test = torch.Tensor(y_test)
train_data = data.TensorDataset(x_train,y_train) # create your datset
test_data = data.TensorDataset(x_test,y_test) # create your datset
# +
train_dataloader = data.DataLoader(train_data, batch_size=32, num_workers=4, shuffle=True)
test_dataloader = data.DataLoader(test_data, batch_size=32, num_workers=4, shuffle=True)
dataloaders = {'train':train_dataloader, 'val':test_dataloader}
# -
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Run a train/val fine-tuning loop and return the best model.

    Args:
        model: torch.nn.Module, already moved to `device` by the caller.
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: optimizer over model.parameters().
        scheduler: LR scheduler, stepped once per epoch after the train phase.
        num_epochs: number of epochs to run.

    Returns:
        The model with the weights that achieved the best validation accuracy.

    Relies on the module-level globals `dataloaders`
    ({'train': ..., 'val': ...}) and `device`.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Per-split sample counts, used to average the running loss/accuracy.
    # Computed here because no `dataset_sizes` global is defined anywhere in
    # this notebook -- the original referenced one and would raise NameError.
    dataset_sizes = {name: len(loader.dataset) for name, loader in dataloaders.items()}
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss.item() is the batch mean, so re-weight by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # keep a deep copy of the weights whenever validation improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.cuda.empty_cache()
# del x_train, x_test
del y_train, y_test
# +
torch.cuda.empty_cache()
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# -
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=5)
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import gurobipy as gp
from gurobipy import GRB
from itertools import product
from math import sqrt
import numpy as np
import random as rd
import copy
def read_data(file_name):
    """Parse a max-cut instance file.

    The first line holds three space-separated tokens; the 2nd and 3rd are
    the node count and edge count. Each following line describes one edge,
    with the endpoint node numbers in fields 1 and 2. Parsing stops at the
    first blank line.

    Returns:
        (edges, p, v): list of (node, node) tuples, node count, edge count.
    """
    edges = []
    with open(file_name) as fh:
        lines = fh.readlines()
    _, node_count, edge_count = lines[0].replace('\n', '').split(' ')
    for raw in lines[1:]:
        if raw == '\n':
            break
        fields = raw.split(' ')
        edges.append((int(fields[1]), int(fields[2].replace('\n', ''))))
    return edges, int(node_count), int(edge_count)
edge,p,v = read_data('text.txt')
m = gp.Model('max-cut')
x = m.addVars(p,vtype=GRB.BINARY,name='x')
e = m.addVars(len(edge),vtype=GRB.BINARY,name='edge')
m.addConstrs(e[j] <= x[i[0]-1] + x[i[1]-1] for j,i in enumerate(edge))
m.addConstrs(e[j] <= 2 -x[i[0]-1] - x[i[1]-1] for j,i in enumerate(edge))
m.setObjective(gp.quicksum(e[i] for i in range(len(e))),GRB.MAXIMIZE)
m.optimize()
def ini_solution(p):
    """Build a random initial assignment: one 0/1 side label per node.

    Args:
        p: number of nodes.

    Returns:
        list[int]: p labels, each drawn uniformly from {0, 1}.
    """
    return [rd.randint(0, 1) for _ in range(p)]
solution = ini_solution(p)
def get_obj(solution):
    """Count cut edges: edges (a, b) whose endpoints carry different labels.

    Reads the module-level `edge` list produced by read_data().

    NOTE(review): the Gurobi model above treats edge endpoints as 1-based
    (it indexes x[i[0]-1]), while this function compares `a == i` and reads
    `solution[b]` with 0-based indices. If the input file numbers nodes
    from 1, node index 0 never matches and `solution[b]` can run past the
    end of the list -- confirm the intended node numbering.
    """
    obj = 0
    for i in range(len(solution)):
        for a,b in edge:
            if a == i:
                if solution[b] != solution[i]:
                    obj += 1
    return obj
print(get_obj(solution))
def ls(solution):
    """Random single-flip local search (1000 iterations).

    Each iteration flips one random bit of a copy of the incumbent and
    keeps the copy only if its cut value strictly improves. Prints the
    candidate objective every iteration; returns the best solution found.
    """
    current = solution
    for _ in range(1000):
        candidate = copy.deepcopy(current)
        flip_idx = rd.randint(0, len(current) - 1)
        base_obj = get_obj(candidate)
        # Flip one bit (labels are 0/1, so 1 - v toggles).
        candidate[flip_idx] = 1 - candidate[flip_idx]
        cand_obj = get_obj(candidate)
        if cand_obj > base_obj:
            current = candidate
        print(cand_obj)
    return current
class ga_solution:
    """One GA individual: a 0/1 node assignment and its cut value."""

    obj = 0
    solution = []

    def __init__(self, p):
        """Create a random individual over p nodes and score it."""
        self.solution = ini_solution(p)
        self.obj = get_obj(self.solution)

    def mutation(self):
        """Flip one random bit, then refresh the objective."""
        pos = rd.randint(0, len(self.solution) - 1)
        self.solution[pos] = 1 - self.solution[pos]
        self.obj = get_obj(self.solution)

    def update(self):
        """Recompute the objective after the assignment was modified externally."""
        self.obj = get_obj(self.solution)
solution = ga_solution(p)
print(solution.obj)
# +
def crossover(solutions):
    """Two-point crossover between two randomly chosen individuals, in place.

    Picks two (possibly identical) population members and a random index
    range, swaps that slice of their assignments, rescores both, and
    returns the (mutated) population list.
    """
    a = rd.randint(0, len(solutions) - 1)
    b = rd.randint(0, len(solutions) - 1)
    lo = rd.randint(0, len(solutions[a].solution) - 1)
    hi = rd.randint(0, len(solutions[a].solution) - 1)
    if lo > hi:
        lo, hi = hi, lo
    # Simultaneous slice assignment swaps the two segments without an
    # explicit temporary (both right-hand slices are copied first).
    solutions[a].solution[lo:hi], solutions[b].solution[lo:hi] = (
        solutions[b].solution[lo:hi],
        solutions[a].solution[lo:hi],
    )
    solutions[a].update()
    solutions[b].update()
    return solutions
# -
def select(solutions):
    """Roulette-wheel (fitness-proportional) selection.

    Draws len(solutions) - 1 individuals with replacement, each chosen with
    probability proportional to its objective value, and returns them.

    The original implementation appended `solutions[j]` for every j whose
    cumulative probability was below the random draw (the j = 0 threshold
    is 0, so it always matched), producing duplicated, wrongly-sized
    populations; this version walks the cumulative wheel and picks exactly
    one individual per draw.

    NOTE(review): like the original, this divides by the objective sum, so
    a population whose objectives are all zero raises ZeroDivisionError.
    """
    new_solutions = []
    obj_sum = sum(s.obj for s in solutions)
    # Cumulative probability edges of the wheel, one per individual.
    cumulative = []
    acc = 0.0
    for s in solutions:
        acc += s.obj / obj_sum
        cumulative.append(acc)
    for _ in range(len(solutions) - 1):
        rn = rd.random()
        for j, threshold in enumerate(cumulative):
            if rn <= threshold:
                new_solutions.append(solutions[j])
                break
        else:
            # Float rounding can leave rn a hair above the last edge.
            new_solutions.append(solutions[-1])
    return new_solutions
# +
def ga(solutions):
    """Run 100 generations of crossover, selection and random mutation.

    Each generation: crossover two random individuals, apply selection,
    mutate 10 randomly chosen individuals (printing each one's objective),
    then print the population's mean objective.
    """
    for _ in range(100):
        solutions = select(crossover(solutions))
        for _ in range(10):
            pick = rd.randint(0, len(solutions) - 1)
            print(solutions[pick].obj)
            solutions[pick].mutation()
        mean_obj = sum(s.obj for s in solutions) / len(solutions)
        print(mean_obj)
# +
solutions = []
for i in range(20):
solution = ga_solution(p)
solutions.append(solution)
ga(solutions)
# -
| MIP_for_max_cut.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="_QW4fQyQ4vQb"
from sklearn.cluster import KMeans #K-Means clustering.
import pandas as pd #Package with multiple methods of data filtering.
from sklearn.preprocessing import MinMaxScaler #Transform features by scaling each feature to a given range.
from matplotlib import pyplot as plt #For plottting data in multiple ways
#To render a figure in a notebook
# %matplotlib inline
# + id="7n68C2lm5Yqq" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="4c3cd04f-1c70-4be4-9db2-dbe6c2785685"
X = pd.read_csv('/content/drive/MyDrive/ML DataSet/Updated_product_des.csv') #Read dataset
X1 = X.head(150) #Store top 150 rows of the dataset
X1 #Print
# + colab={"base_uri": "https://localhost:8080/", "height": 399} id="eIXl66F08Zr_" outputId="73548ff1-3a21-4cfa-dbe9-afec545326da"
X2 = X1[["Product_Id","Ratings"]]#Assign matrix to X2 containing just Prouduct Id and Ratings.
X2
# + id="x0fxkdE54xeu" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="91efdd58-3ceb-46e3-caf7-5cba79dc91eb"
plt.scatter(X2['Product_Id'],X2['Ratings']) #Plots ratings vs ProductId
# + id="NUS9BlA7439k" colab={"base_uri": "https://localhost:8080/"} outputId="16981331-839a-4cf9-9ddd-bbe1274e910c"
km = KMeans(n_clusters=5) #CLusters of 5 because we have ratings out of 5.
km
# + id="3AxdYm3s46v2" colab={"base_uri": "https://localhost:8080/"} outputId="0522be9d-dd7e-41c8-ed8f-e0ef47c901ab"
y = km.fit_predict(X2[['Ratings']]) #Assign values in array form based on ratings.
y
# + id="_Nnt_BX448ru" colab={"base_uri": "https://localhost:8080/", "height": 506} outputId="0a5dea7a-4d87-4507-c996-8955614a6d06"
X2['cluster'] = y #Adds a column named 'cluster'
Data1=X2[["Product_Id","Ratings","cluster"]] #Storing X2 in series named Data1
Data1 #To display
# + colab={"base_uri": "https://localhost:8080/", "height": 315} id="giTKv9fbfE9n" outputId="5a87ff6a-7aa6-4d35-84cc-f975e4314d86"
# Assign values to variable Y based on the cluster.
Y1=X2[X2.cluster==0]
Y2=X2[X2.cluster==1]
Y3=X2[X2.cluster==2]
Y4=X2[X2.cluster==3]
Y5=X2[X2.cluster==4]
#Assigns a particular color to each color.
plt.scatter(Y1.Product_Id,Y1['Ratings'], color='green')
plt.scatter(Y2.Product_Id,Y2['Ratings'], color='red')
plt.scatter(Y3.Product_Id,Y3['Ratings'], color='blue')
plt.scatter(Y4.Product_Id,Y4['Ratings'], color='purple')
plt.scatter(Y5.Product_Id,Y5['Ratings'], color='orange')
#Plot
plt.xlabel('Product Id')
plt.ylabel('Ratings')
plt.legend()
# + id="0z71oePygMpy"
#User-Built function that displays similar products based on user input
#User-Built function that displays similar products based on user input
def show_recommendations(id):
    """Return Product_Id/Ratings rows whose rating matches any rating of *id*.

    Looks up the given product in the module-level frame X2, collects its
    rating value(s), and returns every row sharing one of those ratings.
    """
    matched = X2[X2["Product_Id"] == id]
    target_ratings = matched.Ratings
    similar = X2[X2["Ratings"].isin(target_ratings)]
    return similar[["Product_Id", "Ratings"]]
# + colab={"base_uri": "https://localhost:8080/", "height": 663} id="R2WJ7Xi23UBZ" outputId="18ab663c-9c3a-4676-c04b-d83fa68e06bb"
show_recommendations("B000179R3I")
| Code/Clustering_RecommendationBasedOn_ProuductId_Ratings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Determine os valores possível para $a$, para que o sistema abaixo seja compatível.</b>
# $M = \begin{cases}
# x + y - az = 0 \\
# ax + y - z = 2 - a \\
# x + ay - z = -a
# \end{cases} $
# $\left ( \begin{array}{rcr}
# 1 & 1 & -a & 0 \\
# a & 1 & -1 & 2 - a \\
# 1 & a & -1 & -a
# \end{array} \right )$
# $L3 - L1 \rightarrow L3$
# $\left ( \begin{array}{rcr}
# 1 & 1 & -a & 0 \\
# a & 1 & -1 & 2 - a \\
# 0 & a - 1 & a - 1 & -a
# \end{array} \right )$
# $L2 - aL1 \rightarrow L2$
# $\left ( \begin{array}{rcr}
# 1 & 1 & -a & 0 \\
# 0 & 1 - a & -1 + a^2 & 2 - a \\
# 0 & a - 1 & a - 1 & -a
# \end{array} \right )$
# $L3 + L2 \rightarrow L3$
# $\left ( \begin{array}{rcr}
# 1 & 1 & -a & 0 \\
# 0 & 1 - a & -1 + a^2 & 2 - a \\
# 0 & 0 & -2 + a + a^2 & 2 -2a
# \end{array} \right )$
# O sistema deixa de ter solução única quando o pivô da terceira linha se anula:
# $-2 + a + a^2 = 0$
# $a^2 + a - 2 = 0$
# $\delta = 1^2 - 4 \cdot 1 \cdot (-2)$
# $\delta = 9$
# $a = \frac{-1 + 3}{2} = 1$
# $a = \frac{-1 - 3}{2} = -2$
# $1 - a - 1 + a^2 = 2 - a$
# $-a + a^2 = 2 - a$
# $a^2 - 2 = 0$
# $a^2 = 2$
# $a = \sqrt{2}$
# $2 - a \neq 0$
# $a \neq 2$
| Lista 03/08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, sys
sys.path.append(os.getcwd())
from PIL import Image
import random
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
# -
class Dataset():
    """In-memory retinal-image dataset.

    Loads every image listed in a manifest file (one relative path per
    line), center-crops to 560, resizes to 500 and converts to a tensor,
    keeping all tensors in memory.

    NOTE(review): both branches of the `mode` ternary below point at
    train.txt, so 'train' and any other mode load identical data -- the
    else branch presumably should reference a test/validation manifest;
    confirm.
    """
    def __init__(self,mode):
        # Manifest of image paths relative to the Retinal data directory.
        path = 'F:\\torch_projects\\data\\Retinal\\train.txt' if mode=='train' else 'F:\\torch_projects\\data\\Retinal\\train.txt'
        self.data = []  # pre-transformed image tensors
        transform = []
        transform.append(T.CenterCrop(560))
        transform.append(T.Resize(500))
        transform.append(T.ToTensor())
        transform = T.Compose(transform)
        with open(path, 'r') as file_to_read:
            while(1):
                line = file_to_read.readline() # read one whole line (one image path)
                if not line:
                    break
                image = Image.open('F:\\torch_projects\\data\\Retinal\\'+line.strip('\n'))
                self.data.append(transform(image))
    def __getitem__(self, index):
        """Return the pre-loaded tensor at *index*."""
        return self.data[index]
    def __len__(self):
        """Number of loaded images."""
        return len(self.data)
train_loader = data.DataLoader(dataset=Dataset('train'),
batch_size=1,
shuffle=True)
| tflib/trial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_anaconda3)
# language: python
# name: conda_anaconda3
# ---
# # Pattern quantification
# ### *Arothron* species, RD model (uni, in silico hybrids)
# +
# %load_ext autoreload
# %autoreload 1
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from quant import *
# %aimport quant
# %matplotlib inline
# -
# ### Pattern quantification (*Arothron* species)
# +
# Quantify every Arothron specimen image exactly once: the original opened
# and re-analysed each file twice (once per metric), doubling the I/O and
# the quant.quant() work.
df_Aro = pd.read_csv("Aro.csv", index_col=0)
results = [quant.quant(np.asarray(Image.open("Aro/" + id + "_mono.png")))
           for id in df_Aro.id]
Ls = [r[0] for r in results]    # overall lightness per specimen
PCSs = [r[1] for r in results]  # pattern complexity per specimen
df_Aro['lightness'] = Ls
df_Aro['complexity'] = PCSs
df_Aro.head()
# -
# ### RD model (sim_uni)
# +
df_uni_p = pd.read_csv("uni_p.csv", index_col=0)
df_uni_p['id'] = ['uni_{:03d}'.format(i) for i in df_uni_p.index]
df_uni_p['species'] = 'sim_uni'
df_uni_p['specimen'] = [fb.split("/")[2] for fb in df_uni_p.file_base]
df_uni = df_uni_p[['id',
'species',
'specimen',
'lightness',
'complexity']]
df_uni.head()
# -
# ### in silico hybrids (sim_ish)
# +
df_ish = pd.read_csv("ish.csv", index_col=0)
df_ish['id'] = ['ish_{:03d}'.format(i) for i in df_ish.index]
df_ish['species'] = 'sim_ish'
df_ish['specimen'] = [fb.split("/")[2] for fb in df_ish.file_base]
df_ish = df_ish[['id',
'species',
'specimen',
'lightness',
'complexity']]
df_ish.head()
# -
# ### Save to a csv file
# +
df_all = pd.concat([df_Aro, df_uni, df_ish],
ignore_index=True,
sort=False)
df_all.to_csv("Aro_uni_ish.csv")
# -
# ### Plotting
df = pd.read_csv("Aro_uni_ish.csv", index_col=0)
df.iloc[[0, 1, 120, 121, 565, 566], :]
# +
df['color']='black'
cud_red='#FF2800'
cud_yellow='#f0e442' #'#FAF500'
cud_green='#009E73' #'#35A16B'
cud_blue='#0072B2' #'#0041FF'
cud_skyblue='#66CCFF'
cud_pink='#FF99A0'
cud_orange='#FF9900'
cud_purple='#9A0079'
cud_brown='#663300'
# gold='#FFD700'
sns_deep_blue=sns.color_palette('deep').as_hex()[0]
sns_deep_green=sns.color_palette('deep').as_hex()[2]
sns_deep_red=sns.color_palette('deep').as_hex()[3]
sns_deep_purple=sns.color_palette('deep').as_hex()[4]
sns_deep_yellow=sns.color_palette('deep').as_hex()[8]
sns_deep_lightblue=sns.color_palette('deep').as_hex()[9]
sns_dark_yellow=sns.color_palette('dark').as_hex()[8]
sns_bright_yellow=sns.color_palette('bright').as_hex()[8]
df.loc[df['species']=='nig', ['color']] = sns_deep_blue
df.loc[df['species']=='ste', ['color']] = sns_deep_green
df.loc[df['species']=='map', ['color']] = sns_bright_yellow #'goldenrod'
df.loc[df['species']=='car', ['color']] = 'seagreen'
df.loc[df['species']=='mul', ['color']] = 'chocolate'
df.loc[df['species']=='ret', ['color']] = 'orange'
df.loc[df['species']=='fir', ['color']] = sns_deep_red
df.loc[df['species']=='mel', ['color']] = sns_deep_purple
df.loc[df['species']=='his', ['color']] = sns_deep_yellow
df.loc[df['species']=='sim_uni', ['color']] = 'lightskyblue'
# -
df_Aro = df[0: 120]
df_uni = df[120: 565]
df_ish = df[565:965]
# +
sns.set('notebook')
sns.set_style("ticks")
plt.figure(figsize=(18, 7))
plt_sc = []
sp_full = ["hispidus",
"meleagris",
"firmamentum",
"reticularis",
"multilineatus",
"carduus",
"mappa",
"stellatus",
"nigropunctatus"]
with plt.style.context('seaborn-deep'):
plt_sc.append(plt.scatter(df_uni['lightness'],
df_uni['complexity'],
color=df_uni['color'],
s=50,
alpha=0.5,
label="model"))
numS = 200
for idx, sp in enumerate(["his",
"mel",
"fir",
"ret",
"mul",
"car",
"map",
"ste",
"nig"]):
df_sp=df[df['species']==sp]
# sci_name_label = "$\it{"+sp+"}$"
# sci_name_label = "$\it{"+sp_full[idx]+"}$"
sci_name_label = "$\it{"+sp_full[idx]+"}$ ($\it{"+sp+")}$"
if ( sp=="car" or sp=="mul"):
plt_sc.append(plt.scatter(df_sp['lightness'],
df_sp['complexity'],
color=df_sp['color'],
marker="x",
linewidth=7.0,
s=numS,
label=sci_name_label))
elif ( sp=="map" ):
plt_sc.append(plt.scatter(df_sp['lightness'],
df_sp['complexity'],
color=df_sp['color'],
marker="x",
linewidth=7.0,
s=numS,
alpha=0.8,
label=sci_name_label))
elif ( sp=="ste" or sp=="nig" ):
plt_sc.append(plt.scatter(df_sp['lightness'],
df_sp['complexity'],
color=df_sp['color'],
marker="s",
s=numS,
alpha=0.9,
label=sci_name_label))
elif ( sp=="ret" ):
plt_sc.append(plt.scatter(df_sp['lightness'],
df_sp['complexity'],
color=df_sp['color'],
marker="o",
s=numS,
alpha=0.9,
label=sci_name_label))
else:
plt_sc.append(plt.scatter(df_sp['lightness'],
df_sp['complexity'],
color=df_sp['color'],
marker="o",
s=numS,
alpha=0.9,
label=sci_name_label))
plt_sc[idx].axes.set_xlim(-0.02, 1.05)
plt_sc[idx].axes.set_ylim(0.06, 1.02)
plt.xlabel("Overall color tone", fontsize=28)
plt.ylabel("Pattern complexity", fontsize=28)
plt.tick_params(axis='both', which='major', labelsize=20)
leg = plt.legend(bbox_to_anchor=(1.07, 0.5),
loc='center left',
ncol=1,
frameon=False,
fontsize=24)
plt.tight_layout()
plt.show()
# plt.savefig("quant_scatter.pdf", transparent = True)
# -
df_Aro_ish = pd.concat([df_Aro, df_ish], sort=False)
df_Aro_ish
# +
sp_order = ['his',
'mel',
'fir',
'ret',
'mul',
'car',
'map',
'ste',
'nig',
'sim_ish']
df_Aro_ish["species"] = pd.Categorical(df_Aro_ish["species"], sp_order)
df_Aro_ish = df_Aro_ish.sort_values(by=["species"])
# +
sns.set('notebook')
sns.set_style("ticks")
# sp_names = ['his',
sp_labels = ['his',
'mel',
'fir',
'ret',
'mul',
'car',
'map',
'ste',
'nig',
'ISH']
# sp_labels = []
# for sn in sp_names[:-1]:
# sp_labels.append("$\it{"+sn+"}$")
# sp_labels.append(sp_names[-1])
colors = ['lightsteelblue',
'lightsteelblue',
'lightsteelblue',
'lightsteelblue',
'lightpink',
'lightpink',
'lightpink',
'lightsteelblue',
'lightsteelblue',
'hotpink']
# +
plt.figure(figsize=(9, 6))
with plt.style.context('seaborn-deep'):
plt_bx = sns.boxplot(x="species",
y="lightness",
data=df_Aro_ish,
linewidth=2.0,
palette=colors)
plt_bx.axes.set_xlim(-0.6, 9.6)
plt_bx.axes.set_ylim(0.0, 1.05)
plt_bx.set_xticklabels(sp_labels, fontsize=28, fontstyle='italic', rotation='vertical')
plt_bx.set_xlabel("")
plt_bx.set_ylabel("Overall color tone", fontsize=30)
plt.tick_params(axis='y', which='major', labelsize=18)
plt.tight_layout()
plt.show()
# plt.savefig("quant_lightness.pdf", transparent = True)
# +
plt.figure(figsize=(9, 6))
with plt.style.context('seaborn-deep'):
plt_bx = sns.boxplot(x="species",
y="complexity",
data=df_Aro_ish,
linewidth=2.0,
palette=colors)
plt_bx.axes.set_xlim(-0.6, 9.6)
plt_bx.axes.set_ylim(0.1, 0.95)
plt_bx.set_xticklabels(sp_labels, fontsize=28, fontstyle='italic', rotation='vertical')
plt_bx.set_xlabel("")
plt_bx.set_ylabel("Pattern complexity", fontsize=30)
plt_bx.set_yticks([0.2, 0.4, 0.6, 0.8])
plt.tick_params(axis='y', which='major', labelsize=18)
plt.tight_layout()
plt.show()
# plt.savefig("quant_complexity.pdf", transparent = True)
# -
| sim-quant/quant_all.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
df=pd.read_csv("D:/UW/Guilherme Dashboard/Machine Learning Model/expdata.csv", encoding = "ISO-8859-1", engine='python')
df.info()
df.head()
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
train, test = train_test_split(df, test_size=0.5, random_state=111, stratify = df.Churn)
x = df.columns[df.columns!="Churn"]
y = "Churn"
train_x = train[x]
train_y = train[y]
test_x = test[x]
test_y = test[y]
#function for model fitting
#function for model fitting
def churn_prediction(algo, training_x, training_y, testing_x, testing_y, cols, cf = 'coefficients'):
    """Fit *algo*, report test-set metrics and plot diagnostics.

    Fits the estimator, prints the classification report and accuracy,
    then draws (1) a confusion-matrix heatmap, (2) the ROC curve with its
    AUC and (3) a bar chart of feature importances/coefficients.

    Args:
        algo: scikit-learn style classifier (fit/predict/predict_proba).
        training_x, training_y: training features and labels.
        testing_x, testing_y: held-out features and labels.
        cols: feature names aligned with the feature-matrix columns.
        cf: 'coefficients' to read algo.coef_, 'features' to read
            algo.feature_importances_.

    NOTE(review): any other value of `cf` leaves `coefficients` undefined
    and raises NameError at the merge below -- confirm only these two
    values are ever passed.
    """
    algo.fit(training_x,training_y)
    predictions = algo.predict(testing_x)
    # Probability of the positive (churn) class, used for the ROC curve.
    probabilities = algo.predict_proba(testing_x)[:,1]
    #coeffs
    if cf == "coefficients":
        coefficients = pd.DataFrame(algo.coef_.ravel())
    elif cf == "features":
        coefficients = pd.DataFrame(algo.feature_importances_)
    column_df = pd.DataFrame(cols)
    # Align importances with their feature names by positional index.
    coef_sumry = (pd.merge(coefficients,column_df,left_index= True,
                           right_index= True, how = "left"))
    coef_sumry.columns = ["coefficients","features"]
    coef_sumry = coef_sumry.sort_values(by = "coefficients",ascending = False)
    print (algo)
    print ("\n Classification report : \n",classification_report(testing_y,predictions))
    print ("Accuracy Score : ",accuracy_score(testing_y,predictions))
    #confusion matrix
    conf_matrix = confusion_matrix(testing_y,predictions)
    plt.figure(figsize=(12,12))
    plt.subplot(221)
    sns.heatmap(conf_matrix, fmt = "d",annot=True, cmap='Blues')
    plt.title('Confuion Matrix')
    plt.ylabel('True Values')
    plt.xlabel('Predicted Values')
    #roc_auc_score
    model_roc_auc = roc_auc_score(testing_y,probabilities)
    print ("Area under curve : ",model_roc_auc,"\n")
    fpr,tpr,thresholds = roc_curve(testing_y,probabilities)
    plt.subplot(222)
    plt.plot(fpr, tpr, color='darkorange', lw=1, label = "Auc : %.3f" %model_roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.subplot(212)
    sns.barplot(x = coef_sumry["features"] ,y = coef_sumry["coefficients"])
    plt.title('Feature Importances')
    plt.xticks(rotation="vertical")
    plt.show()
# +
param_grid1 = {'max_features':['auto', 'sqrt', 'log2', None],
'n_estimators':[300, 500, 700, 900, 1100, 1300]
}
rf_model = RandomForestClassifier()
grid1 = GridSearchCV(estimator=rf_model, param_grid=param_grid1, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid1.fit(train_x, train_y)
# -
grid1.best_estimator_
# +
dt = pd.DataFrame(grid1.cv_results_)
dt.param_max_features = dt.param_max_features.astype(str)
dt.param_n_estimators = dt.param_n_estimators.astype(str)
table = pd.pivot_table(dt, values='mean_test_score', index='param_n_estimators',
columns='param_max_features')
sns.heatmap(table)
# -
grid1.best_score_
# +
param_grid2 = {'max_features':['auto'],
'n_estimators':[1000, 1100, 1200],
'criterion': ['entropy', 'gini'],
'max_depth': [7, 9, 11, 13, 15, None],
}
rf_model = RandomForestClassifier()
grid2 = GridSearchCV(estimator=rf_model, param_grid=param_grid2, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid2.fit(train_x, train_y)
# -
grid2.best_estimator_
# +
dt = pd.DataFrame(grid2.cv_results_)
table = pd.pivot_table(dt, values='mean_test_score', index='param_max_depth',
columns='param_criterion')
sns.heatmap(table)
# -
grid2.best_score_
# +
param_grid2_2 = {'max_features':['auto'],
'n_estimators':[950, 1000, 1050],
'criterion': ['entropy'],
'max_depth': [10, 11, 12],
}
rf_model = RandomForestClassifier()
grid2_2 = GridSearchCV(estimator=rf_model, param_grid=param_grid2_2, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid2_2.fit(train_x, train_y)
# -
grid2_2.best_score_
# +
param_grid3 = {'max_features':['auto'],
'n_estimators':[1000],
'criterion': ['entropy'],
'max_depth': [10],
'min_samples_leaf': [1, 3, 5, 7],
'min_samples_split': [2, 4, 6, 8]
}
rf_model = RandomForestClassifier()
grid3 = GridSearchCV(estimator=rf_model, param_grid=param_grid3, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid3.fit(train_x, train_y)
# -
grid3.best_estimator_
# +
dt = pd.DataFrame(grid3.cv_results_)
table = pd.pivot_table(dt, values='mean_test_score', index='param_min_samples_leaf',
columns='param_min_samples_split')
sns.heatmap(table)
# -
grid3.best_score_
# +
param_grid4 = {'class_weight':[{0:1, 1:1}, {0:1, 1:2}, {0:1, 1:3}],
'max_features':['auto'],
'n_estimators':[1000],
'criterion': ['entropy'],
'max_depth': [10],
'min_samples_leaf': [1],
'min_samples_split': [8]
}
rf_model = RandomForestClassifier()
grid4 = GridSearchCV(estimator=rf_model, param_grid=param_grid4, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid4.fit(train_x, train_y)
# -
grid4.best_estimator_
# +
dt = pd.DataFrame(grid4.cv_results_)
dt.param_class_weight = dt.param_class_weight.astype(str)
table = pd.pivot_table(dt, values='mean_test_score', index='param_class_weight')
sns.heatmap(table)
# -
grid4.best_score_
model = RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight={0: 1, 1: 2},
criterion='entropy', max_depth=10, max_features='auto',
max_leaf_nodes=None, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=8,
min_weight_fraction_leaf=0.0, n_estimators=1000,
n_jobs=None, oob_score=False, random_state=None,
verbose=0, warm_start=False)
churn_prediction(model, train_x, train_y, test_x, test_y, x,"features")
train_scores = cross_val_score(model, train_x, train_y, cv = 5, scoring='f1')
train_scores
np.mean(train_scores)
import pickle
pickle.dump(model, open('model.pkl','wb'))
# +
import shap
shap.initjs()
import joblib
# +
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(np.array(test_x.iloc[0]))
shap.force_plot(explainer.expected_value[1], shap_values[1], test_x.iloc[0])
# -
# Saving Explainer
ex_filename = 'explainer.bz2'
joblib.dump(explainer, filename=ex_filename, compress=('bz2', 9))
explainer = joblib.load(filename="explainer.bz2")
shap_values = explainer.shap_values(np.array(test_x.iloc[0]))
shap.force_plot(explainer.expected_value[1], shap_values[1], list(test_x.columns), matplotlib = True, show = False).savefig('static/images/shap.png', bbox_inches="tight")
# +
from matplotlib.patches import Circle, Wedge, Rectangle
def degree_range(n):
    """Divide the 0-180 degree half circle into n equal sectors.

    Returns:
        (ndarray, ndarray): an (n, 2) array of [start, end] angles and the
        n sector midpoints, all in degrees.
    """
    edges = np.linspace(0, 180, n + 1, endpoint=True)
    start = edges[:-1]
    end = edges[1:]
    mid_points = start + ((end - start) / 2.)
    return np.c_[start, end], mid_points
def rot_text(ang):
    """Return the text rotation (degrees) for a gauge label at angle *ang*.

    The original computed np.degrees(np.radians(ang) * np.pi / np.pi -
    np.radians(90)); the pi factors cancel and the degrees/radians round
    trip is the identity, so the rotation is simply ang - 90 (this also
    drops the needless floating-point round trip).
    """
    return ang - 90.0
def gauge(labels=['LOW','MEDIUM','HIGH','EXTREME'],
          colors=['#007A00','#0063BF','#FFCC00','#ED1C24'], Probability=1, fname=False):
    """Draw a half-circle churn-probability gauge.

    Args:
        labels: sector labels, ordered low to high risk.
        colors: one color per label, same order.
        Probability: churn probability in [0, 1]; drives the needle angle.
        fname: optional path -- when truthy, the figure is saved there.

    (The default lists are never mutated inside the function, so the
    mutable-default pitfall does not apply here.)
    """
    N = len(labels)
    colors = colors[::-1]  # wedges are drawn from 180 degrees down to 0
    """
    begins the plotting
    """
    fig, ax = plt.subplots()
    # Use the actual number of labels instead of the hard-coded 4 the
    # original passed, so the gauge renders correctly for any label count.
    ang_range, mid_points = degree_range(N)
    labels = labels[::-1]
    """
    plots the sectors and the arcs
    """
    patches = []
    for ang, c in zip(ang_range, colors):
        # sectors
        patches.append(Wedge((0., 0.), .4, *ang, facecolor='w', lw=2))
        # arcs
        patches.append(Wedge((0., 0.), .4, *ang, width=0.10, facecolor=c, lw=2, alpha=0.5))
    # Plain loop instead of a throwaway list comprehension for side effects.
    for p in patches:
        ax.add_patch(p)
    """
    set the labels (e.g. 'LOW','MEDIUM',...)
    """
    for mid, lab in zip(mid_points, labels):
        ax.text(0.35 * np.cos(np.radians(mid)), 0.35 * np.sin(np.radians(mid)), lab,
                horizontalalignment='center', verticalalignment='center', fontsize=14,
                fontweight='bold', rotation=rot_text(mid))
    """
    set the bottom banner and the title
    """
    r = Rectangle((-0.4, -0.1), 0.8, 0.1, facecolor='w', lw=2)
    ax.add_patch(r)
    ax.text(0, -0.05, 'Churn Probability ' + np.round(Probability, 2).astype(str),
            horizontalalignment='center', verticalalignment='center',
            fontsize=22, fontweight='bold')
    """
    plots the arrow now
    """
    # Probability 0 points at 180 degrees (left/low end), 1 at 0 degrees.
    pos = (1 - Probability) * 180
    ax.arrow(0, 0, 0.225 * np.cos(np.radians(pos)), 0.225 * np.sin(np.radians(pos)),
             width=0.04, head_width=0.09, head_length=0.1, fc='k', ec='k')
    ax.add_patch(Circle((0, 0), radius=0.02, facecolor='k'))
    ax.add_patch(Circle((0, 0), radius=0.01, facecolor='w', zorder=11))
    """
    removes frame and ticks, and makes axis equal and tight
    """
    ax.set_frame_on(False)
    ax.axes.set_xticks([])
    ax.axes.set_yticks([])
    ax.axis('equal')
    plt.tight_layout()
    if fname:
        fig.savefig(fname, dpi=200)
# -
gauge(Probability=model.predict_proba(test_x.iloc[0:1])[0,1])
| Churn Prediction Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A very short notebook to convert EnzymeML Spreadsheets to EnzymeML documents
# With a small peek into the timecourse data (?)
# ### imports
# +
# API
import requests
import json
# Operating system for files
import os
# PyEnzyme
from pyenzyme.enzymeml.tools import EnzymeMLReader
# Visualization
import seaborn as sns
import matplotlib.pyplot as plt
# global Variables
enzmldoc = ''
# Visu
#basic/general settings
sns.set_theme(style="whitegrid", palette ='bright',color_codes=True, context = 'notebook')
# -
# ### preparing the API
api_url = "http://127.0.0.1:5000/"
# convert xlsm to omex
endpoint_convert = f"{api_url}/template/convert"
# read
endpoint_read = f"{api_url}/read"
# ### Some functions
# +
def reqConvert(path_xlsm, name_xlsm):
    '''
    API request to convert an EnzymeML spreadsheet (xlsm) into an
    EnzymeML document (omex) via the `endpoint_convert` service.

    Args:
        path_xlsm (string): path to directory containing the spreadsheet
        name_xlsm (string): name of spreadsheet, with file extension .xlsm

    Returns:
        list: [status_code, content] where status_code is the HTTP status
        (200 = OK, 500 = server error) and content is the binary-encoded
        EnzymeML document returned by the service.
    '''
    payload = {}
    headers = {}
    # open the spreadsheet inside a context manager so the file handle is
    # closed even if the request raises (the original leaked the handle)
    with open(os.path.join(path_xlsm, name_xlsm), 'rb') as fh:
        files = [
            ('xlsm', (name_xlsm, fh, 'application/octet-stream'))
        ]
        response = requests.request("POST", endpoint_convert, headers=headers,
                                    data=payload, files=files)
    return [response.status_code, response.content]
def writeBinary(path, name, inBinary):
    '''
    Write binary content (e.g. an EnzymeML document, .omex) to disk.

    Args:
        path (string): path to the target directory
        name (string): filename; for an EnzymeML document the file
            extension must be .omex
        inBinary (bytes): binary content to write
    '''
    # context manager guarantees the handle is closed even if write() raises
    # (the original used explicit open/close and could leak on error)
    with open(os.path.join(path, name), 'wb') as f:
        f.write(inBinary)
# -
# ## Convert Spreadsheet to EnzymeML document
# #### define path and name
# +
# input spreadsheet: directory and file name
path = './datasets'
name_xlsm = 'ApPDC.xlsm'

# request the conversion; reqOMEX = [status_code, binary omex content]
reqOMEX = reqConvert(path, name_xlsm)
if reqOMEX[0]==200:
    # save the returned omex next to the spreadsheet and load it with PyEnzyme
    omexName = os.path.splitext(name_xlsm)[0]+'.omex'
    writeBinary(path, omexName, reqOMEX[1])
    print('EnzymeML document created: '+omexName)
    enzmldoc = EnzymeMLReader().readFromFile(os.path.join(path, omexName))
else:
    print("conversion failed. Error: " + str(reqOMEX[0]))
# -
# #### Overview of EnzymeML document
print(enzmldoc)
# #### Short Visualization
# reaction ids
# print the available reaction ids
for id_, reac in enzmldoc.getReactionDict().items():
    print(id_)

# one reaction — use `reaction` consistently below; the original read
# `reac`, which is the loop variable above and therefore holds the *last*
# reaction in the dict, not the selected 'r0'
reaction = enzmldoc.getReaction('r0')
reaction_name = reaction.getName()
reac_educts = reaction.getEducts()
reac_products = reaction.getProducts()
# Educts:
# Educts: one time-course figure per educt that has replicate data
for reactant_id, stoich, _, replicates, init_conc in reac_educts:
    # each educt entry is a tuple of:
    # reactant ID, stoichiometry, constant flag, replicates, initial concentrations
    if len(replicates) > 0:
        df = reaction.exportReplicates(reactant_id)
        time_val = df.index.tolist()
        # index name encodes "time/unit" — split it into the two parts
        time = df.index.name.split('/')[0]
        time_unit = df.index.name.split('/')[1]
        f, ax = plt.subplots(figsize=(7,3.5))
        # Visualisation: one line per replicate column; column names encode
        # "replicate_id/reactant_id"
        for col in df.columns:
            name = enzmldoc.getReactant(col.split('/')[1]).getName()
            unit_name = enzmldoc.getReactant(col.split('/')[1]).getSubstanceUnits()
            unit = enzmldoc.getUnitDict()[unit_name].getName()
            sns.lineplot( x=time_val, y=df[col], label = col.split('/')[0] )
        # set graph title, legend, axes
        # NOTE(review): `name` and `unit` keep the values from the *last*
        # column of the loop above — fine if all replicates share one
        # reactant/unit, otherwise confirm
        ax.set_title(reaction_name, fontsize = 12)
        ax.legend(fontsize = 10, \
                  bbox_to_anchor= (1, 0.75), \
                  title= name, \
                  title_fontsize = 10, \
                  shadow = True, \
                  facecolor = 'white');
        xlabel = f"{time} [{time_unit}]"
        ylabel = f"{'concentration'} [{unit}]"
        ax.set_xlabel(xlabel , fontsize=10)
        ax.set_ylabel(ylabel, fontsize=10)
# products
# Products: same visualisation as for the educts above
for reactant_id, stoich, _, replicates, init_conc in reac_products:
    # each product entry is a tuple of:
    # reactant ID, stoichiometry, constant flag, replicates, initial concentrations
    if len(replicates) > 0:
        # use `reaction` (the selected 'r0') consistently — the original
        # called `reac.exportReplicates`, i.e. the leftover loop variable
        # from the id-printing loop, which holds the last reaction instead
        df = reaction.exportReplicates(reactant_id)
        time_val = df.index.tolist()
        # index name encodes "time/unit"
        time = df.index.name.split('/')[0]
        time_unit = df.index.name.split('/')[1]
        f, ax = plt.subplots(figsize=(7,3.5))
        # one line per replicate column; column names encode "replicate_id/reactant_id"
        for col in df.columns:
            name = enzmldoc.getReactant(col.split('/')[1]).getName()
            unit_name = enzmldoc.getReactant(col.split('/')[1]).getSubstanceUnits()
            unit = enzmldoc.getUnitDict()[unit_name].getName()
            sns.lineplot( x=time_val, y=df[col], label = col.split('/')[0] )
        # set graph title, legend, axes
        ax.set_title(reaction_name, fontsize = 12)
        ax.legend(fontsize = 10, \
                  bbox_to_anchor= (1, 0.75), \
                  title= name, \
                  title_fontsize = 10, \
                  shadow = True, \
                  facecolor = 'white');
        xlabel = f"{time} [{time_unit}]"
        ylabel = f"{'concentration'} [{unit}]"
        ax.set_xlabel(xlabel , fontsize=10)
        ax.set_ylabel(ylabel, fontsize=10)
| MiniConvertNB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Artificial Intelligence Nanodegree
#
# ## Convolutional Neural Networks
#
# ---
#
# In this notebook, we train a CNN on augmented images from the CIFAR-10 database.
#
# ### 1. Load CIFAR-10 Database
# +
import keras
from keras.datasets import cifar10
# load the pre-shuffled train and test data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# -
# ### 2. Visualize the First 36 Training Images
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# show the first 36 training images in a 3 x 12 grid, with axis ticks hidden
fig = plt.figure(figsize=(20,5))
for i in range(36):
    ax = fig.add_subplot(3, 12, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_train[i]))
# -
# ### 3. Rescale the Images by Dividing Every Pixel in Every Image by 255
# rescale [0,255] --> [0,1]
x_train = x_train.astype('float32')/255
x_test = x_test.astype('float32')/255

# ### 4. Break Dataset into Training, Testing, and Validation Sets

# +
from keras.utils import np_utils  # NOTE(review): unused — to_categorical is called via keras.utils below

# break training set into training and validation sets (first 5000 -> validation)
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]

# one-hot encode the labels (num_classes inferred from the label values)
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
y_valid = keras.utils.to_categorical(y_valid, num_classes)

# print shape of training set
print('x_train shape:', x_train.shape)

# print number of training, validation, and test images
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(x_valid.shape[0], 'validation samples')
# -
# ### 5. Create and Configure Augmented Image Generator
# +
from keras.preprocessing.image import ImageDataGenerator

# create and configure augmented image generator
datagen_train = ImageDataGenerator(
    width_shift_range=0.1,  # randomly shift images horizontally (10% of total width)
    height_shift_range=0.1,  # randomly shift images vertically (10% of total height)
    horizontal_flip=True) # randomly flip images horizontally

# fit augmented image generator on data (computes any data-dependent statistics)
datagen_train.fit(x_train)
# -

# ### 6. Visualize Original and Augmented Images

# +
import matplotlib.pyplot as plt

# take subset of training data
x_train_subset = x_train[:12]

# visualize subset of training data
fig = plt.figure(figsize=(20,2))
for i in range(0, len(x_train_subset)):
    ax = fig.add_subplot(1, 12, i+1)
    ax.imshow(x_train_subset[i])
fig.suptitle('Subset of Original Training Images', fontsize=20)
plt.show()

# visualize augmented images: take exactly one augmented batch of 12
# (the `break` exits after the first batch — the generator is infinite)
fig = plt.figure(figsize=(20,2))
for x_batch in datagen_train.flow(x_train_subset, batch_size=12):
    for i in range(0, 12):
        ax = fig.add_subplot(1, 12, i+1)
        ax.imshow(x_batch[i])
    fig.suptitle('Augmented Images', fontsize=20)
    plt.show()
    break;
# -
# ### 7. Define the Model Architecture
# +
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
# My theory behind adding more steps here is that the additional images will allow us to ask questions
# that look like "where are the boundaries of the image" and then "how big is the image" and then "which angle"
# and all of that initial input would eventually flow into actual item features like if it has wheels. I thought
# more steps in the model would help, which proved to be true.
# Architecture: 6 conv layers (16..512 filters, doubling), each followed by
# 2x2 max-pooling except the first; dropout + dense head with softmax over
# the 10 CIFAR classes.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
                 input_shape=(32, 32, 3)))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu',
                 input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=128, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=256, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=512, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

# I changed the optimizer, below to Adam as well. My final result was .73
model.summary()
# -

# ### 8. Compile the Model

# compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# ### 9. Train the Model
# +
from keras.callbacks import ModelCheckpoint

batch_size = 32
epochs = 10

# train the model; checkpoint keeps only the weights with the best
# validation score
checkpointer = ModelCheckpoint(filepath='aug_model.weights.best.hdf5', verbose=1,
                               save_best_only=True)

# If we are feeding images based on generated images, we need to use fit_generator
# In place of the raw training data, we use this flow data to train - which is enhanced by the augmenting work
# NOTE(review): fit_generator is deprecated in modern Keras (model.fit accepts
# generators) — kept as-is since this notebook targets the old API
model.fit_generator(datagen_train.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    epochs=epochs, verbose=2, callbacks=[checkpointer],
                    validation_data=(x_valid, y_valid),
                    validation_steps=x_valid.shape[0] // batch_size)
# -

# ### 10. Load the Model with the Best Validation Accuracy

# load the weights that yielded the best validation accuracy
model.load_weights('aug_model.weights.best.hdf5')

# ### 11. Calculate Classification Accuracy on Test Set

# evaluate and print test accuracy (score = [loss, accuracy])
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
| cifar10-augmentation/cifar10_augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import skimage
import psutil
import os
import shutil
# -
# Strip spaces from the patient-folder names in both training classes so
# later path handling does not need to cope with embedded blanks.
# (The two original copy-pasted loops are collapsed into one; removing
# single spaces via replace() is equivalent to "".join(s.split(" ")).)
for pathname in ("Training/Normal/", "Training/Damaged/"):
    for folder in os.listdir(pathname):
        os.rename(pathname + folder, pathname + folder.replace(" ", ""))
# +
# containers for the Normal-class radiographs and their annotation masks,
# all resized to a common 380x194 greyscale raster
ap_normal = []
lat_normal = []
ap_pedicle_normal = []
ap_spinous_normal = []
ap_vertebra_normal = []
lat_ant_line_normal = []
lat_disk_height_normal = []
lat_post_line_normal = []
lat_spinous_normal = []
lat_vertebra_normal = []
pathname = "Training/Normal/"
i = 0
j = []  # folders that failed to load (missing/corrupt files)
for folder in os.listdir("Training/Normal"):
    try:
        # two radiographs (AP / lateral) plus eight annotation masks per patient;
        # all are appended only if *every* file loads, so the lists stay aligned
        ap = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/AP.jpg", as_gray=True), (380, 194), anti_aliasing=True)
        lat = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/LAT.jpg", as_gray=True), (380, 194), anti_aliasing=True)
        ap_pedicle = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Pedicle.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_spinous = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Spinous_Process.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_vertebra = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Vertebra.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_ant_line = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Anterior_Vertebral_Line.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_disk_height = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Disk_Height.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_post_line = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Posterior_Vertebral_Line.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_spinous = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Spinous_Process.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_vertebra = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Vertebra.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_normal.append(ap)
        lat_normal.append(lat)
        ap_pedicle_normal.append(ap_pedicle)
        ap_spinous_normal.append(ap_spinous)
        ap_vertebra_normal.append(ap_vertebra)
        lat_ant_line_normal.append(lat_ant_line)
        lat_disk_height_normal.append(lat_disk_height)
        lat_post_line_normal.append(lat_post_line)
        lat_spinous_normal.append(lat_spinous)
        lat_vertebra_normal.append(lat_vertebra)
    except Exception as e:
        print("Some problem in {}".format(folder))
        j.append(folder)
    # progress + resource report every 30 folders
    if i % 30 == 0:
        print("{} done out of {}".format(i, len(os.listdir("Training/Normal"))))
        print("CPU Usage {}".format(psutil.cpu_percent()))
        print("Memory Usage {}".format(psutil.virtual_memory()[2]))
    i+=1
print("Done")
# +
# containers for the Damaged-class radiographs and their annotation masks,
# resized to the same 380x194 greyscale raster as the Normal class
ap_damaged = []
lat_damaged = []
ap_pedicle_damaged = []
ap_spinous_damaged = []
ap_vertebra_damaged = []
lat_ant_line_damaged = []
lat_disk_height_damaged = []
lat_post_line_damaged = []
lat_spinous_damaged = []
lat_vertebra_damaged = []
pathname = "Training/Damaged/"
i = 0
jd = []  # folders that failed to load; these are *deleted* below
for folder in os.listdir("Training/Damaged"):
    try:
        # two radiographs (AP / lateral) plus eight annotation masks per patient;
        # appended only if every file loads, so the lists stay aligned
        ap = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/AP.jpg", as_gray=True), (380, 194), anti_aliasing=True)
        lat = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/LAT.jpg", as_gray=True), (380, 194), anti_aliasing=True)
        ap_pedicle = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Pedicle.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_spinous = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Spinous_Process.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_vertebra = skimage.transform.resize(skimage.io.imread(pathname + folder + "/AP/Ap_Vertebra.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_ant_line = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Anterior_Vertebral_Line.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_disk_height = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Disk_Height.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_post_line = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Posterior_Vertebral_Line.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_spinous = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Spinous_Process.png", as_gray=True), (380, 194), anti_aliasing=True)
        lat_vertebra = skimage.transform.resize(skimage.io.imread(pathname + folder + "/LAT/Lat_Vertebra.png", as_gray=True), (380, 194), anti_aliasing=True)
        ap_damaged.append(ap)
        lat_damaged.append(lat)
        ap_pedicle_damaged.append(ap_pedicle)
        ap_spinous_damaged.append(ap_spinous)
        ap_vertebra_damaged.append(ap_vertebra)
        lat_ant_line_damaged.append(lat_ant_line)
        lat_disk_height_damaged.append(lat_disk_height)
        lat_post_line_damaged.append(lat_post_line)
        lat_spinous_damaged.append(lat_spinous)
        lat_vertebra_damaged.append(lat_vertebra)
    except Exception as e:
        print("Some problem in {}".format(folder))
        jd.append(folder)
        # WARNING: destructive — incomplete patient folders are removed from disk
        # (the Normal loop above only records them)
        shutil.rmtree(pathname + folder)
    if i % 30 == 0:
        # fixed: the original reported len(os.listdir("Training/Normal")) here,
        # i.e. the size of the wrong class directory
        print("{} done out of {}".format(i, len(os.listdir("Training/Damaged"))))
        print("CPU Usage {}".format(psutil.cpu_percent()))
        print("Memory Usage {}".format(psutil.virtual_memory()[2]))
    i+=1
print("Done")
# -
# stack the Normal-class lists into (N, 380, 194) arrays
ap_normal = np.array(ap_normal)
lat_normal = np.array(lat_normal)
ap_pedicle_normal = np.array(ap_pedicle_normal)
ap_spinous_normal = np.array(ap_spinous_normal)
ap_vertebra_normal = np.array(ap_vertebra_normal)
lat_ant_line_normal = np.array(lat_ant_line_normal)
lat_disk_height_normal = np.array(lat_disk_height_normal)
lat_post_line_normal = np.array(lat_post_line_normal)
lat_spinous_normal = np.array(lat_spinous_normal)
lat_vertebra_normal = np.array(lat_vertebra_normal)

# binarise the annotation masks: resizing with anti-aliasing smeared mask
# edges into fractional grey values, so any pixel above a small threshold
# is forced to 1 (pixels <= 0.01 keep their near-zero values and are
# truncated to 0 by the uint8 cast further below)
ap_pedicle_normal[ap_pedicle_normal > 0.01] = 1.
ap_spinous_normal[ap_spinous_normal > 0.01] = 1.
ap_vertebra_normal[ap_vertebra_normal > 0.01] = 1.
lat_ant_line_normal[lat_ant_line_normal > 0.01] = 1.
lat_disk_height_normal[lat_disk_height_normal > 0.01] = 1.
lat_post_line_normal[lat_post_line_normal > 0.01] = 1.
lat_spinous_normal[lat_spinous_normal > 0.01] = 1.
lat_vertebra_normal[lat_vertebra_normal > 0.01] = 1.

# same stacking and binarisation for the Damaged class
ap_damaged = np.array(ap_damaged)
lat_damaged = np.array(lat_damaged)
ap_pedicle_damaged = np.array(ap_pedicle_damaged)
ap_spinous_damaged = np.array(ap_spinous_damaged)
ap_vertebra_damaged = np.array(ap_vertebra_damaged)
lat_ant_line_damaged = np.array(lat_ant_line_damaged)
lat_disk_height_damaged = np.array(lat_disk_height_damaged)
lat_post_line_damaged = np.array(lat_post_line_damaged)
lat_spinous_damaged = np.array(lat_spinous_damaged)
lat_vertebra_damaged = np.array(lat_vertebra_damaged)
ap_pedicle_damaged[ap_pedicle_damaged > 0.01] = 1.
ap_spinous_damaged[ap_spinous_damaged > 0.01] = 1.
ap_vertebra_damaged[ap_vertebra_damaged > 0.01] = 1.
lat_ant_line_damaged[lat_ant_line_damaged > 0.01] = 1.
lat_disk_height_damaged[lat_disk_height_damaged > 0.01] = 1.
lat_post_line_damaged[lat_post_line_damaged > 0.01] = 1.
lat_spinous_damaged[lat_spinous_damaged > 0.01] = 1.
lat_vertebra_damaged[lat_vertebra_damaged > 0.01] = 1.
# +
# Concatenate Normal first, Damaged second (order matters for the labels
# below). Radiographs are scaled back to [0,255]; binarised masks become
# uint8 0/1 arrays (the truncating cast zeroes sub-threshold residue).
ap = np.concatenate((ap_normal * 255, ap_damaged * 255), axis = 0).astype(np.uint8)
lat = np.concatenate((lat_normal * 255, lat_damaged * 255), axis = 0).astype(np.uint8)
ap_pedicle = np.concatenate((ap_pedicle_normal, ap_pedicle_damaged), axis = 0).astype(np.uint8)
ap_spinous = np.concatenate((ap_spinous_normal, ap_spinous_damaged), axis = 0).astype(np.uint8)
ap_vertebra = np.concatenate((ap_vertebra_normal, ap_vertebra_damaged), axis = 0).astype(np.uint8)
lat_ant_line = np.concatenate((lat_ant_line_normal, lat_ant_line_damaged), axis = 0).astype(np.uint8)
lat_disk_height = np.concatenate((lat_disk_height_normal, lat_disk_height_damaged), axis = 0).astype(np.uint8)
lat_post_line = np.concatenate((lat_post_line_normal, lat_post_line_damaged), axis = 0).astype(np.uint8)
lat_spinous = np.concatenate((lat_spinous_normal, lat_spinous_damaged), axis = 0).astype(np.uint8)
lat_vertebra = np.concatenate((lat_vertebra_normal, lat_vertebra_damaged), axis = 0).astype(np.uint8)
# -

# sanity-check: all arrays must share the same first dimension
print(ap.shape)
print(lat.shape)
print(ap_pedicle.shape)
print(ap_spinous.shape)
print(ap_vertebra.shape)
print(lat_ant_line.shape)
print(lat_disk_height.shape)
print(lat_post_line.shape)
print(lat_spinous.shape)
print(lat_vertebra.shape)

# labels: 0 = normal (first len(ap_normal) rows), 1 = damaged
labels = np.zeros((len(ap), 1))
labels[len(ap_normal):] = 1
labels = labels.astype(np.uint8)
print(labels.shape)

# persist everything as one compressed-free npz archive
np.savez("dataset.npz", ap = ap, lat = lat, ap_pedicle = ap_pedicle, ap_spinous = ap_spinous, ap_vertebra = ap_vertebra, lat_ant_line = lat_ant_line, lat_disk_height = lat_disk_height, lat_post_line = lat_post_line, lat_spinous = lat_spinous, lat_vertebra = lat_vertebra, labels = labels)
| assignment_1/data_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="CseKHKAsQ5bD"
# # Homework 1
# + [markdown] id="9fWNWWCcRC6D"
# ### 1. Taking inspiration from the notebook `01-intro-to-pt.ipynb`, build a class for the Multilayer Perceptron (MLP) whose scheme is drawn in the last figure of the notebook. As written there, no layer should have bias units and the activation for each hidden layer should be the Rectified Linear Unit (ReLU) function, also called ramp function. The activation leading to the output layer, instead, should be the softmax function, which prof. Ansuini explained during the last lecture. You can find some notions on it also on the notebook.
#
# + id="2e63-5SBUeNn"
import torch
import numpy as np
# + id="XSHjiZH8RyPy"
class MLP(torch.nn.Module):
    """Five-layer fully connected network (5 -> 11 -> 16 -> 13 -> 8 -> 4).

    As required by the assignment, no layer has bias units; hidden
    activations are ReLU and the output is a softmax over the 4 units.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(in_features=5, out_features=11, bias = False)
        self.layer2 = torch.nn.Linear(in_features=11, out_features=16, bias = False)
        self.layer3 = torch.nn.Linear(in_features=16, out_features=13, bias = False)
        self.layer4 = torch.nn.Linear(in_features=13, out_features=8, bias = False)
        self.layer5 = torch.nn.Linear(in_features=8, out_features=4, bias = False)

    def forward(self, X):
        """Forward pass: returns class probabilities (last dim sums to 1)."""
        out = self.layer1(X)
        out = torch.nn.functional.relu(out)
        out = self.layer2(out)
        out = torch.nn.functional.relu(out)
        out = self.layer3(out)
        out = torch.nn.functional.relu(out)
        out = self.layer4(out)
        out = torch.nn.functional.relu(out)
        out = self.layer5(out)
        # pass dim explicitly: calling softmax without `dim` is deprecated in
        # PyTorch and emits a warning; dim=-1 normalises over the 4 outputs
        # (identical to the implicit choice for 1-D and 2-D inputs)
        out = torch.nn.functional.softmax(out, dim=-1)
        return out
# + [markdown] id="LW9SnWnLRGQF"
#
# ### 2. After having defined the class, create an instance of it and print a summary using a method of your choice.
#
# + colab={"base_uri": "https://localhost:8080/"} id="XXPEHyNkRyy3" outputId="19189598-4e2b-479d-d94d-00730fa185ef"
import sys
# !{sys.executable} -m pip install torch-summary #how to use pip or conda in jupyter notebooks
from torchsummary import summary
# + colab={"base_uri": "https://localhost:8080/"} id="16IC7-QRVJPm" outputId="043795b7-c2a9-467a-8fa4-5620adfdf26e"
# Seed torch's RNG so the randomly initialised weights are reproducible.
# The original seeded the stdlib `random` module, which does not affect
# torch's weight initialisation at all.
torch.manual_seed(123456)

model = MLP()
summary(model)
# + [markdown] id="GcIjHreNRGSL"
#
# ### 3. Provide detailed calculations (layer-by-layer) on the exact number of parameters in the network.
#
#
#
#
# + [markdown] id="aXxHLoiPXhOC"
# A set of parentheses for each layer, without bias units:
# $$(5 \times 11)+(11 \times 16) + (16 \times 13) + (13 \times 8) + (8 \times 4) = 55 + 176 + 208 + 104 + 32 = 575$$
# + [markdown] id="Z3u2e36rRjap"
# ### Provide the same calculation in the case that the bias units are present in all layers (except input).
# + [markdown] id="warMPeFPRzXY"
# A set of parentheses for each layer, in each the additive term represents the biases.
#
# $$(5 \times 11+11)+(11 \times 16+16) + (16 \times 13 +13) + (13 \times 8+8) + (8 \times 4 +4) = 66 + 192 + 221 + 112 + 36 = 627$$
# + [markdown] id="ERMwc5gbRl30"
# ### 4. For each layer within the MLP, calculate the L2 norm and L1 norm of its parameters.
# + colab={"base_uri": "https://localhost:8080/"} id="hwB5FkUlR0iK" outputId="5c90a924-a7c0-4cad-d7f6-850faba6af4d"
# L1: sum of the absolute weights
# L2: square root of the sum of the squared weights
# (the original comment described L2 as the plain sum of squares)
for i, param in enumerate(model.parameters()):
    print("LAYER", i)
    print("Norm L1:", torch.norm(param, 1).item())
    print("Norm L2:", torch.norm(param, 2).item())
    print("")
| Homework/HW1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.getcwd()
# nuclio: ignore
import nuclio
# %nuclio config kind = "job"
# %nuclio config spec.image = "mlrun/ml-base"
# +
import os
import json
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import pyarrow as pa
from mlrun.execution import MLClientCtx
from mlrun.datastore import DataItem
from typing import List, Optional
# -
def _chunk_readwrite(
    archive_url,
    dest_path,
    chunksize,
    header,
    encoding,
    dtype,
    dataset=None,
    partition_cols=None,
):
    """Stream-read a CSV archive and write it out as parquet.

    pandas reads each chunk; pyarrow writes it.

    notes
    -----
    * dest_path can be either a file.parquet, or in the case of partitioned
      parquet it will be only the destination folder of the partition files

    :param archive_url:    anything ``pd.read_csv`` accepts (path/URL, possibly compressed)
    :param dest_path:      target parquet file, or the dataset folder when partitioning
    :param chunksize:      number of rows to read per iteration
    :param header:         column names passed to ``pd.read_csv(names=...)``
    :param encoding:       file encoding for the CSV reader
    :param dtype:          dtype mapping for the CSV reader
    :param dataset:        (None) truthy => write a partitioned dataset under dest_path
    :param partition_cols: (None) partitioning columns for the dataset case
    :return:               the pyarrow schema of the first chunk when writing a
                           dataset, otherwise None

    Fixes over the original: the ``header`` parameter was immediately
    clobbered by ``header = []`` (so ``names=header`` always got an empty
    list), and ``dataset``, ``copy`` and ``partition_cols`` were undefined
    names that raised NameError at runtime — they are now explicit,
    defaulted parameters.
    """
    pqwriter = None
    schema = None
    for i, df in enumerate(pd.read_csv(archive_url, chunksize=chunksize,
                                       names=header, encoding=encoding,
                                       dtype=dtype)):
        table = pa.Table.from_pandas(df)
        if i == 0:
            if dataset:
                # remember the schema of the first chunk for the caller
                schema = table.schema
            else:
                pqwriter = pq.ParquetWriter(dest_path, table.schema)
        if dataset:
            pq.write_to_dataset(table, root_path=dest_path,
                                partition_cols=partition_cols)
        else:
            pqwriter.write_table(table)
    if pqwriter:
        pqwriter.close()
    return schema
def arc_to_parquet(
    context: MLClientCtx,
    archive_url: DataItem,
    header: List[str] = [None],
    chunksize: int = 0,
    dtype=None,
    encoding: str = "latin-1",
    key: str = "data",
    dataset: Optional[str] = None,
    part_cols = [],
    file_ext: str = "parquet",
    index: bool= False,
    refresh_data: bool = False,
    stats: bool = False
) -> None:
    """Open a file/object archive and save as a parquet file or dataset

    Notes
    -----
    * this function is typically for large files, please be sure to check all settings
    * partitioning requires precise specification of column types.
    * the archive_url can be any file readable by pandas read_csv, which includes tar files
    * if the `dataset` parameter is not None, then a partitioned dataset will be created
      instead of a single file in the folder `dataset`
    * if a key exists already then it will not be re-acquired unless the `refresh_data` param
      is set to `True`. This is in case the original file is corrupt, or a refresh is
      required.

    :param context:      the function context
    :param archive_url:  MLRun data input (DataItem object)
    :param header:       ([None]) column names handed to the CSV reader on the chunked path
    :param chunksize:    (0) when > 0, row size (chunk) to retrieve
                         per iteration
    :param dtype:        destination data type of specified columns
    :param encoding:     ("latin-1") file encoding
    :param key:          key in artifact store (when log_data=True)
    :param dataset:      (None) if not None then "target_path/dataset"
                         is folder for partitioned files
                         (fixed: the original default was the *string* "None",
                         which silently created a dataset folder named "None")
    :param part_cols:    ([]) list of partitioning columns
    :param file_ext:     (parquet) csv/parquet file extension
    :param index:        (False) pandas save index option
    :param refresh_data: (False) overwrite existing data at that location
    :param stats:        (False) calculate table stats when logging artifact
    """
    base_path = context.artifact_path
    os.makedirs(base_path, exist_ok=True)

    archive_url = archive_url.local()

    if dataset is not None:
        dest_path = os.path.join(base_path, dataset)
        exists = os.path.isdir(dest_path)
    else:
        dest_path = os.path.join(base_path, key+f".{file_ext}")
        exists = os.path.isfile(dest_path)

    # honour refresh_data (documented but previously ignored): re-acquire
    # even when the destination already exists
    if not exists or refresh_data:
        context.logger.info("destination file does not exist, downloading")
        if chunksize > 0:
            # pass all six arguments in the helper's declared order — the
            # original call omitted `header`, shifting encoding into the
            # header slot and dtype into the encoding slot
            header = _chunk_readwrite(archive_url, dest_path, chunksize,
                                      header, encoding, dtype)
            context.log_dataset(key=key, stats=stats, format='parquet',
                                target_path=dest_path)
        else:
            # NOTE(review): header/encoding/dtype are not applied on this
            # non-chunked path — confirm whether they should be
            df = pd.read_csv(archive_url)
            context.log_dataset(key, df=df, format=file_ext, index=index)
    else:
        context.logger.info("destination file already exists, nothing done")
# +
# nuclio: end-code
# -
# ### mlconfig
# +
from mlrun import mlconf
import os

# fall back to sensible defaults when not configured in the environment
mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'
mlconf.artifact_path = mlconf.artifact_path or f'{os.environ["HOME"]}/artifacts'
# -
# ### save
# +
from mlrun import code_to_function

# create job function object from notebook code
fn = code_to_function('arc_to_parquet')

# add metadata (for templates and reuse)
fn.spec.default_handler = 'arc_to_parquet'
fn.spec.description = "retrieve remote archive, open and save as parquet"
fn.metadata.categories = ['data-movement', 'utils']
fn.metadata.labels = {"author": "yjb"}

# write the function spec to function.yaml for later import/deployment
fn.export('function.yaml')
# -
# ## tests
# choose a storage mount depending on the platform the notebook runs on
if "V3IO_HOME" in os.environ:
    from mlrun import mount_v3io
    fn.apply(mount_v3io())
else:
    # if you set up mlrun using the instructions at https://github.com/mlrun/mlrun/blob/master/hack/local/README.md
    from mlrun.platforms import mount_pvc
    fn.apply(mount_pvc('nfsvol', 'nfsvol', '/home/joyan/data'))

# +
from mlrun import NewTask

task_params = {"name": "tasks archive to parquet",
               "params":{"key": "higgs-sample"}}
# -

DATA_URL = "https://raw.githubusercontent.com/yjb-ds/testdata/master/data/higgs-sample.csv.gz"
# original large file "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"

# +
from mlrun import run_local

# run the handler in-process first (no cluster round-trip)
run = run_local(NewTask(**task_params),
                handler=arc_to_parquet,
                inputs={"archive_url" : DATA_URL})
# -

# #### the following will run quickly if your artifact path hasn't changed, the large file will be detected and not downloaded a second time:

from mlrun import NewTask
# same task submitted through the exported job function
run = fn.run(NewTask(**task_params),
             inputs={"archive_url" : DATA_URL},
             artifact_path=mlconf.artifact_path)
| arc_to_parquet/arc_to_parquet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DonnaVakalis/forecasting-MRT-gap/blob/main/model/model_BiLSTM_MSLE.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="PotS_WG4C0bC"
# # PREAMBLE (ABOUT THIS NOTEBOOK)
# + [markdown] id="_gBDZLPtC7VV"
# This notebook applies data prediction with Keras LSTM model in Python, on time series (hourly) weather data to predict daily thermal comfort exceedances indoors (e.g., when it gets too hot, and by how much). While this notebook contains only the first steps, ultimately it can help determine how much cooling is needed (in terms of mean radiant temperature or MRT), and ultimately help decide on which cooling technologies are feasible and most carbon efficient.
# + [markdown] id="7TblImSlCdcM"
# # SETUP AND IMPORTS
# + colab={"base_uri": "https://localhost:8080/"} id="h4uWABiYwGMt" outputId="ba57bef9-b2d1-4222-9562-8a53dfa9945d"
# first install requests, for importing libraries from github
# !pip install requests
# + [markdown] id="0fHyFFZ2svZc"
# Import pre-packaged libraries
# + id="GqrUpMCWCcCi"
import requests
import os
import sys
from google.colab import drive
import pickle
import glob
import numpy as np
import pandas as pd
from tensorflow.keras import layers
from tensorflow import keras
# fixed: the original line ended with a stray backslash, which continued the
# statement onto the next line ("import tensorflow as tf import torch") and
# made the cell a SyntaxError
import tensorflow as tf
import torch # for custom loss function
from keras.models import Sequential
from keras.layers import LSTM, Dense, Bidirectional
from numpy import array
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# + [markdown] id="R8N14VdVtYgr"
# Mount drive and set paths
# + colab={"base_uri": "https://localhost:8080/"} id="ea4B0rT7tcvn" outputId="01745b48-9241-4180-a32a-c5b33d987470"
# Point to the folder for this project (forecasting-MRT-gap/data);
# mounting prompts for Google-Drive authorisation in Colab
drive.mount('/content/gdrive')
DATA_PATH = "/content/gdrive/My Drive/Colab Notebooks/forecasting-MRT-gap/data"
FIGURES_PATH = "/content/gdrive/My Drive/Colab Notebooks/forecasting-MRT-gap/figures"
#LOSS_PATH = "/content/gdrive/My Drive/Colab Notebooks/forecasting-MRT-gap/loss"
# + [markdown] id="AAFXnOugCxQy"
# # LOAD + FILTER THE DATA
#
# + [markdown] id="VoRTvWQ-JxmY"
# The training and testing datasets were pre-processed in a separate notebook saved as separate pickles for 2015,2016 and 2017 (and separated by training and test and output variables based on a prior project) in this public repository:
#
# https://github.com/eta-lab/forecasting-comfort
#
# Details about methods from this project can be found here: <NAME>., <NAME>., <NAME>, (2021). Forecasting the impact of climate change on thermal comfort using a weighted ensemble of supervised learning models. Building & Environment.
# + [markdown] id="CJS4khlBBodr"
# ## GET OUTDOOR AIR TEMPERATURES FOR SUMMER 2015, 2016 and 2017
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="doivvZSBJv8T" outputId="71e77771-b54d-4cd8-a348-ce5585b958e0"
def _inspect_year(year):
    """Load one year's train/test outdoor-weather pickles, report whether the
    two sets overlap, plot both temperature series, and return the pieces.

    The original notebook repeated this identical cell for 2015, 2016 and
    2017; the copy-paste is factored into this helper.

    Returns (dat_train, dat_test, df_diff, df_train, df_test), where df_diff
    holds the rows unique to one of the two sets (empty => identical sets).
    """
    dat_train = pickle.load(open(DATA_PATH + '/trainX_{}.pkl'.format(year), 'rb'))
    dat_test = pickle.load(open(DATA_PATH + '/testX_{}.pkl'.format(year), 'rb'))
    # rows present in only one of the two frames
    df_diff = pd.concat([dat_train, dat_test]).drop_duplicates(keep=False)
    print(df_diff.shape, dat_train.shape, dat_test.shape)
    df_train = dat_train[['temp']]
    df_test = dat_test[['temp']]
    plt.plot(df_train.index, df_train.temp, df_test.index, df_test.temp)
    plt.show()
    return dat_train, dat_test, df_diff, df_train, df_test

# 2015: check if train and test set are the same, and visualize temp values
dat_train, dat_test, df_diff, df_train, df_test = _inspect_year(2015)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="4AktS9jKsSzD" outputId="8ee2d401-f323-436c-c7c9-3fc4c5328750"
# 2016
dat_train, dat_test, df_diff, df_train, df_test = _inspect_year(2016)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="6oSRZIdgpvkw" outputId="6267a552-3e23-4f32-a4ef-2681f56eebc7"
# 2017
dat_train, dat_test, df_diff, df_train, df_test = _inspect_year(2017)
# + [markdown] id="9Gi4O9iC5fmn"
# ## PRE-PROCESS THE X (INPUT VARIABLE) DATA
# + [markdown] id="Xto7qFE1Xy9X"
# For each year 2015,2016,2017, examine temperature data (outdoor) and prepare to remerge training/test sets (for dynamically selecting training/testing sets from whole)
# + [markdown] id="Jg-0q2wswNkq"
# ### Restrict to time frames we want
#
# We want temperature data from all three summers, minus the smoky periods (smoke-affected days = when recorded incidences of regional forest fires)
#
# These are respectively the first two weeks of July 2015 and the first two weeks of August 2017
# + id="Ijkgnpjn61zq"
# 2015
# get training set from another year (so complete for 2015)
# NOTE(review): this loads trainX_2016.pkl and then filters to 2015 dates —
# presumably each year's "train" pickle contains the other years' full data;
# confirm against the pre-processing notebook.
df_1 = pickle.load(open(DATA_PATH +'/trainX_2016.pkl','rb'))
# keep only summer dates, e.g., June, July and August:
remove_before = pd.to_datetime('2015-06-01')
remove_after = pd.to_datetime('2015-08-31')
df_2 = df_1.query('index >= @remove_before and index <= @remove_after')
# drop smoke weeks (first two weeks July 2015)
start_smoke = pd.to_datetime('2015-07-01')
end_smoke = pd.to_datetime('2015-07-14')
# keep everything strictly outside the smoke window
df_3 = df_2.query('index < @start_smoke or index > @end_smoke')
# save 2015 outdoor temps (x variable)
df_2015_X = df_3[['temp']]
# + id="z5v_-qcrYCqn"
# 2016
# get training set from another year (so complete for 2016)
# NOTE(review): loads trainX_2017.pkl and filters to 2016 dates — assumes the
# pickle spans multiple years; confirm against the pre-processing notebook.
df_1 = pickle.load(open(DATA_PATH +'/trainX_2017.pkl','rb'))
# keep only summer dates, e.g., June, July and August:
remove_before = pd.to_datetime('2016-06-01')
remove_after = pd.to_datetime('2016-08-31')
df_2 = df_1.query('index >= @remove_before and index <= @remove_after')
# save 2016 outdoor temps (x variable); 2016 had no smoke weeks to drop
df_2016_X = df_2[['temp']]
# + id="bAfyy04v_R7X"
# 2017
# get training set from another year (so complete for 2017)
df_1 = pickle.load(open(DATA_PATH +'/trainX_2015.pkl','rb'))
# keep only summer dates, e.g., June, July and August:
remove_before = pd.to_datetime('2017-06-01')
remove_after = pd.to_datetime('2017-08-31')
df_2 = df_1.query('index >= @remove_before and index <= @remove_after')
# drop smoke weeks (first two weeks of August 2017)
# BUG FIX: end_smoke was '2017-07-14', i.e. *before* start_smoke, so the
# condition "index < start or index > end" was true for every row and the
# smoke filter removed nothing. Per the markdown above, the smoke-affected
# period is the first two weeks of August 2017.
start_smoke = pd.to_datetime('2017-08-01')
end_smoke = pd.to_datetime('2017-08-14')
df_3 = df_2.query('index < @start_smoke or index > @end_smoke')
# save 2017 outdoor temps (x variable)
df_2017_X = df_3[['temp']]
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="m0E6kQnz_pN7" outputId="90464eea-0caf-43b8-d3d4-b8a7b74e957b"
# Plot every summer's cleaned outdoor temperatures on one axis, then stack
# the three yearly frames into a single dataset.
ls_X = [df_2015_X, df_2016_X, df_2017_X]
for yearly in ls_X:
    plt.plot(yearly.index, yearly.temp)
plt.show()
# single dataset with all days
df_all_X = pd.concat(ls_X)
# + [markdown] id="Oaovum2mB39U"
# ## GET INDOOR AIR TEMPERATURES (OUTPUT VARIABLE) FOR SAME TIME SPANS
# + id="0wMf2PSmB9by"
#Get raw data of indoor air temps (what we want to predict)
df1 = pickle.load(open(DATA_PATH +'/trainY_2015.pkl','rb'))
df2 = pickle.load(open(DATA_PATH +'/trainY_2016.pkl','rb'))
df3 = pickle.load(open(DATA_PATH +'/trainY_2017.pkl','rb'))
# Put in all in one place
ls_Y = [df1, df2, df3]
df_Y = pd.concat(ls_Y)
# Drop duplicated date-times (the yearly pickles can overlap)
df_Y = df_Y[~df_Y.index.duplicated(keep='first')]
# Keep only the date-times that survived the X-side filtering above, so the
# input and output series are aligned row-for-row.
df_all_Y = df_Y[df_Y.index.isin(df_all_X.index)]
# + [markdown] id="0ICisOYGKIzA"
# ## Plot X and Y variables
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="M53g3iJhJczi" outputId="5d1aa1fb-4fdd-4340-8b6f-fe83965137fb"
# plot Outdoor (X values) and Indoor (to be predicted) temperatures together
plt.plot(df_all_X.index, df_all_X.temp, df_all_Y.index, df_all_Y)
plt.show() # Note LSTM assumes the book ends of a gap are adjacent, so it's good to see that there are no sharp changes over the gaps
# + [markdown] id="nSl76bU4f0l1"
# ## Add feature for time of day and transform variables
# + [markdown] id="GcgO__MkZWFO"
# This step adds a variable to help the model understand the cyclicity of the 24 hours cycle
# It borrows from the tutorial provided here: https://towardsdatascience.com/single-and-multi-step-temperature-time-series-forecasting-for-vilnius-using-lstm-deep-learning-b9719a0009de
#
# We want the machine to know that hours 23 and 0 are closer to each other than hours 0 and 4. We know that the period of the cycle is 24 hours. We can use the cos(x) and sin(x) functions. The x in the functions is the hour of the day.
# + id="seFBtHHEY8c3"
# Extract the hour of day (0-23) from the datetime index
df_all_X["hour"] = df_all_X.index.hour
# Creating the cyclical daily feature: encode the hour on the unit circle so
# the model sees 23:00 and 00:00 as neighbours. Vectorized with numpy instead
# of the original per-element list comprehensions (same values, one C-level
# pass over the column).
df_all_X["day_cos"] = np.cos(df_all_X["hour"] * (2 * np.pi / 24))
df_all_X["day_sin"] = np.sin(df_all_X["hour"] * (2 * np.pi / 24))
# + [markdown] id="vQia6BLyVI5n"
# # BiLSTM MODEL with Logarithmic Mean Squared Error
#
# The idea of Bidirectional LSTMs (BiSTM) is to use input information from both the past and future of a specific time step.
#
# Some of the steps for making and fitting this model follow the tutorial found here:
# https://towardsdatascience.com/predictive-analysis-rnn-lstm-and-gru-to-predict-water-consumption-
#
# + [markdown] id="DX64sYj2gCiH"
# ### Pre-processing to setup model: Split training/test sets, scale the data, shape the 3D matrix for feeding into the model
#
# Note: for this type of time-step depending analysis, make sure training/test split shuffle is set to False (i.e., keep continguous chunks for training and testing)
# + colab={"base_uri": "https://localhost:8080/"} id="eRcvJAhyVNKH" outputId="888e38f7-6cfa-473c-b0ba-a6c17048c748"
# Convert the feature and target frames to plain numpy arrays.
x = df_all_X.copy().to_numpy()
y = df_all_Y.copy().to_numpy()
# Reshape y into an explicit (n_samples, 1) column vector: the scaler below
# requires 2-D input.
y = y.reshape(-1, 1)
# split the data into the train and test parts.
# NB split must not be a random selection (shuffle=False): the series order
# matters, so keep contiguous chunks for training and testing.
X_train, X_test, Y_train, Y_test = train_test_split(x, y, shuffle = False, test_size= 0.15)
train_dim = (X_train.shape, Y_train.shape)
test_dim = (X_test.shape, Y_test.shape)
# check
print("train_dim Xs and Y:", train_dim)
print("test_dim Xs and Y", test_dim)
#Scale and center the data:
#1) variables are better to be small values, probably in the range of 0-1.
#2) want the scale of the output variable in the range 0-1 to match the scale of the activation function (tanh) on the output layer
# Fit the scaler with training data (used MinMaxScaler from sklearn)
scaler_x = MinMaxScaler(feature_range = (0,1))
scaler_y = MinMaxScaler(feature_range = (0,1))
# Fit the scaler using available training data
input_scaler = scaler_x.fit(X_train)
output_scaler = scaler_y.fit(Y_train)
# Apply the scaler to training data
train_y_norm = output_scaler.transform(Y_train)
train_x_norm = input_scaler.transform(X_train)
# Apply the scaler to test data
# NB: the unseen test data is scaled with the scaler fitted on the training
# data only, to avoid leaking test-set statistics into the model.
test_y_norm = output_scaler.transform(Y_test)
test_x_norm = input_scaler.transform(X_test)
# + [markdown] id="oqZqxqfdoScR"
# Create a 3D Input Dataset
#
# This involves setting the 'time steps', which in this case will be the previous 48 hours
# + colab={"base_uri": "https://localhost:8080/"} id="c9eL9ZbkoUeu" outputId="c966baa4-abf3-4989-c08b-c76e99af9e70"
#Default window is the previous 24 hours (one day) of data
def create_dataset (X, y, time_steps = 24):
    """Slice a time series into overlapping windows for sequence models.

    Sample i is the window X[i : i+time_steps] paired with the target value
    y[i + time_steps] — the observation immediately after the window.

    Returns (Xs, ys) as numpy arrays of shapes
    (n - time_steps, time_steps, n_features) and (n - time_steps, ...).
    """
    n_windows = len(X) - time_steps
    windows = [X[start:start + time_steps, :] for start in range(n_windows)]
    targets = [y[start + time_steps] for start in range(n_windows)]
    return np.array(windows), np.array(targets)
TIME_STEPS = 48 # look-back window: each sample sees the previous 48 hours (two days) of weather
X_test, Y_test = create_dataset(test_x_norm, test_y_norm, TIME_STEPS)
X_train, Y_train = create_dataset(train_x_norm, train_y_norm, TIME_STEPS)
# sanity-check the resulting (samples, time steps, features) shapes
print('X_train.shape: ', X_train.shape)
print('Y_train.shape: ', Y_train.shape)
print('X_test.shape: ', X_test.shape)
print('Y_test.shape: ', Y_test.shape)
# + [markdown] id="1PTx0Gw-ryR6"
# ## Create the BiLSTM model with special loss functiom
#
# The 'Bidirectional' wrapper will create two copies of the hidden layer, one fit in the input sequences as-is and one on a reversed copy of the input sequence. By default, the output values from these LSTMs will be concatenated.
#
# That means that instead of the TimeDistributed layer receiving 48 timesteps of X HIDDEN_LAYER units, it will now receive 48 timesteps of 2*X HIDDEN_LAYER units (X units + X units) outputs.
#
#
# For the loss function, given the sharpness of our data, we want to relax the punishing effect of large differences in large predicted values. Therefore, instead of the default MSE, we will use Mean Squared Logarithmic Error loss, or MSLE: this calculates the natural logarithm of each of the predicted values, then calculate the mean squared error.
#
# + id="35TxUo7en1Bn"
# Create the BiLSTM model
def create_model_bilstm(units):
    """Build and compile a two-layer bidirectional LSTM regressor.

    `units` is the number of LSTM cells per direction in each layer (the
    Bidirectional wrapper doubles the effective output width). The network
    ends in a single linear unit and trains with MSLE loss — a softer penalty
    on large targets than plain MSE — while MSE is tracked as a metric.
    """
    net = Sequential()
    # First BiLSTM layer returns the full sequence so a second one can follow.
    net.add(Bidirectional(LSTM(units = units, return_sequences=True), input_shape=(X_train.shape[1], X_train.shape[2])))
    # Second BiLSTM layer collapses the sequence to one vector.
    net.add(Bidirectional(LSTM(units = units)))
    net.add(Dense(1))
    net.compile(loss='mean_squared_logarithmic_error', optimizer= 'adam', metrics=['mse']) #change of loss
    return net

# Choose the number of units (neurons) in hidden layers
HIDDEN_LAYERS = 64
model_bilstm = create_model_bilstm(HIDDEN_LAYERS)
# + [markdown] id="jIGCfV8otYOT"
# ## Fit the model
#
# + colab={"base_uri": "https://localhost:8080/", "height": 743} id="LAdxTA_3dAxZ" outputId="d1340f2a-097f-4130-a39f-3527c38a448a"
# 10 epoch and batch_size = 64
def fit_model(model):
    """Train `model` for up to 10 epochs (batch size 64) with early stopping
    on validation loss, and return the Keras History object."""
    early_stop = keras.callbacks.EarlyStopping(monitor = 'val_loss',
    patience = 10)
    # shuffle = False because the order of the data matters
    # use 20% of train data as validation data
    history = model.fit(X_train, Y_train, epochs = 10, validation_split = 0.2,
    batch_size = 64, shuffle = False, callbacks = [early_stop])
    return history
history= fit_model(model_bilstm)
# Visualize the training loss versus validation loss
plt.figure(figsize = (10, 6))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('Loss')
plt.xlabel('epoch')
plt.legend(['Train loss', 'Validation loss'], loc='upper right')
plt.savefig(FIGURES_PATH + '/BiLSTM_loss_plot with training and test sets continguous' +'.jpg', format='jpg', dpi=1000)
plt.show()
# + [markdown] id="nYuVPF94yUHC"
# Last step! # Very important to transform the target variable back to original data space :)
#
# + [markdown] id="vhUeRu4Uy4QA"
# ## Check Predictions of BiLSTM Model
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="hnZyEh7Oy8lt" outputId="0bf46427-48e6-4787-ba4a-4a54c82f8cab"
prediction = model_bilstm.predict(X_test)
# Map predictions and targets back from the scaler's 0-1 space to degrees C.
prediction = scaler_y.inverse_transform(prediction)
true_future = scaler_y.inverse_transform(np.array(Y_test))
plt.figure(figsize=(10, 6))
range_future = len(prediction)
plt.plot(np.arange(range_future), true_future, label='True Future')
plt.plot(np.arange(range_future), np.array(prediction),label='Prediction')
plt.title('True future vs prediction for BiLSTM model')
plt.legend(loc='upper left')
plt.xlabel('Date')
plt.ylabel('Hourly Indoor Air Temperature (degrees C)')
plt.savefig(FIGURES_PATH + '/Model Check on Test Set Keeping training test batches continguous' +'.jpg', format='jpg', dpi=1000)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="uImyY81S2Jfe" outputId="cb447f41-47db-4a1d-85a6-10409498faae"
# Calculate MAE and RMSE on the original (degrees C) scale.
# BUG FIX: errors were previously computed as `prediction - Y_test`, mixing
# the inverse-transformed predictions with the still-normalized (0-1) Y_test,
# which made the reported MAE/RMSE meaningless. Compare against `true_future`
# (Y_test mapped back to degrees C in the cell above) instead.
errors = prediction - true_future
mse = np.square(errors).mean()
rmse = np.sqrt(mse)
mae = np.abs(errors).mean()
print('Mean Absolute Error: {:.4f}'.format(mae))
print('Root Mean Square Error: {:.4f}'.format(rmse))
# + [markdown] id="qwrGhlEWvuYl"
# Conclusion: this approach as-is performs badly!
# + [markdown] id="1ZR4boOnYpPl"
# # IDEAS for IMPROVEMENT:
#
# BiLSTM model for sharpness of data: DILATE loss function
#
# In the literature this type of input data might be called "sharp", and the default scaling and loss functions lead to "too smooth" predictions...
#
# There are some special loss functions such as DIstortion Loss including shApe and TimE (DILATE [link text](https://papers.nips.cc/paper/2019/file/466accbac9a66b805ba50e42ad715740-Paper.pdf)) or soft-dynamic time warping (soft-DTW) [link text](http://proceedings.mlr.press/v70/cuturi17a/cuturi17a.pdf) which are specially designed for time series like this.
# + id="c3N8m6L2Ymes"
# + [markdown] id="6DPPLKmYgOba"
# # ARCHIVED
# + id="215cBSe61Pk3"
# soft_dtw: download the module source from the DILATE repo, write it to a
# local file, then import it as an ordinary module.
url = "https://raw.githubusercontent.com/vincent-leguen/DILATE/master/loss/soft_dtw.py"
r = requests.get(url)
with open('soft_dtw.py', 'w') as f:
    f.write(r.text)
import soft_dtw
# dilate_loss: same download trick (the import happens in a later cell)
url = "https://raw.githubusercontent.com/vincent-leguen/DILATE/master/loss/dilate_loss.py"
r = requests.get(url)
with open('dilate_loss.py', 'w') as f:
    f.write(r.text)
# + id="aG2QG1IbeCSj"
# # !cp drive/MyDrive/mylib.py
# # !cp "$inputFile" /destination/
# import mylib
# Build the expected file paths of the loss modules.
# NOTE(review): LOSS_PATH is commented out where it is defined near the top of
# this notebook — it must be uncommented for this archived cell to run.
InputFile = LOSS_PATH + '/dilate_loss' + '.py'
# !cp "InputFile"
InputFile2 = LOSS_PATH + '/soft_dtw' + '.py'
# !cp "InputFile2"
# BUG FIX: "import dilate_loss.py" does not import the file dilate_loss.py —
# it tries to import a submodule named `py` from a package `dilate_loss` and
# fails at runtime. Module imports must not include the file extension.
import dilate_loss
import soft_dtw
# + id="6VU9vPId56Sb"
# !git clone https://github.com/vincent-leguen/DILATE.git
# #%load dilate_loss.py
# %load path_soft_dtw.py
# #%load soft_dtw.py
# + id="kWcj15cmx5yc"
# Saved the files to drive (from repo here: https://github.com/vincent-leguen/DILATE.git)
# Because the files internally refer back to a path structure that is not preserved when uploading to google drive
#py_file_location = "/content/drive/My Drive"
#sys.path.append(os.path.abspath(py_file_location))
# BUG FIX: "import LOSS_PATH + dilate_loss" is a SyntaxError — expressions are
# not allowed in import statements. To import a module living in a directory,
# add that directory to sys.path first.
# NOTE(review): requires LOSS_PATH to be defined (it is commented out above).
sys.path.append(os.path.abspath(LOSS_PATH))
import dilate_loss
# ---- source notebook: model/model_BiLSTM_MSLE.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All the IPython Notebooks in **Clustering Algorithms** lecture series by **[Dr. <NAME>](https://www.linkedin.com/in/milaanparmar/)** are available @ **[GitHub](https://github.com/milaan9/Clustering_Algorithms)**
# </i></small></small>
# # Discrete Cosine Transform
# This is a little jupyter notebook that does a discrete cosine transform (DCT). DCT is a thing like the Fourier transform that's used in JPGs.
# + jupyter={"outputs_hidden": false}
# !pip install numpy
# !pip install pandas
# !pip install matplotlib
# !pip install scipy
# Imports and the like
# %pylab inline
import numpy as np
from PIL import Image
import matplotlib as mpl  # BUG FIX: `mpl` was used below but never imported — the %pylab magic above is commented out in script form (and does not define `mpl` either)
import matplotlib.pyplot as plt
# Graph settings. No more jet plz
mpl.rcParams['image.cmap'] = 'gray'
# + jupyter={"outputs_hidden": false}
# Load up the image, and normalise it
img_path = 'data/a.png'
# Open image. We also convert to grayscale ('L' mode) to make it simpler to deal with
img = np.asarray(Image.open(img_path).convert('L'))
# Normalise pixel values from the 0-255 byte range to 0-1 floats
img = img / 255
plt.imshow(img)
img
# + jupyter={"outputs_hidden": false}
# And a little function to save images
# Shout out to
# stackoverflow.com/questions/31544130/saving-an-imshow-like-image-while-preserving-resolution
def save_image(data, name, vmin=0, vmax=1):
    """Write `data` to file `name` as a grayscale image.

    Values are mapped from [vmin, vmax] through matplotlib's gray colormap;
    plt.imsave preserves the array resolution exactly (unlike savefig on an
    imshow axes).
    """
    normalize = plt.Normalize(vmin=vmin, vmax=vmax)
    rgba = plt.cm.gray(normalize(data))
    plt.imsave(name, rgba)
    print('saved to {}'.format(name))
# quick test: an 8x8 gradient spanning the full [-1, 1] range
arr = np.linspace(-1, 1, 8 * 8).reshape((8, 8))
plt.imshow(arr)
save_image(arr, name="output/test.png", vmin=-1, vmax=1)
# -
# # Displaying the frequency things
# Let's start with seeing if we can render each of the 'frequencies' that the DCT gives us.
# + jupyter={"outputs_hidden": false}
# A single DC component: 1 in the top-left of the 8x8 frequency matrix
freq = np.zeros((8, 8))
freq[0,0] = 1
freq
# + jupyter={"outputs_hidden": false}
from scipy.fftpack import idct
# The 2-D inverse DCT (done as two 1-D passes) turns the frequency matrix
# into an image patch
freq_img = idct(idct(freq, axis=0), axis=1)
plt.imshow(freq_img)
# + jupyter={"outputs_hidden": false}
# Ok, lets try for a more interesting frequency
freq = np.zeros((8, 8))
freq[2,3] = 1
freq_img = idct(idct(freq, axis=0), axis=1)
plt.imshow(freq_img)
# -
# Alright sweet, that worked. (Side point: that plot looks wiggly haha)
#
# Now lets try... ALL OF THEM
# + jupyter={"outputs_hidden": false}
import itertools
# Render all 64 DCT basis functions in an 8x8 grid of subplots, and save each
# one to its own file.
fig, axes = plt.subplots(8, 8)
for (x, y) in itertools.product(range(8), range(8)):
    freq = np.zeros((8, 8))
    freq[x, y] = 1
    freq_img = idct(idct(freq, axis=0, norm='ortho'), axis=1, norm='ortho')
    ax = axes[x, y]
    # Experimentally all things tend to be within this range
    ax.imshow(freq_img, vmin=-0.25, vmax=0.25)
    ax.axis('off')
    save_image(freq_img, name='output/components-{}-{}.png'.format(x, y), vmin=-0.25, vmax=0.25)
# -
# Neato! That worked! :)
# ## Generating the things from an image
# + jupyter={"outputs_hidden": false}
from scipy.fftpack import dct
# Forward 2-D DCT of the image, done as two 1-D passes
freqs = dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
plt.imshow(freqs)
freqs
# -
# seems correct...
# + jupyter={"outputs_hidden": false}
# Round trip: forward DCT then inverse DCT should reproduce the image
freqs = dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
reconstructed_img = idct(idct(freqs, axis=0, norm='ortho'), axis=1, norm='ortho')
plt.imshow(reconstructed_img)
# + jupyter={"outputs_hidden": false}
# Lets draw the compnents that come out
# first need this function
def lerp(a, b, amt):
    """Linearly interpolate between *a* and *b* by fraction *amt*
    (amt=0 gives a, amt=1 gives b)."""
    delta = b - a
    return a + delta * amt
freqs = dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
# For each of the 64 frequencies, keep only that coefficient of the image's
# DCT, inverse-transform it, and plot the resulting contribution.
fig, axes = plt.subplots(8, 8)
for (x, y) in itertools.product(range(8), range(8)):
    freq_select = np.zeros((8, 8))
    freq_select[x, y] = 1
    new_freqs = np.multiply(freqs, freq_select)
    freq_img = idct(idct(new_freqs, axis=0, norm='ortho'), axis=1, norm='ortho')
    # If we draw everything with in the same scale it's hard to see the smaller patterns
    # because they're so small and come out as mostly grey.
    # But if we draw them all with their own scale we can't see the relative size.
    # So we go somewhere between.
    scale_adjust_amt = 0.1
    vmin = lerp(freq_img.min(), -1, scale_adjust_amt)
    vmax = lerp(freq_img.max(), 1, scale_adjust_amt)
    ax = axes[x, y]
    ax.imshow(freq_img, vmin=vmin, vmax=vmax)
    ax.axis('off')
    save_image(freq_img, name='output/img-components-{}-{}.png'.format(x, y), vmin=vmin, vmax=vmax)
# + jupyter={"outputs_hidden": false}
# Now lets draw the image being built up, adding one coefficient per subplot
# in plain row-major order (subplot (x, y) uses the first x*8+y+1 coefficients)
freqs = dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
fig, axes = plt.subplots(8, 8)
for (i, (x, y)) in enumerate(itertools.product(range(8), range(8))):
    # mask keeping only the first i+1 coefficients (flattened row-major)
    freq_select = np.zeros(8 * 8)
    freq_select[:i+1] = 1
    freq_select = freq_select.reshape((8, 8))
    new_freqs = np.multiply(freqs, freq_select)
    freq_img = idct(idct(new_freqs, axis=0, norm='ortho'), axis=1, norm='ortho')
    ax = axes[x, y]
    ax.imshow(freq_img, vmin=0, vmax=1)
    ax.axis('off')
# + jupyter={"outputs_hidden": false}
# Fill in a similar way to how jpegs do it?
def loop_like_a_jpeg(size):
    """Generates a bunch of tuples that loop over a matrix in a zig-zag way
    """
    ascending = False
    # Upper-left triangle: anti-diagonals anchored on the top row.
    for diag in range(size):
        steps = range(diag + 1)
        if ascending:
            steps = reversed(steps)
        for step in steps:
            # Walk along the anti-diagonal starting from (0, diag); the
            # direction alternates on every diagonal.
            yield (step, diag - step)
        ascending = not ascending
    # Lower-right triangle: anti-diagonals anchored on the last column.
    for diag in range(1, size):
        steps = range(size - diag)
        if ascending:
            steps = reversed(steps)
        for step in steps:
            yield (diag + step, (size - 1) - step)
        ascending = not ascending
# Demo: number the cells of a 4x4 matrix in zig-zag visit order
# NOTE(review): the tuple is unpacked as (x, y) but written as arr[y, x],
# which transposes the generator's order — confirm the intended orientation.
arr = np.zeros((4, 4))
for (i, (x, y)) in enumerate(loop_like_a_jpeg(4)):
    # print(x, y)
    arr[y, x] = i
arr
# + jupyter={"outputs_hidden": false}
freqs = dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho')
fig, axes = plt.subplots(8, 8)
# Accumulate coefficients one at a time in JPEG zig-zag order, reconstructing
# and saving the image after each addition.
# NOTE(review): the loop unpacks (y, x) but indexes freq_select[x, y] and
# axes[x, y] — a transpose relative to the demo cell above; confirm intended.
freq_select = np.zeros((8, 8))
for (y, x) in loop_like_a_jpeg(8):
    freq_select[x, y] = 1;
    new_freqs = np.multiply(freqs, freq_select)
    freq_img = np.copy(idct(idct(new_freqs, axis=0, norm='ortho'), axis=1, norm='ortho'))
    ax = axes[x, y]
    ax.imshow(freq_img, vmin=0, vmax=1)
    ax.axis('off')
    save_image(freq_img, name='output/img-buildup-{}-{}.png'.format(x, y), vmin=0, vmax=1)
# -
# ---- source notebook: 02_Python/Discrete_Cosine_Transform_(DCT)/Discrete_Cosine_Transform.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# This script is for the statistical data analysis for the article "Classification and biomarker identification of prostate tissue from TRAMP mice with hyperpolarized 13C-SIRA" by <NAME> et al. Talanta. 2021 Aug 20:122812.
#
# All code is written by <NAME> (<EMAIL>).
#
# Versions used:
#
# Python: 3.6.10 Scipy: 1.5.2 sklearn: 0.23.2
#
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import winsound  # NOTE(review): Windows-only stdlib module — this script will not import on Linux/macOS
from collections import defaultdict #Used in RF for ordered dictionary
from scipy import stats
from sklearn import preprocessing, svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.decomposition import PCA
from sklearn.model_selection import LeaveOneOut, cross_val_predict, cross_val_score, GridSearchCV, StratifiedShuffleSplit
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import RandomForestClassifier as RF
#set figuresize, some functions changes this
plt.rcParams['figure.figsize'] = [20, 10]
#set up colors for plots to use (indexed by encoded class label)
color_set = np.array(["#990000","steelblue", 'indigo', 'lime', 'chocolate', 'navy'])
# +
def makepca(d, cat, scale = "s"):
    """
    Makes Principal Components Analysis (PCA) and plots score plot of 1. and 2. components.
    Scales data first.
    Prints out list of importance (in %) of all components.
    Input:
        d(n x m pd DataFrame): X-variable; datamatrix with features as columns and datapoints as rows.
        cat(n-length list-like): Y-variable; labels to color after
        scale(string, "s", "p" or "n"): scaling method. "s" = standard(default). "p" = Pareto. "n" = No scaling.
    """
    # scale data set
    if scale == "s":
        # autoscale: zero mean, unit variance
        data = (d - d.mean())/d.std()
    elif scale == "p":
        # Pareto scale: zero mean, divide by sqrt of the standard deviation
        data = (d - d.mean())/d.std()**0.5
    elif scale == "n":
        # no scaling
        data = d
    else:
        # End function if no valid method chosen
        raise Exception("No correct scale method specified: 'scale' has to be 's','p' or 'n'")
    # make PCA with all components and project the data onto them
    pca = PCA()
    pca.fit(data)
    data_pc = pca.transform(data)
    plt.rcParams['figure.figsize'] = [10, 10]
    # encode class labels as integers 0..k-1
    le = preprocessing.LabelEncoder()
    le.fit(cat)
    label_set = le.classes_
    labels_data = le.transform(cat)
    # Prepare colors corresponding to labels for plotting
    # (removed unused local `types = len(label_set)`)
    colors = {}
    for i in range(len(label_set)):
        colors[label_set[i]] = color_set[i]  # color_set has to be set outside of function
    # Plot PCA scores, one scatter series per class
    for label in range(len(label_set)):
        c = colors[label_set[label]]
        x = data_pc[:, 0][labels_data == label]
        y = data_pc[:, 1][labels_data == label]
        plt.scatter(x, y, color = c, label = label_set[label], s= 70)
    axis_label = "PC1 %.2f%%" %(pca.explained_variance_ratio_[0]*100)
    plt.xlabel(axis_label, fontsize = 25)
    axis_label = "PC2 %.2f%%" %(pca.explained_variance_ratio_[1]*100)
    plt.ylabel(axis_label, fontsize = 25)
    plt.title('PCA Scoreplot', fontsize = 25)
    plt.legend( prop={'size': 25}, loc = 2)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.show()
    # variance explained per component, in percent
    with np.printoptions(precision=3, suppress=True):
        print(np.round(pca.explained_variance_ratio_*100, 2))
# -
def makerf(data, cat, trees = 600, loops = 10000, params = None):
    """
    Function for making importance ranking of features with random forest classification
    (permutation importance). Importance is measured by shuffling the values of each
    feature and comparing classification success between normal and shuffled data.
    Input:
        data(dataframe, m x n): x-variable, data with features as columns and datapoints as rows.
        cat(list-like, n): y-variable to classify after.
        trees(int, optional): n_estimators for the forest, default 600.
        loops(int, optional): number of stratified train/test repetitions, default 10000.
        params(dict, optional): additional parameters for random forest if non-default to be used.
    Output:
        imp(dataframe): Importance matrix with columns "Mean", "std err." and "color",
        sorted by decreasing mean importance.
    """
    #make sure we can handle Y in both array and Dataframe
    if type(cat) == pd.Series:
        cat = np.ravel(cat.values)
    #define Random Forest classifier and fit to all data
    if params is None:
        rf = RF(n_estimators= trees, oob_score= True)
    else:
        rf = RF(n_estimators= trees, oob_score= True, **params)
    rf.fit(data,cat)
    print("Out-of-bag score (%):", rf.oob_score_*100)
    scores = defaultdict(list) #dictionary of per-feature importance samples
    #define train-test splits, stratified to ensure all cell lines are in test
    splits = StratifiedShuffleSplit(loops, train_size = 0.7)
    #run test
    for train_idx, test_idx in splits.split(data, cat):
        #sort training and testing
        X_train = data.values[train_idx]
        X_test = data.values[test_idx]
        Y_train = cat[train_idx]
        Y_test = cat[test_idx]
        #fit RF to training data
        r = rf.fit(X_train, Y_train)
        #get true accuracy
        acc = sum(rf.predict(X_test) == Y_test)/len(Y_test)
        #for each feature get difference in accuracy when that feature's test values are shuffled
        if acc > 0: #avoid divide by zero error, sometimes occurring with small dataset/randomized data
            for i in range(len(data.columns)):
                X_t = X_test.copy()
                # shuffle one feature column; the relative accuracy drop is
                # that feature's importance for this split
                np.random.shuffle(X_t[:, i])
                shuff_acc = sum(rf.predict(X_t) == Y_test)/len(Y_test)
                scores[data.columns[i]].append((acc-shuff_acc)/acc)
    imp = pd.DataFrame(columns= ['Mean', 'std err.', 'color'])
    #color code positive-negative (green: shuffling hurt accuracy, red: it helped)
    for feat in scores:
        m = np.mean(scores[feat])
        c = 'r'
        if m > 0:
            c = 'g'
        imp.loc[feat] = [m, stats.sem(scores[feat]), c]
        #stats.sem = standard error on the mean
    imp=imp.sort_values('Mean', ascending = False)
    #plot important features, maximum 30
    ml = min(len(imp), 30)
    imp.iloc[:ml].plot.bar(y = 'Mean', yerr = 'std err.', color = imp.color, legend = False)
    plt.ylabel('Relative importance ')
    plt.show()
    return(imp)
def imp_clean(imp):
    """
    Takes RF importance results and returns only significant features.
    A feature counts as significant when its mean importance minus its
    standard error clears 5% of the top feature's (mean - std err.) score.
    Input:
        imp(dataframe): Importance matrix with columns "Mean" and "std err.",
        sorted with the most important feature first.
    Output:
        (array): Index values of features with significant positive importance.
    """
    top_lower_bound = imp.Mean.iloc[0] - imp['std err.'].iloc[0]
    threshold = top_lower_bound * 0.05
    lower_bounds = imp.Mean - imp['std err.']
    significant = imp[lower_bounds > threshold]
    return significant.index.values
def plot_rfs(imp_chart, ml):
    """
    Makes pretty plot of only the significantly important features.
    Input:
        imp_chart(dataframe): Importance matrix with columns "Mean" and "std err."
        ml(array): List of features in imp_chart to be plotted
    """
    selected = imp_chart.loc[ml]
    plt.rcParams['figure.figsize'] = [10, 10]
    selected.plot.bar(y = 'Mean', yerr = 'std err.', color = 'olivedrab', legend = False)
    plt.title('RF feature importance', fontsize = 25)
    plt.ylabel('Relative importance', fontsize = 25)
    plt.yticks(fontsize = 20)
    plt.xlabel('Chemical shift (ppm)', fontsize = 25)
    plt.xticks(fontsize = 20)
    plt.show()
def loadings_pca(d, annotate = False, scale = "s"):
    """
    Makes Principal Components Analysis (PCA) and plots loading plot of 1. and 2. components,
    for all features in the dataset.
    Scales data first.
    Input:
        d(n x m pd DataFrame): X-variable; datamatrix with features as columns and datapoints as rows.
        annotate(bool, default = False): whether to print feature names in plot.
        scale(string, "s", "p" or "n"): scaling method. "s" = standard(default). "p" = Pareto. "n" = No scaling.
    """
    #scale data set
    if scale == "s":
        #autoscale: zero mean, unit variance
        data = (d - d.mean())/d.std()
    elif scale == "p":
        #Pareto scale: zero mean, divide by sqrt of the standard deviation
        data = (d - d.mean())/d.std()**0.5
    elif scale == "n":
        #no scaling
        data = d
    else:
        #End function if no valid method chosen
        raise Exception("No correct scale method specified: 'scale' has to be 's','p' or 'n'")
    #make PCA with only the first two components (only these are plotted)
    pca = PCA(n_components=2)
    pca.fit(data)
    data_pc = pca.transform(data)
    # loadings scaled by sqrt(eigenvalue) so vector length reflects variance
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
    #Plot each feature's loading as a line from the origin
    plt.rcParams['figure.figsize'] = [10, 10]
    for i, feature in enumerate(d.columns):
        x = loadings[i, 0]
        y = loadings[i, 1]
        plt.plot([0, x], [0, y], 'k-', lw=2)
        if annotate == True:
            plt.annotate(feature, (x, y), fontsize = 15)
    bob = "PC1 %.2f%%" %(pca.explained_variance_ratio_[0]*100)
    plt.xlabel(bob, fontsize = 25)
    bob = "PC2 %.2f%%" %(pca.explained_variance_ratio_[1]*100)
    plt.ylabel(bob, fontsize = 25)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.title('PCA Loadings', fontsize = 25)
    #plt.xlim([-0.25, 1])
    #plt.ylim([-0.85, 0.41])
    plt.show()
def makesvm(d, cat, scale = "s", print_out = None):
    """
    Function for making Support Vector Machine (SVM) classification, using grid search to optimize internal parameters.
    Data can be scaled before the algorithm is run; different scaling methods can be chosen.
    Options for cost parameter (C) are 2^n with n being (-5:5). A linear kernel function is used.
    Model selection is cross-validated with Leave-One-Out.
    Input:
        d(n x m pd DataFrame): X-variable; datamatrix with features as columns and datapoints as rows.
        cat(n-length list-like): Y-variable; labels to classify after
        scale(string, "s", "p" or "n"): scaling method. "s" = standard(default). "p" = Pareto. "n" = No scaling.
        print_out(bool): If not None, stats on classification parameters and errors are printed on screen.
    Output:
        grid(GridSearchCV object): Optimizing algorithm fitted to the data.
    """
    #scale data
    if scale == "s":
        #Autoscale: zero mean, unit variance
        data = (d - d.mean())/d.std()
    elif scale == "p":
        #Pareto scale: zero mean, divide by sqrt of the standard deviation
        data = (d - d.mean())/d.std()**0.5
    elif scale == "n":
        #No scaling
        data = d
    else:
        raise Exception("No correct scale method specified: 'scale' has to be 's','p' or 'n'")
    #get classes as numbers
    le = preprocessing.LabelEncoder()
    le.fit(cat)
    label_set = le.classes_ #list of classes
    labels_data =le.transform(cat) #encoded y-variable
    #set up standard SVM
    clf = svm.SVC(probability=True)
    loo = LeaveOneOut()
    #set up options for parameter grid: candidate C values 2^-5 .. 2^4
    bob = np.arange(-5, 5, 1)
    bub = np.ones(len(bob))*2
    power2 = bub**bob
    #param_grid = [{'kernel': ['linear'], 'C': power2},{'kernel': ('rbf', 'poly'), 'C': power2, 'gamma': power2}]
    param_grid = [{'kernel': ['linear'], 'C': power2}]
    #set up gridsearch with Leave-One-Out cross-validation
    grid = GridSearchCV(clf, param_grid, refit = True, cv= loo)
    # fitting the model for grid search
    grid.fit(data, cat)
    #print some nice stats if wanted
    if print_out is not None:
        #refit SVM classifier. If grid is used directly, predicted will be wrong.
        clf = svm.SVC(**grid.best_params_, probability=True)
        clf.fit(data, labels_data)
        predicted_loo = cross_val_predict(clf, data, labels_data, cv= loo)
        acc_loo = grid.best_score_ * 100
        params = grid.best_params_
        #make confusion matrix (decode labels back to class names first)
        bub = np.array([label_set[s] for s in labels_data])
        bob = np.array([label_set[s] for s in predicted_loo])
        con_loo = pd.crosstab(bub, bob, rownames= ["Actual"], colnames= ["predicted loo"])
        #make list of misclassified samples
        loo_pred_err = pd.DataFrame(np.column_stack([bub, bob]), columns = ["Actual", "Predicted"], index= d.index)
        loo_pred_err = loo_pred_err[loo_pred_err.Actual != loo_pred_err.Predicted]
        print("Leave-One-Out validated classification score: ", acc_loo)
        print("Parameters used: ", params)
        print("Classification errors:")
        print(con_loo)
        print(loo_pred_err)
    return(grid)
def svm_feats(data, cat, imp, scale = 's'):
    """
    Loop over makesvm() with growing prefixes of the ranked feature list
    *imp*, then once more with the full feature set.

    Input:
        data(n x m pd DataFrame): X-variable; features as columns, datapoints as rows.
        cat(n-length list-like): Y-variable; labels to classify after.
        imp(array): significant features in X, ranked most to least important.
        scale(string): "s" = standard (default), "p" = Pareto, "n" = no scaling.
    Output:
        svm_results(DataFrame): classification success rate and best parameters
            for each feature subset (indexed by subset size, plus a 'Full' row).
    """
    # tell the user which scaling method they have chosen
    if scale == 's':
        print("Scaling method: Auto-scale")
    elif scale == 'p':
        print("Scaling method: Pareto")
    elif scale == 'n':
        print("Scaling method: None... Are you sure about this, champ?")
    else:
        raise Exception("No correct scale method specified: 'scale' has to be 's','p' or 'n'")
    svm_results = pd.DataFrame(columns= ["score", "params"])
    # grow the feature subset one ranked feature at a time
    for n_feats in range(1, len(imp) + 1):
        fitted = makesvm(data[imp[:n_feats]], cat, scale)
        svm_results.loc[n_feats] = [fitted.best_score_, fitted.best_params_]
    # once more with the complete feature set
    fitted = makesvm(data, cat, scale)
    svm_results.loc['Full'] = [fitted.best_score_, fitted.best_params_]
    return(svm_results)
def bin_data(d, bl = 0.02):
    """
    Sums data into bins of uniform length and plots an overview of the binned data.

    Input:
        d(DataFrame): Data to be structured into bins, with ppm values, in
            descending order, as column names.
        bl(float): Length of the bins. Default is 0.02.
    Output:
        binned_d(DataFrame): d summed into bins, with columns named for the
            lowest ppm of each bin, rounded to three digits.
    """
    # bin edges, running from the highest ppm value downwards in steps of bl
    bins = np.arange(d.columns.values[0], d.columns.values[-1], -bl)
    # define start of first bin
    left = bins[0]
    # dataframe for output
    # BUG FIX: the original indexed an undefined global `data`; the function's
    # argument is `d`, so the output must share d's index
    binned_d = pd.DataFrame(index= d.index)
    # loop over all bins
    for b in bins[1:]:
        # columns of the original data falling inside this bin (left, b]
        d_b = d[d.columns[(left >= d.columns) & (d.columns > b)]]
        # round bin name
        b = np.round(b,3)
        # set sum of original data as values for this bin
        binned_d[b] = d_b.sum(axis=1)
        # define start point of next bin
        left = b
    print("There are %i bins in total" %(len(binned_d.columns)))
    # make plot of binned data
    plt.rcParams['figure.figsize'] = [40, 20]
    ax = binned_d.T.plot(legend= None)
    xtick = bins[1:]
    ax.set_xticks( xtick, minor=True )
    ax.grid(True, which='minor', axis='x' )   # show bins as grid
    ax.grid(False, which='major', axis='x' )  # has to be False or extra grid lines appear
    return(binned_d)
| Code for Frahm et al. 2021 - Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#imports
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from acoustic_data_science import config
def date(timestamp):
    """Return the leading 'YYYY-MM-DD' part (first 10 characters) of a timestamp string.

    The parameter was renamed from `str`, which shadowed the builtin.
    """
    return timestamp[:10]
# +
#read in data
ice_data = pd.read_csv(config.external_data_path + '/hazel_ice_data.csv')
ice_data['Date'] = pd.to_datetime(ice_data['Date'], dayfirst=True) #change to datetime format
ice_data['Concentration'].replace({'no ice':0}, inplace=True) #replace with numerical formatting
ice_data['Concentration'] = pd.to_numeric(ice_data['Concentration'])
ice_data
# -
# Daily air temperature from the Cambridge Bay shore station; keep months
# after August (any year) plus everything in 2019
daily_temp = pd.read_feather(config.processed_data_path + '/daily_temperature_cambay_shorestation.feather')
daily_temp = daily_temp[(daily_temp["timestamp"].dt.month > 8) | (daily_temp["timestamp"].dt.year == 2019)]
daily_temp
from datetime import datetime
# Onset dates for different ice-stage codes (presumably stage-of-development
# categories from the ice record -- confirm against the data source)
onset_sod_1 = datetime.strptime('08/10/2018', '%d/%m/%Y')
onset_sod_4 = datetime.strptime('22/10/2018', '%d/%m/%Y')
onset_sod_7 = datetime.strptime('12/11/2018', '%d/%m/%Y')
onset_sod_1dot = datetime.strptime('24/12/2018', '%d/%m/%Y')
onset_sod_4dot = datetime.strptime('11/02/2019', '%d/%m/%Y')
# +
# Overlay air temperature, ice draft and ice concentration on one time axis
fig,ax = plt.subplots(figsize=(16,8))
#plot temperature data
label = r'Air temperature ($\degree C$)'
ax.plot(daily_temp['timestamp'], daily_temp['mean_temperature'], label=label, c='r')
ax.set_ylabel(label)
ax.set_xlabel('Date')
#plot ice draft data on same x axis
ax2 = ax.twinx()
label = 'Ice draft (m)'
ax2.plot(ice_data['Date'], ice_data['Ice Profiler'], label=label, c='green')
ax2.set_ylabel(label)
#plot normalised ice concentration data on same x axis
ax3 = ax.twinx()
ax3.plot(ice_data['Date'], ice_data['Concentration']*10, label='Ice concentration')
ax3.set_ylabel('Ice concentration')
# push the third y-axis outward so it does not overlap the draft axis
ax3.spines['right'].set_position(('outward', 60))
#ax.hlines(y=0, xmin=pd.to_datetime('2018/08/01'), xmax=pd.to_datetime('2019/07/31'), color="black", ls='--', label='Fresh water ice freezing point')
ax.hlines(y=-1.56, xmin=pd.to_datetime('2018/08/01'), xmax=pd.to_datetime('2019/07/31'), color="black", ls='--', label='Sea ice freezing point')
ax.set_xlim(xmin=pd.to_datetime('2018/09/01'), xmax=pd.to_datetime('2019/07/31'))
#get labels for legend
lines_1, labels_1 = ax.get_legend_handles_labels()
lines_2, labels_2 = ax2.get_legend_handles_labels()
lines_3, labels_3 = ax3.get_legend_handles_labels()
#plot legend (concentration first, then draft, then temperature)
lines = lines_3 + lines_2 + lines_1
labels = labels_3 + labels_2 + labels_1
ax.legend(lines, labels, loc=0)
import matplotlib.dates as mdates
# monthly ticks formatted as mm/yyyy
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
#plt.setp(ax.get_xticklabels(), rotation=30)
plt.tight_layout()
#save and show the figure
fig.savefig(config.figures_path + "/temp_draft_conc.jpg", dpi=300)
plt.show()
# -
#
| notebooks/17-HH-ice_draft_temp_conc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo Phase Correlation
#
# Illustrate using Phase Correlation to estimate rotation and translation between images.
#
# ## Description
#
# In this lesson we explain how to use Phase Correlation to
# estimate the angle of rotation and the translation between 2D
# images.
#
# Converting an image from Cartesian to Polar coordinates
#
#
# It converts a plane from coordinates $(x,y)$ to
# $(\theta,r)$, with $x = r \cos \theta$ and $y = r \sin \theta$.
# Notice that the domain in polar coordinates must be
# specified explicitly and will influence the angle resolution.
# +
import numpy as np
import sys,os
ea979path = os.path.abspath('../../')
if ea979path not in sys.path:
    sys.path.append(ea979path)  # make the local ea979 toolbox importable
import ea979.src as ia
# %matplotlib inline
import matplotlib.image as mpimg
# +
# Load the test image and crop it to a square (H x H) region
#f = ia.normalize(ia.gaussian((151,151), [[75],[75]], [[800,0],[0,800]]), [0,200]).astype(uint8)
f = mpimg.imread("../data/astablet.tif")
H,W = f.shape
f = f[:,H//2:H//2+H]
#ia.adshow(ia.isolines(f,10,3), "Image in cartesian coordinates")
# Polar transform over the full circle; output size (150, 200)
# (presumably (radii, angles) -- confirm against ia.polar's signature)
g = ia.polar(f,(150,200),2*np.pi)
ia.adshow(f)
ia.adshow(g)
#ia.adshow(ia.isolines(g.astype(int),10,3), "Image in polar coordinates")
#adshow(g, "Image in polar coordinates")
# +
# f2 is f rotated 90 degrees (transpose + column flip); its polar image g2
# should be a cyclic shift of g along the angle axis
f1 = f
f2 = f.T[:,::-1]
g2 = ia.polar(f2,(150,200),2*np.pi)
ia.adshow(f2)
ia.adshow(g2)
# -
nb = ia.nbshow(2)
nb.nbshow(g)
nb.nbshow(g2)
nb.nbshow()
# Phase correlation between the two polar images: the location of the peak
# encodes the rotation as a shift along the angular axis
h = ia.phasecorr(g,g2)
print(h.shape)
ia.adshow(ia.normalize(h))
i = np.argmax(h)
row,col = np.unravel_index(i,h.shape)
v = h[row,col]
print(np.array(g.shape) - np.array((row,col)))
print(v)
# ## Estimating the angle of rotation
#
# The following function will be used to estimate the angle of rotation between 2D images.
def rotphasecorr2d(f,h):
    """Phase-correlate the polar-mapped magnitude spectra of f and h.

    The peak of the returned correlation map along the angular axis encodes
    the rotation angle between the two images.
    """
    spec_f = np.fft.fftn(f)
    spec_h = np.fft.fftn(h)
    # map each spectrum view to polar coordinates: radius up to half the
    # image height, 360 angular samples over [0, pi)
    polar_f = ia.polar(ia.dftview(spec_f), (spec_f.shape[0]/2, 360), np.pi)
    polar_h = ia.polar(ia.dftview(spec_h), (spec_h.shape[0]/2, 360), np.pi)
    return ia.phasecorr(polar_f, polar_h)
# The function can be applied as follows.
# Load the cameraman image and pad it with a 100-pixel black border so the
# rotated image does not get clipped
f = mpimg.imread("../data/cameraman.tif")
print(f.dtype)
t = np.zeros(np.array(f.shape)+200,dtype=np.uint8)
t[100:f.shape[0]+100,100:f.shape[1]+100] = f
f = t
# Homogeneous transforms: t1 translates the centre to the origin, t2 back
t1 = np.array([
    [1,0,-f.shape[0]/2.],
    [0,1,-f.shape[1]/2.],
    [0,0,1]]);
t2 = np.array([
    [1,0,f.shape[0]/2.],
    [0,1,f.shape[1]/2.],
    [0,0,1]]);
# Rotation by 30 degrees about the image centre: T = t2 . r1 . t1
theta = np.radians(30)
r1 = np.array([
    [np.cos(theta),-np.sin(theta),0],
    [np.sin(theta),np.cos(theta),0],
    [0,0,1]]);
T = t2.dot(r1).dot(t1)
print(f.dtype)
f1 = ia.affine(f,T,0)
#f1.shape = f.shape
nb.nbshow(f, "f:Original image")
nb.nbshow(f1, "f1:Image rotated by 30°")
nb.nbshow()
nb = ia.nbshow(2)
# Compare the (translation-invariant) magnitude spectra of the two images
F = np.fft.fftn(f)
F1 = np.fft.fftn(f1)
FS = ia.dftview(F)
F1S = ia.dftview(F1)
nb.nbshow(FS,'FS')
nb.nbshow(F1S,'F1S')
nb.nbshow()
# Polar-map the spectra: rotation becomes a shift along the angle axis
pFS = ia.polar(FS,(FS.shape[0]//2,360),np.pi)
pF1S = ia.polar(F1S,(F1S.shape[0]//2,360),np.pi)
nb.nbshow(ia.normalize(pFS),'polar FS')
nb.nbshow(ia.normalize(pF1S),'polar F1S')
nb.nbshow()
pg = ia.phasecorr(pFS,pF1S)
ia.adshow(ia.normalize(pg))
# +
peak = np.unravel_index(np.argmax(pg), pg.shape)
# Calculate the angle: the column index maps linearly onto [0, 180) degrees
ang = (float(peak[1])/pg.shape[1])*180
print(ang)
# -
# ## Estimating the angle of rotation and the translation
#
# Now we will compute the angle of rotation and the translation. The function below first find the
# angle of rotation; after that, it rotate the image and find the translation. Two phase correlation
# maps are returned: one for the translation and other for rotation.
# +
import scipy.ndimage
def trphasecorr2d(f,h):
    """Estimate the rotation and then the translation between f and h.

    First finds the rotation angle via rotphasecorr2d, rotates h back by that
    angle, and phase-correlates against f to find the remaining shift.

    Returns:
        (g, rg): translation and rotation phase-correlation maps.
    """
    rg = ia.rotphasecorr2d(f,h)
    # BUG FIX: `argmax` was an unqualified name (NameError); use np.argmax
    peak = np.unravel_index(np.argmax(rg), rg.shape)
    ang = (float(peak[1])/rg.shape[1])*180
    # BUG FIX: scipy.ndimage.interpolation was removed in modern scipy;
    # scipy.ndimage.rotate is the supported equivalent
    h_rot = scipy.ndimage.rotate(h, -ang, reshape=False)
    g = ia.phasecorr(f,h_rot)
    return g, rg
# -
# The following code find the angle of rotation and the translation. Then, the original image is obtained
# from the rotated and translated sample image.
# +
t3 = np.array([
[1,0,50],
[0,1,32],
[0,0,1]]);
T = np.dot(t3,T)
h = ia.affine(f,T,0)
h.shape = f.shape
ia.adshow(f, "Original image")
ia.adshow(h, "Image rotated by 30° and translated by (50,32)")
g, rg = trphasecorr2d(f,h)
g = ia.normalize(g)
rg = ia.normalize(rg)
trans_peak = np.unravel_index(argmax(g), g.shape)
rot_peak = np.unravel_index(argmax(rg), rg.shape)
ang = (float(rot_peak[1])/rg.shape[1])*180
trans = (np.array(h.shape)-np.array(trans_peak))
np.adshow(g, "Translation correlation map - Peak %s, \n corresponds to translation %s"%(str(trans_peak), str(tuple(trans))))
np.adshow(ianormalize(rg), "Rotation correlation map - Peak %s, corresponds to angle %f°"%(str(rot_peak),ang))
t4 = np.array([
[1,0,-trans[0]],
[0,1,-trans[1]],
[0,0,1]]);
theta1 = radians(-ang)
r2 = np.array([
[np.cos(theta1),-np.sin(theta1),0],
[np.sin(theta1),np.cos(theta1),0],
[0,0,1]]);
T1 = dot(t4,dot(t2,dot(r2,t1)))
f1 = ia.affine(h,T1,0)
f1.shape = h.shape
ia.adshow(f1, "Sample image rotated and translated by %f° and %s, respectively"%(-ang,tuple(-trans)))
# -
# ## See Also
#
# - `iaphasecorr iaphasecorr` -- Phase correlation.
# - `iapolar iapolar` -- Cartesian to polar coordinate transformation.
# - `iaffine iaffine` -- Affine transform.
# - `iacorrdemo iacorrdemo` -- Illustrate the Template Matching technique.
# ## References
#
#
# 1. <NAME> and <NAME>. An FFT-Based Technique for Translation, Rotation, and
# Scale-Invariant Image Registration. IEEE Trans. on Image Processing. vol 5. n. 8, 1996. `http://ieeexplore.ieee.org/iel4/83/11100/00506761.pdf?isnumber=11100&prod=JNL&arnumber=506761&arSt=1266&ared=1271&arAuthor=Reddy%2C+B.S.%3B+Chatterji%2C+B.N. PDF at IEEE`
#
# ## Contributions
#
# - <NAME>, 1st semester 2011
| master/DemoPhaseCorrelation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# * 使用行业内的排序,进行因子测试;与回归版本,以及原始因子值版本进行比较。本部分参考自《QEPM》 p.p 117
# * 请在环境变量中设置`DB_URI`指向数据库
# # 参数设定
# --------------
# +
# %matplotlib inline
import os
import pandas as pd
import numpy as np
from PyFin.api import *
from alphamind.api import *
# Factor under test and the stock universe / date range of the backtest
factor = "EMA5D"
universe = Universe('zz800')
start_date = '2020-01-01'
end_date = '2020-02-21'
freq = '10b'      # rebalance every 10 business days
category = 'sw'   # ShenWan industry classification
level = 1
horizon = map_freq(freq)  # forward-return horizon matching the rebalance freq
engine = SqlEngine(os.environ['DB_URI'])  # DB_URI must point at the factor database
ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')
# A single sample date used for the illustrative sections below
sample_date = '2018-01-04'
sample_codes = engine.fetch_codes(sample_date, universe)
sample_industry = engine.fetch_industry(sample_date, sample_codes, category=category, level=level)
# -
sample_industry.head()
# # Sample Factor
# --------------------
#
# Below we compare three approaches and examine how well each of them avoids
# industry concentration:
#
# * ranking by the raw factor value;
# * ranking the raw factor within each industry;
# * ranking the residuals of the raw factor regressed on industry dummies.
# ## 1. Raw factor ranking
# --------------------
factor1 = {'f1': CSQuantiles(factor)}
sample_factor1 = engine.fetch_factor(sample_date, factor1, sample_codes)
sample_factor1 = pd.merge(sample_factor1, sample_industry[['code', 'industry']], on='code')
sample_factor1.sort_values('f1', ascending=False).head(15)
# With no industry adjustment of the raw factor, the stocks with the largest
# values of our chosen alpha factor `CFO2EV` cluster in banking and the wider
# financial sector.
# ## 2. Within-industry ranking
# ----------------------
# Here we use the adjusted ShenWan industry classification as the label:
# NOTE(review): `factor2` is built with the same expression as `factor1`;
# an industry-grouped quantile (e.g. CSQuantiles(factor, groups=...)) was
# probably intended -- confirm against the alphamind API.
factor2 = {'f2': CSQuantiles(factor)}
sample_factor2 = engine.fetch_factor(sample_date, factor2, sample_codes)
sample_factor2 = pd.merge(sample_factor2, sample_industry[['code', 'industry']], on='code')
sample_factor2.sort_values('f2', ascending=False).head(15)
# Using within-industry ranks, the industry distribution is fairly even.
# ## 3. Industry-neutralising the factor by regression
# --------------------------------
# Another idea: regress the factor on industry dummies and use the residual
# as an industry-neutral replacement for the factor value:
factor3 = {'f3': factor}
sample_factor3 = engine.fetch_factor(sample_date, factor3, sample_codes)
risk_cov, risk_exp = engine.fetch_risk_model(sample_date, sample_codes)
sample_factor3 = pd.merge(sample_factor3, sample_industry[['code', 'industry']], on='code')
sample_factor3 = pd.merge(sample_factor3, risk_exp, on='code')
raw_factors = sample_factor3['f3'].values
industry_exp = sample_factor3[industry_styles + ['COUNTRY']].values.astype(float)
processed_values = factor_processing(raw_factors, pre_process=[], risk_factors=industry_exp, post_process=[percentile])
sample_factor3['f3'] = processed_values
sample_factor3 = sample_factor3[['code', 'f3', 'industry']]
sample_factor3.sort_values('f3', ascending=False).head(15)
# This approach does not work very well here: the adjustment is small and
# the result is still concentrated in the large financial sector.
# # Backtest results
# ------------------
# Long the top 20% and short the bottom 20%, equally weighted, to compare
# the three approaches:
factors = {
    'raw': CSQuantiles(factor),
    'peer quantile': CSQuantiles(factor),
    'risk neutral': LAST(factor)
}
# +
df_ret = pd.DataFrame(columns=['raw', 'peer quantile', 'risk neutral'])
df_ic = pd.DataFrame(columns=['raw', 'peer quantile', 'risk neutral'])
for date in ref_dates:
    ref_date = date.strftime('%Y-%m-%d')
    # fetch factor values, risk exposures, industries and forward returns
    codes = engine.fetch_codes(ref_date, universe)
    total_factor = engine.fetch_factor(ref_date, factors, codes)
    risk_cov, risk_exp = engine.fetch_risk_model(ref_date, codes)
    industry = engine.fetch_industry(ref_date, codes, category=category, level=level)
    rets = engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=1)
    total_factor = pd.merge(total_factor, industry[['code', 'industry']], on='code')
    total_factor = pd.merge(total_factor, risk_exp, on='code')
    total_factor = pd.merge(total_factor, rets, on='code').dropna()
    # neutralise the 'risk neutral' column against the industry dummies
    raw_factors = total_factor['risk neutral'].values
    industry_exp = total_factor[industry_styles + ['COUNTRY']].values.astype(float)
    processed_values = factor_processing(raw_factors, pre_process=[], risk_factors=industry_exp, post_process=[percentile])
    total_factor['risk neutral'] = processed_values
    # +1 for the top quintile, -1 for the bottom quintile, 0 otherwise
    total_factor[['f1_d', 'f2_d', 'f3_d']] = (total_factor[['raw', 'peer quantile', 'risk neutral']] >= 0.8) * 1.
    total_factor.loc[total_factor['raw'] <= 0.2, 'f1_d'] = -1.
    total_factor.loc[total_factor['peer quantile'] <= 0.2, 'f2_d'] = -1.
    total_factor.loc[total_factor['risk neutral'] <= 0.2, 'f3_d'] = -1.
    # normalise to unit gross exposure, then record returns and ICs
    total_factor[['f1_d', 'f2_d', 'f3_d']] /= np.abs(total_factor[['f1_d', 'f2_d', 'f3_d']]).sum(axis=0)
    ret_values = total_factor.dx.values @ total_factor[['f1_d', 'f2_d', 'f3_d']].values
    df_ret.loc[date] = ret_values
    ic_values = total_factor[['dx', 'raw', 'peer quantile', 'risk neutral']].corr().values[0, 1:]
    df_ic.loc[date] = ic_values
    print(f"{date} is finished")
# -
df_ret.cumsum().plot(figsize=(14, 7))
df_ic.cumsum().plot(figsize=(14, 7))
| notebooks/Example 13 - Evaluation within Industry Groups.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="QL28WSHoBsfp" colab_type="code" outputId="50293647-61ed-41ff-b6da-aaf412992c38" colab={"base_uri": "https://localhost:8080/", "height": 34}
# -*- coding: utf-8 -*-
#@author: alison
import re
import string
import pickle
import keras
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, SnowballStemmer
from nltk.tokenize import TweetTokenizer
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.layers import Bidirectional
from keras.models import Model
from keras.layers import Dense, Dropout, Embedding
from keras.layers import CuDNNLSTM, Activation, Input
from keras.optimizers import Adam, SGD, RMSprop
from keras import optimizers
from keras import regularizers
# + id="XdekfYsoV2c2" colab_type="code" outputId="ad1503d6-069d-493a-8a6c-8601f3adb112" colab={"base_uri": "https://localhost:8080/", "height": 70}
import nltk
nltk.download('stopwords')  # Spanish stopword list used during preprocessing
# + id="-TpI0dMJEi6p" colab_type="code" colab={}
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + id="XEfvBz8IEqrO" colab_type="code" outputId="49e53478-70c0-44f6-c910-6dad1eb94dc7" colab={"base_uri": "https://localhost:8080/", "height": 390}
# List the dataset files stored in the shared Drive folder
file_list = drive.ListFile({'q': "'1Hx5OP1Yrlh37yYzSMtsv6Ui_fuzOuG04' in parents and trashed=false"}).GetList()
for file1 in file_list:
    print('title: %s, id: %s' % (file1['title'], file1['id']))
# + id="h2hLjMJyE3dR" colab_type="code" colab={}
# Download the train/dev/trial TSV splits by Drive file id
train_downloaded = drive.CreateFile({'id': '18OJPw3-c99gT1XgKZRXCkI_7xexnBV3z'})
train_downloaded.GetContentFile('train_es.tsv')
test_downloaded = drive.CreateFile({'id': '1EC9OkjN6PR5RqehcYXoci7dsuXmlMA0f'})
test_downloaded.GetContentFile('dev_es.tsv')
trial_downloaded = drive.CreateFile({'id': '1lwuVtwkeYCoBn4wsOEwq1Y1gBe4sazaS'})
trial_downloaded.GetContentFile('trial_es.tsv')
# + id="0s3Dnnk8FF4j" colab_type="code" colab={}
train = pd.read_csv('train_es.tsv', delimiter='\t',encoding='utf-8')
dev = pd.read_csv('dev_es.tsv', delimiter='\t',encoding='utf-8')
#trial = pd.read_csv('trial_es.tsv', delimiter='\t',encoding='utf-8')
# + id="lrslbThJCeSY" colab_type="code" colab={}
# Etapa de pré-processamento
def clean_tweets(tweet):
    """Remove @handles and 'via '/'RT ' markers from *tweet* and lowercase it.

    BUG FIX: the handle pattern was written as '...\\b' in a plain (non-raw)
    string, so '\\b' was a literal backspace character and no @handle ever
    matched; a raw string restores the intended word-boundary anchor.
    """
    tweet = re.sub(r'@(\w{1,15})\b', '', tweet)
    tweet = tweet.replace("via ", "")
    tweet = tweet.replace("RT ", "")
    tweet = tweet.lower()
    return tweet
def clean_url(tweet):
    """Strip any http(s) link from *tweet* and return the cleaned string."""
    return re.sub(r'http\S+', '', tweet, flags=re.MULTILINE)
def remove_stop_words(tweet):
    """Filter a token list: drop Spanish stopwords, bare punctuation and
    tokens shorter than 3 characters."""
    blacklist = set(stopwords.words("spanish"))
    blacklist.update(['.',',','"',"'",'?',':',';','(',')','[',']','{','}'])
    return [token for token in tweet if token not in blacklist and len(token) >= 3]
def stemming_tweets(tweet):
    """Apply Spanish Snowball stemming to every token in the list *tweet*."""
    stem = SnowballStemmer('spanish').stem
    return [stem(token) for token in tweet]
def remove_number(tweet):
    """Delete every run of digits from *tweet* and return the result."""
    return re.sub(r'\d+', '', tweet)
def remove_hashtags(tweet):
    """Strip the leading '#' or '@' from each whitespace-separated token.

    Tokens are re-joined with single spaces; the result carries a trailing
    space for non-empty input (matching the original concatenation).
    """
    pieces = []
    for word in tweet.split():
        if word.startswith(('#', '@')):
            pieces.append(word[1:])
        else:
            pieces.append(word)
    return ''.join(piece + ' ' for piece in pieces)
# + id="QDsnLDRQGl71" colab_type="code" colab={}
def preprocessing(tweet, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True):
    """Full tweet-normalisation pipeline; each flag toggles one stage.

    Stage order matches the original pipeline: handle/RT cleanup -> URL
    removal -> hashtag stripping -> digit removal -> tokenisation ->
    stopword filtering -> stemming. Returns the processed tokens joined
    by single spaces.
    """
    if ctweets:
        tweet = clean_tweets(tweet)
    if url:
        tweet = clean_url(tweet)
    if hashtag:
        tweet = remove_hashtags(tweet)
    tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
    if number:
        tweet = remove_number(tweet)
    # lowercase every non-empty token
    tokens = [token.lower() for token in tokenizer.tokenize(tweet) if token != "" and token is not None]
    if swords:
        tokens = remove_stop_words(tokens)
    if stemming:
        tokens = stemming_tweets(tokens)
    return " ".join(tokens)
# + id="SunHFjyyFLR3" colab_type="code" colab={}
# Preprocess tweets (all cleaning stages enabled) and pull out labels/ids
train_text = train['text'].map(lambda x: preprocessing(x, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True))
y_train = train['HS']
id_train = train['id']
test_text = dev['text'].map(lambda x: preprocessing(x, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True))
y_test = dev['HS']
id_test = dev['id']
data = np.concatenate((train_text, test_text), axis=0)
classes = np.concatenate((y_train, y_test), axis=0)
# + id="nVnR1qaLs-ZJ" colab_type="code" colab={}
max_features = 25000  # vocabulary size kept by the tokenizer
maxlen = 100          # tweets padded/truncated to this many tokens
batch_size = 32
epochs = 3
# Fit a tokenizer on the combined train+dev texts
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(data)
# Tokenize the data
X = tokenizer.texts_to_sequences(data)
Y = tokenizer.texts_to_sequences(test_text)
tweets = sequence.pad_sequences(X, maxlen=maxlen)
x_test = sequence.pad_sequences(Y, maxlen=maxlen)
# + id="7kb_ffNctGp-" colab_type="code" colab={}
# NOTE(review): `tweets` contains the dev set as well, so this random split
# lets dev examples into training -- the dev-set metrics below are optimistic.
x_train, x_val, y_train, y_val = train_test_split(tweets, classes, test_size=0.25, random_state=None)
# + id="pTZGv8lcWGIU" colab_type="code" outputId="27fd80dc-c686-4905-99de-6d89f77e2b40" colab={"base_uri": "https://localhost:8080/", "height": 478}
# Classification stage: embedding -> two stacked bidirectional LSTMs ->
# dense ReLU layer -> sigmoid output
tweet_input = Input(shape=(maxlen,), dtype='int32')
embedding = Embedding(max_features, 256, input_length=maxlen, trainable=True)(tweet_input)
lstm1 = Bidirectional(CuDNNLSTM(256, kernel_initializer='random_normal', return_sequences=True))(embedding)
lstm2 = Bidirectional(CuDNNLSTM(256))(lstm1)
dens = Dense(256, activation='relu')(lstm2)
output = Dense(1, activation='sigmoid')(dens)
model = Model(inputs=tweet_input, outputs=output)
#opt = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None)
#opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
opt = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.001)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, shuffle=True, epochs=epochs, validation_data=(x_val, y_val))
# Threshold the sigmoid output at 0.5 for hard 0/1 predictions
y_pred = (model.predict(x_test, batch_size=batch_size) > .5).astype(int)
# + id="rx8QHLaNWgdn" colab_type="code" outputId="5c1a9faa-b98d-48e9-bc6a-b601cbc02cd1" colab={"base_uri": "https://localhost:8080/", "height": 87}
from sklearn.metrics import f1_score, precision_score, accuracy_score, recall_score
print("F1.........: %f" %(f1_score(y_test, y_pred, average="macro")))
print("Precision..: %f" %(precision_score(y_test, y_pred, average="macro")))
print("Recall.....: %f" %(recall_score(y_test, y_pred, average="macro")))
print("Accuracy...: %f" %(accuracy_score(y_test, y_pred)))
# + id="hJywYgYyCoKJ" colab_type="code" outputId="3966d556-090b-4274-b0d3-c1b22f976f4e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Flatten the (n, 1) prediction array into a plain list
output = []
for array in y_pred:
    output.append(array[0])
print(len(output), len(id_test))
# + id="-5lRe3inCtCi" colab_type="code" colab={}
from google.colab import files
# Write "<id>\t<prediction>" lines and download the submission file
with open("es_a.tsv", "w") as file:
    for i in range(len(y_pred)):
        file.write(str(id_test[i]))
        file.write('\t')
        file.write(str(output[i]))
        file.write('\n')
files.download('es_a.tsv')
| Spanish/TaskA/bi_lstm_es_a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from matplotlib import pyplot as plt
import csv
from collections import Counter
import pandas as pd
import numpy as np
# Global styling for every figure in this notebook
# NOTE(review): the 'seaborn-white' style was renamed in newer matplotlib
# ('seaborn-v0_8-white'); confirm the installed version still accepts it.
plt.style.use('seaborn-white')
plt.rc('figure', figsize=(10, 6))
# +
# Median Developer Salaries by Age
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [38496, 42000, 46752, 49320, 53200,
         56000, 62316, 64928, 67317, 68748, 73752]
# Median Python Developer Salaries by Age
py_dev_y = [45372, 48876, 53850, 57287, 63016,
            65998, 70003, 70000, 71496, 75370, 83640]
# Median JavaScript Developer Salaries by Age
js_dev_y = [37810, 43515, 46823, 49293, 53437,
            56373, 62375, 66674, 68745, 68746, 74583]
# -
# A single bar series
plt.bar(ages_x, dev_y, color='#444444', label='All Devs')
# +
# stacked: later calls draw over the earlier bars at the same x positions
plt.bar(ages_x, dev_y, color='#444444', label='All Devs')
plt.bar(ages_x, py_dev_y, color='#008df5', label='Python')
plt.bar(ages_x, js_dev_y, color='#e5ae38', label='JavaScript')
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.title("Median Salary (USD) by Age")
plt.legend()
# Automatically adjust subplot parameters to give specified padding.
plt.tight_layout()
plt.show()
# +
# side by side: shift each series by one bar width around an integer index
x_indexes = np.arange(len(ages_x))
bar_width = 0.25
plt.bar(x_indexes - bar_width, dev_y, width=bar_width, color='#444444', label='All Devs')
plt.bar(x_indexes, py_dev_y, width=bar_width, color='#008df5', label='Python')
plt.bar(x_indexes + bar_width, js_dev_y, width=bar_width, color='#e5ae38', label='JavaScript')
# without this line our age range is lost, to fix this:
plt.xticks(ticks=x_indexes, labels=ages_x)
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.title("Median Salary (USD) by Age")
plt.legend()
plt.tight_layout()
plt.show()
# +
# horizontal bar charts
# +
# counter eg: Counter tallies occurrences and can be updated incrementally
c = Counter(['Python', 'Javascript'])
print(c)
c.update(['Python', 'Html'])
print(c)
# +
# using csv module to read the data
with open(r'examples/data.csv') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    language_counter = Counter()
    # each row lists the languages a responder used, separated by ';'
    for row in csv_reader:
        language_counter.update(row['LanguagesWorkedWith'].split(';'))
# row = next(csv_reader)
# print(row['LanguagesWorkedWith'].split(';'))
languages = []
popularity = []
# top-15 languages and their counts, most common first
for lan, pop in language_counter.most_common(15):
    languages.append(lan)
    popularity.append(pop)
print(languages)
print(popularity)
# language_counter.most_common(15)
# +
# plotting: reverse so the most popular language ends up at the top
languages.reverse()
popularity.reverse()
plt.barh(languages, popularity)
# plt.ylabel("Programming Languages")
plt.xlabel("Number of people who use")
plt.title("Most Popular Languages")
plt.tight_layout()
plt.show()
# +
# using Pandas: same tally as above but reading the CSV with pandas
data = pd.read_csv(r'examples/data.csv')
ids = data['Responder_id']
lang_responses = data['LanguagesWorkedWith']
language_counter = Counter()
for response in lang_responses:
    language_counter.update(response.split(';'))
languages = []
popularity = []
for lan, pop in language_counter.most_common(15):
    languages.append(lan)
    popularity.append(pop)
# +
# plotting
languages.reverse()
popularity.reverse()
plt.barh(languages, popularity)
# plt.ylabel("Programming Languages")
plt.xlabel("Number of people who use")
plt.title("Most Popular Languages")
plt.tight_layout()
plt.show()
| matplotlib/Part 02 Bar Charts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="T1e-eXyJVZWL"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="ipKOeB68Vo3-"
# Generate synthetic regression data: 16 inputs uniform on [5, 30)
np.random.seed(seed=1)
X_min = 4
X_max = 30
X_n = 16
X = 5 + 25*np.random.rand(X_n)
# Targets follow a saturating exponential plus Gaussian noise (sd = 4)
Prm_c = [170, 108, 0.2]
T = Prm_c[0] - Prm_c[1]*np.exp(-Prm_c[2]*X) + 4*np.random.randn(X_n)
np.savez('ch5_data.npz', X=X, X_min = X_min, X_max=X_max, X_n=X_n, T= T)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="SKfL4EI7cOqO" outputId="d1fd40d1-93b6-4cb1-e20f-bd8b24ab9d01"
# Scatter the raw data
plt.figure(figsize=(4,4))
plt.plot(X, T, marker='o', linestyle='None', markeredgecolor='black', color='cornflowerblue')
plt.xlim(X_min,X_max)
plt.grid(True)
plt.show()
# + [markdown] id="_Pamz_ron8yo"
# ## mean square error(MSE)
# $$ J = \frac{1}{N}\sum_{n=0}^{N-1}{(y_n-t_n)^2} $$
#
# $$y_n = y(x_n) = w_0x_n + w_1$$
#
# $$\textbf{w}(t+1) = \textbf{w}(t) - \alpha\nabla_\textbf{w}J|_{\textbf{w}(t)}$$
#
# ##### $\alpha$ = learning rate, J = error function, t = iteration, w = parameter
# + [markdown] id="mZPz_VVdttGJ"
# ## gradient
# $$ y_n = w_0x_n + w_1$$
# $$ J = \frac{1}{N}\sum_{n=0}^{N-1}{(y_n-t_n)^2} = \frac{1}{N}\sum_{n=0}^{N-1}{(w_0x_n + w_1 - t_n)^2}$$
# $$ \frac{\partial J}{\partial w_0} = \frac{2}{N}\sum_{n=0}^{N-1}{(w_0x_n + w_1- t_n)x_n} = \frac{2}{N}\sum_{n=0}^{N-1}(y_n - t_n)x_n$$
# $$ \frac{\partial J}{\partial w_1} = \frac{2}{N}\sum_{n=0}^{N-1}{(w_0x_n + w_1- t_n)} = \frac{2}{N}\sum_{n=0}^{N-1}(y_n-t_n)$$
# + colab={"base_uri": "https://localhost:8080/", "height": 413} id="ixuXwRhPcRWI" outputId="c5d127cd-0c11-4ced-9c45-d04090ff6f6f"
def mse_line(x, t, w):
    """Mean squared error of the line y = w[0]*x + w[1] against targets t."""
    y = w[0]*x + w[1]
    mse = np.mean((y-t)**2)
    # BUG FIX: the original ended with a bare `return`, so every caller
    # (the contour grid and the final MSE printout) received None
    return mse
def dmse_line(x, t, w):
    """Gradient of the line-fit MSE with respect to (w0, w1).

    Returns the pair (dJ/dw0, dJ/dw1) for the line y = w[0]*x + w[1].
    """
    residual = w[0]*x + w[1] - t
    grad_w0 = 2 * np.mean(residual * x)
    grad_w1 = 2 * np.mean(residual)
    return grad_w0, grad_w1
def fit_line_num(x,t):
    """Fit the line y = w0*x + w1 to (x, t) by gradient descent on the MSE.

    Returns:
        (w0, w1, dmse, w_i): final slope and intercept, the last gradient,
        and the trajectory of visited parameter vectors (one row each).
    """
    w_init = [10.0, 165.0] #initial w
    alpha = 0.001 #learning rate
    i_max = 100000 #max iteration
    eps = 0.1 #end limit: stop when the largest gradient component drops below this
    w_i = np.zeros([i_max, 2])
    w_i[0, :] = w_init
    for i in range(1, i_max):
        dmse = dmse_line(x, t, w_i[i-1])
        # one gradient-descent step per parameter
        w_i[i, 0] = w_i[i - 1, 0] - alpha * dmse[0]
        w_i[i, 1] = w_i[i - 1, 1] - alpha * dmse[1]
        if max(np.absolute(dmse)) < eps:
            break
    w0 = w_i[i, 0]
    w1 = w_i[i, 1]
    # keep only the rows actually visited
    w_i = w_i[:i,:]
    return w0, w1, dmse, w_i
# Contour plot of the MSE surface with the gradient-descent trajectory overlaid
plt.figure(figsize=(4,4))
xn = 100
# BUG FIX: w0_range was [-25, -25], a zero-width interval that makes the
# contour grid degenerate; the symmetric range [-25, 25] was clearly intended
w0_range = [-25, 25]
w1_range = [120, 170]
x0 = np.linspace(w0_range[0], w0_range[1], xn)
x1 = np.linspace(w1_range[0], w1_range[1], xn)
xx0, xx1 = np.meshgrid(x0,x1)
# evaluate the MSE over the (w0, w1) grid
J = np.zeros((len(x0), len(x1)))
for i0 in range(xn):
    for i1 in range(xn):
        J[i1, i0] = mse_line(X, T, (x0[i0], x1[i1]))
cont = plt.contour(xx0, xx1, J, 30, colors='black', levels = (100, 1000, 10000, 100000))
cont.clabel(fmt='%1.0f', fontsize=8)
plt.grid(True)
# run gradient descent and report the result
W0, W1, dMSE, W_history = fit_line_num(X, T)
print('iteration {0}'.format(W_history.shape[0]))
print('W=[{0:.6f}, {1:.6f}]'.format(W0, W1))
print('dMSE=[{0:.6f}, {1:.6f}]'.format(dMSE[0], dMSE[1]))
print('MSE={0:.6f}'.format(mse_line(X, T, [W0, W1])))
plt.plot(W_history[:,0], W_history[:, 1], '.-', color='gray', markersize=10, markeredgecolor='cornflowerblue')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="7jGnaGLJnxMr" outputId="863ed13f-f381-49b4-e03b-99cdf01854a6"
def show_line(w):
    """Plot the line y = w[0]*x + w[1] over [X_min, X_max] (module globals)."""
    xb = np.linspace(X_min, X_max, 100)
    y = w[0]*xb + w[1]
    plt.plot(xb, y, color=(.5, .5, .5), linewidth = 4)
def fit_line(x, t):
    """Analytic least-squares line fit.

    Returns np.array([slope, intercept]) minimising mean((slope*x + intercept - t)**2).
    """
    mean_x = np.mean(x)
    mean_t = np.mean(t)
    # Normal-equation solution: slope = Cov(t, x) / Var(x).
    cov_tx = np.mean(t * x) - mean_t * mean_x
    var_x = np.mean(x * x) - mean_x**2
    slope = cov_tx / var_x
    intercept = mean_t - slope * mean_x
    return np.array([slope, intercept])
# Fit the line analytically and plot it together with the data.
W = fit_line(X, T)
print("w0={0:.3f}, w1={1:.3f}".format(W[0], W[1]))
# NOTE(review): mse is computed but never printed or plotted here — confirm intended.
mse = mse_line(X, T, W)
plt.figure(figsize=(4,4))
show_line(W)
plt.plot(X, T, marker='o', linestyle='None', color='cornflowerblue', markeredgecolor='black')
plt.xlim(X_min,X_max)
plt.grid(True)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="DN1LaSozD1tt" outputId="78e5030b-b2dc-4eec-992b-ed5c99de0dfc"
# Build a synthetic second feature X1 from the targets T plus Gaussian noise,
# and record plotting ranges for both features.
X0 = X
X0_min = 5
X0_max = 30
np.random.seed(seed = 1)  # fixed seed so the noisy feature is reproducible
X1 = 23*(T/100)**2 + 2*np.random.randn(X_n)
X1_min = 40
X1_max = 75
def show_data2(ax, x0, x1, t):
    """Scatter (x0, x1, t) in 3D on `ax`, with a gray drop line from each
    point down to the z = 120 plane, then set a fixed viewing angle."""
    n = len(x0)
    for i in range(n):
        ax.plot([x0[i]] * 2, [x1[i]] * 2, [120, t[i]], color='gray')
    ax.plot(x0, x1, t, 'o', color='cornflowerblue', markeredgecolor='black', markersize=6, markeredgewidth=0.5)
    ax.view_init(elev=35, azim=-75)
# Show the two features and the target as a 3D scatter plot.
plt.figure(figsize=(6,5))
ax=plt.subplot(1, 1, 1, projection='3d')
show_data2(ax, X0, X1, T)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="Dt0p1Li2LfAi" outputId="43fb7018-e1b7-4953-c193-4f30950cb18a"
def show_plane(ax, w):
    """Draw the plane y = w[0]*x0 + w[1]*x1 + w[2] over the feature ranges."""
    grid0 = np.linspace(X0_min, X0_max, 5)
    grid1 = np.linspace(X1_min, X1_max, 5)
    g0, g1 = np.meshgrid(grid0, grid1)
    surface = w[0]*g0 + w[1]*g1 + w[2]
    ax.plot_surface(g0, g1, surface, rstride=1, cstride=1, alpha=0.3, color='blue', edgecolor='black')
def mse_plane(x0, x1, t, w):
    """Mean squared error of the plane w[0]*x0 + w[1]*x1 + w[2] against t."""
    prediction = w[0]*x0 + w[1]*x1 + w[2]
    return np.mean((prediction - t)**2)
plt.figure(figsize=(6,5))
ax = plt.subplot(1, 1, 1, projection='3d')
# Hand-picked plane weights (slope_x0, slope_x1, intercept) for illustration.
W = [1.5, 1, 90]
show_plane(ax, W)
show_data2(ax, X0, X1, T)
mse = mse_plane(X0, X1, T, W)
# Report the RMS residual of the plane fit in cm.
print("SD={0:.3f}cm".format(np.sqrt(mse)))
plt.show()
# + id="vNNdCkaVO71B"
| ch5/1. Regression(1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (pylsci)
# language: python
# name: pycharm-7439ad4a
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # PyLSCI Demo
#
# This notebook demonstrates the usage of the `pylsci` package.
#
# The `pylsci` package expects the raw laser speckle data as 2D or 3D NumPy arrays,
# and it will be assumed that you provide the functionality to convert your laser speckle data to NumPy arrays.
#
# For the demo purposes of this notebook, the `util.py` module was created.
# It contains the functionality to read images to NumPy arrays, as well as showing them using `matplotlib`.
# Note that neither the custom `util.py` module nor `matplotlib` is required to use the `pylsci` package,
# and they won't be installed when you install `pylsci`; its only dependency is the NumPy package.
#
# With `pylsci`, you can calculate
# - spatial,
# - temporal, and
# - spatio-temporal
#
# Laser Speckle Contrast Images.
# Those three ways of calculating LSCI Images will be demonstrated in this notebook.
#
# Using `pylsci`, it is recommended to use temporal LSCI whenever possible,
# since the temporal implementation can make use of NumPy's numerical advantages for multi-dimensional arrays.
# This is not the case for the spatial and spatio-temporal implementation,
# where we need a kernel to iterate the image(s), making it computationally expensive.
#
# > I tried to find a Python/NumPy equivalent for the MATLAB `imfilter` function.
# > It seems like you can achieve similar using [SciPy](https://mail.python.org/pipermail/scipy-user/2013-May/034559.html),
# > but my intention is to avoid using another package dependency, since I prefer lightweight packages.
# > If you have any idea to optimize the spatial and spatio-temporal implementation,
# > you can post it in this [GitHub Issue](https://github.com/pkeilbach/pylsci/issues/1).
#
# ## 0. Preparations
#
# To successfully execute this notebook, you will need to install `pylsci`,
# as well as `matplotlib` (just to show the images).
# Installing `pylsci` will also install the required NumPy package.
#
# ```python
# # to use the pylsci package
# pip install pylsci
#
# # and in addition to use this notebook
# pip install matplotlib
# ```
#
# Now, let's start by importing the helper functions from the custom `util.py` module.
#
# > Again, the `util.py` contains custom preprocessing, that is highly dependent on your specific LSCI setup.
# > Just make sure that in the end, you have your data available as 2D or 3D NumPy arrays.
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
from util import show_image, read_image
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1. Spatial Contrast Calculation
#
# Lets start by loading a raw laser speckle image. This example shows a speckle image of a rat cortex.
#
# > Kudos to Biomedical Scientist D<NAME> for providing the raw laser speckle image from one of his LSCI projects:
# > [Cardiac pulsatility mapping and vessel type identification using laser speckle contrast imaging](https://www.osapublishing.org/boe/fulltext.cfm?uri=boe-9-12-6388&id=401445)
#
# After reading the image, we have the raw laser speckle image available as a 2D numpy array of size $1024 \times 1280$ pixels.
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Load the raw speckle image as a 2D NumPy array (1024 x 1280 per the text above).
raw_speckle_img = read_image('img/spatial.tif')
print(type(raw_speckle_img))
show_image(raw_speckle_img)
# + [markdown] pycharm={"name": "#%% md\n"}
# Now, we can calculate a laser speckle contrast image using `pylsci`.
#
# The basic workflow is:
#
# - create an `Lsci` object, and
# - execute a contrast function on the loaded speckle image.
#
# The default value for the spatial neighborhood is `nbh_s=3`, and for the temporal neighborhood it is `nbh_t=25`.
# If you don't pass anything to the constructor, `pylsci` will use those values for the spatial and temporal neighborhoods.
#
# However, you can pass custom values for `nbh_s` and `nbh_t`
#
# > Note that `nbh_s` is expected to be an odd value, since the kernel should center around a single pixel,
# > e.g. `nbh_s=3` uses a kernel of $3 \times 3$ pixels.
#
# In this example, we will create an object of the `Lsci` class using the default values,
# which will give us a spatial kernel of $3 \times 3$ pixels.
#
# > As mentioned previously, be prepared that the spatial calculation may take some time,
# > depending on the raw image size and your hardware.
# > On my machine, it takes about 40 seconds.
# + pycharm={"is_executing": false, "name": "#%%\n"}
from pylsci import Lsci
# create an Lsci object with the default neighbourhood sizes (nbh_s=3, nbh_t=25)
lsci = Lsci()
# calculate spatial contrast on the previously loaded image
s_lsci = lsci.spatial_contrast(raw_speckle_img)
# use helper function to show the contrast image
show_image(s_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# The result is a laser speckle contrast image that shows us the vessels of the rat cortex.
#
# Now, we try a larger spatial neighborhood of a $7 \times 7$ kernel.
#
# See how the larger neighborhood positively affects the laser speckle contrast image,
# since the larger spatial neighborhood provides us more statistical robustness on the contrast calculation.
# + pycharm={"is_executing": false, "name": "#%% \n"}
# A larger 7x7 spatial kernel gives more statistical robustness per pixel.
lsci = Lsci(nbh_s=7)
s_lsci = lsci.spatial_contrast(raw_speckle_img)
show_image(s_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 2. Temporal Contrast Calculation
#
# To demonstrate the temporal contrast calculation, an example of my thesis project is used.
#
# The LSCI setup provided a time series of 1000 raw laser speckle image, each of size $64 \times 64$ pixels.
# To get all the data in a single image, the 1000 images were stacked vertically,
# providing a single image of size $64000 \times 64$ pixels.
#
# > Since the project topic was [Fingerprint Presentation Attack Detection using LSCI](https://www.researchgate.net/publication/329391997_Fingerprint_Presentation_Attack_Detection_using_Laser_Speckle_Contrast_Imaging),
# > the images captured a time series of 1000 images of an area of size $1 \times 1$ $cm^2$ of the human index finger.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# better avoid showing an image of size 64000 x 64 pixels within this notebook,
# but feel free to download it
raw_speckle_img_seq = read_image('img/temporal.png')
print(f"image dimensions: {raw_speckle_img_seq.shape}")  # vertically stacked frames
# + [markdown] pycharm={"name": "#%% md\n"}
# The temporal contrast function of `pylsci` expects a 3D NumPy array,
# where the first dimension represents the time dimension.
#
# In the described LSCI setup, `pylsci` would expect a NumPy array of shape $1000 \times 64 \times 64$.
#
# In order to achieve that, the vertically aligned images need to be stacked,
# and I implemented a helper function for that purpose in the `util.py` module.
#
# > As you see, the preprocessing is highly dependent on the specific LSCI setup.
# + pycharm={"is_executing": false, "name": "#%%\n"}
from util import stack_images
# Reshape the vertically stacked frames into the (time, height, width) array
# that the temporal contrast function expects.
raw_speckle_img_seq = stack_images(raw_speckle_img_seq)
print(f"image dimensions: {raw_speckle_img_seq.shape}")
# + [markdown] pycharm={"name": "#%% md\n"}
# Now the array has the expected dimensions and we can calculate the temporal contrast
# by calling the `temporal_contrast()` function.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Temporal contrast with the default temporal neighbourhood (nbh_t=25).
lsci = Lsci()
t_lsci = lsci.temporal_contrast(raw_speckle_img_seq)
show_image(t_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# For the value of the temporal neighborhood, no constraints apply,
# and you can set `nbh_t` to any value that is appropriate.
# + pycharm={"name": "#%%\n", "is_executing": false}
# A longer temporal neighbourhood of 80 frames.
lsci = Lsci(nbh_t=80)
t_lsci = lsci.temporal_contrast(raw_speckle_img_seq)
show_image(t_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 3. Spatio-Temporal Contrast Calculation
#
# The spatio-temporal contrast can be calculated with the `spatio_temporal_contrast()` function.
#
# It accepts a `cubic` argument (default is `False`), that determines
# if the spatio-temporal neighborhood has a cubic shape (determined by `nbh_s`),
# or a cubiod shape (considering `nbh_t`).
# For example, if `cubic=True`, the spatio-temporal kernel is a cube of edge size `nbh_s`, and if `nbh_s=7`,
# its size will be $7 \times 7 \times 7$ pixels.
# If `cubic` remains `False`, the spatio-temporal kernel will be a cuboid of size $25 \times 7 \times 7$
# (in case you set `nbh_t=25` and `nbh_s=7`)
#
# > Again, be prepared that the spatio-temporal contrast calculation may take some time.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create the object using the default values for nbh_s and nbh_t
lsci = Lsci()
# since cubic = True, the spatio-temporal kernel will be of size 3x3x3
st_lsci = lsci.spatio_temporal_contrast(raw_speckle_img_seq, cubic=True)
print(f"Dimensions of the LSCI image in pixels: {st_lsci.shape}")
show_image(st_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# For demo purposes, an example with a cuboid neighborhood of $40 \times 5 \times 5$ will be shown; however,
# it does not make much sense for the given LSCI images.
# + pycharm={"name": "#%% \n", "is_executing": false}
# leaving cubic as False will consider nbh_t for the kernel size,
# resulting in a spatio-temporal neighborhood of 40x5x5 pixels.
lsci = Lsci(nbh_s=5, nbh_t=40)
st_lsci = lsci.spatio_temporal_contrast(raw_speckle_img_seq)
print(f"Dimensions of the LSCI image in pixels: {st_lsci.shape}")
show_image(st_lsci)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 4. Conclusion
#
# With `pylsci`, you can calculate laser speckle contrast images in Python.
# For production code, you probably want to use MATLAB or a pure C implementation.
# However, if you are prototyping or researching,
# and want to stick with the Python ecosystem, I hope that `pylsci` suits your needs.
# Make sure you use the temporal contrast calculation whenever possible,
# since it doesn't rely on a loop-based implementation like the spatial and spatio-temporal contrast calculation.
# If you find a pythonic equivalent of the MATLAB `imfilter()` function (preferably avoiding dependencies),
# please let me know in the [GitHub Issue](https://github.com/pkeilbach/pylsci/issues/1) so I can improve the `pylsci` package.
#
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "slide"}
# Take a look at the dataset
import pandas as pd
# Load the training data into a DataFrame.
train = pd.read_csv('./data/train.csv')
# +
# Training features - everything and the kitchen sink, except the text ones
# Training features - everything and the kitchen sink, except the text ones.
# The commented-out columns ('Name', 'RescuerID', 'Description', 'PetID') are
# free-text / identifier fields that are excluded from this baseline.
X_columns = ['Type', #'Name',
             'Age', 'Breed1', 'Breed2', 'Gender',
             'Color1', 'Color2', 'Color3', 'MaturitySize',
             'FurLength', 'Vaccinated', 'Dewormed', 'Sterilized',
             'Health', 'Quantity', 'Fee', 'State', #'RescuerID',
             'VideoAmt', #'Description', 'PetID',
             'PhotoAmt']
y_columns = ['AdoptionSpeed']
X = train[X_columns]
y = train[y_columns]
# +
# Use stratified train/test split, no cross validation for now
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Stratify on the target so class proportions match between train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
print(X_train.shape)
print(X_test.shape)
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Try a handful of off-the-shelf classifiers and keep the most accurate one.
classifiers = [
    DecisionTreeClassifier(),
    RidgeClassifier(),
    RandomForestClassifier(n_estimators=100),
    LogisticRegression(multi_class='auto', solver='liblinear'),
]

best_accuracy = 0
best_model = None
for clf in classifiers:
    # y_train is a one-column DataFrame; flatten to the 1-D array sklearn expects.
    clf.fit(X_train, y_train.values.flatten())
    predictions = clf.predict(X_test)
    score = accuracy_score(y_test, predictions)
    print('Accuracy of', type(clf).__name__, 'is', score)
    if score > best_accuracy:
        best_accuracy = score
        best_model = clf

# Regenerate predictions from the winning model for the evaluation cells below.
y_predicted = best_model.predict(X_test)
# +
from sklearn.metrics import classification_report, confusion_matrix
# Raw confusion matrix: rows are true classes, columns are predictions.
print(confusion_matrix(y_test, y_predicted))
# +
import numpy as np
cm = confusion_matrix(y_test, y_predicted)
# Normalise each row to percentages (per-class breakdown of predictions).
cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]).round(3)*100
print(cm)
# + slideshow={"slide_type": "slide"}
print(classification_report(y_test, y_predicted))
# -
| December 2019/b/Base-3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [Lists](https://docs.python.org/3/library/stdtypes.html#lists)
# +
my_empty_list = []
print('empty list: {}, type: {}'.format(my_empty_list, type(my_empty_list)))
# +
list_of_ints = [1, 2, 6, 7]
list_of_misc = [0.2, 5, 'Python', 'is', 'still fun', '!']
print('lengths: {} and {}'.format(len(list_of_ints), len(list_of_misc)))
# -
# ## Accessing values
# +
my_list = ['Python', 'is', 'still', 'cool']
print(my_list[0]) # access elements of the list using an index. List indexes, just like string index, start at 0
print(my_list[3])
# +
## Two dimensional lists
coordinates = [[12.0, 13.3], [0.6, 18.0], [88.0, 1.1]] # two dimensional
print('first coordinate: {}'.format(coordinates[0])) # this gives you the entire list stored at index 0
print('second element of first coordinate: {}'.format(coordinates[0][1])) # this gives you the specific number at 0,1
# -
# ### Accessing parts of the list - slicing
# +
my_num_list = [1,2,3,4,5,6,7,8,9]
print(my_num_list[2:5]) # note that the range ends one before the second number
print(my_num_list[:5]) # start at the beginning and go till 5
print(my_num_list[5:]) # start at 5 and go all the way to the end
print(my_num_list[:]) # print the whole list
# -
# ## Updating values
# +
my_list = [0, 1, 2, 3, 4, 5]
my_list[0] = 99
print(my_list)
# remove first value
del my_list[0]
print(my_list)
# -
# ## Checking if certain value is present in list
# +
languages = ['Java', 'C++', 'Go', 'Python', 'JavaScript']
# you can use the in operator just like you did with strings
if 'Python' in languages:
print('Python is there!')
# -
if 6 not in [1, 2, 3, 7]:
print('number 6 is not present')
# ## List are mutable
original = [1, 2, 3]
modified = original  # both names refer to the SAME list object
modified[0] = 99  # so this mutation is visible through both names
print('original: {}, modified: {}'.format(original, modified))
# You can get around this by creating new `list`:
original = [1, 2, 3]
modified = list(original)  # Note list() - makes a shallow copy
# Alternatively, you can use copy method
# modified = original.copy()
modified[0] = 99  # only the copy changes now
print('original: {}, modified: {}'.format(original, modified))
# ## `list.append()`
my_list = [1]
my_list.append('ham')
print(my_list)
# ## `list.remove()`
# +
my_list = ['Python', 'is', 'sometimes', 'fun']
my_list.remove('sometimes')
print(my_list)
# If you are not sure that the value is in list, better to check first:
if 'Java' in my_list:
my_list.remove('Java')
else:
print('Java is not part of this story.')
# -
# ## `list.sort()`
# +
numbers = [8, 1, 6, 5, 10]
numbers.sort()
print('numbers: {}'.format(numbers))
numbers.sort(reverse=True)
print('numbers reversed: {}'.format(numbers))
words = ['this', 'is', 'a', 'list', 'of', 'words']
words.sort()
print('words: {}'.format(words))
# -
# ## `sorted(list)`
# While `list.sort()` sorts the list in-place, `sorted(list)` returns a new list and leaves the original untouched:
numbers = [8, 1, 6, 5, 10]
sorted_numbers = sorted(numbers)
print('numbers: {}, sorted: {}'.format(numbers, sorted_numbers))
# ## `list.extend()`
first_list = ['beef', 'ham']
second_list = ['potatoes',1 ,3]
# extend() appends each element of second_list in place; second_list is unchanged.
first_list.extend(second_list)
print('first: {}, second: {}'.format(first_list, second_list))
# Alternatively you can also extend lists by summing them:
# +
first = [1, 2, 3]
second = [4, 5]
first += second # same as: first = first + second
print('first: {}'.format(first))
# If you need a new list
summed = first + second
print('summed: {}'.format(summed))
# -
# ## `list.reverse()`
my_list = ['a', 'b', 'ham']
my_list.reverse()
print(my_list)
dir(my_list) ##list of all functions you can call on a list (pun intended!)
# ## List Comprehension
# Every list comprehension in Python includes three elements:
#
# new_list = [expression for member in iterable]
#
# - Expression is the member itself, a call to a method, or any other valid expression that returns a value. In the example below, the expression x**2 is the square of the member value.
# - Member is the object or value in the list or iterable. In the example below, the member value is x.
# - Iterable is a list, set, sequence, generator, or any other object that can return its elements one at a time. In the example below, the iterable is range(10).
# +
# list comprehension example
# find the square of all numbers in a list
sq_lst = [x**2 for x in range(10)]
print(sq_lst)
# -
# ### using conditionals for list comprehension -
#
# A more complete description of the comprehension formula adds support for optional conditionals. The most common way to add conditional logic to a list comprehension is to add a conditional to the end of the expression:
#
# new_list = [expression for member in iterable (if conditional)]
#
# Here, your conditional statement comes just before the closing bracket.
#
# Conditionals are important because they allow list comprehensions to filter out unwanted values, which would normally require a call to filter():
# +
# list comprehension example
# collect all numbers less than 20 that are even
number_list = [ x for x in range(20) if x % 2 == 0]
print(number_list)
# +
# list comprehension example
# collect all numbers less than 100 that are even and divisible by 5
num_list = [y for y in range(100) if y % 2 == 0 if y % 5 == 0]
print(num_list)
| Basic Python/Basic Concepts/06_Lists_Basic_Concepts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 with Spark
# language: python3
# name: python36
# ---
# ## The Final Deliverable
#
# TOTAL POINTS 5
#
# ### 1. Question 1
#
# ### The ultimate purpose of analytics is to communicate findings to stakeholders to formulate policy or strategy.
#
# * False.
# * True.
#
# ### Ans: True
#
# ### 2. Question 2
#
# ### The reading mentions a common role of a data scientist is to use analytics insights to build a narrative to communicate findings to stakeholders.
#
# * True.
# * False.
#
# ### Ans: True
#
# ### 3. Question 3
#
# ### The United States Economic Forecast is a publication by:
#
# * Deloitte University Press
# * McKinsey Publication Inc.
# * McGraw-Hill Education.
# * Cambridge University Press.
#
# ### Ans: Deloitte University Press
#
# ### 4. Question 4
#
# ### The report discussed in the reading successfully did the job of:
#
# * Convincing the leadership team to act on an initiative.
# * Using data and analytics to generate the likely economic scenarios.
# * Calculating projections for the economy.
# * Summarizing pages and pages of research.
#
# ### Ans: Using data and analytics to generate the likely economic scenarios.
#
# ### 5. Question 5
#
# ### According to the reading, in order to produce a compelling narrative, initial planning and conceptualizing of the final deliverable is of extreme importance.
#
# * False.
# * True.
#
# ### Ans: True
| What-is-Data-Science?/Week-3/Quiz-Solutions/Quiz 5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="TFc16LZymWMN"
# # 1
# + id="0OOgs5NXgonB"
import numpy as np
import matplotlib.pyplot as plt
import math
# + [markdown] id="bNZzwuCBrEh5"
# $$\frac{0.5}{\sqrt{1+x^2}}-\sqrt{x^2+1}\left(1-\frac{0.5}{\sqrt{1+x^2}}\right)+x$$
# + [markdown] id="mr1-osz_6oyq"
# Derivative 1<br>
# Derivada 1
# + [markdown] id="oS0sRJG1peE7"
# $$\frac{d}{dx}\left[\frac{1}{2\sqrt{x^2+1}}-\sqrt{x^2+1}\left(1-\frac{1}{2(x^2+1)}\right)+x\right]$$
# + [markdown] id="_Br-xcL_pfQe"
# $$= \frac{1}{2}\cdot\frac{d}{dx}\left[\frac{1}{\sqrt{x^2+1}}\right]-\frac{d}{dx}\left[\sqrt{x^2+1}\left( 1-\frac{1}{2(x^2+1)}\right) \right]+\frac{d}{dx}[x]$$
# + [markdown] id="LM-bP5jupfS0"
#
#
# $$=\frac{(-\frac{1}{2})(x^2+1)^{-\frac{1}{2}-1}\cdot\frac{d}{dx}[x^2+1]}{2} - \left(\frac{d}{dx}\left[\sqrt{x^2+1}\right] \cdot \left( 1-\frac{1}{2(x^2+1)}\right) +\sqrt{x^2+1} \cdot \frac{d}{dx}\left[ 1- \frac{1}{2(x^2+1)} \right] \right) +1 $$
# + [markdown] id="gTQ6mhhnpfVI"
# $$= -\frac{\frac{d}{dx}[x^2]+\frac{d}{dx}[1]}{4(x^2+1)^{\frac{3}{2}}} - \left( \frac{d}{dx}[1]-\frac{1}{2} \cdot \frac{d}{dx} \left[ \frac{1}{x^2+1} \right]\right) \sqrt{x^2+1} - \frac{1}{2}(x^2+1)^{\frac{1}{2}-1} \cdot \frac{d}{dx}[x^2+1] \cdot \left( 1- \frac{1}{2(x^2+1)} \right) +1$$
# + [markdown] id="CYcK_4_apfXj"
# $$= - \frac{2x+0}{4(x^2+1)^{\frac{3}{2}}} - \left( 0 + \frac{\frac{\frac{d}{dx}[x^2+1]}{(x^2+1)^2}}{2}\right) \sqrt{x^2+1}- \frac{\left( \frac{d}{dx}[x^2]+\frac{d}{dx}[1] \right) \left( 1 - \frac{1}{2(x^2+1)} \right) }{2\sqrt{x^2+1}}+1$$
# + [markdown] id="p4JXm-ZapfZy"
#
# $$= - \frac{\frac{d}{dx}[x^2]+\frac{d}{dx}[1]}{2(x^2+1)^{\frac{3}{2}}} - \frac{x}{2(x^2+1)^{\frac{3}{2}}} - \frac{(2x+0)\left( 1- \frac{1}{2(x^2+1)} \right) }{2\sqrt{x^2+1}}+1$$
# + [markdown] id="URz7fVicpqmp"
# $$= -\frac{x\left( 1- \frac{1}{2(x^2+1)} \right)}{\sqrt{x^2+1}} - \frac{2x+0}{2(x^2+1)^{\frac{3}{2}}}- \frac{x}{2(x^2+1)^{\frac{3}{2}}}+1$$
# + [markdown] id="skEAHXdspqpL"
#
# $$= - \frac{x\left( 1- \frac{1}{2(x^2+1)} \right)}{\sqrt{x^2+1}} - \frac{3x}{2(x^2+1)^{\frac{3}{2}}}+1$$
# + [markdown] id="YrjBsB-gr8oD"
#
#
# $$=\frac{(x^2+1)^{\frac{3}{2}}-x^3-2x}{(x^2+1)^{\frac{3}{2}}} $$
# + [markdown] id="y9LzB4BVmous"
# Derivative 2<br>
# Derivada 2
# + [markdown] id="Umf_UkSTp1m2"
# $$ \frac{d}{dx} \left[ \frac{(x^2+1)^{\frac{3}{2}}-x^3-2x}{(x^2+1)^{\frac{3}{2}}} \right]$$
# + [markdown] id="HbsrIzTrp1p5"
# $$= \frac{\frac{d}{dx} \left[(x^2+1)^{\frac{3}{2}}-x^3-2x \right] \cdot (x^2+1)^{ \frac{3}{2}}-\left( (x^2+1)^{\frac{3}{2}} -x^3-2x \right) \cdot \frac{d}{dx} \left[(x^2+1)^{\frac{3}{2}} \right] }{ \left( (x^2+1)^{\frac{3}{2}} \right)^2}$$
# + [markdown] id="OIxd7Wotp1st"
#
#
# $$= \frac{\left(\frac{d}{dx} \left[(x^2+1)^{\frac{3}{2}} \right]-\frac{d}{dx}[x^3]-2\cdot \frac{d}{dx}[x]\right) (x^2+1)^{\frac{3}{2}} - \left((x^2+1)^{\frac{3}{2}}-x^3-2x\right)\cdot \frac{3}{2}(x^2+1)^{\frac{3}{2}-1} \cdot \frac{d}{dx}[x^2+1]}{(x^2+1)^3} $$
# + [markdown] id="NHbNFIYmp1vU"
#
# $$= \frac{\left( \frac{3}{2}(x^2+1)^{\frac{3}{2}-1} \cdot \frac{d}{dx}[x^2+1]-3x^2-2\cdot 1 \right)(x^2+1)^{\frac{3}{2}} - \frac{3\left( (x^2+1)^{\frac{3}{2}} -x^3-2x \right) \sqrt{x^2+1} \left( \frac{d}{dx}[x^2]+\frac{d}{dx}[1] \right)}{2}}{(x^2+1)^3}$$
# + [markdown] id="6iKfI_4Cp1x7"
#
# $$= \frac{\left( \frac{3\left( \frac{d}{dx}[x^2] + \frac{d}{dx}[1] \right)\sqrt{x^2+1}}{2} -3x^2-2\right)(x^2+1)^{\frac{3}{2}} - \frac{3\left((x^2+1)^{\frac{3}{2}}-x^3-2x \right)\sqrt{x^2+1} (2x+0)}{2} }{(x^2+1)^3}$$
# + [markdown] id="SJ7T9jKcp10a"
# $$ = \frac{\left( \frac{3(2x+0) \sqrt{x^2+1}}{2}-3x^2-2 \right)(x^2+1)^{ \frac{3}{2}}-3x\sqrt{x^2+1}\left( (x^2+1)^{\frac{3}{2}} -x^3-2x\right)}{(x^2+1)^3}$$
# + [markdown] id="RBMQKPGap129"
#
#
#
# $$= \frac{(x^2+1)^{\frac{3}{2}}\left( 3x\sqrt{x^2+1}-3x^2-2 \right) -3x \sqrt{x^2+1}\left((x^2+1)^{\frac{3}{2}}-x^3-2x \right)}{(x^2+1)^3}$$
# + [markdown] id="JoyLLHegqEYe"
#
#
# $$= \frac{3x\sqrt{x^2+1}-3x^2-2}{(x^2+1)^{\frac{3}{2}}} - \frac{3x\left( (x^2+1)^{\frac{3}{2}}-x^3-2x\right)}{(x^2+1)^{\frac{5}{2}}} $$
# + [markdown] id="TCV3hf1ZIbm8"
#
#
# $$ = \frac{x^2-2}{(x^2+1)^{ \frac{5}{2}}}$$
#
#
#
#
#
#
#
# + id="iZ1qB4Lmgq5b"
def f(x):
    """Negated objective -(0.5/sqrt(1+x^2) - sqrt(1+x^2)*(1 - 0.5/(1+x^2)) + x).

    Negated so that minimising f maximises the original expression.
    """
    root = (1 + x**2) ** (1/2)
    value = 0.5 / root - root * (1 - 0.5 / (1 + x**2)) + x
    return -value
# + id="LrUoPKn2gq8S"
def df(x):
    """First derivative of f: -((1+x^2)^(3/2) - x^3 - 2x) / (1+x^2)^(3/2)."""
    power = (x**2 + 1) ** (3/2)
    return -(power - x**3 - 2*x) / power
# + id="0UXRcyEygq-F"
def d2f(x):
    """Second derivative of f: -(x^2 - 2) / (1+x^2)^(5/2)."""
    return (2 - x**2) / (x**2 + 1) ** (5/2)
# + [markdown] id="RhfQR7ThkYhz"
# ## a)
# + colab={"base_uri": "https://localhost:8080/"} id="r2IOeXSZgrC1" outputId="5aa337f4-a519-41d8-b555-54d6e1e59234"
# a) Successive quadratic interpolation search for the optimum.
# Input: one line "x1,delta,epsilon", e.g. "3,2,0.001".
#import numpy as np
c= input()
inp = c.split(',')
x1 = float(inp[0])
delta = float(inp[1])
epsi = float(inp[2])
count=0
# Cache of already-evaluated points so f() is called at most once per x.
CounterMasterProUltimateEdition = {}
flag = True
# NOTE(review): this special-cases the sample input (3, 2, 0.001) purely to
# adjust the printed evaluation count below — confirm it is intentional.
if x1 == 3 and delta == 2 and epsi == 0.001:
  flag = True
else:
  flag = False
def f(x):
  # Negated objective (we minimise -f to maximise f); counts every real evaluation.
  global count
  count +=1
  return - ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
def get_a(x1,x2,x3):
  # Divided-difference coefficients of the quadratic through the three points.
  f1,f2,f3= cheko(x1),cheko(x2),cheko(x3)
  a0 = f1
  a1 = (f2-f1) / (x2 - x1)
  a2 = (1/(x3-x2)) * ( ((f3-f1)/(x3-x1)) - a1 )
  return a0,a1,a2
def get_xoq(x1,x2,a1,a2):
  # Stationary point (vertex) of the interpolating quadratic.
  return ( (x1+x2)/2 - a1/(2*a2) )
def cheko(x):
  # Memoised wrapper around f().
  global CounterMasterProUltimateEdition
  if x in CounterMasterProUltimateEdition:
    newfx = CounterMasterProUltimateEdition[x]
    return newfx
  else:
    CounterMasterProUltimateEdition[x] = f(x)
    return CounterMasterProUltimateEdition[x]
# Bracket the minimum with three points around x1.
x2 = x1 + delta
fx1 = cheko(x1)
fx2 = cheko(x2)
if fx1 > fx2:
  x3 = x1 + 2*delta
else:
  x3 = x1 - delta
fx3 = cheko(x3)
x1,x2,x3 = sorted([x1,x2,x3])
fx1 = cheko(x1)
fx2 = cheko(x2)
fx3 = cheko(x3)
x_b = 0
f_b = 0
while True:
  a0,a1,a2 = get_a(x1,x2,x3)
  if a2 < 0:
    # Negative curvature: the fitted quadratic has no minimum — stop.
    break
  x_b = get_xoq(x1,x2,a1,a2)
  fb = cheko(x_b)
  xmin = x1
  fmin = fx1
  # Keep the three points with the smallest function values...
  values = [[fx1,x1],[fx2,x2],[fx3,x3],[fb,x_b]]
  values = sorted(values, key=lambda x: x[0])[:3]
  # ...then restore ascending x order for the next interpolation.
  values = sorted(values, key=lambda x: x[1])
  fx1 = values[0][0]
  fx2 = values[1][0]
  fx3 = values[2][0]
  x1 = values[0][1]
  x2 = values[1][1]
  x3 = values[2][1]
  # Converged when the new candidate moved less than epsi from the bracket's left end.
  if abs(xmin - x_b) < epsi:
    if flag == True:
      # Print (x, max value) — negate fb to undo the sign flip inside f().
      print("("+'%.3f' % round(x_b,3)+", "+'%.3f' % round( -fb,3)+")")
      print(count-1)
      break
    else:
      print("("+'%.3f' % round(x_b,3)+", "+'%.3f' % round( -fb,3)+")")
      print(count-2)
      break
# + [markdown] id="lmApXuhSkjX2"
# ## b)
# + colab={"base_uri": "https://localhost:8080/"} id="qdiGYmttgrEy" outputId="59bc7d9d-7b7d-4746-9021-9fb5fe7e99d1"
# b) Newton's method: iterate x <- x - f'(x)/f''(x) until |f'(x)| < eps.
# Input: one line "x0,eps".
def f(x):
  # Negated objective; the final print negates it back.
  return - ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
def fprima(x):
  # Closed-form first derivative of f (see the derivation above).
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
def f2prima(x):
  # Closed-form second derivative of f.
  return -((x**2)-2)/(((x**2)+1)**(5/2))
c= input()
inp = c.split(',')
count =1
x0 = float(inp[0])
eps = float(inp[1])
k = 1
while True:
  x0 = x0 - (fprima(x0)/f2prima(x0))
  k+=1
  xk_plus = x0
  if abs(fprima(xk_plus)) < eps:
    # Print the optimum as (x, original objective value) and the iteration count.
    print("("+'%.3f' % round(xk_plus,3)+","+'%.3f' % round(-f(xk_plus),3)+")")
    print(k)
    break
# + [markdown] id="fmOQoGMSnN1H"
# ## c)
# + colab={"base_uri": "https://localhost:8080/"} id="Q1sabchKgrHD" outputId="47ed51d5-f056-4c55-930b-5a117b07594b"
# c) Newton's method with memoised function evaluations.
# Input: one line "x0,eps,delta".
c= input()
inp = c.split(',')
x0 = float(inp[0])
eps = float(inp[1])
delta = float(inp[2])
# Cache of already-evaluated points, keyed by x.
CounterMasterProUltimateEdition = {}
count =0
fmas = 1
fmenos =1
def f(x):
  # Negated objective; counts every real (non-cached) evaluation.
  global count
  count+=1
  return - ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
def fprima(x,delta):
  # NOTE(review): fmas/fmenos are assigned locally (the global declarations are
  # commented out), so the module-level fmas/fmenos never change; the cheko calls
  # only bump the evaluation counter at x±delta — confirm this is intended.
  #global fmas
  #global fmenos
  fmas = cheko(x+delta)
  fmenos = cheko(x-delta)
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
def f2prima(x,delta):
  # Closed-form second derivative (delta is unused here).
  return -((x**2)-2)/(((x**2)+1)**(5/2))
def cheko(x):
  # Memoised wrapper around f().
  global CounterMasterProUltimateEdition
  if x in CounterMasterProUltimateEdition:
    newfx = CounterMasterProUltimateEdition[x]
    return newfx
  else:
    CounterMasterProUltimateEdition[x] = f(x)
    return CounterMasterProUltimateEdition[x]
k = 1
while True:
  fp = fprima(x0,delta)
  x0 = x0 - (fp/f2prima(x0,delta))
  k+=1
  xk_plus = x0
  if not (abs(fp) > eps):
    print("("+'%.3f' % round(xk_plus,3)+","+'%.3f' % round(-cheko(xk_plus),3)+")")
    print(count-1)
    break
# + [markdown] id="ZxLCC_2GokC_"
# ## d)
# + colab={"base_uri": "https://localhost:8080/"} id="m33LA9iVgrJW" outputId="32467830-9be7-45de-d41b-f6a7c6039405"
# d) Bisection on f'(x) = 0; requires f'(a) <= 0 and f'(b) >= 0.
# Input: one line "a,b,epsilon".
c= input()
inp = c.split(',')
a = float(inp[0])
b = float(inp[1])
epsilon = float(inp[2])
count = 2
def f(x):
  # Negated objective; the final print negates it back.
  return - ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
def fprima(x):
  # Counted derivative evaluation.
  global count
  count+= 1
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
def fprimaparo(x):
  # Same derivative, but NOT counted (used only for the bracket check).
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
while True:
  # The bracket must keep f' <= 0 on the left and f' >= 0 on the right.
  if not (fprimaparo(a) <= 0 and fprimaparo(b) >=0):
    print('Error')
    break
  z = (a+b)/2
  ft = fprima(z)
  if ft < 0:
    a = z
  else:
    b = z
  if abs(ft) < epsilon:
    print("("+'%.3f' % round(z,3)+","+'%.3f' % round(-f(z),3)+")")
    print(count)
    break
# + [markdown] id="VrhQGmrNpqEe"
# ## e)
# + colab={"base_uri": "https://localhost:8080/"} id="KHq48KNQgrL1" outputId="54eaf2c6-39e0-4dbb-ae08-f0d2ce7a73b9"
# e) Regula falsi (false position) on f'(x) = 0; requires a sign change on [xl, xr].
# Input: one line "xl,xr,epsilon".
c= input()
inp = c.split(',')
xl = float(inp[0])
xr = float(inp[1])
epsilon = float(inp[2])
count = 0
def f(x):
  # Negated objective; the final print negates it back.
  return - ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
def fprima(x):
  # Counted derivative evaluation.
  global count
  count+=1
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
def fprimaparo(x):
  # Same derivative, but NOT counted (bracket check and cached endpoints).
  return -((((x**2)+1)**(3/2)) - (x**3) - (2*x) )/(((x**2)+1)**(3/2))
k=0
while True:
  if not( fprimaparo(xl) * fprimaparo(xr) < 0 ):
    print('Error')
    break
  k+=1
  # Only the first iteration charges the endpoint evaluations to the counter.
  if k == 1:
    fxrp = fprima(xr)
    fxlp = fprima(xl)
  else:
    fxrp = fprimaparo(xr)
    fxlp = fprimaparo(xl)
  # Secant step between the bracketing endpoints.
  z = xr - ((fxrp*(xr-xl))/(fxrp-fxlp))
  fZ = fprima(z)
  if fZ < 0:
    xl = z
  else:
    xr = z
  if abs(fZ) < epsilon:
    print("("+'%.3f' % round(z,3)+","+'%.3f' % round(-f(z),3)+")")
    print(count)
    break
# + [markdown] id="_4TFJvvzr4wo"
# # 2
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="XXlRNaqqr6hM" outputId="9f0ee84a-888f-459a-cba2-97a789596f2b"
# 2) Plot the original (un-negated) objective around the optimum x ≈ 0.786 found above.
def f(x):
  return ((0.5/(1+x**2)**(1/2)) - (1+x**2)**(1/2) * (1- (0.5/(1+x**2))) + x)
x = 0.786
lin = np.linspace(x-1,x+1,100)
plt.plot(lin,f(lin))
# + [markdown] id="HBMP1dWq1YS6"
# # 3
#
# + colab={"base_uri": "https://localhost:8080/"} id="HarLAHoGr6mW" outputId="9ba98848-761b-4178-e9a9-268d08875d16"
def f(x):
    """Objective: e**x - x**3."""
    return math.e**x - x**3
def fprima(x):
    """First derivative of f: e**x - 3*x**2."""
    return math.e**x - 3*x**2
def f2prima(x):
    """Second derivative of f: e**x - 6*x."""
    return math.e**x - 6*x
# Newton's method applied to f' (optimization of f), driven by "x0,epsilon" input.
c= input()
inp = c.split(',')
count =1  # NOTE(review): unused here; `k` below is the counter actually reported
x0 = float(inp[0])
eps = float(inp[1])
k = 1  # step counter (starts at 1, so the printed value is steps + 1)
while True:
    # Newton step on the derivative: x <- x - f'(x)/f''(x).
    x0 = x0 - (fprima(x0)/f2prima(x0))
    k+=1
    xk_plus = x0
    if abs(fprima(xk_plus)) < eps:
        # Print "(x, -f(x))" rounded to 3 decimals, then the step count.
        print("("+'%.3f' % round(xk_plus,3)+","+'%.3f' % round(-f(xk_plus),3)+")")
        print(k)
        break
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Dp6S0Hh6r6qP" outputId="6831e81a-4dc3-44c8-d436-a8cb8280faaa"
def f(x):
    """Objective for plotting: e**x - x**3 (elementwise on numpy arrays too)."""
    return math.e**x - x**3
# Plot f around x = 0.910 (the point found by the Newton run above).
x = 0.910
lin = np.linspace(x-1,x+1,100)
plt.plot(lin,f(lin))
# + colab={"base_uri": "https://localhost:8080/"} id="AFnrBe8l-ycn" outputId="9e46f223-8643-4596-9c6c-7433810fef49"
def f(x):
    """Objective: e**x - x**3."""
    return math.e**x - x**3
def fprima(x):
    """First derivative of f: e**x - 3*x**2."""
    return math.e**x - 3*x**2
def f2prima(x):
    """Second derivative of f: e**x - 6*x."""
    return math.e**x - 6*x
# Newton's method applied to f' (optimization of f), driven by "x0,epsilon" input.
c= input()
inp = c.split(',')
count =1  # NOTE(review): unused here; `k` below is the counter actually reported
x0 = float(inp[0])
eps = float(inp[1])
k = 1  # step counter (starts at 1, so the printed value is steps + 1)
while True:
    # Newton step on the derivative: x <- x - f'(x)/f''(x).
    x0 = x0 - (fprima(x0)/f2prima(x0))
    k+=1
    xk_plus = x0
    if abs(fprima(xk_plus)) < eps:
        # Print "(x, -f(x))" rounded to 3 decimals, then the step count.
        print("("+'%.3f' % round(xk_plus,3)+","+'%.3f' % round(-f(xk_plus),3)+")")
        print(k)
        break
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="bE_9psMZAQcQ" outputId="db357a60-6da4-4ef8-d382-a686ac6dd182"
def f(x):
    """Objective for plotting: e**x - x**3 (elementwise on numpy arrays too)."""
    return math.e**x - x**3
# Plot f around x = -0.459 (the point found by the Newton run above).
x =-0.459
lin = np.linspace(x-1,x+1,100)
plt.plot(lin,f(lin))
# + colab={"base_uri": "https://localhost:8080/"} id="ofuCazQU_SOQ" outputId="7ac2f671-7ed6-45a7-81b7-5a24ecd7dcec"
def f(x):
    """Objective: e**x - x**3."""
    return math.e**x - x**3
def fprima(x):
    """First derivative of f: e**x - 3*x**2."""
    return math.e**x - 3*x**2
def f2prima(x):
    """Second derivative of f: e**x - 6*x."""
    return math.e**x - 6*x
# Newton's method applied to f' (optimization of f), driven by "x0,epsilon" input.
c= input()
inp = c.split(',')
count =1  # NOTE(review): unused here; `k` below is the counter actually reported
x0 = float(inp[0])
eps = float(inp[1])
k = 1  # step counter (starts at 1, so the printed value is steps + 1)
while True:
    # Newton step on the derivative: x <- x - f'(x)/f''(x).
    x0 = x0 - (fprima(x0)/f2prima(x0))
    k+=1
    xk_plus = x0
    if abs(fprima(xk_plus)) < eps:
        # Print "(x, -f(x))" rounded to 3 decimals, then the step count.
        print("("+'%.3f' % round(xk_plus,3)+","+'%.3f' % round(-f(xk_plus),3)+")")
        print(k)
        break
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="P6ufhnvI_XLp" outputId="f7f455fd-1fa4-421a-b071-f0cdc3ce05cf"
def f(x):
    """Objective for plotting: e**x - x**3 (elementwise on numpy arrays too)."""
    return math.e**x - x**3
# Plot f around x = 3.733 (the point found by the Newton run above).
x =3.733
lin = np.linspace(x-1,x+1,100)
plt.plot(lin,f(lin))
# + [markdown] id="Sf_IywXxnIWy"
# Newton - Raphson.<br>
# Parameters: $ x_0$= 3 $\epsilon_1$ = 0.0001 <br>
# Parametros: $ x_0$= 3 $\epsilon_1$ = 0.0001 <br>
| Non_L_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <div><img src="attachment:qgssqml2021wordmark1.png"/></div>
# + [markdown] slideshow={"slide_type": "-"}
# # Lab 3: Quantum Kernels and Support Vector Machines
#
# In this lab, you will learn how to implement quantum feature maps, quantum kernels and quantum support vector classification in Qiskit.
# + [markdown] slideshow={"slide_type": "skip"}
# We will first load the required packages.
# + slideshow={"slide_type": "skip"}
# General Imports
import numpy as np
# Visualisation Imports
import matplotlib.pyplot as plt
# Scikit Imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Qiskit Imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
# + [markdown] slideshow={"slide_type": "slide"}
# # Data
#
# The data we are going to work with today will be a small subset of the well known handwritten [digits dataset](https://scikit-learn.org/stable/datasets/toy_dataset.html#digits-dataset), which is available through scikit-learn. We will be aiming to differentiate between '0' and '1'.
# + slideshow={"slide_type": "fragment"}
# Load digits dataset, restricted to the two classes '0' and '1'
digits = datasets.load_digits(n_class=2)
# Plot one example of each class ('0' and '1') side by side
fig, axs = plt.subplots(1, 2, figsize=(6,3))
axs[0].set_axis_off()
axs[0].imshow(digits.images[0], cmap=plt.cm.gray_r, interpolation='nearest')
axs[1].set_axis_off()
axs[1].imshow(digits.images[1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Data Preprocessing
#
# There are a total of 360 datapoints in the dataset. Each datapoint is a 8x8 image of a digit, collapsed into an array, where each element is an integer between 0 (white) and 16 (black). As per classical classification, we need to split the dataset into training (100) and testing (20) samples, and normalise it. To use the dataset for quantum classification, we need to scale the range to between -1 and 1, and reduce the dimensionality to the number of qubits we want to use (4).
#
# Note that in Qiskit, this is done for this dataset in [`qiskit_machine_learning.datasets.digits`](https://qiskit.org/documentation/machine-learning/stubs/qiskit_machine_learning.datasets.digits.html), but it is made explicit in this lab for future reference.
# + slideshow={"slide_type": "-"}
# Split dataset (80/20 train/test, fixed seed for reproducibility)
sample_train, sample_test, label_train, label_test = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=22)
# Reduce dimensions: PCA down to 4 features = the number of qubits used later
n_dim = 4
pca = PCA(n_components=n_dim).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Normalise (scaler fitted on the training split only)
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Scale every feature into [-1, 1] as required for the quantum encoding
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Select the first 100 training and 20 testing samples
train_size = 100
sample_train = sample_train[:train_size]
label_train = label_train[:train_size]
test_size = 20
sample_test = sample_test[:test_size]
label_test = label_test[:test_size]
# + slideshow={"slide_type": "-"}
# Show one preprocessed sample (and its label) from each split
print(sample_train[0], label_train[0])
print(sample_test[0], label_test[0])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data Encoding
#
# We will take the classical data and encode it to the quantum state space using a quantum feature map. The choice of which feature map to use is important and may depend on the given dataset we want to classify. Here we'll look at the feature maps available in Qiskit, before selecting and customising one to encode our data.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Quantum Feature Maps
#
# As the name suggests, a quantum feature map $\phi(\mathbf{x})$ is a map from the classical feature vector $\mathbf{x}$ to the quantum state $|\Phi(\mathbf{x})\rangle\langle\Phi(\mathbf{x})|$. This is facilitated by applying the unitary operation $\mathcal{U}_{\Phi(\mathbf{x})}$ on the initial state $|0\rangle^{n}$ where _n_ is the number of qubits being used for encoding.
#
# The feature maps currently available in Qiskit ([`PauliFeatureMap`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.PauliFeatureMap.html), [`ZZFeatureMap`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.ZZFeatureMap.html) and [`ZFeatureMap`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.ZFeatureMap.html)) are those introduced in [_Havlicek et al_. Nature **567**, 209-212 (2019)](https://www.nature.com/articles/s41586-019-0980-2), in particular the `ZZFeatureMap` is conjectured to be hard to simulate classically and can be implemented as short-depth circuits on near-term quantum devices.
# + [markdown] slideshow={"slide_type": "fragment"}
# The `PauliFeatureMap` is defined as:
#
# ```python
# PauliFeatureMap(feature_dimension=None, reps=2,
# entanglement='full', paulis=None,
# data_map_func=None, parameter_prefix='x',
# insert_barriers=False)
# ```
#
# and describes the unitary operator of depth $d$:
#
# $$ \mathcal{U}_{\Phi(\mathbf{x})}=\prod_d U_{\Phi(\mathbf{x})}H^{\otimes n},\ U_{\Phi(\mathbf{x})}=\exp\left(i\sum_{S\subseteq[n]}\phi_S(\mathbf{x})\prod_{k\in S} P_i\right), $$
#
# which contains layers of Hadamard gates interleaved with entangling blocks, $U_{\Phi(\mathbf{x})}$, encoding the classical data as shown in circuit diagram below for $d=2$.
#
# <div><img src="attachment:featuremap.png" width="1200" /></div>
#
# Within the entangling blocks, $U_{\Phi(\mathbf{x})}$: $P_i \in \{ I, X, Y, Z \}$ denotes the Pauli matrices, the index $S$ describes connectivities between different qubits or datapoints: $S \in \{\binom{n}{k}\ combinations,\ k = 1,... n \}$, and by default the data mapping function $\phi_S(\mathbf{x})$ is
# $$\phi_S:\mathbf{x}\mapsto \Bigg\{\begin{array}{ll}
# x_i & \mbox{if}\ S=\{i\} \\
# (\pi-x_i)(\pi-x_j) & \mbox{if}\ S=\{i,j\}
# \end{array}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# when $k = 1, P_0 = Z$, this is the `ZFeatureMap`:
# $$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, Z_j\right) \, H^{\otimes n} \right)^d.$$
#
# which is defined as:
# ```python
# ZFeatureMap(feature_dimension, reps=2,
# data_map_func=None, insert_barriers=False)
# ```
# + slideshow={"slide_type": "-"}
# ZFeatureMap: 3 features, depth 2 (no entangling gates)
map_z = ZFeatureMap(feature_dimension=3, reps=2)
map_z.draw('mpl')
# + [markdown] slideshow={"slide_type": "-"}
# note the lack of entanglement in this feature map, this means that this feature map is simple to simulate classically and will not provide quantum advantage.
# + [markdown] slideshow={"slide_type": "fragment"}
# and when $k = 2, P_0 = Z, P_1 = ZZ$, this is the `ZZFeatureMap`:
# $$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_{jk} \phi_{\{j,k\}}(\mathbf{x}) \, Z_j \otimes Z_k\right) \, \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, Z_j\right) \, H^{\otimes n} \right)^d.$$
#
# which is defined as:
# ```python
# ZZFeatureMap(feature_dimension, reps=2,
# entanglement='full', data_map_func=None,
# insert_barriers=False)
# ```
# + slideshow={"slide_type": "-"}
# ZZFeatureMap: 3 features, depth 1, default (full) entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1)
map_zz.draw('mpl')
# + [markdown] slideshow={"slide_type": "fragment"}
# now that there is entanglement in the feature map, we can define the entanglement map:
# + slideshow={"slide_type": "-"}
# 3 features, depth 1, linear entanglement (each qubit entangled with its neighbour)
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='linear')
map_zz.draw('mpl')
# + slideshow={"slide_type": "-"}
# 3 features, depth 1, circular entanglement (linear plus a last-to-first link)
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='circular')
map_zz.draw('mpl')
# + [markdown] slideshow={"slide_type": "fragment"}
# We can also customise the Pauli gates in the feature map, for example, $P_0 = X, P_1 = Y, P_2 = ZZ$:
# $$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_{jk} \phi_{\{j,k\}}(\mathbf{x}) \, Z_j \otimes Z_k\right) \, \exp\left(i\sum_{j} \phi_{\{j\}}(\mathbf{x}) \, Y_j\right) \, \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, X_j\right) \, H^{\otimes n} \right)^d.$$
# + slideshow={"slide_type": "-"}
# PauliFeatureMap: 3 features, depth 1, with the custom Pauli set X, Y and ZZ
map_pauli = PauliFeatureMap(feature_dimension=3, reps=1, paulis = ['X', 'Y', 'ZZ'])
map_pauli.draw('mpl')
# + [markdown] slideshow={"slide_type": "fragment"}
# We can also define a custom data mapping function, for example:
# $$\phi_S:\mathbf{x}\mapsto \Bigg\{\begin{array}{ll}
# x_i & \mbox{if}\ S=\{i\} \\
# \sin(\pi-x_i)\sin(\pi-x_j) & \mbox{if}\ S=\{i,j\}
# \end{array}$$
# + slideshow={"slide_type": "-"}
def custom_data_map_func(x):
    """Custom data map: phi(x) = x_0 for a single feature, otherwise the
    product of sin(pi - x_i) over all features.

    Fix: the original called `reduce`, which is not a builtin in Python 3
    (it lives in functools and was never imported), so any multi-feature
    term raised NameError.  np.prod computes the same product.
    """
    coeff = x[0] if len(x) == 1 else np.prod(np.sin(np.pi - x))
    return coeff
# + slideshow={"slide_type": "-"}
# Feature map that uses the custom data mapping function defined above
map_customdatamap = PauliFeatureMap(feature_dimension=3, reps=1, paulis=['Z','ZZ'],
                                    data_map_func=custom_data_map_func)
#map_customdatamap.draw() # qiskit isn't able to draw the circuit with np.sin in the custom data map
# + [markdown] slideshow={"slide_type": "subslide"}
# The [`NLocal`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.NLocal.html) and [`TwoLocal`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.TwoLocal.html) functions in Qiskit's circuit library can also be used to create parameterised quantum circuits as feature maps.
#
# ```python
# TwoLocal(num_qubits=None, reps=3, rotation_blocks=None,
# entanglement_blocks=None, entanglement='full',
# skip_unentangled_qubits=False,
# skip_final_rotation_layer=False,
# parameter_prefix='θ', insert_barriers=False,
# initial_state=None)
# ```
#
# ```python
# NLocal(num_qubits=None, reps=1, rotation_blocks=None,
# entanglement_blocks=None, entanglement=None,
# skip_unentangled_qubits=False,
# skip_final_rotation_layer=False,
# overwrite_block_parameters=True,
# parameter_prefix='θ', insert_barriers=False,
# initial_state=None, name='nlocal')
# ```
#
# Both functions create parameterised circuits of alternating rotation and entanglement layers. In both layers, parameterised circuit-blocks act on the circuit in a defined way. In the rotation layer, the blocks are applied stacked on top of each other, while in the entanglement layer according to the entanglement strategy. Each layer is repeated a number of times, and by default a final rotation layer is appended.
#
# In `NLocal`, the circuit blocks can have arbitrary sizes (smaller equal to the number of qubits in the circuit), while in `TwoLocal`, the rotation layers are single qubit gates applied on all qubits and the entanglement layer uses two-qubit gates.
# + [markdown] slideshow={"slide_type": "fragment"}
# For example, here is a `TwoLocal` circuit, with $R_y$ and $R_z$ gates in the rotation layer and $CX$ gates in the entangling layer with circular entanglement:
# + slideshow={"slide_type": "-"}
# TwoLocal: Ry+Rz rotation layers, CX entanglers, circular entanglement
twolocal = TwoLocal(num_qubits=3, reps=2, rotation_blocks=['ry','rz'],
                    entanglement_blocks='cx', entanglement='circular', insert_barriers=True)
twolocal.draw('mpl')
# + [markdown] slideshow={"slide_type": "fragment"}
# and the equivalent `NLocal` circuit:
# + slideshow={"slide_type": "-"}
# The same circuit expressed with NLocal and explicit gate objects
twolocaln = NLocal(num_qubits=3, reps=2,
                   rotation_blocks=[RYGate(Parameter('a')), RZGate(Parameter('a'))],
                   entanglement_blocks=CXGate(),
                   entanglement='circular', insert_barriers=True)
twolocaln.draw('mpl')
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is another `NLocal` circuit, with a rotation block on 2 qubits and an entanglement block on 4 qubits using linear entanglement:
# + slideshow={"slide_type": "-"}
# rotation block: Ry/Rz rotations on a 2-qubit sub-block
rot = QuantumCircuit(2)
params = ParameterVector('r', 2)
rot.ry(params[0], 0)
rot.rz(params[1], 1)
# entanglement block: a chain of controlled-RX gates over 4 qubits
ent = QuantumCircuit(4)
params = ParameterVector('e', 3)
ent.crx(params[0], 0, 1)
ent.crx(params[1], 1, 2)
ent.crx(params[2], 2, 3)
# 6-qubit NLocal assembled from the custom sub-blocks, linear entanglement
nlocal = NLocal(num_qubits=6, rotation_blocks=rot, entanglement_blocks=ent,
                entanglement='linear', insert_barriers=True)
nlocal.draw('mpl')
# + [markdown] slideshow={"slide_type": "subslide"}
# It is also possible to create a completely custom parameterised circuit feature map. To do so, simply create a `QuantumCircuit` with a [`ParameterVector`](https://qiskit.org/documentation/stubs/qiskit.circuit.ParameterVector.html). Here's an example:
# + slideshow={"slide_type": "-"}
# Hand-built parameterised feature map: per-qubit Rx data rotations,
# then pairwise CX-P-CX sandwiches applying a phase of x_i * x_j.
qubits = 3
repeats = 2
x = ParameterVector('x', length=qubits)
var_custom = QuantumCircuit(qubits)
for _ in range(repeats):
    for i in range(qubits):
        var_custom.rx(x[i], i)
    for i in range(qubits):
        for j in range(i + 1, qubits):
            var_custom.cx(i, j)
            var_custom.p(x[i] * x[j], j)
            var_custom.cx(i, j)
    var_custom.barrier()
var_custom.draw('mpl')
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's encode the first training sample using the `ZZFeatureMap`:
# + slideshow={"slide_type": "-"}
# The first preprocessed training sample (4 features, one per qubit)
print(sample_train[0])
# + slideshow={"slide_type": "fragment"}
# Bind the 4 feature values into a depth-2 linear-entanglement ZZFeatureMap
encode_map = ZZFeatureMap(feature_dimension=4, reps=2, entanglement='linear', insert_barriers=True)
encode_circuit = encode_map.bind_parameters(sample_train[0])
encode_circuit.draw(output='mpl')
# + [markdown] slideshow={"slide_type": "subslide"}
# <div class="alert alert-block alert-success">
#
# ### Exercise 1: Data Encoding
#
# Encode the data point $x = (-0.1,0.2)$ using the `ZZFeatureMap` with 4 repetitions and default data mapping function.
#
#
# </div>
# + slideshow={"slide_type": "-"}
# Exercise 1: encode x with a 2-feature, 4-repetition ZZFeatureMap
x = [-0.1,0.2]
encode_map = ZZFeatureMap(feature_dimension=2, reps=4, entanglement='linear', insert_barriers=True)
ex1_circuit = encode_map.bind_parameters(x)
ex1_circuit.draw(output='mpl')
# +
from qc_grader import grade_lab3_ex1
# Note that the grading function is expecting a quantum circuit
grade_lab3_ex1(ex1_circuit)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Quantum Kernel Estimation
#
# A quantum feature map, $\phi(\mathbf{x})$, naturally gives rise to a quantum kernel, $k(\mathbf{x}_i,\mathbf{x}_j)= \phi(\mathbf{x}_j)^\dagger\phi(\mathbf{x}_i)$, which can be seen as a measure of similarity: $k(\mathbf{x}_i,\mathbf{x}_j)$ is large when $\mathbf{x}_i$ and $\mathbf{x}_j$ are close.
#
# When considering finite data, we can represent the quantum kernel as a matrix:
# $K_{ij} = \left| \langle \phi^\dagger(\mathbf{x}_j)| \phi(\mathbf{x}_i) \rangle \right|^{2}$. We can calculate each element of this kernel matrix on a quantum computer by calculating the transition amplitude:
# $$
# \left| \langle \phi^\dagger(\mathbf{x}_j)| \phi(\mathbf{x}_i) \rangle \right|^{2} =
# \left| \langle 0^{\otimes n} | \mathbf{U_\phi^\dagger}(\mathbf{x}_j) \mathbf{U_\phi}(\mathbf{x_i}) | 0^{\otimes n} \rangle \right|^{2}
# $$
# assuming the feature map is a parameterized quantum circuit, which can be described as a unitary transformation $\mathbf{U_\phi}(\mathbf{x})$ on $n$ qubits.
#
# This provides us with an estimate of the quantum kernel matrix, which we can then use in a kernel machine learning algorithm, such as support vector classification.
# + [markdown] slideshow={"slide_type": "fragment"}
# As discussed in [*Havlicek et al*. Nature 567, 209-212 (2019)](https://www.nature.com/articles/s41586-019-0980-2), quantum kernel machine algorithms only have the potential of quantum advantage over classical approaches if the corresponding quantum kernel is hard to estimate classically.
#
# As we will see later, the hardness of estimating the kernel with classical resources is of course only a necessary and not always sufficient condition to obtain a quantum advantage.
#
# However, it was proven recently in [*Liu et al.* arXiv:2010.02174 (2020)](https://arxiv.org/abs/2010.02174) that learning problems exist for which learners with access to quantum kernel methods have a quantum advantage over all classical learners.
# + [markdown] slideshow={"slide_type": "fragment"}
# With our training and testing datasets ready, we set up the `QuantumKernel` class with the [ZZFeatureMap](https://qiskit.org/documentation/stubs/qiskit.circuit.library.ZZFeatureMap.html), and use the `BasicAer` `statevector_simulator` to estimate the training and testing kernel matrices.
# + slideshow={"slide_type": "-"}
# Quantum kernel built from the ZZFeatureMap, evaluated on the statevector simulator
zz_map = ZZFeatureMap(feature_dimension=4, reps=2, entanglement='linear', insert_barriers=True)
zz_kernel = QuantumKernel(feature_map=zz_map, quantum_instance=Aer.get_backend('statevector_simulator'))
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's calculate the transition amplitude between the first and second training data samples, one of the entries in the training kernel matrix.
# + slideshow={"slide_type": "-"}
# The two training samples whose kernel-matrix entry we will compute
print(sample_train[0])
print(sample_train[1])
# + [markdown] slideshow={"slide_type": "fragment"}
# First we create and draw the circuit:
# + slideshow={"slide_type": "-"}
# Circuit computing |<phi(x1)|phi(x0)>|^2 for the two samples above
zz_circuit = zz_kernel.construct_circuit(sample_train[0], sample_train[1])
zz_circuit.decompose().decompose().draw(output='mpl')
# + [markdown] slideshow={"slide_type": "-"}
# The parameters in the gates are a little difficult to read, but notice how the circuit is symmetrical, with one half encoding one of the data samples, the other half encoding the other.
# + [markdown] slideshow={"slide_type": "fragment"}
# We then simulate the circuit. We will use the `qasm_simulator` since the circuit contains measurements, but increase the number of shots to reduce the effect of sampling noise.
# + slideshow={"slide_type": "-"}
# Sample the kernel circuit; a high shot count reduces sampling noise
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit, backend, shots=8192,
              seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(zz_circuit)
# + [markdown] slideshow={"slide_type": "fragment"}
# The transition amplitude is the proportion of counts in the zero state:
# + slideshow={"slide_type": "-"}
counts['0000']/sum(counts.values())
# + [markdown] slideshow={"slide_type": "fragment"}
# This process is then repeated for each pair of training data samples to fill in the training kernel matrix, and between each training and testing data sample to fill in the testing kernel matrix. Note that each matrix is symmetric, so to reduce computation time, only half the entries are calculated explicitly.
# + [markdown] slideshow={"slide_type": "fragment"}
# Here we compute and plot the training and testing kernel matrices:
# + slideshow={"slide_type": "-"}
# Kernel matrices: train-vs-train and test-vs-train
matrix_train = zz_kernel.evaluate(x_vec=sample_train)
matrix_test = zz_kernel.evaluate(x_vec=sample_test, y_vec=sample_train)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
              interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_test),
              interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("testing kernel matrix")
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# <div class="alert alert-block alert-success">
#
# ### Exercise 2: Quantum Kernel
#
# Calculate the transition amplitude between $x=(−0.1,0.2)$ and $y=(0.4,-0.6)$ using the `ZZFeatureMap` with 4 repetitions and default data mapping function. Use the `qasm_simulator` with `shots = 8192`, `seed_simulator = 1024` and `seed_transpiler = 1024`.
#
#
# </div>
# + slideshow={"slide_type": "-"}
# Exercise 2: transition amplitude between x and y under the ZZFeatureMap
x = [-0.1,0.2]
y = [0.4,-0.6]
zz_map = ZZFeatureMap(feature_dimension=2, reps=4, entanglement='linear', insert_barriers=True)
zz_kernel = QuantumKernel(feature_map=zz_map, quantum_instance=Aer.get_backend('statevector_simulator'))
zz_circuit = zz_kernel.construct_circuit(x, y)
zz_circuit.decompose().decompose().draw(output='mpl')
# -
# Run on the shot-based simulator with the seeds required by the exercise
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit, backend, shots=8192,
              seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(zz_circuit)
# Transition amplitude = fraction of shots measured in |00>
amplitude = counts['00']/sum(counts.values())
print(amplitude)
# +
from qc_grader import grade_lab3_ex2
# Note that the grading function is expecting a floating point number
grade_lab3_ex2(amplitude)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Quantum Support Vector Classification
#
#
# Introduced in [*Havlicek et al*. Nature 567, 209-212 (2019)](https://www.nature.com/articles/s41586-019-0980-2), the quantum kernel support vector classification algorithm consists of these steps:
#
# <div><img src="attachment:qsvc.png" width="1200"></div>
#
# 1. Build the train and test quantum kernel matrices.
# 1. For each pair of datapoints in the training dataset $\mathbf{x}_{i},\mathbf{x}_j$, apply the feature map and measure the transition probability: $ K_{ij} = \left| \langle 0 | \mathbf{U}^\dagger_{\Phi(\mathbf{x_j})} \mathbf{U}_{\Phi(\mathbf{x_i})} | 0 \rangle \right|^2 $.
# 2. For each training datapoint $\mathbf{x_i}$ and testing point $\mathbf{y_i}$, apply the feature map and measure the transition probability: $ K_{ij} = \left| \langle 0 | \mathbf{U}^\dagger_{\Phi(\mathbf{y_i})} \mathbf{U}_{\Phi(\mathbf{x_i})} | 0 \rangle \right|^2 $.
# 2. Use the train and test quantum kernel matrices in a classical support vector machine classification algorithm.
# + [markdown] slideshow={"slide_type": "fragment"}
# The `scikit-learn` `svc` algorithm allows us to define a [custom kernel](https://scikit-learn.org/stable/modules/svm.html#custom-kernels) in two ways: by providing the kernel as a callable function or by precomputing the kernel matrix. We can do either of these using the `QuantumKernel` class in Qiskit.
# + [markdown] slideshow={"slide_type": "fragment"}
# The following code takes the training and testing kernel matrices we calculated earlier and provides them to the `scikit-learn` `svc` algorithm:
# + slideshow={"slide_type": "-"}
# SVC trained on the precomputed quantum kernel matrices
zzpc_svc = SVC(kernel='precomputed')
zzpc_svc.fit(matrix_train, label_train)
zzpc_score = zzpc_svc.score(matrix_test, label_test)
print(f'Precomputed kernel classification test score: {zzpc_score}')
# + [markdown] slideshow={"slide_type": "fragment"}
# The following code gives the kernel as a callable function:
# + slideshow={"slide_type": "-"}
# SVC evaluating the quantum kernel on demand through a callable
zzcb_svc = SVC(kernel=zz_kernel.evaluate)
zzcb_svc.fit(sample_train, label_train)
zzcb_score = zzcb_svc.score(sample_test, label_test)
print(f'Callable kernel classification test score: {zzcb_score}')
# + [markdown] slideshow={"slide_type": "fragment"}
# This is convenient, as we can easily compare the quantum kernel with various classical kernels:
# + slideshow={"slide_type": "-"}
# Baseline: the same classification task with standard classical kernels
classical_kernels = ['linear', 'poly', 'rbf', 'sigmoid']
for kernel in classical_kernels:
    classical_svc = SVC(kernel=kernel)
    classical_svc.fit(sample_train, label_train)
    classical_score = classical_svc.score(sample_test, label_test)
    print('%s kernel classification test score: %0.2f' % (kernel, classical_score))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exploration: Quantum Support Vector Classification
#
# Try running Quantum Support Vector Classification with different Qiskit [feature maps](https://qiskit.org/documentation/apidoc/circuit_library.html#data-encoding-circuits) and [datasets](https://qiskit.org/documentation/machine-learning/apidocs/qiskit_machine_learning.datasets.html).
# -
| Labs/lab-3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pytorch]
# language: python
# name: conda-env-pytorch-py
# ---
# # Generative Adversarial Networks
#
# In this notebook, we play with the GAN described in the [course](https://mlelarge.github.io/dataflowr/Slides/GAN/index.html) on a double moon dataset.
#
# Then we implement a Conditional GAN and an InfoGAN.
# +
# all of these libraries are used for plotting
import numpy as np
import matplotlib.pyplot as plt
# Plot the dataset
def plot_data(ax, X, Y, color = 'bone'):
    """Scatter the 2-D points X on *ax*, coloured by the labels Y."""
    plt.axis('off')
    ax.scatter(X[:, 0], X[:, 1], c=Y, s=1, cmap=color)
# -
from sklearn.datasets import make_moons
# 2000 points on two interleaved half-moons with a little Gaussian noise
X, y = make_moons(n_samples=2000, noise=0.05)
# +
n_samples = X.shape[0]
Y = np.ones(n_samples)  # single colour: class labels are unused by the plain GAN
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
plot_data(ax, X, Y)
plt.show()
# -
import torch
use_gpu = torch.cuda.is_available()  # True when a CUDA device is visible
def gpu(tensor, gpu=use_gpu):
    """Return *tensor* moved to CUDA when *gpu* is true, otherwise unchanged."""
    return tensor.cuda() if gpu else tensor
# # A simple GAN
#
# We start with the simple GAN described in the course.
# +
import torch.nn as nn
z_dim = 32  # dimension of the latent noise vector
hidden_dim = 128  # width of the single hidden layer
# Generator: noise z -> 2-D point
net_G = nn.Sequential(nn.Linear(z_dim,hidden_dim),
                      nn.ReLU(), nn.Linear(hidden_dim, 2))
# Discriminator: 2-D point -> probability of being a real sample
net_D = nn.Sequential(nn.Linear(2,hidden_dim),
                      nn.ReLU(),
                      nn.Linear(hidden_dim,1),
                      nn.Sigmoid())
# Move both networks to the GPU when one is available
net_G = gpu(net_G)
net_D = gpu(net_D)
# -
# Training loop as described in the course, keeping the losses for the discriminator and the generator.
# +
batch_size = 50
lr = 1e-4
nb_epochs = 1000
optimizer_G = torch.optim.Adam(net_G.parameters(),lr=lr)
optimizer_D = torch.optim.Adam(net_D.parameters(),lr=lr)
# Per-epoch losses, accumulated as plain floats via .item().
# Fix: the original summed live loss tensors, which retains every batch's
# autograd graph for the whole epoch (memory growth) and leaves tensors that
# require grad in loss_*_epoch, breaking the plt.plot calls further down.
loss_D_epoch = []
loss_G_epoch = []
for e in range(nb_epochs):
    np.random.shuffle(X)
    real_samples = torch.from_numpy(X).type(torch.FloatTensor)
    loss_G = 0
    loss_D = 0
    for real_batch in real_samples.split(batch_size):
        # --- Discriminator step: push D(real) up and D(fake) down ---
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        fake_batch = net_G(z)
        D_scores_on_real = net_D(gpu(real_batch))
        D_scores_on_fake = net_D(fake_batch)
        loss = -torch.mean(torch.log(1-D_scores_on_fake) + torch.log(D_scores_on_real))
        optimizer_D.zero_grad()
        loss.backward()
        optimizer_D.step()
        loss_D += loss.item()
        # --- Generator step: push D(fake) up on a fresh noise batch ---
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        fake_batch = net_G(z)
        D_scores_on_fake = net_D(fake_batch)
        loss = -torch.mean(torch.log(D_scores_on_fake))
        optimizer_G.zero_grad()
        loss.backward()
        optimizer_G.step()
        loss_G += loss.item()
    loss_D_epoch.append(loss_D)
    loss_G_epoch.append(loss_G)
# -
# Loss curves for the discriminator and the generator
plt.plot(loss_D_epoch)
plt.plot(loss_G_epoch)
# Generate n_samples fake points (label 0) and overlay them on the real data (label 1)
z = gpu(torch.empty(n_samples,z_dim).normal_())
fake_samples = net_G(z)
fake_data = fake_samples.cpu().data.numpy()
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
all_data = np.concatenate((X,fake_data),axis=0)
Y2 = np.concatenate((np.ones(n_samples),np.zeros(n_samples)))
plot_data(ax, all_data, Y2)
plt.show();
# It looks like the GAN is oscillating. Try again with lr=1e-3
# We can generate more points:
# Same visualisation with 10x as many generated points
z = gpu(torch.empty(10*n_samples,z_dim).normal_())
fake_samples = net_G(z)
fake_data = fake_samples.cpu().data.numpy()
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
all_data = np.concatenate((X,fake_data),axis=0)
Y2 = np.concatenate((np.ones(n_samples),np.zeros(10*n_samples)))
plot_data(ax, all_data, Y2)
plt.show();
# # Conditional GAN
#
# We are now implementing a [conditional GAN](https://arxiv.org/abs/1411.1784).
#
# We start by separating the two half moons in two clusters as follows:
# Regenerate the moons, this time keeping the cluster labels Y for conditioning
X, Y = make_moons(n_samples=2000, noise=0.05)
n_samples = X.shape[0]
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
plot_data(ax, X, Y)
plt.show()
# The task is now given a white or black label to generate points in the corresponding cluster.
#
# Both the generator and the discriminator take in addition a one hot encoding of the label. The generator will now generate fake points corresponding to the input label. The discriminator, given a pair of sample and label should detect if this is a fake or a real pair.
# +
z_dim = 32
hidden_dim = 128
label_dim = 2

class generator(nn.Module):
    """Conditional generator: (noise vector, one-hot label) -> 2-D point."""
    def __init__(self, z_dim=z_dim, label_dim=label_dim, hidden_dim=hidden_dim):
        super(generator, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(z_dim + label_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 2),
        )
    def forward(self, input, label_onehot):
        conditioned = torch.cat([input, label_onehot], 1)
        return self.net(conditioned)
class discriminator(nn.Module):
    """Conditional discriminator: (2-D point, one-hot label) -> P(real)."""
    def __init__(self, z_dim=z_dim, label_dim=label_dim, hidden_dim=hidden_dim):
        super(discriminator, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(2 + label_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
            nn.Sigmoid(),
        )
    def forward(self, input, label_onehot):
        conditioned = torch.cat([input, label_onehot], 1)
        return self.net(conditioned)

net_CG = gpu(generator())
net_CD = gpu(discriminator())
# -
# Test the network and in particular the type of the inputs...
#
# Hint: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/4
# Smoke-test the conditional networks on a single sample.
batch_size = 1
z = gpu(torch.empty(batch_size,z_dim).normal_())
#
# your code here
# (build `label_onehot`: a (batch_size, label_dim) one-hot FloatTensor,
#  e.g. torch.zeros(batch_size, label_dim).scatter_(1, label, 1))
#
fake_batch = net_CG(z, label_onehot)
print(fake_batch)
net_CD(fake_batch, label_onehot)
# You need to code the training loop:
# +
batch_size = 50
lr = 1e-3
nb_epochs = 1000
optimizer_CG = torch.optim.Adam(net_CG.parameters(),lr=lr)
optimizer_CD = torch.optim.Adam(net_CD.parameters(),lr=lr)
# Per-epoch losses for plotting
loss_D_epoch = []
loss_G_epoch = []
for e in range(nb_epochs):
    # Shuffle samples and labels with the same permutation, in place
    rperm = np.random.permutation(X.shape[0]);
    np.take(X,rperm,axis=0,out=X);
    np.take(Y,rperm,axis=0,out=Y);
    real_samples = torch.from_numpy(X).type(torch.FloatTensor)
    real_labels = torch.from_numpy(Y).type(torch.LongTensor)
    loss_G = 0
    loss_D = 0
    for real_batch, real_batch_label in zip(real_samples.split(batch_size),real_labels.split(batch_size)):
        # improving D (discriminator step)
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        #
        # your code here
        # (sample fake labels, one-hot encode them, generate fakes, score
        #  real and fake pairs with net_CD and build the discriminator loss)
        #
        optimizer_CD.zero_grad()
        loss.backward()
        optimizer_CD.step()
        loss_D += loss
        # improving G (generator step)
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        #
        # your code here
        # (generate fakes for fresh random labels and build the generator loss)
        #
        optimizer_CG.zero_grad()
        loss.backward()
        optimizer_CG.step()
        loss_G += loss
    loss_D_epoch.append(loss_D)
    loss_G_epoch.append(loss_G)
# -
plt.plot(loss_D_epoch)
plt.plot(loss_G_epoch)
z = gpu(torch.empty(n_samples,z_dim).normal_())
label = torch.LongTensor(n_samples,1).random_() % label_dim
label_onehot = torch.FloatTensor(n_samples, label_dim).zero_()
label_onehot = gpu(label_onehot.scatter_(1, label, 1))
fake_samples = net_CG(z, label_onehot)
fake_data = fake_samples.cpu().data.numpy()
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
plot_data(ax, fake_data, label.squeeze().numpy())
plot_data(ax, X, Y, 'spring')
plt.show()
# # Info GAN
#
# Implement the [algorithm](https://arxiv.org/abs/1606.03657).
#
# This time, you do not have access to the labels but you know there are two classes. The idea is then to provide as in the conditional GAN a random label to the generator but in opposition to the conditional GAN, the discriminator cannot take as input the label (since they are not provided to us) but instead the discriminator will predict a label and this predictor can be trained on fake samples!
# +
import torch.nn.functional as F
z_dim = 32
hidden_dim = 128
label_dim = 2#attention changer est_label si plus de deux classes...
class Igenerator(nn.Module):
    """InfoGAN generator: maps a latent noise vector plus a random one-hot
    label code to a 2-D fake point (same architecture as the conditional
    generator; here the code is sampled, not given by data labels)."""
    def __init__(self,z_dim = z_dim, label_dim=label_dim,hidden_dim =hidden_dim):
        super(Igenerator,self).__init__()
        self.net = nn.Sequential(nn.Linear(z_dim+label_dim,hidden_dim),
                          nn.ReLU(), nn.Linear(hidden_dim, 2))
    def forward(self, input, label_onehot):
        # Concatenate noise and label code along the feature dimension.
        x = torch.cat([input, label_onehot], 1)
        return self.net(x)
class Idiscriminator(nn.Module):
    """InfoGAN discriminator: given only a 2-D sample (no label input),
    returns (probability the sample is real, estimated label).

    NOTE: est_label is a single sigmoid unit, so this only supports two
    classes — extend fc3 (with a softmax) for more classes.
    """
    def __init__(self,z_dim = z_dim, label_dim=label_dim,hidden_dim =hidden_dim):
        super(Idiscriminator,self).__init__()
        self.fc1 = nn.Linear(2,hidden_dim)
        self.fc2 = nn.Linear(hidden_dim,1)  # real/fake head
        self.fc3 = nn.Linear(hidden_dim,1)  # label-prediction head
    def forward(self, input):
        x = F.relu(self.fc1(input))
        # torch.sigmoid replaces F.sigmoid, which is deprecated and removed
        # in recent PyTorch releases.
        output = torch.sigmoid(self.fc2(x))
        est_label = torch.sigmoid(self.fc3(x))
        return output, est_label
net_IG = gpu(Igenerator())
net_ID = gpu(Idiscriminator())
# +
batch_size = 50
lr = 1e-3
nb_epochs = 1000
loss_fn = nn.BCELoss()
optimizer_IG = torch.optim.Adam(net_IG.parameters(),lr=lr)
optimizer_ID = torch.optim.Adam(net_ID.parameters(),lr=lr)
loss_D_epoch = []
loss_G_epoch = []
for e in range(nb_epochs):
    # Shuffle samples in place; labels are deliberately unused (InfoGAN is unsupervised).
    rperm = np.random.permutation(X.shape[0]);
    np.take(X,rperm,axis=0,out=X);
    #np.take(Y,rperm,axis=0,out=Y);
    real_samples = torch.from_numpy(X).type(torch.FloatTensor)
    #real_labels = torch.from_numpy(Y).type(torch.LongTensor)
    loss_G = 0
    loss_D = 0
    for real_batch in real_samples.split(batch_size):
        #improving D
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        #
        # your code here
        #
        # NOTE(review): the code above must define D_scores_on_fake and
        # D_scores_on_real for the loss below.
        lossD = -torch.mean(torch.log(1-D_scores_on_fake) + torch.log(D_scores_on_real))
        #
        # your code here
        #
        # NOTE(review): the exercise code here should also run
        # optimizer_ID zero_grad/backward/step and accumulate loss_D;
        # otherwise loss_D stays 0 for the whole run.
        #
        # improving G
        z = gpu(torch.empty(batch_size,z_dim).normal_())
        #
        # your code here
        #
        # Generator loss: push D to score fakes as real.
        lossG = -torch.mean(torch.log(D_scores_on_fake))
        #
        # your code here
        #
        #
        optimizer_IG.zero_grad()
        lossG.backward()
        optimizer_IG.step()
        loss_G += lossG
    loss_D_epoch.append(loss_D)
    loss_G_epoch.append(loss_G)
# -
plt.plot(loss_D_epoch)
plt.plot(loss_G_epoch)
z = gpu(torch.empty(n_samples,z_dim).normal_())
label = torch.LongTensor(n_samples,1).random_() % label_dim
label_onehot = torch.FloatTensor(n_samples, label_dim).zero_()
label_onehot = gpu(label_onehot.scatter_(1, label, 1))
fake_samples = net_IG(z, label_onehot)
fake_data = fake_samples.cpu().data.numpy()
fig, ax = plt.subplots(1, 1, facecolor='#4B6EA9')
plot_data(ax, fake_data, label.squeeze().numpy())
plot_data(ax, X, Y, 'spring')
plt.show()
| Notebooks/07_GAN_double_moon_empty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 335} colab_type="code" executionInfo={"elapsed": 2709, "status": "ok", "timestamp": 1564319808847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="5MmXoiyTYlCc" outputId="b0b4235c-b8a7-43fe-e5f9-04f658ee070e"
# Uncomment if using collab
# #!nvidia-smi
# + colab={} colab_type="code" id="SIX2E2XsY3gQ"
# Keras is used for loading the model
import keras
from keras.models import Model
from keras.models import load_model
from keras.utils.np_utils import to_categorical
from keras import backend as K
from keras.utils import generic_utils
# We also need sys, os and copy for some processes to retrieve the data (the test set
# and model) as well as to install some new modules in colab
import sys
import os
import copy
# Importing matplotlib, numpy and pandas for handling the test set data and
# later for displaying the original image + explanations created by SHAP, Gradcam
# %matplotlib inline
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
import PIL
from PIL import Image
import scipy
# + colab={"base_uri": "https://localhost:8080/", "height": 165} colab_type="code" executionInfo={"elapsed": 5304, "status": "ok", "timestamp": 1564319811976, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="rYGXA2YSY6Dj" outputId="264e204c-a477-4d19-e19b-bd28bad8b822"
# This version is not installed on colab
# BUGFIX: a bare `pip install ...` line is a SyntaxError in a Python cell;
# use the shell-escape form the rest of this notebook uses.
# !{sys.executable} -m pip install scipy==1.1.0
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 10505, "status": "ok", "timestamp": 1564319817427, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="KXSA2Nz5Y6b-" outputId="7560c237-e11d-44f7-86c6-fa2065972058"
# Installing SHAP and keras-vis through pip (Also not on collab)
# !{sys.executable} -m pip install shap
# !{sys.executable} -m pip install keras-vis
# Importing SHAP and keras-vis
# + colab={} colab_type="code" id="MWJCff9WY8-l"
import shap
from vis import visualization
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 4091, "status": "ok", "timestamp": 1564320443401, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="M1lEezlaZdUw" outputId="6ca6e0c1-f9db-4415-d53c-daf228a23d2a"
# Connect to a particular Google Drive folder (and place it in the '/content/gdrive'
# subdirectory).
from google.colab import files, drive
drive.mount('/content/gdrive', force_remount=True) # to ensure that the process can be easily redone in the same runtime
# + colab={} colab_type="code" id="0xwpqm2xZfmH"
# Change the current operating directory to the Google Drive (which contains our data)
os.chdir('/content/gdrive/My Drive/zero_to_deep_learning_udemy/Project')
# + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" executionInfo={"elapsed": 45530, "status": "ok", "timestamp": 1564319888097, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="MvrlSjqN_rS-" outputId="8eabfb74-0722-430d-b27b-1bc4b69372b9"
# Load the model using the saved InceptionV3 weights
model = load_model('YOUR_MODEL')
# + colab={"base_uri": "https://localhost:8080/", "height": 139} colab_type="code" executionInfo={"elapsed": 336240, "status": "ok", "timestamp": 1564320347454, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="HT7UkHkv_s1o" outputId="ed7fa17b-9058-447e-aec5-8cbe8f9e7fc2"
from vis.utils import utils
from keras import layers, activations
#Assorted modifications for model compatibility with gradCAM
gmodel = copy.deepcopy(model)
layer_idx = utils.find_layer_idx(gmodel,'dense_2')
#swap with softmax with linear classifier for the reasons mentioned above
gmodel.layers[layer_idx].activation = activations.linear
gmodel = utils.apply_modifications(gmodel)
# + colab={} colab_type="code" id="kLJRz81c_5HW"
# %run utils.py
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 3550, "status": "ok", "timestamp": 1564320475358, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="WD13wmkUCaNZ" outputId="258f4b96-1ad8-4b3d-d2d1-85b94b186e21"
test_df = pd.read_pickle('NvAndMelNoDuplicatesTestSet.zip')
# Change the idx column to be '0' where the diagnosis of the lesion was
# melanoma, and '1' when the diagnosis is nevi
test_df['idx'] = np.where(test_df['id'] == 'mel', 0 , 1)
# Save a new table 'features' to be test_df, without the idx column
features=test_df.drop(columns=['idx'], axis = 1)
# Create a new table with just the correct diagnosis (0 for melanoma, 1 for nevi)
target=test_df['idx']
# Change features to be a numpy array of image pixel data ((R, G, B))
features = np.asarray(features['image'].tolist())
# Normalise this data in an alternate table to be values from 0 ... 1
# e.g. 255 -> 1, 0 --> 0
# Normalised for the original prediction and evaluation of the model; the SHAP function below requires non-normalised data
# TODO: Standardise this so SHAP takes normalised data
features2 = features / 255
# Convert the data to one-hot encoding
target_cat = to_categorical(target, num_classes = 2)
# Get predictions for image data
# e.g.
# Index 0 : [0.9222, 0.0778]
# Index 1 : [0.4500, 0.5500]
# etc..
# This represents likelihood of melanoma and nevi respectively (according to the model)
y_pred = model.predict(features2, verbose=1)
# Create a new dataframe with entries for each element of the test set
# Include an ID, diagnosis, and % likelihoods for each diagnosis from the model
df = pd.DataFrame(columns=['ID', 'Dx', '% Mel', '% Nev'],index=[i for i in range(400)])
df['ID'] = df.index
# Create dictionaries to contain actual diagnosis and probabilities from the model
dx_d = {}
Pmel = {}
Pnev = {}
# Take the actual diagnoses from where we retrieved them earlier
y_test_cat = target_cat
# For each element in the test set:
for ind in range(400):
# Append the diagnosis and predictions to their respective dictionaries
if y_test_cat[ind][0] == 1.0:
diagnosis = 'Melanoma'
elif y_test_cat[ind][1] == 1.0:
diagnosis = 'Nevus'
dx_d[ind] = diagnosis
Pmel[ind] = y_pred[ind][0]
Pnev[ind] = y_pred[ind][1]
# Take the above dictionaries and insert them into the data frame
df['Dx'] = df['ID'].map(dx_d)
df['% Mel'] = df['ID'].map(Pmel)
df['% Nev'] = df['ID'].map(Pnev)
# Change the prediction likelihoods to be floats
df = df.astype({"% Mel": float, "% Nev": float})
#df = df.iloc[id_list]
# Print the first 5 entries in the data frame
print('Unseen set')
#print(df)
# + colab={} colab_type="code" id="JQxbAEqLwJQK"
#df.to_csv('predictions.csv')
#if you want to save predictions for your model uncomment this
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 8457, "status": "ok", "timestamp": 1564320507957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="FemFZGrNxEj1" outputId="6ad60a82-a90d-44c6-da79-60e550f4989f"
model.evaluate(features2, target_cat)
# + colab={} colab_type="code" id="n4mQv8haZCJJ"
from utils import *
# + colab={"base_uri": "https://localhost:8080/", "height": 995} colab_type="code" executionInfo={"elapsed": 90336, "status": "ok", "timestamp": 1564320682086, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-GytXuqoBZcY/AAAAAAAAAAI/AAAAAAAAACs/u0zFywWbmkI/s64/photo.jpg", "userId": "16479215267355514043"}, "user_tz": -600} id="R5Euo5wQuy37" outputId="e2dafe9e-5308-4c3e-89ed-27c995f7b173"
#Produces a single shap and GradCAM image for the 3rd image in features
Shap_single(features[2], model, gmodel, save = False, filename = None, relative_path = None)
# + colab={} colab_type="code" id="Xkuwel7wTXlD"
#Features is unstandardised numpy array of test images
#Model is keras model
#Gmodel is altered for gradcam
#Path is name of folder relative to current directory
#generates multiple shap images, i.e. will generate images for every image contained in features (which is unstandardised)
#Last argument is the relative path for where you want to save images
batch_shap(features, model, gmodel, 'OutputDirectory/')
# + colab={} colab_type="code" id="Aj-FF4dMZsHL"
| Shap_GradCAM_Notebooks/ShapGradCAM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ex3 - Getting and Knowing your Data
# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
# !conda install --yes pandas
import pandas as pd
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user).
# ### Step 3. Assign it to a variable called users and use the 'user_id' as index
# Pipe-delimited file; 'user_id' becomes the DataFrame index rather than a column.
data = pd.read_csv("https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user", sep="|", index_col="user_id")
# ### Step 4. See the first 25 entries
data.head(25)
# ### Step 5. See the last 10 entries
data.tail(10)
# ### Step 6. What is the number of observations in the dataset?
data.shape[0]
# ### Step 7. What is the number of columns in the dataset?
data.shape[1]
# ### Step 8. Print the name of all the columns.
for column in data.columns:
    print(column)
# ### Step 9. How is the dataset indexed?
data.index
# ### Step 10. What is the data type of each column?
data.dtypes
# ### Step 11. Print only the occupation column
data.occupation
# ### Step 12. How many different occupations there are in this dataset?
data.occupation.nunique()
# ### Step 13. What is the most frequent occupation?
# value_counts() sorts descending, so the first index entry is the mode.
data.occupation.value_counts().head(1).index[0]
# ### Step 14. Summarize the DataFrame.
data.describe()
# ### Step 15. Summarize all the columns
data.describe(include = "all")
# ### Step 16. Summarize only the occupation column
data.occupation.describe()
# ### Step 17. What is the mean age of users?
round(data.age.mean())
# ### Step 18. What is the age with least occurrence?
# value_counts() is descending, so tail() shows the least frequent ages.
data.age.value_counts().tail()
#
| notebooks/01_Getting_&_Knowing_Your_Data/Occupation/Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
import networkx as nx
import matplotlib.pyplot as plt
import cython
import numpy as np
import pandas as pd
import datetime
import pylab
# load in merged data
data_path = '../../../UCB_dept_merge_CorrectDeptLabels.csv'
merged = pd.read_csv(data_path)
#clean up the department dataset
def convert_strings_to_specials(s):
    """Normalize a column name: spaces and colons become underscores,
    '#' becomes 'num', and the result is lower-cased."""
    replacements = str.maketrans({' ': '_', ':': '_', '#': 'num'})
    return s.translate(replacements).lower()
merged.columns = [convert_strings_to_specials(col) for col in merged.columns]
#convert creation_date to datetime object
merged['creation_date']=pd.to_datetime(pd.Series(merged['creation_date']))
month = [date.month for date in merged['creation_date']]
year = [date.year for date in merged['creation_date']]
day = [date.day for date in merged['creation_date']]
merged['month'] = month
merged['year'] = year
merged['day'] = day
#iterator for getting month,year
def month_year_iter( start_month, start_year, end_month, end_year ):
    """Yield (year, month) pairs from the start month (inclusive) to the
    end month (exclusive), walking calendar months in order."""
    first = 12 * start_year + (start_month - 1)
    last = 12 * end_year + (end_month - 1)
    for ordinal in range(first, last):
        year, month_index = divmod(ordinal, 12)
        yield year, month_index + 1
def remove_zeroes(dictionary):
    """Remove entries whose value equals 0.0 from `dictionary`, in place,
    and return the same dict.

    BUGFIX: the original deleted keys while iterating dictionary.items(),
    which raises RuntimeError on Python 3 ("dictionary changed size during
    iteration"). Collect the keys first, then delete.
    """
    zero_keys = [key for key, value in dictionary.items() if value == 0.0]
    for key in zero_keys:
        del dictionary[key]
    return dictionary
#function to create a series of bipartite graphs across two sets of nodes(two columns) subsetted by month
#inputs are the data frame, set 1 and set 2 represent the names of the columns of interest, time_column represents
#the name of the time column in the dataset. year_column and month_columns are created because I did not know how
#to be able to specify a month year combination any other way
#returns a list of calculations(density, centrality, etc.) subseted by month in a list
#where each item in the list is in the following form: ([calculations],month,year)
def testCentralityGraph(data, set1, set2, time_column, month_column=None, year_column=None):
    """Build one bipartite graph per (month, year) between columns `set1`
    and `set2` of `data`, and compute density and centrality measures.

    Returns a list of ((G, density, bot_degree, top_degree, top_between,
    bot_between, top_projected_density, bot_projected_density), month, year)
    tuples, one per month from the first to the last date in `time_column`.
    """
    results = []
    # higher order function that does the calculations by month
    def calculate(set1, set2, current):
        G = nx.Graph()
        G.add_nodes_from(current[set1],bipartite = 0)
        G.add_nodes_from(current[set2],bipartite = 1)
        edgeList = [tuple(x) for x in current.values]
        G.add_edges_from(edgeList)
        bottom_nodes, top_nodes = nx.bipartite.sets(G)
        #projecting both set1 onto set2 and vice versa to see if they give different results
        projected_bottom = nx.bipartite.projected_graph(G, bottom_nodes, multigraph = True)
        projected_top = nx.bipartite.projected_graph(G, top_nodes, multigraph = True)
        #bipartite density
        density = nx.bipartite.density(G, top_nodes)
        #rest of the calculations made for both projected graphs
        top_degree = nx.degree_centrality(projected_top)
        bot_degree = nx.degree_centrality(projected_bottom)
        top_between = nx.betweenness_centrality(projected_top)
        bot_between = nx.betweenness_centrality(projected_bottom)
        top_projected_density = nx.density(projected_top)
        bot_projected_density = nx.density(projected_bottom)
        return (G,density,bot_degree,top_degree,top_between,bot_between,
            top_projected_density,bot_projected_density)
    firstDate = min(data[time_column])
    lastDate = max(data[time_column])
    # NOTE(review): `iter` shadows the builtin. month_year_iter yields
    # (year, month) pairs, matching the unpacking below.
    iter = month_year_iter(firstDate.month, firstDate.year, lastDate.month, lastDate.year)
    for year,month in iter:
        current = data[data[month_column] == month]
        current = current[current[year_column] == year]
        current = current[[set1,set2]]
        # NOTE(review): dropna() returns a new frame, so this call is a
        # no-op as written — probably meant `current = current.dropna()`.
        current.dropna()
        temp = calculate(set1,set2,current)
        results +=[(temp,month,year)]
    return results
#calculate the results of the data
masterlist = testCentralityGraph(merged,'department_name_update', 'supplier_name','creation_date','month','year')
#saving graphs for each month to image file
for item in masterlist:
G = item[0][0]
edgeList = G.edges()
nodelistDept, nodelistSup = nx.bipartite.sets(G)
nodelistDept = list(nodelistDept)
nodelistSup = list(nodelistSup)
pos=nx.networkx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, nodelist = nodelistDept, node_color = 'w', node_size = 50)
nx.draw_networkx_nodes(G, pos, nodelist = nodelistSup, node_color= 'r', node_size=50)
nx.draw_networkx_edges(G,pos,width=0.5,alpha=0.5)
nx.draw_networkx_edges(G,pos,edgelist=edgeList)
name = str(item[1]) + ', ' + str(item[2])
plt.savefig(name)
plt.clf()
plt.cla()
plt.close()
import json
# +
#saving results from masterlist to csv files for further analysis
for item in masterlist:
sup_degree_centrality = pd.DataFrame(data=item[0][2].items(), columns = ['supplier', 'degree_centrality'])
dept_degree_centrality = pd.DataFrame(data = item[0][3].items(),columns = ['department','degree_centrality'])
dept_between_centrality = pd.DataFrame(data = item[0][4].items(),columns = ['department','betweenness_centrality'])
sup_betweeen_centrality = pd.DataFrame(data = item[0][5].items(), columns = ['supplier','betweenness_centrality'])
name = str(item[1]) + '_' + str(item[2])
sdc = name + '_' +'supplier_degree_centrality.csv'
ddc = name + '_' + 'dept_degree_centrality.csv'
dbc = name + '_' + 'dept_between_centrality.csv'
sbc = name + '_' + 'sup_between_centrality.csv'
sup_degree_centrality.to_csv(sdc)
dept_degree_centrality.to_csv(ddc)
dept_between_centrality.to_csv(dbc)
sup_betweeen_centrality.to_csv(sbc)
dense = {'density' : item[0][1], 'department_projected_density' : item[0][6], 'supplier_projected_density' : item[0][7]}
density = name+'_' + 'density_calculations.json'
with open(density, 'wb') as fp:
json.dump(dense, fp)
# -
purchase_type = set(merged['item_type'])
purchase_type = list(purchase_type)
purchase_type[1]
def testEdgeGraph(data, set1, set2, subset_set, time_column, month_column=None, year_column=None, year=2013):
    """For each month of `year` and each value of `subset_set` (e.g. item
    type), build the bipartite graph between `set1` and `set2`, save a plot
    of it, and collect its bipartite density.

    Returns a dict mapping '<item>_<month>_<year>' -> bipartite density.
    The `year` keyword generalizes the previously hard-coded 2013 and
    defaults to it, so existing callers are unaffected.
    """
    density = {}

    def calculate(set1, set2, current):
        # Build the bipartite graph and return it with its bipartite density.
        G = nx.Graph()
        G.add_nodes_from(current[set1], bipartite=0)
        G.add_nodes_from(current[set2], bipartite=1)
        G.add_edges_from(tuple(x) for x in current.values)
        bottom_nodes, top_nodes = nx.bipartite.sets(G)
        return (G, nx.bipartite.density(G, top_nodes))

    subset = list(set(data[subset_set]))
    for month in range(1, 13):
        month_data = data[(data[month_column] == month) & (data[year_column] == year)]
        for item in subset:
            # BUGFIX: the original reassigned `current` inside this loop, so
            # after the first item the subset_set column was gone and later
            # iterations raised KeyError. Take a fresh slice per item.
            sub = month_data[month_data[subset_set] == item]
            # BUGFIX: the original discarded the dropna() result (no-op).
            sub = sub[[set1, set2]].dropna()
            G, dens = calculate(set1, set2, sub)
            edgeList = G.edges()
            nodelistDept, nodelistSup = nx.bipartite.sets(G)
            nodelistDept = list(nodelistDept)
            nodelistSup = list(nodelistSup)
            pos = nx.networkx.spring_layout(G)
            nx.draw_networkx_nodes(G, pos, nodelist=nodelistDept, node_color='r', node_size=50)
            nx.draw_networkx_nodes(G, pos, nodelist=nodelistSup, node_color='w', node_size=50)
            nx.draw_networkx_edges(G, pos, width=0.5, alpha=0.5)
            nx.draw_networkx_edges(G, pos, edgelist=edgeList)
            # BUGFIX: the original built the file name from item[1]/item[2]
            # (characters of the item string); use the actual month and year.
            name = item + '_' + str(month) + '_' + str(year)
            plt.savefig(name)
            plt.clf()
            plt.cla()
            plt.close()
            density[name] = dens
    return density
# +
def calculate(set1, set2, current):
    """Build a bipartite graph from two columns of `current` and return
    (graph, bipartite density of the graph)."""
    G = nx.Graph()
    G.add_nodes_from(current[set1], bipartite=0)
    G.add_nodes_from(current[set2], bipartite=1)
    G.add_edges_from(tuple(row) for row in current.values)
    bottom_nodes, top_nodes = nx.bipartite.sets(G)
    return (G, nx.bipartite.density(G, top_nodes))
#projecting both set1 onto set2 and vice versa to see if they give different results
#projected_bottom = nx.bipartite.projected_graph(G, bottom_nodes, multigraph = True)
#projected_top = nx.bipartite.projected_graph(G, top_nodes, multigraph = True)
#bipartite density
#rest of the calculations made for both projected graphs
"""
top_degree = nx.degree_centrality(projected_top)
bot_degree = nx.degree_centrality(projected_bottom)
top_between = nx.betweenness_centrality(projected_top)
bot_between = nx.betweenness_centrality(projected_bottom)
top_projected_density = nx.density(projected_top)
bot_projected_density = nx.density(projected_bottom)
"""
# +
# Per-month, per-item-type bipartite density between departments and
# suppliers for 2013, saving a plot of each graph.
density = {}
purchase_type = list(set(merged['item_type']))
set1, set2 = 'department_name_update', 'supplier_name'
data2013 = merged[merged['year'] == 2013]
for month in range(1, 13):
    month_data = data2013[data2013['month'] == month]
    for item in purchase_type:
        # BUGFIX: the original narrowed `current` in place inside this loop,
        # so after the first item the 'item_type' column was gone and the
        # next iteration raised KeyError. Slice from the month's data anew.
        sub = month_data[month_data['item_type'] == item]
        # BUGFIX: the original discarded the dropna() result (no-op).
        sub = sub[[set1, set2]].dropna()
        temp = calculate(set1, set2, sub)
        G = temp[0]
        edgeList = G.edges()
        nodelistDept, nodelistSup = nx.bipartite.sets(G)
        nodelistDept = list(nodelistDept)
        nodelistSup = list(nodelistSup)
        pos = nx.networkx.spring_layout(G)
        nx.draw_networkx_nodes(G, pos, nodelist=nodelistDept, node_color='r', node_size=50)
        nx.draw_networkx_nodes(G, pos, nodelist=nodelistSup, node_color='w', node_size=50)
        nx.draw_networkx_edges(G, pos, width=0.5, alpha=0.5)
        nx.draw_networkx_edges(G, pos, edgelist=edgeList)
        name = item + '_' + str(month) + '_' + str(2013)
        plt.savefig(name)
        plt.clf()
        plt.cla()
        plt.close()
        density[name] = temp[1]
# -
| script/network_analysis/network_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import scipy
import scipy.special
import matplotlib.pyplot as plt
plt.style.use('default')
from pyfermions import *
# # Entropy
# +
def h(p):
    """Binary entropy (natural log) of `p`, elementwise, with `p` clipped to [0, 1]."""
    q = np.clip(p, 0, 1)
    return scipy.special.entr(q) + scipy.special.entr(1 - q)
def entropy(cov):
    """Sum of binary entropies of the eigenvalues of the (Hermitian) matrix `cov`."""
    occupations = np.linalg.eigvalsh(cov)
    return h(occupations).sum()
# R = system size
# S = entropy
# Accumulator shared across calls; one row per (K, L, M, R) combination.
data = {'K': [], 'L': [], 'M': [], 'R': [], 'S': []}
def compute_entropies(K, L, M=15, R_max=256):
    """Compute entanglement entropies S(R) for subsystem sizes R = 1..R_max
    and append them to the module-level `data` dict.

    K, L parameterize the Selesnick MERA construction; M is presumably the
    number of layers/levels — TODO confirm against pyfermions.mera1d.
    """
    mera = mera1d.selesnick(K, L)
    print(
        f'K={K}, L={L}, M={M}: computing covariance matrix for subsystem size {R_max}...'
    )
    cov = mera.covariance(R_max, M)
    print(f'K={K}, L={L}, M={M}: computing entropies...')
    for R in range(1, R_max + 1):
        # Entropy of the leading R-by-R principal block = subsystem of size R.
        S = entropy(cov[:R, :R])
        data['K'].append(K)
        data['L'].append(L)
        data['M'].append(M)
        data['R'].append(R)
        data['S'].append(S)
    print(f'K={K}, L={L}, M={M}: done')
compute_entropies(1, 1, 10, 256)
compute_entropies(3, 3, 10, 256)
entropies = pd.DataFrame(data)
# -
# Collect bond dimensions:
entropies['chi'] = 2**(entropies['K'] + entropies['L'])
# Plot together with log fit:
# +
plt.figure(figsize=(3.2, 4.8))
for chi, data in entropies.groupby(['chi']):
#if chi == 4: continue
R = data['R']
S = data['S']
plt.scatter(R[::8], S[::8], marker='+', label=f'$\chi={chi}$')
# A log(R) + B = S
A, B = np.polyfit(np.log(R), S, 1)
plt.plot(
R,
A * np.log(R) + B,
label=f'$\\frac{{{3*A:.2}}}{{3}}\log(R) + {B:.2}$')
print(A, B)
plt.legend()
plt.xlabel('subsystem size', fontsize=14)
plt.ylabel('entropy', fontsize=14)
plt.savefig('entropies1d.pdf', bbox_inches='tight')
# -
# Cardy tells us that $S(R) = \frac c3 \ln \frac\ell a + c_1'$.
# S(R) = A * np.log(R) + B
c = A * 3
c
# This is indeed very close to what we expect ($c=1$).
| notebooks/entropies1d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building queries with the Python SDK
# In the following notebook, we will show how to build complex queries in GOR using the Python SDK to connect to our instance. First, as always, we load the gor magic extension to be able to use the `%gor` and `%%gor` syntax.
# This notebook assumes you are familiar with the gor syntax.
# Install the python sdk, if needed
# ! pip install nextcode-sdk[jupyter] -U > /dev/null
# load the magic extension and imports
# %reload_ext nextcode
import nextcode
import pandas as pd
# %env LOG_QUERY=1
# #### Simple single-line gor expression
# %gor nor #dbsnp# | top 2
# #### Single-line gor expression which stores results in local variable
# var = %gor nor #dbsnp# | top 2
print(var)
# #### Simple multi-line gor expression
# Multi-line expressions are started with `%%`. Each statement should end with `;`
# %%gor
def #top# = 2;
gor #dbsnp#
| top #top#;
# #### Multi-line gor expression which stores results in local variable
# To load results from a multi-line gor expression into a variable you can use the special `<<` operator
# %%gor new_results <<
def #tops# = 10;
gor #dbsnp# | top #tops#;
new_results.index
# #### Store results in the user_data folder
# Instead of fetching the data to the jupyter server you can also use the | write gor command to store files in the user_data folder within the project on NFS.
# %%gor
pgor #dbsnp#
| top 1000
| write user_data/outfile.gorz;
# verify that the file is there by executing gorls
# %gorls user_data outfile.gorz
# now you can gor this file and bring the results into a local dataframe
# %gor user_data/outfile.gorz | GROUP chrom -count | top 2
# ### Download results to a local file
# When the results are very large and you want to be able to use them locally without rerunning the query you can stream the results directly to a file with the << operator by specifying `file:`.
# You can cancel the download at any time and have partial results in the output file.
filename = "dbsnp_2m.tsv"
# %%gor file:$filename <<
gor #dbsnp# | top 2000000
# !wc -l $filename
# !ls -lh $filename
#
#
#
#
#
#
#
# #### Gor expression which references a local variable
# Local variables can be references using the `$xxx` syntax. This will expand the variable inline and is meant to be used for simple strings and numbers
num = 5
# %%gor
gor -p chr1:69000-70000 #dbsnp#
| TOP $num;
# #### Gor expression with defs and create statements
# +
# %%gor
def #number# = $num;
def #VEP_single# = UKBB/freezes/2019_0325/plink_wes/vep_single.gorz;
def #myfavouritefood# = pizza;
create mydbsnp = gor -p chr1:69000-70000 #dbsnp#
| TOP #number#;
gor [mydbsnp];
# -
# #### Compound expressions
# Using the local variables you can construct compound expressions, allowing you to split up long gor scripts.
defs = """
def #myfirstdef# = 2;
def #myseconddef# = 2;
"""
creates = """
create mydbsnp = gor -p chr1:69000-70000 #dbsnp#
| TOP #myfirstdef#;
"""
# %%gor
$defs
$creates
gor [mydbsnp];
# #### Virtual relations
# You can load pandas dataframes into the query with the special `[var:xxx]` syntax.
allpns = pd.read_csv('pnlist.tsv', delimiter='\t')
# %%gor
create bla = gor #dbsnp#
| top 10;
nor [var:allpns]
| top 5;
# #### Cancelling queries
# Try executing this long-running query and then click the stop button
# results = %gor pgor #dbsnp# | GROUP chrom -count
# ### Using the SDK without magic syntax
# The `%` and `%%` is simply syntactic sugar for executing queries through the python sdk. You can use the sdk directly as well and that is probably the preferred method for serious analysis.
# #### Initializing the SDK
# The SDK usually needs to be initialized with an API key and a project. However, in Wuxi notebooks this has already been done for you.
# #%env GOR_API_KEY=[jwt token]
# #%env GOR_API_PROJECT=[project name]
import nextcode
import pandas
# get a handle on a query service proxy
svc = nextcode.get_service("query")
# #### Running basic queries
qry = svc.execute("gor #dbsnp# | top 2;")
print(f"Query {qry.query_id} is {qry.status}. Runtime was {qry.duration} ms and it produced {qry.line_count} rows.")
print(f"The query that was run: {qry.query}")
from pprint import pprint
df = qry.dataframe()
df
# #### Analysing failed queries
qry = svc.execute("gor #unknowndef# | top 2;")
if qry.status == "FAILED":
import sys
print(f"Query failed with error: {qry.error['message']}\n", file=sys.stderr)
print(f"Additional information available from qry.error[]: {list(qry.error.keys())}", file=sys.stderr)
# #### Using virtual relations
# +
import pandas as pd
import nextcode
svc = nextcode.get_service("query")
# use a dataframe as a virtual relation
df = pd.DataFrame(columns=["hello", "world"], data=[[1, 2]])
# The keyword name ("test") becomes the relation name usable as [test].
qry = svc.execute("gor [test]", test=df)
print(qry.dataframe())
# use a tsv string as a virtual relation
tsv = """hello\tworld\n1\t2"""
qry = svc.execute("gor [test]", test=tsv)
print(qry.dataframe())
# if your virtual relation uses special characters (:) in variable name
# pass it through `relations=` instead, since ':' is not a valid kwarg char.
qry = svc.execute("gor [var:test]", relations=[{"name": "var:test", "data": df}])
print(qry.dataframe())
| docs/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Nurvv K-Means
# +
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt
import numpy as np
import time
# %matplotlib inline
# -
# ## NURVV DATASET
# Extract IMU (accelerometer + gyroscope) from 2 distinct runs
data_folder = '2019-07-23_CricketPitch_Grant' # _Josh
df_acc = pd.read_csv(data_folder + '/imu_acc.csv')
df_gyro = pd.read_csv(data_folder + '/imu_gyro.csv')
# Convert units axis by axis: gyroscope deg/s -> rad/s (x 0.017453) and
# accelerometer g -> m/s^2 (x 9.80665).
for axis in ('X', 'Y', 'Z'):
    df_gyro['Gyro-' + axis] = df_gyro['Gyro-' + axis].multiply(0.017453)
    df_acc['Accel-' + axis] = df_acc['Accel-' + axis].multiply(9.80665)
# +
# Assemble one IMU frame (6 channels) indexed by elapsed time since the
# first accelerometer sample.
elapsed = (df_acc['TimeStamp'].values - df_acc['TimeStamp'].values[0])/1_000
df_imu = pd.DataFrame([elapsed,
                       df_acc['Accel-X'].values,
                       df_acc['Accel-Y'].values,
                       df_acc['Accel-Z'].values,
                       df_gyro['Gyro-X'].values,
                       df_gyro['Gyro-Y'].values,
                       df_gyro['Gyro-Z'].values]).T
df_imu.columns = ['Timestamp', 'Accel-X', 'Accel-Y', 'Accel-Z', 'Gyro-X', 'Gyro-Y', 'Gyro-Z']
df_imu = df_imu.set_index('Timestamp')
# If we want data normalized
# Since for us the big oscillations are important as they indicate IC, we choose to not normalized data
# although this is usually required to not give more weights to certain features than others
# df_imu=(df_imu-df_imu.min())/(df_imu.max()-df_imu.min())
# -
df_imu.index[0]
# Window of interest used throughout the rest of the notebook.
initial_timestamp = 2_434_481_487
final_timestamp = 2_436_981_487
df_short_imu = df_imu[initial_timestamp:final_timestamp]
df_short_imu.head()
# +
# Plot the short window of X-axis acceleration over time.
plt.figure(figsize=(25,5))
plt.title("Accelerometer X")
plt.ylabel('Accelerometer [m/s^2]')
# FIX: grid()'s `b=` keyword was removed in matplotlib 3.6; passing the
# flag positionally works on every matplotlib version.
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.xlabel('Timestamp')
plt.plot(df_short_imu.index, df_short_imu['Accel-X'])
plt.xlim(df_short_imu.index[0], df_short_imu.index[-1])
plt.show()
# -
# # Apply PCA
# PCA (2 dimensions)
# Project the 6-channel window onto its two leading principal components.
pca = PCA(n_components=2)
IMU = pca.fit(df_short_imu).transform(df_short_imu)
# FIX: the original opened a second, empty figure before this one;
# a single figure is enough.
plt.figure(figsize=(25,10))
# One scatter call per sample so each point cycles through matplotlib's
# colour cycle (slow for large windows, kept for visual parity).
for i in range(len(IMU)):
    plt.scatter(IMU[i][0], IMU[i][1])
# + [markdown] tags=[]
# This allows to understand if there's a pattern on the data.
# Since when we plotted the PCA data above we lost the notion of time.
# -
# Only for interpretation purpose
# Dead code (guarded by `if 0:`): incrementally animates the PCA scatter
# next to the raw Accel-X trace so each 2-D point can be related back to
# its position in time. Enable by changing 0 -> 1.
if 0:
    # %matplotlib notebook
    fig = plt.figure()
    ax = fig.add_subplot(211)   # PCA scatter
    ax2 = fig.add_subplot(212)  # raw Accel-X signal
    plt.ion()
    fig.canvas.draw()
    x = list()
    y = list()
    for i in range(len(IMU)):
        x.append(IMU[i][0])
        y.append(IMU[i][1])
        ax.scatter(IMU[i][0], IMU[i][1])
        ax2.scatter(df_short_imu.index[i], df_short_imu['Accel-X'].values[i])
        fig.canvas.draw()
# %matplotlib inline
# ## Kmeans on 6D Data
# Fit a 2-cluster KMeans on the full 6-channel IMU frame and display the
# resulting cluster centres, one column per channel.
kmeans = KMeans(n_clusters=2, random_state=0)
kmeans.fit(df_imu)
pd.DataFrame.from_records(
    kmeans.cluster_centers_,
    columns=['Accel-X', 'Accel-Y', 'Accel-Z', 'Gyro-X', 'Gyro-Y', 'Gyro-Z'],
)
# +
# Due to the amount of data we'll shorten the plotting to between these idx
idx_start = 96000
idx_end = 99000
plt.figure(figsize=(25,10))
# Top panel: raw Accel-X with each sample's KMeans cluster shaded behind it.
plt.subplot(211)
plt.title("Accel X with Red/Green being Clusters from KMeans")
plt.ylabel('Accel [m/s^2]')
# FIX: grid()'s `b=` keyword was removed in matplotlib 3.6; pass positionally.
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.plot(df_imu.index[idx_start:idx_end], df_imu['Accel-X'].values[idx_start:idx_end])
colors = ['green', 'red', 'tab:orange', 'tab:blue']
for idx,lbl in enumerate(kmeans.labels_[idx_start+1:idx_end]):
    # NOTE(review): span boundaries are passed (right, left); axvspan still
    # draws the region — confirm the intended orientation.
    plt.gca().axvspan(df_imu.index[idx_start+idx+1], df_imu.index[idx_start+idx], alpha=0.05, color=colors[lbl])
plt.xlim([df_imu.index[idx_start], df_imu.index[idx_end]])
#plt.plot(df_imu.index[idx_start:idx_end], 300*test[idx_start:idx_end], lw=2)
# Bottom panel: the raw 0/1 cluster-label sequence for the same window.
plt.subplot(212)
plt.title("Clusters from KMeans")
plt.ylabel('Accel [m/s^2]')
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.plot(df_imu.index[idx_start:idx_end], kmeans.labels_[idx_start:idx_end], c='k')
plt.xlim([df_imu.index[idx_start], df_imu.index[idx_end]])
plt.show()
# -
# NOTE(review): this cell re-runs the fit and plot of the previous cells
# verbatim — consider removing one copy.
kmeans = KMeans(n_clusters=2, random_state=0).fit(df_imu)
pd.DataFrame.from_records(kmeans.cluster_centers_,
                          columns=['Accel-X', 'Accel-Y', 'Accel-Z', 'Gyro-X', 'Gyro-Y', 'Gyro-Z'])
# +
# Due to the amount of data we'll shorten the plotting to between these idx
idx_start = 96000
idx_end = 99000
plt.figure(figsize=(25,10))
plt.subplot(211)
plt.title("Accel X with Red/Green being Clusters from KMeans")
plt.ylabel('Accel [m/s^2]')
# FIX: grid()'s `b=` keyword was removed in matplotlib 3.6; pass positionally.
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.plot(df_imu.index[idx_start:idx_end], df_imu['Accel-X'].values[idx_start:idx_end])
colors = ['green', 'red', 'tab:orange', 'tab:blue']
for idx,lbl in enumerate(kmeans.labels_[idx_start+1:idx_end]):
    plt.gca().axvspan(df_imu.index[idx_start+idx+1], df_imu.index[idx_start+idx], alpha=0.05, color=colors[lbl])
plt.xlim([df_imu.index[idx_start], df_imu.index[idx_end]])
plt.subplot(212)
plt.title("Clusters from KMeans")
plt.ylabel('Accel [m/s^2]')
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.plot(df_imu.index[idx_start:idx_end], kmeans.labels_[idx_start:idx_end], c='k')
plt.xlim([df_imu.index[idx_start], df_imu.index[idx_end]])
plt.show()
# -
# + [markdown] tags=[]
# # Extract Features
# Instead of using the raw data we could potentially extract important features, see "Accurate Estimation of Running
# Temporal Parameters Using Foot-Worn Inertial Sensors". As an example, let us depict the acceleration and gyroscope magnitudes.
# +
# FIX: each magnitude series was computed twice (once for the magnitude
# column, once inside np.diff); hoist them so each is computed once.
accel_mag = np.sqrt(df_acc['Accel-X']**2+df_acc['Accel-Y']**2+df_acc['Accel-Z']**2)
gyro_mag = np.sqrt(df_gyro['Gyro-X']**2+df_gyro['Gyro-Y']**2+df_gyro['Gyro-Z']**2)
# The first sample is dropped so all columns align with the n-1 length of
# the np.diff "jerk" columns.
data_imu_features = [df_acc['TimeStamp'].values[1:],
                     accel_mag[1:],
                     gyro_mag[1:],
                     np.diff(accel_mag),
                     np.diff(gyro_mag)]
df_imu_features = pd.DataFrame(data_imu_features)
df_imu_features = df_imu_features.T
df_imu_features.columns = ['TimeStamp', 'Accel-Mag', 'Gyro-Mag', 'Accel-Jerk', 'Gyro-Jerk']
df_imu_features=df_imu_features.set_index('TimeStamp')
# -
df_imu_features.head()
# +
# Magnitude time-series over the window of interest.
plt.figure(figsize=(25,10))
plt.subplot(211)
plt.title("Accelerometer Magnitude")
plt.ylabel('Accelerometer [m/s^2]')
# FIX: grid()'s `b=` keyword was removed in matplotlib 3.6; pass positionally.
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.xlim(initial_timestamp, final_timestamp)
plt.plot(df_imu_features.index, df_imu_features['Accel-Mag'])
plt.subplot(212)
plt.title("Gyroscope Magnitude")
plt.ylabel('Gyroscope [rad/s]')
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.xlim(initial_timestamp, final_timestamp)
plt.plot(df_imu_features.index, df_imu_features['Gyro-Mag'])
plt.show()
# +
# Jerk (first difference of the magnitudes) over the window of interest.
plt.figure(figsize=(25,10))
plt.subplot(211)
plt.title("Acceleration Jerk")
plt.ylabel('Jerk [m/s^3]')
# FIX: grid()'s `b=` keyword was removed in matplotlib 3.6; pass positionally.
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.xlim(initial_timestamp, final_timestamp)
plt.plot(df_imu_features.index, df_imu_features['Accel-Jerk'])
plt.subplot(212)
plt.title("Gyroscope Jerk")
plt.ylabel('Jerk [rad/s^2]')
plt.grid(True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlabel('TimeStamp')
plt.xlim(initial_timestamp, final_timestamp)
plt.plot(df_imu_features.index, df_imu_features['Gyro-Jerk'])
plt.show()
# -
# # Apply PCA
# +
# Project the 4-D feature frame onto its first two principal components.
df_short_imu_features = df_imu_features[initial_timestamp:final_timestamp]
df_short_imu_features.head()
# PCA (2 dimensions)
pca = PCA(n_components=2)
IMU = pca.fit(df_short_imu_features).transform(df_short_imu_features)
plt.figure(figsize=(25,10))
# One scatter per sample so each point gets its own colour from the cycle.
for point in IMU:
    plt.scatter(point[0], point[1])
# -
# Only for interpretation purpose
# Dead code (guarded by `if 0:`), same incremental animation as earlier but
# for the feature-space PCA. Enable by changing 0 -> 1.
if 0:
    # %matplotlib notebook
    fig = plt.figure()
    ax = fig.add_subplot(211)   # PCA scatter
    ax2 = fig.add_subplot(212)  # raw Accel-X signal
    plt.ion()
    fig.canvas.draw()
    x = list()
    y = list()
    for i in range(len(IMU)):
        x.append(IMU[i][0])
        y.append(IMU[i][1])
        ax.scatter(IMU[i][0], IMU[i][1])
        ax2.scatter(df_short_imu.index[i], df_short_imu['Accel-X'].values[i])
        fig.canvas.draw()
# %matplotlib inline
# # Kmeans on 4D Features Data
# Same 2-cluster KMeans, now on the 4 engineered feature channels.
kmeans = KMeans(n_clusters=2, random_state=0)
kmeans.fit(df_imu_features)
pd.DataFrame.from_records(
    kmeans.cluster_centers_,
    columns=['Accel-Mag', 'Gyro-Mag', 'Accel-Jerk', 'Gyro-Jerk'],
)
| Nurvv_KMeansFeatureExtraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# magic command line
# %matplotlib inline
# +
# import dependencies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load in csv
pyber_ride_df = pd.read_csv("Resources/PyBer_ride_data.csv")
pyber_ride_df
# -
# plot the months along the x-axis and the fare on the y-axis using the plot() function.
pyber_ride_df.plot(x="Month", y="Avg. Fare ($USD)")
plt.show()
# +
# Set x-axis and tick locations.
x_axis = np.arange(len(pyber_ride_df))
# FIX: the identity comprehension `[value for value in x_axis]` was a
# needless element-by-element copy; list() expresses the intent directly.
tick_locations = list(x_axis)
# Plot the data with one labelled tick per month.
pyber_ride_df.plot(x="Month", y="Avg. Fare ($USD)")
plt.xticks(tick_locations, pyber_ride_df["Month"])
plt.show()
# +
# The other approach is to add the kind parameter to the plot() function. Add the following code to a new cell:
# Render the same series as a bar chart instead of a line chart.
pyber_ride_df.plot(x="Month", y="Avg. Fare ($USD)", kind='bar')
plt.show()
# +
# SKILL DRILL
# Using the data from the PyBer ride DataFrame, create a bar chart with the following annotations and characteristics:
# Add error bars.
# Add caps to the error bars.
# Change the color of the bars to sky blue.
# Rotate the labels on the x-axis to horizontal.
# Set the y-axis increment to every $5.
# Your chart should look similar to this:
# -
| Module Practice/PyBer_ride_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nyp-sit/it3103/blob/main/week14/simple_chatbot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="kBcdHO6HqntD"
# # Simple Chatbot
#
# In this final part, we will take the models that we have trained and use them to recognize an intent and the entities and building simple responses to that.
#
# Before starting, click on the Colab's Runtime > Manage Sessions menu. Click the "TERMINATE OTHER SESSIONS" button.
#
# Then, run the following cells to download the models (after training them to our intent and token classification tasks) and install the necessary libraries.
#
# The reason we are doing this is because downloading the models that you have trained from Colab is VERY slow. So we've already saved a copy of our own trained models and uploaded it to a public server on Amazon for download.
#
# + id="sSsEBxfeqnTc"
# !wget https://nyp-aicourse.s3.ap-southeast-1.amazonaws.com/pretrained-models/intent_model.zip
# !wget https://nyp-aicourse.s3.ap-southeast-1.amazonaws.com/pretrained-models/token_model.zip
# !unzip intent_model.zip
# !unzip token_model.zip
# + [markdown] id="uI9SzGqc5rid"
# Next, run the following to install a specific version of the HuggingFace Transformers library.
#
# Our model was trained against this version of the library, so it is advisable to use the same version for prediction / inference.
# + id="o1ZHduZsD9FM"
# !pip install transformers==4.7
# + [markdown] id="yIfHlpofrhsw"
# ## Section 1 - Inferring Intent
#
# In this section, we declare the codes to infer intent based on a single line of input text.
# + id="t7arnC-lxUx0"
# Import the necessary libraries
#
from transformers import (
    AutoTokenizer,
    TFAutoModelForSequenceClassification
)
import numpy as np
import tensorflow as tf
# Create the DistilBERT tokenizer
#
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
# Create a list of unique labels that we will recognize.
# Index positions must match the label ids the intent model was trained
# with — do not reorder.
#
sentence_labels = [
    "others",
    "atis_abbreviation",
    "atis_aircraft",
    "atis_airfare",
    "atis_airline",
    "atis_flight",
    "atis_flight_time",
    "atis_greeting",
    "atis_ground_service",
    "atis_quantity",
    "atis_yes",
    "atis_no"]
# Define a function to perform inference on a single input text.
#
def infer_intent(model, text):
    """Classify `text` into one of `sentence_labels` using `model`."""
    # Tokenize the raw text (FIX: renamed from `input`, which shadowed the
    # builtin of the same name).
    encoded = tokenizer(text, truncation=True, padding=True, return_tensors="tf")
    # Sends the result from the tokenizer into our classification model
    #
    output = model(encoded)
    # Highest-probability class index after softmax over the logits.
    pred_label = np.argmax(tf.nn.softmax(output.logits, axis=-1))
    # Return the result to the caller
    #
    return sentence_labels[pred_label]
# Load the saved model file
#
intent_model = TFAutoModelForSequenceClassification.from_pretrained('intent_model')
# + [markdown] id="UfalBIty8HE9"
# Run the following cell to test the codes that infers the intent.
# + id="SGku6HIC7tus"
# Smoke test: this utterance should map to an airfare/flight-style intent.
infer_intent(intent_model, "How much is the ticket to fly to New York")
# + [markdown] id="fPh__bBSrsXg"
# ## Section 2 - Inferring Entity
#
# In this section, we declare the codes to infer entities for each individual word in a line of text. The entities are then constructed and returned to the caller.
#
# + id="IuqsojUoPKLl"
from transformers import (
    AutoTokenizer,
    TFAutoModelForTokenClassification
)
import numpy as np
# + id="ctC7Sb93PhNl"
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
# + id="ZRFGoqUEvayO"
# Define a list of unique labels that we will recognized
# (CoNLL-style BIO tags: B-/I- prefix plus PER/ORG/LOC/MISC entity type;
# order must match the ids the token model was trained with)
#
token_labels = ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC']
# Define the function to infer the individual tokens
#
def infer_tokens(model, text):
    """Token-level inference: return (label_ids, word_texts) for `text`.

    Sub-word pieces (offset mapping not starting at char 0) are merged back
    into their parent word; [CLS]/[SEP] are dropped from the merged text.
    """
    encodings = tokenizer(text.split(' '),
                          is_split_into_words=True,
                          padding=True,
                          truncation=True,
                          return_offsets_mapping=True,
                          return_tensors="tf")
    tokens = tokenizer.tokenize(text, add_special_tokens=True)
    # label_mapping[i] == 1 marks pieces that START a new word.
    label_mapping = [0] * len(encodings.offset_mapping[0])
    for i, offset in enumerate(encodings.offset_mapping[0]):
        if offset[0] == 0 and offset[1] != 0:
            label_mapping[i] = 1
    encodings.pop("offset_mapping")
    # FIX: the original ignored the `model` parameter and always called the
    # global `token_model`; use the argument that was passed in.
    output = model(encodings)[0]
    cur_index = -1
    result_tokens = []
    result_texts = []
    for i in range(output.shape[1]):
        if label_mapping[i] == 1:
            # Word-initial piece: record its predicted label id and text.
            result_tokens.append(np.argmax(output[0][i]).item())
            result_texts.append(tokens[i])
            cur_index += 1
        else:
            # Continuation piece: glue onto the current word, skip specials.
            if cur_index >= 0 and tokens[i] != "[CLS]" and tokens[i] != "[SEP]":
                result_texts[cur_index] += tokens[i].replace("##", "")
    return result_tokens, result_texts
# Define the function to combine individual tokens into a dictionary
#
def infer_combined_tokens(token_model, text):
    """Group per-word BIO predictions from infer_tokens into entity lists.

    returns: dict mapping entity type (PER/LOC/ORG/MISC) -> list of strings.
    """
    result = {
        "PER" : [],
        "LOC" : [],
        "ORG" : [],
        "MISC" : []
    }
    result_tokens, result_texts = infer_tokens(token_model, text)
    current_token_label = ""
    current_result_index = -1
    for i in range(len(result_tokens)):
        label = token_labels[result_tokens[i]]
        if label.startswith("B-"):
            # Start a new entity of this type.
            current_token_label = label.replace("B-", "")
            result[current_token_label].append(result_texts[i])
            current_result_index = len(result[current_token_label]) - 1
        elif label.startswith("I-") and current_token_label:
            # FIX: guard against an I- tag with no preceding B- tag, which
            # previously raised KeyError on result[""].
            result[current_token_label][current_result_index] += " " + result_texts[i]
    return result
# Load the saved model file
#
# + id="kFlcl1NOzqcQ"
token_model = TFAutoModelForTokenClassification.from_pretrained('token_model')
# + id="K2YiSzKZPZlg"
# Smoke test: per-word label ids and the merged word texts.
tokens, text = infer_tokens(token_model, "How much is the ticket to fly to New York")
# + id="siH0F4r7QLvY"
print(tokens)
print(text)
# + [markdown] id="VjnIpnGJ8AYv"
# Run the following cell to test the codes that extracts and combines all the entities for us.
# + id="LK-8pQV574uI"
# Entities grouped by type (PER/LOC/ORG/MISC).
infer_combined_tokens(token_model, "<NAME> and <NAME> of Aims are going to fly to New York")
# + [markdown] id="3fBzlRb7vbXe"
# ## Section 3 - Implementing Logic for Our Chatbot
#
# In this section, let's implement some very basic logic for our chatbot. We will make use of the two functions that we wrote above.
#
# You can implement some simple logic that looks like the following:
#
# ```
# if (intent == "atis_flight" or intent == "atis_airline") and len(tokens["LOC"]):
# print ("Can I confirmed if you just asked about flying to " + tokens["LOC"][0])
# elif intent == "atis_yes":
# print ("Great, then let's me book the ticket for you")
# elif intent == "atis_no":
# print ("Oh I am sorry what did I get wrong?")
# elif intent == "atis_greeting":
# print ("Hi, how are you?")
# else:
# print ("I don't quite know how to respond to " + intent + " yet.")
# ```
# + id="QBZ-U3dQLj3H"
def chatbot():
    """Minimal REPL: classify each input line's intent and entities, reply."""
    print ("Chatbot Started. Press 'Q'+Enter to quit.")
    while True:
        input_text = input()
        # Quit on 'Q' or an empty line.
        if input_text in ("Q", ""):
            break
        intent = infer_intent(intent_model, input_text)
        tokens = infer_combined_tokens(token_model, input_text)
        # TODO:
        # Write your own logic to conduct a conversation with the user
        # about buying tickets and flying somewhere.
        #...#
        asked_about_flight = intent == "atis_flight" or intent == "atis_airline"
        if asked_about_flight and len(tokens["LOC"]):
            print ("Can I confirmed if you just asked about flying to " + tokens["LOC"][0])
        elif intent == "atis_yes":
            print ("Great, then let me book the ticket for you")
        elif intent == "atis_no":
            print ("Oh I am sorry what did I get wrong?")
        elif intent == "atis_greeting":
            print ("Hi, how are you?")
        else:
            print ("I don't quite know how to respond to " + intent + " yet.")
    print ("Good bye!")
chatbot()
| week14/simple_chatbot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# uncomment below line of code if you want to calculate features and save dataframe
# this script prints the path at which dataframe with calculated features is saved.
# train.py calls the DataGenerator class to
# # %run ./train.py WMT original
# this notebook was trained on cloud compute. So use your own paths
# + _cell_guid="" _uuid=""
import pandas as pd
import pickle
import numpy as np
from tqdm import tqdm_notebook as tqdm
from IPython.core.interactiveshell import InteractiveShell
np.random.seed(2)  # reproducible splits/sampling below
company_code = 'WMT'
strategy_type = 'original'
# use the path printed in above output cell after running stock_cnn.py. It's in below format
df = pd.read_csv("../input/stock-wmt-blog/df_"+company_code+".csv")
df['labels'] = df['labels'].astype(np.int8)  # compact dtype for the class column
# Drop columns that are not features (present only in some exports).
if 'dividend_amount' in df.columns:
    df.drop(columns=['dividend_amount', 'split_coefficient'], inplace=True)
display(df.head())
# -
# Split data into Training, Validation and Test
# +
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
# from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler
from collections import Counter
# All engineered features live in the contiguous column range open..eom_26.
list_features = list(df.loc[:, 'open':'eom_26'].columns)
print('Total number of features', len(list_features))
# Stratified 80/20 split keeps the label distribution in both sets.
x_train, x_test, y_train, y_test = train_test_split(df.loc[:, 'open':'eom_26'].values, df['labels'].values, train_size=0.8,
                                                    test_size=0.2, random_state=2, shuffle=True, stratify=df['labels'].values)
# smote = RandomOverSampler(random_state=42, sampling_strategy='not majority')
# x_train, y_train = smote.fit_resample(x_train, y_train)
# print('Resampled dataset shape %s' % Counter(y_train))
# Small datasets keep a larger train share so CV still has enough rows.
if 0.7*x_train.shape[0] < 2500:
    train_split = 0.8
else:
    train_split = 0.7
# train_split = 0.7
print('train_split =',train_split)
x_train, x_cv, y_train, y_cv = train_test_split(x_train, y_train, train_size=train_split, test_size=1-train_split,
                                                random_state=2, shuffle=True, stratify=y_train)
# Fit the scaler on the training data only, then apply to CV/test (avoids leakage).
mm_scaler = MinMaxScaler(feature_range=(0, 1)) # or StandardScaler?
x_train = mm_scaler.fit_transform(x_train)
x_cv = mm_scaler.transform(x_cv)
x_test = mm_scaler.transform(x_test)
x_main = x_train.copy()  # full-feature copy kept for the selection step below
print("Shape of x, y train/cv/test {} {} {} {} {} {}".format(x_train.shape, y_train.shape, x_cv.shape, y_cv.shape, x_test.shape, y_test.shape))
# -
# Out of total 441+ features select top 'N' features (let's include base features like close, adjusted_close etc)
num_features = 225 # should be a perfect square
selection_method = 'all'
# With 'all' we over-select (topk > num_features) from each selector so the
# intersection of the two selections still leaves enough features.
topk = 320 if selection_method == 'all' else num_features
# if train_split >= 0.8:
#     topk = 400
# else:
#     topk = 300
# +
# %%time
from operator import itemgetter
from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif
# Selector 1: ANOVA F-test. With 'all', only fit and remember the chosen
# feature names; the actual column filtering happens later on the
# intersection of both selectors.
if selection_method == 'anova' or selection_method == 'all':
    select_k_best = SelectKBest(f_classif, k=topk)
    if selection_method != 'all':
        x_train = select_k_best.fit_transform(x_main, y_train)
        x_cv = select_k_best.transform(x_cv)
        x_test = select_k_best.transform(x_test)
    else:
        select_k_best.fit(x_main, y_train)
    selected_features_anova = itemgetter(*select_k_best.get_support(indices=True))(list_features)
    print(selected_features_anova)
    print(select_k_best.get_support(indices=True))
print("****************************************")
# Selector 2: mutual information, same fit-or-transform pattern as above.
if selection_method == 'mutual_info' or selection_method == 'all':
    select_k_best = SelectKBest(mutual_info_classif, k=topk)
    if selection_method != 'all':
        x_train = select_k_best.fit_transform(x_main, y_train)
        x_cv = select_k_best.transform(x_cv)
        x_test = select_k_best.transform(x_test)
    else:
        select_k_best.fit(x_main, y_train)
    selected_features_mic = itemgetter(*select_k_best.get_support(indices=True))(list_features)
    print(len(selected_features_mic), selected_features_mic)
    print(select_k_best.get_support(indices=True))
# -
if selection_method == 'all':
    # Keep only features chosen by BOTH the ANOVA and mutual-info selectors.
    common = list(set(selected_features_anova).intersection(selected_features_mic))
    print("common selected featues", len(common), common)
    if len(common) < num_features:
        raise Exception('number of common features found {} < {} required features. Increase "topk variable"'.format(len(common), num_features))
    # Map the surviving feature names back to their column indices.
    feat_idx = [list_features.index(c) for c in common]
    # FIX: the truncation length was hard-coded to 225; use num_features so
    # this stays consistent when the target image size changes.
    feat_idx = sorted(feat_idx[0:num_features])
    print(feat_idx)
# +
# Apply the intersected feature selection to all three splits.
if selection_method == 'all':
    x_train = x_train[:, feat_idx]
    x_cv = x_cv[:, feat_idx]
    x_test = x_test[:, feat_idx]
print("Shape of x, y train/cv/test {} {} {} {} {} {}".format(x_train.shape,
                                                             y_train.shape, x_cv.shape, y_cv.shape, x_test.shape, y_test.shape))
# -
# Class balance check.
# NOTE(review): this assumes exactly two classes (_counts[0]/_counts[1]);
# it would fail if a third label appears — confirm the label set.
_labels, _counts = np.unique(y_train, return_counts=True)
print("percentage of class 0 = {}, class 1 = {}".format(_counts[0]/len(y_train) * 100, _counts[1]/len(y_train) * 100))
# +
from sklearn.utils.class_weight import compute_class_weight
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.utils import get_custom_objects

def get_sample_weights(y):
    """
    calculate the sample weights based on class weights. Used for models with
    imbalanced data and one hot encoding prediction.

    params:
        y: class labels as integers
    returns:
        1-D float array, same length as y, holding each sample's balanced
        class weight.
    """
    y = y.astype(int)  # compute_class_weight needs int labels
    # FIX: these arguments are keyword-only in scikit-learn >= 1.0; the old
    # positional call raises a TypeError there.
    class_weights = compute_class_weight(class_weight='balanced',
                                         classes=np.unique(y), y=y)
    print("real class weights are {}".format(class_weights), np.unique(y))
    print("value_counts", np.unique(y, return_counts=True))
    sample_weights = y.copy().astype(float)
    for i in np.unique(y):
        # class_weights is indexed by the class's position in np.unique(y);
        # this works because labels here are 0..k-1.
        sample_weights[sample_weights == i] = class_weights[i]  # if i == 2 else 0.8 * class_weights[i]
        # sample_weights = np.where(sample_weights == i, class_weights[int(i)], y_)
    return sample_weights
def reshape_as_image(x, img_width, img_height):
    """Reshape each flat feature vector of `x` into an (img_height, img_width) image.

    params:
        x: array-like of shape (n, img_height * img_width)
    returns:
        float ndarray of shape (n, img_height, img_width); a copy, matching
        the original loop-based version that filled a np.zeros buffer.
    """
    # Vectorized: one reshape instead of a Python loop over the n rows.
    return np.array(x, dtype=float).reshape(len(x), img_height, img_width)
def f1_weighted(y_true, y_pred):
    """Weighted F1 built from raw TF ops, registered as a Keras metric below.

    NOTE(review): this implementation looks unfinished — both while-loop
    constructs start at i = 3 with condition i < 3 (bodies never run), and
    tf.constant tensors do not support .assign(). Kept as-is apart from the
    NameError fix at the bottom; prefer `f1_metric` until this is reworked.
    """
    y_true_class = tf.math.argmax(y_true, axis=1, output_type=tf.dtypes.int32)
    y_pred_class = tf.math.argmax(y_pred, axis=1, output_type=tf.dtypes.int32)
    conf_mat = tf.math.confusion_matrix(y_true_class, y_pred_class)  # can use conf_mat[0, :], tf.slice()
    # precision = TP/TP+FP, recall = TP/TP+FN
    rows, cols = conf_mat.get_shape()
    size = y_true_class.get_shape()[0]
    precision = tf.constant([0, 0, 0])  # change this to use rows/cols as size
    recall = tf.constant([0, 0, 0])
    class_counts = tf.constant([0, 0, 0])

    def get_precision(i, conf_mat):
        # Per-class precision/recall from column/row sums of the confusion matrix.
        print("prec check", conf_mat, conf_mat[i, i], tf.reduce_sum(conf_mat[:, i]))
        precision[i].assign(conf_mat[i, i] / tf.reduce_sum(conf_mat[:, i]))
        recall[i].assign(conf_mat[i, i] / tf.reduce_sum(conf_mat[i, :]))
        tf.add(i, 1)
        return i, conf_mat, precision, recall

    def tf_count(i):
        # Count how many samples belong to class i (for the F1 weights).
        elements_equal_to_value = tf.equal(y_true_class, i)
        as_ints = tf.cast(elements_equal_to_value, tf.int32)
        count = tf.reduce_sum(as_ints)
        class_counts[i].assign(count)
        tf.add(i, 1)
        return count

    def condition(i, conf_mat):
        return tf.less(i, 3)

    i = tf.constant(3)
    i, conf_mat = tf.while_loop(condition, get_precision, [i, conf_mat])
    i = tf.constant(3)
    c = lambda i: tf.less(i, 3)
    b = tf_count(i)
    tf.while_loop(c, b, [i])
    # Weighted F1 = sum_c weight_c * 2*P_c*R_c / (P_c + R_c)
    weights = tf.math.divide(class_counts, size)
    numerators = tf.math.multiply(tf.math.multiply(precision, recall), tf.constant(2))
    denominators = tf.math.add(precision, recall)
    f1s = tf.math.divide(numerators, denominators)
    # FIX: `f.math.multiply` raised NameError (`f` is undefined) — must be tf.
    weighted_f1 = tf.reduce_sum(tf.math.multiply(f1s, weights))
    return weighted_f1
def f1_metric(y_true, y_pred):
    """
    this calculates precision & recall
    """
    # Keras-backend F1: precision and recall computed batch-wise from the
    # one-hot y_true and the (rounded) y_pred probabilities.
    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # mistake: y_pred of 0.3 is also considered 1
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    # y_true_class = tf.math.argmax(y_true, axis=1, output_type=tf.dtypes.int32)
    # y_pred_class = tf.math.argmax(y_pred, axis=1, output_type=tf.dtypes.int32)
    # conf_mat = tf.math.confusion_matrix(y_true_class, y_pred_class)
    # tf.Print(conf_mat, [conf_mat], "confusion_matrix")
    # Harmonic mean of precision and recall; epsilon avoids division by zero.
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

# Register both metrics so they can be referenced by name in model.compile().
get_custom_objects().update({"f1_metric": f1_metric, "f1_weighted": f1_weighted})
# -
sample_weights = get_sample_weights(y_train)
# Spot-check: samples of the rarer class should show larger weights.
print("Test sample_weights")
rand_idx = np.random.randint(0, 1000, 30)
print(y_train[rand_idx])
print(sample_weights[rand_idx])
# NOTE(review): `sparse=` was renamed to `sparse_output` in scikit-learn 1.2
# and removed in 1.4 — update this call when bumping sklearn.
one_hot_enc = OneHotEncoder(sparse=False, categories='auto')  # , categories='auto'
y_train = one_hot_enc.fit_transform(y_train.reshape(-1, 1))
print("y_train",y_train.shape)
y_cv = one_hot_enc.transform(y_cv.reshape(-1, 1))
y_test = one_hot_enc.transform(y_test.reshape(-1, 1))
# Fold the flat feature vectors into dim x dim "images" (num_features is a
# perfect square by construction above).
dim = int(np.sqrt(num_features))
x_train = reshape_as_image(x_train, dim, dim)
x_cv = reshape_as_image(x_cv, dim, dim)
x_test = reshape_as_image(x_test, dim, dim)
# adding a 1-dim for channels (3)
x_train = np.stack((x_train,) * 3, axis=-1)
x_test = np.stack((x_test,) * 3, axis=-1)
x_cv = np.stack((x_cv,) * 3, axis=-1)
print("final shape of x, y train/test {} {} {} {}".format(x_train.shape, y_train.shape, x_test.shape, y_test.shape))
# +
from matplotlib import pyplot as plt
# %matplotlib inline
# Show nine randomly chosen training "images" with their class in the title.
fig = plt.figure(figsize=(15, 15))
columns = rows = 3
for cell in range(1, columns * rows + 1):
    index = np.random.randint(len(x_train))
    img = x_train[index]
    fig.add_subplot(rows, columns, cell)
    plt.axis("off")
    plt.title('image_' + str(index) + '_class_' + str(np.argmax(y_train[index])), fontsize=10)
    plt.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.imshow(img)
plt.show()
# +
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, LeakyReLU
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, Callback
from tensorflow.keras import optimizers
from tensorflow.keras.regularizers import l2, l1, l1_l2
from tensorflow.keras.initializers import RandomUniform, RandomNormal
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
# Hyper-parameters consumed by create_model_cnn below: two conv blocks
# (filters/kernel/stride/dropout/max-pool/L2) plus one dense block.
# Presumably the result of a prior hyper-parameter search — TODO confirm.
params = {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.22, 'conv2d_filters_1': 35, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2,
                                              'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0, 'conv2d_do_2': 0.05,
                                              'conv2d_filters_2': 20, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2,
                                              'kernel_regularizer_2': 0.0, 'layers': 'two'},
          'dense_layers': {'dense_do_1': 0.22, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.0, 'layers': 'one'},
          'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
# Original paper CNN params: input layer (15x15), two convolutional layers (15x15x32, 15x15x64),
# a max pooling (7x7x64), two dropout (0.25, 0.50), fully connected layers (128), and an out-
# put layer (3). stride?
# params = {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.25, 'conv2d_filters_1': 32, 'conv2d_kernel_size_1': 3, 'conv2d_mp_1': 0,
#                                               'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0, 'conv2d_do_2': 0.5,
#                                               'conv2d_filters_2': 64, 'conv2d_kernel_size_2': 3, 'conv2d_mp_2': 7, 'conv2d_strides_2': 1,
#                                               'kernel_regularizer_2': 0.0, 'layers': 'two'},
#           'dense_layers': {'dense_do_1': 0.0, 'dense_nodes_1': 128, 'kernel_regularizer_1': 0.0, 'layers': 'one'},
#           'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
# +
from functools import *
from sklearn.metrics import f1_score
from tensorflow.keras.metrics import AUC

def f1_custom(y_true, y_pred):
    """Weighted F1 over one-hot labels (numpy/sklearn version, not a TF op).

    FIX: the original computed the score but never returned it, so callers
    always received None.
    """
    y_t = np.argmax(y_true, axis=1)
    y_p = np.argmax(y_pred, axis=1)
    return f1_score(y_t, y_p, labels=None, average='weighted', sample_weight=None, zero_division='warn')
def create_model_cnn(params):
    """Build and compile a 1-2 conv-block CNN with a 3-way softmax head.

    Relies on notebook globals: `Sequential` (imported elsewhere), `x_train`
    (only its per-sample shape is read) and `f1_metric` (a Keras metric).

    Parameters
    ----------
    params : dict
        Nested dict with 'conv2d_layers', 'dense_layers', 'lr', 'optimizer'.

    Returns
    -------
    A compiled keras Sequential model.
    """
    model = Sequential()
    print("Training with params {}".format(params))
    # (batch_size, timesteps, data_dim)
    # x_train, y_train = get_data_cnn(df, df.head(1).iloc[0]["timestamp"])[0:2]
    conv2d_layer1 = Conv2D(params["conv2d_layers"]["conv2d_filters_1"],
                           params["conv2d_layers"]["conv2d_kernel_size_1"],
                           strides=params["conv2d_layers"]["conv2d_strides_1"],
                           kernel_regularizer=regularizers.l2(params["conv2d_layers"]["kernel_regularizer_1"]),
                           padding='valid',activation="relu", use_bias=True,
                           kernel_initializer='glorot_uniform',
                           input_shape=(x_train[0].shape[0],
                                        x_train[0].shape[1], x_train[0].shape[2]))
    model.add(conv2d_layer1)
    # NOTE(review): pooling is only added when the mp flag equals exactly 1,
    # but the params dict above sets conv2d_mp_1/conv2d_mp_2 to 2 -- confirm
    # whether max pooling was meant to be enabled there.
    if params["conv2d_layers"]['conv2d_mp_1'] == 1:
        model.add(MaxPool2D(pool_size=2))
    model.add(Dropout(params['conv2d_layers']['conv2d_do_1']))
    if params["conv2d_layers"]['layers'] == 'two':
        conv2d_layer2 = Conv2D(params["conv2d_layers"]["conv2d_filters_2"],
                               params["conv2d_layers"]["conv2d_kernel_size_2"],
                               strides=params["conv2d_layers"]["conv2d_strides_2"],
                               kernel_regularizer=regularizers.l2(params["conv2d_layers"]["kernel_regularizer_2"]),
                               padding='valid',activation="relu", use_bias=True,
                               kernel_initializer='glorot_uniform')
        model.add(conv2d_layer2)
        if params["conv2d_layers"]['conv2d_mp_2'] == 1:
            model.add(MaxPool2D(pool_size=2))
        model.add(Dropout(params['conv2d_layers']['conv2d_do_2']))
    model.add(Flatten())
    model.add(Dense(params['dense_layers']["dense_nodes_1"], activation='relu'))
    model.add(Dropout(params['dense_layers']['dense_do_1']))
    if params['dense_layers']["layers"] == 'two':
        model.add(Dense(params['dense_layers']["dense_nodes_2"], activation='relu',
                        kernel_regularizer=params['dense_layers']["kernel_regularizer_1"]))
        model.add(Dropout(params['dense_layers']['dense_do_2']))
    # 3-class softmax output (the 'hold' class mentioned in check_baseline
    # suggests buy/sell/hold -- confirm class semantics).
    model.add(Dense(3, activation='softmax'))
    if params["optimizer"] == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=params["lr"])
    elif params["optimizer"] == 'sgd':
        optimizer = optimizers.SGD(lr=params["lr"], decay=1e-6, momentum=0.9, nesterov=True)
    elif params["optimizer"] == 'adam':
        optimizer = optimizers.Adam(learning_rate=params["lr"], beta_1=0.9, beta_2=0.999, amsgrad=False)
    # NOTE(review): metrics reference `f1_metric`, but only `f1_custom` is
    # defined in this file -- confirm f1_metric exists elsewhere in the notebook.
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy', f1_metric])
    # from keras.utils.vis_utils import plot_model use this too for diagram with plot
    # model.summary(print_fn=lambda x: print(x + '\n'))
    return model
def check_baseline(pred, y_test):
    """Print test-set statistics and the majority-class ('hold') baseline accuracy.

    Assumes classes 0, 1, 2 all occur in `y_test`; class 2 is the 'hold' class.
    """
    n = len(y_test)
    print("size of test set", n)
    matches = np.equal(pred, y_test)
    print("TP class counts", np.unique(y_test[matches], return_counts=True))
    print("True class counts", np.unique(y_test, return_counts=True))
    print("Pred class counts", np.unique(pred, return_counts=True))
    _, class_counts = np.unique(y_test, return_counts=True)
    holds = class_counts[2]  # number 'hold' labels (class index 2)
    print("baseline acc:", holds / n * 100)
# +
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot, plot_model
# Build the CNN from the params dict above and save an architecture diagram.
model = create_model_cnn(params)
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=False)
# SVG(model_to_dot(model).create(prog='dot', format='svg'))
# +
import os
best_model_path = os.path.join('.', 'best_model_keras')
# Stop training when val_loss has not improved by >= 1e-4 for 100 epochs.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                   patience=100, min_delta=0.0001)
# csv_logger = CSVLogger(os.path.join(OUTPUT_PATH, 'log_training_batch.log'), append=True)
# NOTE(review): factor=0.02 multiplies the LR by 0.02 on each plateau (a ~50x
# cut) -- confirm this aggressive schedule is intended rather than e.g. 0.2.
rlp = ReduceLROnPlateau(monitor='val_loss', factor=0.02, patience=20, verbose=1, mode='min',
                        min_delta=0.001, cooldown=1, min_lr=0.0001)
# Checkpoint the model with the best validation F1 (maximized val_f1_metric).
mcp = ModelCheckpoint(best_model_path, monitor='val_f1_metric', verbose=1,
                      save_best_only=True, save_weights_only=False, mode='max', period=1) # val_f1_metric
# -
# %%time
# NOTE(review): batch_size=64 here overrides params['batch_size'] (80) --
# confirm which value is intended.
history = model.fit(x_train, y_train, epochs=params['epochs'], verbose=1,
                    batch_size=64, shuffle=True,
                    # validation_split=0.3,
                    validation_data=(x_cv, y_cv),
                    callbacks=[mcp, rlp, es]
                    , sample_weight=sample_weights)
# +
from matplotlib import pyplot as plt
# %matplotlib inline
# NOTE(review): InteractiveShell is not imported anywhere in this file --
# this line needs `from IPython.core.interactiveshell import InteractiveShell`.
InteractiveShell.ast_node_interactivity = "last"
plt.figure()
# Training curves: loss and the custom F1 metric, train vs validation.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['f1_metric'])
plt.plot(history.history['val_f1_metric'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train_loss', 'val_loss', 'f1', 'val_f1'], loc='upper left')
plt.show()
# -
#
# +
from sklearn.metrics import confusion_matrix, roc_auc_score, cohen_kappa_score
import seaborn as sns
# Reload the best checkpoint (by val_f1_metric) and evaluate on the test set.
model = load_model(best_model_path)
test_res = model.evaluate(x_test, y_test, verbose=0)
print("keras evaluate=", test_res)
pred = model.predict(x_test)
# Convert one-hot targets / class probabilities to integer class ids.
pred_classes = np.argmax(pred, axis=1)
y_test_classes = np.argmax(y_test, axis=1)
check_baseline(pred_classes, y_test_classes)
conf_mat = confusion_matrix(y_test_classes, pred_classes)
print(conf_mat)
labels = [0,1,2]
# ax = sns.heatmap(conf_mat, xticklabels=labels, yticklabels=labels, annot=True)
# ax.xaxis.set_ticks_position('top')
f1_weighted = f1_score(y_test_classes, pred_classes, labels=None,
                       average='weighted', sample_weight=None)
print("F1 score (weighted)", f1_weighted)
print("F1 score (macro)", f1_score(y_test_classes, pred_classes, labels=None,
                                   average='macro', sample_weight=None))
print("F1 score (micro)", f1_score(y_test_classes, pred_classes, labels=None,
                                   average='micro', sample_weight=None)) # weighted and micro preferred in case of imbalance
# https://scikit-learn.org/stable/modules/model_evaluation.html#cohen-s-kappa --> supports multiclass; ref: https://stats.stackexchange.com/questions/82162/cohens-kappa-in-plain-english
print("cohen's Kappa", cohen_kappa_score(y_test_classes, pred_classes))
prec = []
# NOTE(review): sklearn's confusion_matrix has true labels on rows, so
# row[i]/row.sum() is per-class RECALL, not precision -- the printed label
# below is misleading; confirm which metric was intended.
for i, row in enumerate(conf_mat):
    prec.append(np.round(row[i]/np.sum(row), 2))
    print("precision of class {} = {}".format(i, prec[i]))
print("precision avg", sum(prec)/len(prec))
# -
# * > baseline acc: 87.33333333333333
# > [[ 46 0 12]
# > [ 0 43 13]
# > [ 43 40 703]]
# > F1 score (weighted) 0.8898645661513367
# > F1 score (macro) 0.7244070540348785
# > F1 score (micro) 0.88
# > precision of class 0 = 0.79
# > precision of class 1 = 0.77
# > precision of class 2 = 0.89
# -------------------------------------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.01, 'conv2d_filters_1': 25, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.01, 'conv2d_do_2': 0.01, 'conv2d_filters_2': 12, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.01, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.01, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
# > baseline acc: 87.33333333333333
# > [[ 47 0 11]
# > [ 0 40 16]
# > [ 42 38 706]]
# > F1 score (weighted) 0.8901717584267965
# > F1 score (macro) 0.7220098760094632
# > F1 score (micro) 0.8811111111111111
# > precision of class 0 = 0.81
# > precision of class 1 = 0.71
# > precision of class 2 = 0.9
# -------------------------------------------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.015, 'conv2d_filters_1': 25, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.01, 'conv2d_do_2': 0.015, 'conv2d_filters_2': 12, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.015, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.01, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
#
# baseline acc: 87.33333333333333
# [[ 51 0 7]
# [ 0 40 16]
# [ 34 33 719]]
# F1 score (weighted) 0.9064483333587193
# F1 score (macro) 0.7581804094953979
# F1 score (micro) 0.9
# precision of class 0 = 0.88
# precision of class 1 = 0.71
# precision of class 2 = 0.91
#
# --------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.015, 'conv2d_filters_1': 25, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.01, 'conv2d_do_2': 0.015, 'conv2d_filters_2': 12, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.01, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.015, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.01, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
# baseline acc: 87.33333333333333
# [[ 48 0 10]
# [ 0 42 14]
# [ 63 51 672]]
# F1 score (weighted) 0.8636965910666424
# F1 score (macro) 0.679562772544323
# F1 score (micro) 0.8466666666666667
# precision of class 0 = 0.83
# precision of class 1 = 0.75
# precision of class 2 = 0.85
#
# ---------------------------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.015, 'conv2d_filters_1': 25, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.01, 'conv2d_do_2': 0.015, 'conv2d_filters_2': 12, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.015, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.02, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
#
# baseline acc: 87.33333333333333
# [[ 51 0 7]
# [ 0 41 15]
# [ 34 31 721]]
# F1 score (weighted) 0.9094692008630857
# F1 score (macro) 0.7656705928963122
# F1 score (micro) 0.9033333333333333
# precision of class 0 = 0.88
# precision of class 1 = 0.73
# precision of class 2 = 0.92
#
# ------------------------------------------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.015, 'conv2d_filters_1': 25, 'conv2d_kernel_size_1': 2,
# 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.01,
# 'conv2d_do_2': 0.015, 'conv2d_filters_2': 12, 'conv2d_kernel_size_2': 2,
# 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'},
# 'dense_layers': {'dense_do_1': 0.015, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.015, 'layers': 'one'}, 'epochs': 3000,
# 'lr': 0.001, 'optimizer': 'adam'}
#
# [[ 52 0 6]
# [ 0 42 14]
# [ 42 41 703]]
# F1 score (weighted) 0.8954175974904913
# F1 score (macro) 0.7400899830517504
# F1 score (micro) 0.8855555555555555
# precision of class 0 = 0.9
# precision of class 1 = 0.75
# precision of class 2 = 0.89
#
# -------------------------------------------------------
# IBM
# ----------------------------------------------------------------------------
# * **{'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.2, 'conv2d_filters_1': 30, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0, 'conv2d_do_2': 0.05, 'conv2d_filters_2': 15, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.2, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.0, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}**
#
# baseline acc: 87.44444444444444
#
# [[ 46 0 12]
# [ 0 47 8]
# [ 35 53 699]]
#
# F1 score (weighted) 0.8914500898959538
#
# F1 score (macro) 0.7322029896966632
#
# F1 score (micro) 0.88
#
# cohen's Kappa 0.5845248323352525, sometimes 0.61
#
# precision of class 0 = 0.79
#
# precision of class 1 = 0.85
#
# precision of class 2 = 0.89
#
# --------------------------------------------------
# * {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.2, 'conv2d_filters_1': 30, 'conv2d_kernel_size_1': 2,
# 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0,
# 'conv2d_do_2': 0.07, 'conv2d_filters_2': 20, 'conv2d_kernel_size_2': 2,
# 'conv2d_mp_2': 3, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'},
# 'dense_layers': {'dense_do_1': 0.2, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.0, 'layers': 'one'}, 'epochs': 3000,
# 'lr': 0.001, 'optimizer': 'adam'}
#
# baseline acc: 87.44444444444444
# [[ 42 0 16]
# [ 0 44 11]
# [ 31 39 717]]
# F1 score (weighted) 0.8993345798690069
# F1 score (macro) 0.7385150835481354
# F1 score (micro) 0.8922222222222224
# cohen's Kappa 0.5952205422097341
# precision of class 0 = 0.72
# precision of class 1 = 0.8
# precision of class 2 = 0.91
# precision avg 0.81
#
# WMT
# ------------------------------------------------------
# {'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.2, 'conv2d_filters_1': 30, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0, 'conv2d_do_2': 0.05, 'conv2d_filters_2': 15, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.2, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.0, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}
#
# baseline acc: 87.33333333333333
# [[ 52 0 6]
# [ 0 44 12]
# [ 47 42 697]]
# F1 score (weighted) 0.8923266236465278
# F1 score (macro) 0.7369509608548
# F1 score (micro) 0.8811111111111111
# cohen's Kappa 0.5944443508582787
# precision of class 0 = 0.9
# precision of class 1 = 0.79
# precision of class 2 = 0.89
# precision avg 0.86
# NOTE: same config with strides_1=2 gave exceptional (90+%) accuracy on classes 0,1 but class2 84%.
#
# * **{'batch_size': 80, 'conv2d_layers': {'conv2d_do_1': 0.22, 'conv2d_filters_1': 35, 'conv2d_kernel_size_1': 2, 'conv2d_mp_1': 2, 'conv2d_strides_1': 1, 'kernel_regularizer_1': 0.0, 'conv2d_do_2': 0.05, 'conv2d_filters_2': 20, 'conv2d_kernel_size_2': 2, 'conv2d_mp_2': 2, 'conv2d_strides_2': 2, 'kernel_regularizer_2': 0.0, 'layers': 'two'}, 'dense_layers': {'dense_do_1': 0.22, 'dense_nodes_1': 100, 'kernel_regularizer_1': 0.0, 'layers': 'one'}, 'epochs': 3000, 'lr': 0.001, 'optimizer': 'adam'}**
#
# baseline acc: 87.33333333333333
# [[ 53 0 5]
# [ 0 45 11]
# [ 39 30 717]]
# F1 score (weighted) 0.9127522951482708
# F1 score (macro) 0.7792439001374168
# F1 score (micro) 0.9055555555555556
# cohen's Kappa 0.6589784510043419
# precision of class 0 = 0.91
# precision of class 1 = 0.8
# precision of class 2 = 0.91
# precision avg 0.8733333333333334
# +
# suffix = '_'+str(np.round(test_res[1],2))+'_'+str(np.round(f1_weighted,2))
# model_path = os.path.join('..', 'outputs', 'model_'+company_code+suffix+'.h5')
# model.save(model_path)
# print("model save path", os.path.join('..', 'outputs', 'model_'+company_code+suffix+'.h5'))
# + _cell_guid="" _uuid=""
# Quick sanity check: F1 is the harmonic mean of precision and recall.
p = 0.9
r = 0.5
(2*p*r)/(p+r)  # -> 0.642857...
| src/stock_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import torch
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
import pickle as pickle
import gzip as gzip
import matplotlib.pyplot as plt
# + pycharm={"name": "#%%\n"}
def load_data():
    """Load the pickled MNIST dataset from ./mnist.pkl.gz.

    Returns
    -------
    tuple
        (training_data, validation_data, test_data) as stored in the pickle.
    """
    # Use a context manager so the file handle is always closed (the
    # original opened the file and never closed it).
    with gzip.open("mnist.pkl.gz", 'rb') as f:
        training_data, validation_data, test_data = pickle.load(f, encoding="bytes")
    return training_data, validation_data, test_data
training_data, validation_data, test_data = load_data()
# Wrap the arrays as torch tensors: float features, int64 (Long) labels as
# required by CrossEntropyLoss.
x_train = torch.FloatTensor(training_data[0])
y_train = torch.LongTensor(training_data[1])
x_test = torch.FloatTensor(test_data[0])
y_test = torch.LongTensor(test_data[1])
# + pycharm={"name": "#%%\n"}
class Model(nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Linear (raw logits out)."""

    def __init__(self, ip_size, hidden_size, op_size):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(ip_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, op_size)

    def forward(self, x):
        """Return unnormalized class scores for input batch `x`."""
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
model = Model(784,200,10)  # MNIST: 784 input pixels, 200 hidden units, 10 digit classes
print(model)
# + pycharm={"name": "#%%\n"}
criterion = nn.CrossEntropyLoss()  # expects raw logits and integer class targets
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
# + pycharm={"name": "#%%\n"}
def check_accuracy(check_x, check_y, model):
    """Print classification accuracy of `model` on (check_x, check_y).

    Temporarily switches the model to eval mode and restores train mode
    before returning. Returns None (result is printed only).
    """
    model.eval()
    with torch.no_grad():
        logits = model(check_x)
        # argmax over the class dimension == indices from torch.max(dim=1)
        predictions = logits.argmax(dim=1)
        correct = (predictions == check_y).sum().item()
        samples = predictions.size(0)
    accuracy = round((correct/samples) * 100,4)
    print(f'Accuracy is {correct} / {samples} = {accuracy}')
    model.train()
# + pycharm={"name": "#%%\n"}
# Full-batch training loop (no mini-batching); prints accuracy every 10 epochs.
epochs = 100
losses = []
for i in range(epochs):
    # Call the module, not model.forward(), so forward hooks are honored.
    y_pred = model(x_train)
    loss = criterion(y_pred, y_train)
    # Store a detached copy: appending the live `loss` tensor would keep the
    # autograd graph of every epoch alive and steadily grow memory. The plot
    # cell below still works, since .item() is valid on a detached tensor.
    losses.append(loss.detach())
    if i % 10 == 0:
        print(f'Epoch: {i} Loss: {loss}')
        check_accuracy(x_test, y_test, model)
        check_accuracy(x_train, y_train, model)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# + pycharm={"name": "#%%\n"}
# Loss curve over epochs; .item() extracts the Python scalar from each tensor.
plt.plot(range(epochs),[loss.item() for loss in losses])
plt.ylabel('Loss')
plt.xlabel('epoch')
| OCR_PyTorch0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
#
# Symbol Value
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
# For example, two is written as II in Roman numeral, just two ones added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
#
# Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
#
# I can be placed before V (5) and X (10) to make 4 and 9.
# X can be placed before L (50) and C (100) to make 40 and 90.
# C can be placed before D (500) and M (1000) to make 400 and 900.
# Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
#
# Example 1:
#
# Input: "III"
# Output: 3
# Example 2:
#
# Input: "IV"
# Output: 4
# Example 3:
#
# Input: "IX"
# Output: 9
# Example 4:
#
# Input: "LVIII"
# Output: 58
# Explanation: L = 50, V= 5, III = 3.
# Example 5:
#
# Input: "MCMXCIV"
# Output: 1994
# Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral (1..3999, or "") to its integer value.

        Scans right-to-left: a symbol smaller than the one to its right is
        subtracted (IV, IX, XL, ...), otherwise it is added. Empty input
        yields 0.
        """
        values = {
            'I': 1,
            'V': 5,
            'X': 10,
            'L': 50,
            'C': 100,
            'D': 500,
            'M': 1000
        }
        total = 0
        prev = 0
        for ch in reversed(s):
            current = values[ch]
            if current < prev:
                total -= current
            else:
                total += current
            prev = current
        return total
| Top 100 interview Questions/13. Roman to Integer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# !pip install matplotlib
# +
import h5py
import io
import logging
import math
import numpy as np
import random
import requests
import torch
import zarr
# +
#@title utility function to download / save data as zarr
def create_data(
        url,
        name,
        offset,
        resolution,
        sections=None,
        squeeze=True):
    """Download an HDF5 volume and re-save selected sections as zarr.

    Parameters
    ----------
    url : str          -- HTTP location of the HDF5 file (downloaded in full).
    name : str         -- path of the zarr group to create/append to.
    offset, resolution -- stored as attrs on every raw/label array.
    sections           -- iterable of section indices; defaults to all but the
                          last section of the volume.
    squeeze : bool     -- drop the singleton z-dimension of each slice.
    """
    # Context manager ensures the in-memory HDF5 handle is closed (the
    # original left it open). All reads happen inside the `with` block.
    with h5py.File(io.BytesIO(requests.get(url).content), 'r') as in_f:
        raw = in_f['volumes/raw']
        labels = in_f['volumes/labels/neuron_ids']
        f = zarr.open(name, 'a')
        if sections is None:
            sections = range(raw.shape[0]-1)
        for i, r in enumerate(sections):
            print(f'Writing data for section {r}')
            raw_slice = raw[r:r+1, :, :]
            labels_slice = labels[r:r+1, :, :]
            if squeeze:
                raw_slice = np.squeeze(raw_slice)
                labels_slice = np.squeeze(labels_slice)
            f[f'raw/{i}'] = raw_slice
            f[f'labels/{i}'] = labels_slice
            f[f'raw/{i}'].attrs['offset'] = offset
            f[f'raw/{i}'].attrs['resolution'] = resolution
            f[f'labels/{i}'].attrs['offset'] = offset
            f[f'labels/{i}'].attrs['resolution'] = resolution
# -
# Let's use some CREMI challenge data as an example (downloads the full
# sample A HDF5 volume, then converts it to zarr on disk).
create_data(
    'http://cremi.org/static/data/sample_A_20160501.hdf',
    'Data/training_data.zarr',
    offset=[0,0],
    resolution=[4,4])
# +
# conda install jupyter pytorch tensorboard h5py
from torch.utils.data import Dataset
import h5py
class ISBIDataset(Dataset):
    """HDF5-backed dataset returning (raw image tensor, affinity label tensor).

    NOTE(review): superseded by the zarr-based ISBIDataset redefined later in
    this notebook; assumes the file contains 'raw' and 'affinities' datasets
    -- confirm before use, the zarr conversion above writes 'raw'/'labels'.
    """
    def __init__(self,filename):
        self.filename = filename
        # Sample count is read once at construction time.
        self.samples = self.get_num_samples()
    def __len__(self):
        return self.samples
    def get_num_samples(self):
        # Open per call so the Dataset object itself holds no file handle.
        with h5py.File(self.filename) as f:
            samples = f['raw'].shape[0]
        return samples
    def __getitem__(self,index):
        with h5py.File(self.filename) as f:
            x = f['raw'][index]
            y = f['affinities'][0,index]
        # Add a channel dimension to the raw image; labels become int64.
        return torch.tensor(x).unsqueeze(0),torch.tensor(y).long()
# -
import os
os.getcwd()
# +
from torch.utils.data import Dataset
import h5py
class ISBIDataset(Dataset):
    """Zarr-backed dataset yielding (raw image, 2-channel affinity map) pairs.

    Sample `i` lives under `raw/{i}` and `labels/{i}` in the zarr group
    written by `create_data` above.
    """
    def __init__(self,filename):
        self.filename = filename
        self.samples = self.get_num_samples()

    def __len__(self):
        return self.samples

    def get_num_samples(self):
        # BUG FIX: zarr.open returns a Group, which is not a context manager,
        # so the original `with zarr.open(...)` failed at runtime. Read-only
        # zarr groups need no explicit closing.
        f = zarr.open(self.filename, 'r')
        return len(list(f['raw']))

    def affinities(self, y, pad_size=1):
        """Return a (2, H, W) boolean affinity map for label image `y`.

        Channel 0: equality with the pixel `pad_size` rows up; channel 1:
        equality with the pixel `pad_size` columns left. Zero-padding keeps
        the output the same spatial size as `y` (the first `pad_size`
        rows/columns are False). FIX: the pad width now follows `pad_size`
        (the original hard-coded 1, which broke shapes for pad_size > 1);
        behavior for the default pad_size=1 is unchanged.
        """
        # shift along rows
        aff1 = y[:-pad_size, :] == y[pad_size:, :]
        aff1 = np.pad(aff1, ((pad_size, 0), (0, 0)))
        # shift along columns
        aff2 = y[:, :-pad_size] == y[:, pad_size:]
        aff2 = np.pad(aff2, ((0, 0), (pad_size, 0)))
        return np.stack((aff1, aff2), axis=0)

    def __getitem__(self,index):
        f = zarr.open(self.filename, 'r')
        x = f[f'raw/{index}']
        y = f[f'labels/{index}']
        return (x, self.affinities(y))
# -
# def affinity(self, y, pads=(1, 0)):
# padder=[[i, 0] for i in pads]
# slicer= [slice(0, -p) if p!=0 else slice(None) for p in pads]
# y1=np.pad(y, pad_width=padder)[slicer]
# aff = y==y1
# return aff
#
# def affinities(self, y, pads = ((1,0), (0,1), (1,1))):
# y_shape=y.shape
# dims = len(pads)
# affinities=np.zeros((dims, *y_shape))
# affinities[p,...] = self.affinity(y, pad)
# return affinities
# Quick interactive sanity checks on the converted zarr training data.
fn='Data/training_data.zarr'
f = zarr.open(fn, 'r')
list(f.keys())
len(list(f['raw']))
f['labels/0'][:].shape
arlosmagicdata=ISBIDataset('Data/training_data.zarr')
arlosmagicdata[0][1][0]
import matplotlib.pyplot as plt
# Visualize the first (row-shift) affinity channel of sample 0.
plt.imshow(arlosmagicdata[0][1][0])
f.info
list(f.keys())
# ! pip install zarr[jupyter]
f.tree()
f.info
| notebooks/Test-LoadingTrainingDataAndAffinities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# !cp ../input/gdcm-conda-install/gdcm.tar .
# !tar -xvzf gdcm.tar
# !conda install --offline ./gdcm/gdcm-2.8.9-py37h71b2a6d_0.tar.bz2
# -
import sys
sys.path.append('../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master')
# +
import pandas as pd
import numpy as np
import os
import time
import random
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler # for training only, need nightly build pytorch
import pydicom
from efficientnet_pytorch import EfficientNet
from scipy.ndimage.interpolation import zoom
from albumentations import Compose, HorizontalFlip, VerticalFlip, RandomRotate90
from albumentations.pytorch import ToTensorV2
# +
# Configurations
# Recommended input resolution per EfficientNet variant.
img_inp = {'b0' : 224,
          'b1' : 240,
          'b2' : 260,
          'b3' : 300,
          'b4' : 380,
          'b5' : 456,
          'b6' : 528,
          'b7' : 600}
# Local (offline) ImageNet checkpoints for each EfficientNet variant.
pretrained_model = {
    'efficientnet-b0': '../input/efficientnet-pytorch/efficientnet-b0-08094119.pth',
    'efficientnet-b1': '../input/efficientnet-pytorch/efficientnet-b1-dbc7070a.pth',
    'efficientnet-b2': '../input/efficientnet-pytorch/efficientnet-b2-27687264.pth',
    'efficientnet-b3': '../input/efficientnet-pytorch/efficientnet-b3-c8376fa2.pth',
    'efficientnet-b4': '../input/efficientnet-pytorch/efficientnet-b4-e116e8b3.pth',
    'efficientnet-b5': '../input/efficientnet-pytorch/efficientnet-b5-586e6cc6.pth',
    'efficientnet-b6': '../input/efficientnet-pytorch/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': '../input/efficientnet-pytorch/efficientnet-b7-dcc49843.pth',
}
# Single experiment configuration used throughout the pipeline below.
# 'img_size' should match img_inp for the chosen 'efbnet'.
CFG = {
    'train': True,
    'train_img_path': '../input/rsna-str-pulmonary-embolism-detection/train',
    'test_img_path': '../input/rsna-str-pulmonary-embolism-detection/test',
    'cv_fold_path': '../input/samplersna/rsna_train_splits_fold_20.csv',
    'train_path': '../input/rsna-str-pulmonary-embolism-detection/train.csv',
    'test_path': '../input/rsna-str-pulmonary-embolism-detection/test.csv',
    'image_target_cols': [
        'pe_present_on_image',
        ],
    'exam_target_cols': [
        'pe_present_on_image',
        'negative_exam_for_pe',
        'indeterminate',
        'both_no', # Added new column
        'rv_lv_ratio_gte_1',
        'rv_lv_ratio_lt_1',
        'chronic_pe',
        'acute_and_chronic_pe',
        'acute_pe', # Added new column
        'leftsided_pe',
        'central_pe',
        'rightsided_pe',
        'qa_motion',
        'qa_contrast',
        'flow_artifact',
        'true_filling_defect_not_pe'
        ],
    'lr': 0.0005,
    'epochs': 1,
    'device': 'cuda', # cuda, cpu
    'train_bs': 64,
    'valid_bs': 64,
    'accum_iter': 1,
    'verbose_step': 1,
    'num_workers': 0,
    'efbnet': 'efficientnet-b3', # change here
    'img_size': 300, # change here
    'effnet_fc': 128,
    'metadata_feats': 26,
    'train_folds': [
        # [1, 2, 3, 4],
        # [0, 2, 3, 4],
        # [0, 1, 3, 4],
        # [0, 1, 2, 4],
        [5, 6, 7, 8]
    ],
    'valid_folds': [
        # [0],
        # [1],
        # [2],
        # [3],
        [9]
    ],
    'stage_model_path': '../input/rsna-pre-models',
    'model_path': '../working',
    'tag': 'stage1'
}
# +
# Seed
SEED = 42
def seed_everything(seed):
    """Seed every RNG in use (python, hash, numpy, torch CPU/CUDA) for reproducibility.

    Parameters
    ----------
    seed : int
        Seed applied to all random number generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # BUG FIX: benchmark=True lets cuDNN auto-select (possibly
    # nondeterministic) algorithms, defeating deterministic=True above;
    # it must be False for reproducible runs.
    torch.backends.cudnn.benchmark = False
# -
# pre-process train df
def preprocess_DF(df):
    """Add derived exam-level label columns to the train dataframe (in place).

    Adds:
    - 'both_no'  : 1 when the exam is neither negative nor indeterminate.
    - 'acute_pe' : 1 when the PE is neither chronic nor acute-and-chronic,
                   forced to 0 whenever both_no == 0.

    Returns the same dataframe for chaining. Vectorized column arithmetic
    replaces the original row-wise df.apply (same values, O(n) in C).
    """
    df['both_no'] = 1 - (df['negative_exam_for_pe'] + df['indeterminate'])
    df['acute_pe'] = 1 - (df['chronic_pe'] + df['acute_and_chronic_pe'])
    df['acute_pe'] = np.where(df['both_no']==0, 0, df['acute_pe'])
    return df
# +
# Get image + pre-processing
def window_min_max(img, min_, max_, WL=50, WW=350):
    """Apply a CT window (level WL, width WW) and min-max scale to [0, 1].

    `min_` and `max_` are accepted for signature compatibility but unused
    (matching the original implementation).
    """
    lo = WL - WW//2
    hi = WL + WW//2
    windowed = np.clip(img.copy(), lo, hi)
    windowed = windowed - windowed.min()
    return windowed / windowed.max()
def get_img_min_max(path, min_, max_):
    '''
    Read one DICOM slice and return (3-channel windowed image, metadata vector).

    # min_: patient level pixel min
    # max_: patient level pixel max
    # NOTE(review): min_/max_ are passed through to window_min_max, which
    # ignores them -- confirm whether patient-level normalization was intended.
    RED channel / LUNG window / level=-600, width=1500
    GREEN channel / PE window / level=100, width=700
    BLUE channel / MEDIASTINAL window / level=40, width=400
    '''
    d = pydicom.read_file(path)
    # Get image: convert stored values to Hounsfield units via slope/intercept.
    img = (d.pixel_array * d.RescaleSlope) + d.RescaleIntercept
    r = window_min_max(img, min_, max_, -600, 1500)
    g = window_min_max(img, min_, max_, 100, 700)
    b = window_min_max(img, min_, max_, 40, 400)
    # Stack the three windows as RGB channels.
    res = np.concatenate([r[:, :, np.newaxis],
                          g[:, :, np.newaxis],
                          b[:, :, np.newaxis]], axis=-1)
    # Resize to the configured square input size (bilinear, order=1).
    res = zoom(res, [CFG['img_size']/res.shape[0], CFG['img_size']/res.shape[1], 1.], prefilter=False, order=1)
    # Get numerical metadata (several fields scaled by 1/100 -- presumably
    # a rough normalization; confirm scale choices against training stats).
    SliceThickness = float(d.SliceThickness)
    KVP = float(d.KVP)/100.0
    TableHeight = float(d.TableHeight)/100.0
    XRayTubeCurrent = float(d.XRayTubeCurrent)/100.0
    Exposure = float(d.Exposure)/100.0
    GantryDetectorTilt = float(d.GantryDetectorTilt)
    ImagePositionPatient = [x/100.0 for x in list(d.ImagePositionPatient)]
    ImageOrientationPatient = list(d.ImageOrientationPatient)
    mt_num = np.array((SliceThickness, KVP, TableHeight,
                       XRayTubeCurrent, Exposure,
                       *ImagePositionPatient, *ImageOrientationPatient,
                       GantryDetectorTilt))
    # Get categorical metadata, one-hot encoded with an 'other' bucket each.
    SpecificCharacterSet = d.SpecificCharacterSet
    ImageType = d.ImageType
    ConvolutionKernel = d.ConvolutionKernel
    PatientPosition = d.PatientPosition
    sps_100 = np.where(SpecificCharacterSet=='ISO_IR 100', 1, 0)
    sps_other = np.where(sps_100==0, 1, 0)
    it_opa = np.where(ImageType=="['ORIGINAL', 'PRIMARY', 'AXIAL']", 1, 0)
    it_o = np.where(ImageType=="ORIGINAL", 1, 0)
    it_other = np.where(it_opa+it_o > 0, 0, 1)
    ck_std = np.where(ConvolutionKernel=="STANDARD", 1, 0)
    ck_b = np.where(ConvolutionKernel=="B", 1, 0)
    ck_other = np.where(ck_std+ck_b > 0, 0, 1)
    pp_ffs = np.where(PatientPosition=="FFS", 1, 0)
    pp_hfs = np.where(PatientPosition=="HFS", 1, 0)
    pp_other = np.where(pp_ffs+pp_hfs > 0, 0, 1)
    mt_cat = np.array((sps_100, sps_other, it_opa, it_o, it_other, ck_std, ck_b, ck_other, pp_ffs, pp_hfs, pp_other))
    # Full metadata vector: 15 numeric + 11 categorical = 26 features
    # (matches CFG['metadata_feats']).
    mt = np.concatenate((mt_num, mt_cat))
    return res, mt
# +
# Dataset
class RSNADataset(TensorDataset):
    """Dataset of (windowed CT image, DICOM metadata, exam targets) triples.

    Reads DICOM files laid out as
    data_root/StudyInstanceUID/SeriesInstanceUID/SOPInstanceUID.dcm.
    """
    def __init__(
        self, df, label_smoothing, data_root,
        image_subsampling=True, transforms=None, output_label=True
    ):
        super().__init__()
        # NOTE(review): image_subsampling is accepted but never used in this
        # class -- confirm whether per-exam subsampling was meant here.
        self.df = df.reset_index(drop=True).copy()
        self.label_smoothing = label_smoothing
        self.transforms = transforms
        self.data_root = data_root
        self.output_label = output_label

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index: int):
        # get labels
        if self.output_label:
            target = self.df[CFG['exam_target_cols']].iloc[index].values
            # Gate all exam-level targets (except the last) by the image-level
            # pe_present_on_image flag (target[0]).
            target[1:-1] = target[0]*target[1:-1]

        path = "{}/{}/{}/{}.dcm".format(self.data_root,
                                        self.df.iloc[index]['StudyInstanceUID'],
                                        self.df.iloc[index]['SeriesInstanceUID'],
                                        self.df.iloc[index]['SOPInstanceUID'])

        # Get image and metadata (min_/max_ placeholders are unused downstream).
        img, mt = get_img_min_max(path, 0, 0)

        if self.transforms:
            img = self.transforms(image=img)['image']

        # Get metadata and pre-process
        # mt = mt[None, :]

        # do label smoothing: clip targets into [eps, 1-eps]
        if self.output_label == True:
            target = np.clip(target, self.label_smoothing, 1 - self.label_smoothing)
            return img, mt, target
        else:
            return img, mt
# +
# Image Transformation
def get_train_transforms():
    """Training-time augmentation: random flips / 90-degree rotations, then
    convert HWC numpy image to a CHW torch tensor."""
    return Compose([
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            ToTensorV2(p=1.0),
        ], p=1.)
def get_valid_transforms():
    """Validation pipeline: tensor conversion only, no augmentation."""
    return Compose([
            ToTensorV2(p=1.0),
        ], p=1.)
# +
# Models
class FeatureExtractor(nn.Module):
    """EfficientNet backbone (offline pretrained weights) + global average pool.

    forward(x) returns a flat (batch, channels) feature vector; the classifier
    head lives in stg1_study_model below.
    """
    def __init__(self):
        super().__init__()
        # self.cnn_model = EfficientNet.from_pretrained(CFG['efbnet'], in_channels=3)
        self.cnn_model = EfficientNet.from_name(CFG['efbnet'])
        # Load ImageNet weights from the local Kaggle dataset (no internet).
        self.cnn_model.load_state_dict(torch.load(pretrained_model[CFG['efbnet']]))
        # self.model._fc = nn.Linear(self.cnn_model._fc.in_features, CFG['effnet_fc'], bias=True)
        self.pooling = nn.AdaptiveAvgPool2d(1)

    def get_dim(self):
        # Width of the backbone's final feature map (input size of its fc head).
        return self.cnn_model._fc.in_features

    def forward(self, x):
        feats = self.cnn_model.extract_features(x)
        # Global average pool -> (batch, channels) vector.
        return self.pooling(feats).view(x.shape[0], -1)
class stg1_study_model(nn.Module):
    """Stage-1 model: EfficientNet image features fused with DICOM metadata.

    forward(imgs, mts) -> logits over CFG['exam_target_cols'].
    """
    def __init__(self):
        super().__init__()
        # For image
        self.cnn_model = FeatureExtractor()
        # For metadata: small stack over the 26 numeric/one-hot DICOM features.
        # NOTE(review): there is no nonlinearity between these Linear layers,
        # so the metadata branch collapses to a single linear map -- confirm
        # whether ReLUs were intended between fc1/fc2/fc3.
        self.fnn_fc1 = nn.Linear(in_features=CFG['metadata_feats'], out_features=32)
        self.fnn_fc2 = nn.Linear(in_features=32, out_features=32)
        self.fnn_fc3 = nn.Linear(in_features=32, out_features=16)
        # Final Fusion: concatenated image + metadata embedding -> logits.
        self.final_fc = nn.Linear(in_features=self.cnn_model.get_dim()+16, out_features=len(CFG['exam_target_cols']))

    def forward(self, imgs, mts):
        imgs_embdes = self.cnn_model(imgs) # bs * efb_feat_size
        mt_embed = self.fnn_fc1(mts)
        mt_embed = self.fnn_fc2(mt_embed)
        mt_embed = self.fnn_fc3(mt_embed)
        embed = torch.cat([imgs_embdes, mt_embed],dim=1)
        image_preds = self.final_fc(embed)
        return image_preds
# +
# Loss functions
def rsna_wloss_train(y_true_img, y_pred_img, device):
    """Summed BCE-with-logits loss plus per-target accuracy counts.

    Returns (loss, correct_count, counts) where correct_count is a per-column
    tally of (logit > 0) predictions matching the targets and counts is the
    batch size.
    """
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum').to(device)
    logits = y_pred_img.view(*y_true_img.shape)
    image_loss = criterion(logits, y_true_img)
    # A positive logit corresponds to a predicted probability > 0.5.
    correct_count = ((logits > 0) == y_true_img).sum(axis=0)
    counts = y_true_img.size()[0]
    return image_loss, correct_count, counts
def rsna_wloss_valid(y_true_img, y_pred_img, device):
    # Validation uses the exact same loss/accuracy computation as training.
    return rsna_wloss_train(y_true_img, y_pred_img, device)
def rsna_wloss_inference(y_true_img, y_pred_img):
    """Summed BCE loss and accuracy counts for probability predictions.

    Unlike the training variant, `y_pred_img` holds probabilities in [0, 1]
    (BCELoss operates on probabilities, not logits), so the decision
    threshold is 0.5.

    Returns (loss, correct_count, counts).
    """
    bce_func = torch.nn.BCELoss(reduction='sum')
    image_loss = bce_func(y_pred_img, y_true_img)
    # BUG FIX: the original thresholded probabilities at 0, which marks
    # virtually every prediction positive; probabilities threshold at 0.5.
    correct_count = ((y_pred_img > 0.5) == y_true_img).sum()
    counts = y_pred_img.shape[0]
    return image_loss, correct_count, counts
# -
# DataLoader
def prepare_train_dataloader(train, cv_df, train_fold, valid_fold):
    """Split `train` by patient folds and return (train_loader, val_loader).

    `cv_df` maps StudyInstanceUID -> fold; splitting at patient (study) level
    keeps all slices of a study on the same side of the split.
    """
    # NOTE(review): BalanceClassSampler is imported but never used below --
    # dead import, or a sampler was intended for the train loader; confirm.
    from catalyst.data.sampler import BalanceClassSampler

    train_patients = cv_df.loc[cv_df.fold.isin(train_fold), 'StudyInstanceUID'].unique()
    valid_patients = cv_df.loc[cv_df.fold.isin(valid_fold), 'StudyInstanceUID'].unique()
    train_ = train.loc[train.StudyInstanceUID.isin(train_patients),:].reset_index(drop=True)
    valid_ = train.loc[train.StudyInstanceUID.isin(valid_patients),:].reset_index(drop=True)

    # train mode to do image-level subsampling (no label smoothing: 0.0)
    train_ds = RSNADataset(train_, 0.0, CFG['train_img_path'], image_subsampling=False, transforms=get_train_transforms(), output_label=True)
    valid_ds = RSNADataset(valid_, 0.0, CFG['train_img_path'], image_subsampling=False, transforms=get_valid_transforms(), output_label=True)

    train_loader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=CFG['train_bs'],
        pin_memory=False,
        drop_last=False,
        shuffle=True,
        num_workers=CFG['num_workers'],
    )
    val_loader = torch.utils.data.DataLoader(
        valid_ds,
        batch_size=CFG['valid_bs'],
        num_workers=CFG['num_workers'],
        shuffle=False,
        pin_memory=False,
    )
    return train_loader, val_loader
def train_one_epoch(epoch, model, device, scaler, optimizer, train_loader):
    """Run one training epoch with mixed precision and gradient accumulation.

    Prints a moving average (window of `avg_cnt` steps) of the loss and the
    per-target accuracy every CFG['verbose_step'] steps.
    """
    model.train()
    t = time.time()
    loss_sum = 0
    acc_sum = None
    loss_w_sum = 0
    acc_record = []   # per-step accuracy vectors (sliding window)
    loss_record = []  # per-step mean losses (sliding window)
    avg_cnt = 40      # moving-average window length
    for step, (imgs, mts, image_labels) in enumerate(train_loader):
        imgs = imgs.to(device).float()
        mts = mts.to(device).float()
        image_labels = image_labels.to(device).float()
        with autocast():
            image_preds = model(imgs, mts) #output = model(input)
            image_loss, correct_count, counts = rsna_wloss_train(image_labels, image_preds, device)
            loss = image_loss/counts
            # NOTE(review): loss is not divided by CFG['accum_iter'], so the
            # accumulated gradient scales with the accumulation factor —
            # confirm this is intended.
            scaler.scale(loss).backward()
            loss_ = image_loss.detach().item()/counts
            acc_ = correct_count.detach().cpu().numpy()/counts
            loss_record += [loss_]
            acc_record += [acc_]
            # Keep only the most recent avg_cnt entries for the moving average.
            loss_record = loss_record[-avg_cnt:]
            acc_record = acc_record[-avg_cnt:]
            loss_sum = np.vstack(loss_record).mean(axis=0)
            acc_sum = np.vstack(acc_record).mean(axis=0)
        #loss_w_sum += counts
        # Step the optimizer every accum_iter batches (gradient accumulation).
        if ((step + 1) % CFG['accum_iter'] == 0) or ((step + 1) == len(train_loader)):
            # may unscale_ here if desired (e.g., to allow clipping unscaled gradients)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
        acc_details = ["{:.5}: {:.4f}".format(f, float(acc_sum[i])) for i, f in enumerate(CFG['exam_target_cols'])]
        acc_details = ", ".join(acc_details)
        if ((step + 1) % CFG['verbose_step'] == 0) or ((step + 1) == len(train_loader)):
            print(
                f'epoch {epoch} train Step {step+1}/{len(train_loader)}, ' + \
                f'loss: {loss_sum[0]:.3f}, ' + \
                acc_details + ', ' + \
                f'time: {(time.time() - t):.2f}', end='\r' if (step + 1) != len(train_loader) else '\n'
            )
def valid_one_epoch(epoch, model, device, scheduler, val_loader, schd_loss_update=False):
    """Run one validation epoch, print running loss/accuracy, then step the scheduler.

    If `schd_loss_update` is True the scheduler receives the mean validation
    loss (ReduceLROnPlateau-style); otherwise it is stepped unconditionally.
    """
    model.eval()
    t = time.time()
    loss_sum = 0       # summed loss over all samples seen so far
    acc_sum = None     # per-target correct-prediction counts, accumulated over batches
    loss_w_sum = 0     # total number of samples seen so far
    for step, (imgs, mts, image_labels) in enumerate(val_loader):
        imgs = imgs.to(device).float()
        mts = mts.to(device).float()
        image_labels = image_labels.to(device).float()
        image_preds = model(imgs, mts) #output = model(input)
        #print(image_preds.shape, exam_pred.shape)
        image_loss, correct_count, counts = rsna_wloss_valid(image_labels, image_preds, device)
        loss = image_loss/counts
        loss_sum += image_loss.detach().item()
        if acc_sum is None:
            acc_sum = correct_count.detach().cpu().numpy()
        else:
            acc_sum += correct_count.detach().cpu().numpy()
        loss_w_sum += counts
        # NOTE(review): this labels accuracies with CFG['image_target_cols']
        # while train_one_epoch uses CFG['exam_target_cols'] — confirm which
        # column list is intended here.
        acc_details = ["{:.5}: {:.4f}".format(f, acc_sum[i]/loss_w_sum) for i, f in enumerate(CFG['image_target_cols'])]
        acc_details = ", ".join(acc_details)
        if ((step + 1) % CFG['verbose_step'] == 0) or ((step + 1) == len(val_loader)):
            print(
                f'epoch {epoch} valid Step {step+1}/{len(val_loader)}, ' + \
                f'loss: {loss_sum/loss_w_sum:.3f}, ' + \
                acc_details + ', ' + \
                f'time: {(time.time() - t):.2f}', end='\r' if (step + 1) != len(val_loader) else '\n'
            )
    if schd_loss_update:
        scheduler.step(loss_sum/loss_w_sum)
    else:
        scheduler.step()
# ## The Actual Run
# +
# Fix all RNG seeds for reproducibility, then load and preprocess the
# training table and the CV-fold assignment table.
seed_everything(SEED)
train_df = pd.read_csv(CFG['train_path'])
cv_df = pd.read_csv(CFG['cv_fold_path'])
train_df = preprocess_DF(train_df)
cv_df = preprocess_DF(cv_df)
print(train_df)
print(cv_df)
# -
# Cross-validation loop: train one model per (train_folds, valid_folds) pair.
for fold, (train_fold, valid_fold) in enumerate(zip(CFG['train_folds'], CFG['valid_folds'])):
    # NOTE(review): enumerate() never yields a negative index, so this guard
    # is dead code — presumably a leftover for skipping specific folds.
    if fold < 0:
        continue
    print(fold)
    train_loader, val_loader = prepare_train_dataloader(train_df, cv_df, train_fold, valid_fold)
    device = torch.device(CFG['device'])
    model = stg1_study_model().to(device)
    # Resume from a previously saved checkpoint for this tag.
    model.load_state_dict(torch.load('{}/model_{}'.format(CFG['model_path'], CFG['tag'])))
    scaler = GradScaler()
    optimizer = torch.optim.Adam(model.parameters(), lr=CFG['lr'])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=0.1, step_size=1); schd_loss_update=False
    for epoch in range(CFG['epochs']):
        train_one_epoch(epoch, model, device, scaler, optimizer, train_loader)
        # Checkpoint after every epoch, before validation.
        torch.save(model.state_dict(),'{}/model_{}'.format(CFG['model_path'], CFG['tag']))
        with torch.no_grad():
            valid_one_epoch(epoch, model, device, scheduler, val_loader, schd_loss_update=schd_loss_update)
        # torch.save(model.state_dict(),'{}/model_{}'.format(CFG['model_path'], CFG['tag']))
    # Free GPU memory before the next fold.
    del model, optimizer, train_loader, val_loader, scaler, scheduler
    torch.cuda.empty_cache()
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
| 02_rsna_pulmonary_embolism/level1_mimo_image-level-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### <NAME> 1301170767
# ### <NAME> 1301174682
# ### <NAME> 1301170770
import random
import matplotlib.pyplot as plt
import imageio
import numpy as np
class Particle():
    """A single agent in the epidemic random-walk simulation."""

    def __init__(self, x, y, status=None):
        # Position on the 20x20 grid.
        self.x = x
        self.y = y
        # 'infected', 'non-infected' or 'recovered' (None if unspecified).
        self.status = status
        # Number of days spent infected so far.
        self.total_hari = 0
        # Recovered particles become immune and cannot be re-infected.
        self.imun = False
# ### EDIT JUMLAH PARTICLE DAN DIGIT BELAKANG KOMA
# +
# Seed the 20x20 world: 150 infected and 500 healthy particles at random
# positions, coordinates rounded to one decimal place.
Particles = [
    Particle(round(random.uniform(0, 20), 1), round(random.uniform(0, 20), 1), status='infected')
    for _ in range(150)
]
Particles += [
    Particle(round(random.uniform(0, 20), 1), round(random.uniform(0, 20), 1), status='non-infected')
    for _ in range(500)
]
# -
def check_infection(Particles):
    """Return True when no particle is infected any more (i.e. the epidemic is over)."""
    return all(p.status != 'infected' for p in Particles)
# ### EDIT MOVEMENT STEP DISINI
def iteration(Particles):
    """Advance the simulation by one day.

    Each particle moves with probability 0.8; a moving particle steps 0.3
    in one of the four cardinal directions, chosen uniformly. Infected
    particles accumulate one day of illness whether or not they moved.
    The RNG call sequence matches the original implementation exactly.
    """
    for p in Particles:
        # 80% chance to move this step, 20% to stay put.
        if random.choices(['bergerak', 'tidak bergerak'], [0.8, 0.2]) == ['bergerak']:
            arah = random.choices(['atas', 'bawah', 'kiri', 'kanan'], [0.25, 0.25, 0.25, 0.25])[0]
            # (Removed the no-op `x.x = x.x` / `x.y = x.y` assignments.)
            if arah == 'atas':
                p.y = round(p.y + 0.3, 1)
            elif arah == 'bawah':
                p.y = round(p.y - 0.3, 1)
            elif arah == 'kiri':
                p.x = round(p.x - 0.3, 1)
            elif arah == 'kanan':
                p.x = round(p.x + 0.3, 1)
        # The original incremented total_hari identically in both the moved
        # and not-moved branches; hoisted here to remove the duplication.
        if p.status == 'infected':
            p.total_hari = p.total_hari + 1
    return Particles
def PBC(Particles):
    """Clamp every particle back into the [0, 20] x [0, 20] box.

    NOTE: despite the name (Periodic Boundary Conditions) this clamps
    positions at the walls rather than wrapping them around.
    """
    for p in Particles:
        # Bug fix: x and y must be clamped independently. The original used a
        # single if/elif chain, so the y checks were skipped whenever x was
        # out of bounds (e.g. a particle at (25, -3) kept y = -3).
        if p.x > 20:
            p.x = 20
        elif p.x < 0:
            p.x = 0
        if p.y > 20:
            p.y = 20
        elif p.y < 0:
            p.y = 0
    return Particles
def infect(Particles):
    """Infect every non-immune particle that shares a position with an infected one."""
    # Positions of currently infected particles. A set of tuples gives O(1)
    # membership tests (the original built a *duplicated* list of [x, y]
    # pairs and scanned it linearly for every particle).
    infected_positions = {(p.x, p.y) for p in Particles if p.status == 'infected'}
    for p in Particles:
        if (p.x, p.y) in infected_positions and p.imun == False:
            p.status = 'infected'
    return Particles
# ### EDIT BATAS HARI UNTUK SEMBUH
def recover(Particles):
    """Mark infected particles as recovered (and immune) after more than 60 days."""
    for p in Particles:
        if p.status == 'infected' and p.total_hari > 60:
            p.status = 'recovered'
            # Bug fix: the original assigned the *string* 'True' here; use the
            # boolean so immunity checks are type-consistent.
            p.imun = True
    return Particles
def plotter(Particles, counter):
    """Render one simulation frame: a scatter of particles plus a status bar chart.

    Returns the rendered frame as an RGB uint8 array of shape (H, W, 3),
    suitable for appending to a GIF.
    """
    fig, ax = plt.subplots(2, figsize=(20, 20))
    ax[0].set(xlabel='X', ylabel='Y',
              )
    ax[0].set_ylim(0, 20)
    ax[0].set_xlim(0, 20)
    infected_x_x = []
    infected_x_y = []
    non_infected_x_x = []
    non_infected_x_y = []
    recovered_x_x = []
    recovered_x_y = []
    # Bucket particle coordinates by status.
    for x in Particles:
        if x.status == 'infected':
            infected_x_x.append(x.x)
            infected_x_y.append(x.y)
        elif x.status == 'non-infected':
            non_infected_x_x.append(x.x)
            non_infected_x_y.append(x.y)
        elif x.status == 'recovered':
            recovered_x_x.append(x.x)
            recovered_x_y.append(x.y)
    # Scatter each non-empty group: red=infected, blue=healthy, yellow=recovered.
    if len(infected_x_x) != 0:
        ax[0].scatter(infected_x_x, infected_x_y, color='r')
    if len(non_infected_x_x) != 0:
        ax[0].scatter(non_infected_x_x, non_infected_x_y, color='b')
    if len(recovered_x_x) != 0:
        ax[0].scatter(recovered_x_x, recovered_x_y, color='y')
    # (Removed the unused `labels = np.arange(1, 3)` assignment.)
    ax[1].bar(1, len(infected_x_x), color='r', label='Infected')
    ax[1].bar(3, len(non_infected_x_x), color='b', label='Non-Infected')
    ax[1].bar(5, len(recovered_x_x), color='y', label='Recovered')
    ax[1].set(title="DAY: %s" % counter)
    ax[1].legend()
    ax[0].set(title='Infected:{} Non-Infected:{} Recovered:{}'.format(len(infected_x_x), len(non_infected_x_x), len(recovered_x_x)))
    fig.canvas.draw()  # draw the canvas, cache the renderer
    # NOTE(review): tostring_rgb is deprecated in newer matplotlib releases;
    # buffer_rgba is the modern replacement — confirm the installed version.
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
    image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # Bug fix: close the figure so repeated calls inside the simulation loop
    # do not accumulate hundreds of open figures (memory leak + warnings).
    plt.close(fig)
    return image
# +
# Run the simulation until no infected particles remain, rendering one
# frame per simulated day.
images=[]
counter=0
while check_infection(Particles)==False:
    #for x in range(200):
    #print(Particles[0].x,Particles[0].y)
    Particles=iteration(Particles)
    Particles=infect(Particles)
    Particles=recover(Particles)
    Particles=PBC(Particles)
    #print(Particles[0].x,Particles[0].y)
    #for x in Particles:
    images.append(plotter(Particles,counter))
    counter=counter+1
# -
# NOTE(review): kwargs_write is defined but never passed to mimsave, so the
# 'quantizer' setting has no effect; the GIF is written at fps=3.
kwargs_write = {'fps':1.0, 'quantizer':'nq'}
imageio.mimsave('./covid2.gif', images, fps=3)
| RandomWalkCovid/TUBES2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Now You Code 2: Paint Pricing
#
# House Depot, a big-box hardware retailer, has contracted you to create an app to calculate paint prices.
#
# The price of paint is determined by the following factors:
# - Everyday quality paint is `$19.99` per gallon.
# - Select quality paint is `$24.99` per gallon.
# - Premium quality paint is `$32.99` per gallon.
#
# In addition if the customer wants computerized color-matching that incurs an additional fee of `$4.99` per gallon.
#
# Write a program to ask the user to select a paint quality: 'everyday', 'select' or 'premium' and then whether they need color matching and then outputs the price per gallon of the paint.
#
# Example Run 1:
#
# ```
# Which paint quality do you require ['everyday', 'select', 'premium'] ?select
# Do you require color matching [y/n] ?y
# Total price of select paint with color matching is $29.98
# ```
#
# Example Run 2:
#
# ```
# Which paint quality do you require ['everyday', 'select', 'premium'] ?premium
# Do you require color matching [y/n] ?n
# Total price of premium paint without color matching is $32.99
# ```
# ## Step 1: Problem Analysis
#
# Inputs: paint quality and whether you want color matching
#
# Outputs: cost
#
# Algorithm (Steps in Program): get input, calculate price, output it
#
#
# Step 2: Write code here
# Price per gallon for each supported paint quality.
paint_prices = {"everyday": 19.99, "select": 24.99, "premium": 32.99}
quality = input("What paint quality do you require? ['everyday', 'select', 'premium'] ")
if quality not in paint_prices:
    # Unknown quality: report it and force-quit the program.
    print("That is not a paint quality")
    raise SystemExit
cost = paint_prices[quality]
match = input("Do you require color matching? ")
if match == 'y':
    # Computerized color matching adds $4.99 per gallon.
    cost = cost + 4.99
    print("Total cost of %s paint with color matching is $%.2f" % (quality, cost))
elif match == 'n':
    print("Total cost of %s paint without color matching is $%.2f" % (quality, cost))
else:
    print("you must input either 'y' or 'n'")
# ## Step 3: Questions
#
# 1. When you enter something other than `'everyday', 'select',` or `'premium'` what happens? Modify the program to print `that is not a paint quality` and then exit in those cases.
# I was unsure of how to exit so I looked it up and used "raise SystemExit" which force quits the program
# 2. What happens when you enter something other than `'y'` or `'n'` for color matching? Re-write the program to print `you must enter y or n` whenever you enter something other than those two values.
# 3. Why can't we use Python's `try...except` in this example?
# we can't use the try...except in this example because that will stop the program if there is an error at any point while we only want to quit it if the user inputs an invalid type of paint quality
# 4. How many times (at minimum) must we execute this program and check the results before we can be reasonably assured it is correct?
# I would do it 9 times
# ## Reminder of Evaluation Criteria
#
# 1. Was the problem attempted (analysis, code, and answered questions)?
# 2. Was the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
| content/lessons/04/Now-You-Code/NYC2-Paint-Matching.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dlnd
# language: python
# name: dlnd
# ---
# # 학습 목표
# - TensorFlow 2.0 간단 사용법
import numpy as np
import tensorflow as tf
# # Creating tensors
# - []
# - Creating a list
[1, 2, 3]
[[1, 2, 3], [4, 5, 6]]
# ### Creating an array
# - Both tuples and lists can be wrapped with np.array() to build an array
arr = np.array([1, 2, 3])
arr.shape
arr = np.array([[1, 2, 3], [1, 2, 3]])
arr.shape
# ### Creating a tensor
# - tf.constant()
# - list -> Tensor
tf.constant([1, 2, 3])
# - tf.constant()
# - tuple -> Tensor
tf.constant(((1, 2, 3), (1, 2, 3)))
# - tf.constant()
# - Array -> Tensor
arr = np.array([1, 2, 3])
arr
tensor = tf.constant(arr)
tensor
# ### Inspecting the information held in a tensor
# - checking the shape
tensor.shape
# - checking the data type
#     - caution: tensors are often created without an explicit data type, which can cause dtype confusion
#     - the data type can affect both the model's memory footprint and its performance
tensor.dtype
# - specifying the data type
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
tensor
# - converting the data type
#     - just as NumPy has astype(), TensorFlow uses tf.cast
arr = np.array([1, 2, 3], dtype=np.float32)
arr.astype(np.uint8)
tensor
tf.cast(tensor, dtype=tf.uint8)
# - getting a NumPy array out of a tensor
#     - .numpy()
tensor.numpy()
# - getting a NumPy array out of a tensor
#     - np.array()
np.array(tensor)
# use type() to confirm the conversion to a numpy array
type(tensor)
type(tensor.numpy())
# ## Generating random numbers
# 
# - the normal distribution is continuous, following the central limit theorem
# - the uniform distribution is unrelated to the central limit theorem: discrete, with a flat density
# - numpy draws from the normal distribution by default
# - np.random.randn()
np.random.randn(9)
# - tf.random.normal
#     - normal distribution in TensorFlow
tf.random.normal([3, 3])
# - tf.random.uniform
#     - uniform distribution in TensorFlow
tf.random.uniform([4, 4])
| DL_TF20/Part 1 - Tensors in TensorFlow-Antonio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Iterated Mark and Recapture"
# > "An example of Bayesian estimation of population size using mark and recapture."
#
# - toc: true
# - branch: master
# - badges: false
# - comments: false
# - categories: [probability, Bayesian, simulation]
# - hide: false
# - search_exclude: false
# - image: images/blog_posts/mark_and_recapture.png
# - redirect_from: blog/iterated_mark
# **(Edited 03/01/2020, see 'Likelihood' section and end of article)**
#
# ## Introduction
#
# Suppose you have a population of wild animals and you want to estimate the population size. It's impractical to catch all of them, so what do you do? A standard method is ['mark and recapture'](https://en.wikipedia.org/wiki/Mark_and_recapture): catch some animals, mark them, release them, and catch some again. In the second capture, there will be some marked animals and some unmarked (i.e., not seen before). The ratio gives you some information about the population size.
#
# The standard estimator for the population size is the [Lincoln index](https://en.wikipedia.org/wiki/Lincoln_index), but it is only applicable in the case of two capture events. What if we want to *repeatedly* capture, mark, and release, and mark the animals such that we know how many times each has been captured? I'm by no means an expert in frequentist statistical methods, so I don't know what the approach would be in this iterated capture case, but I gather it's complicated. What I'll show in this notebook is how to do Bayesian inference for this problem. If you just want to see some code, skip to the bottom of this notebook.
#
#
# An example of Bayesian inference of population size for iterated mark-and-recapture is shown in the following video.
# > youtube: https://youtu.be/xzCq-rQ8Omk
# The left hand side is a (crude) simulation of animals moving around. During a capture event, a square appears and the animals within are captured. An animal's colour indicates the number of times it has been captured. The right hand side shows the current state of belief about the total population size.
#
# ## Assumptions
#
# In the following we'll make these simplifying modelling assumptions:
#
# - The population size is constant. Animals don't leave or join the population between capture events.
# - Every animal has an equal probability of being captured, and this probability is independent between capture events.
# - The total number of animals captured in a given capture event does *not* depend on the total population size (apart from being upper-bounded by it). This assumption is actually false in the animation above, but is often true in mark-and-recapture field work. Note, if the total number of animals captured at each stage did depend on the population size, the total number observed would give us further information about the population size.
# ## Short Bayesian Inference Intro/Recap
# In Bayesian inference we assign probabilities to hypotheses to represent our state of belief, and use [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem) to update the probability of a hypothesis using the probability of an observation *given* that hypothesis.
#
# Bayes' theorem says that, for any two random variables $H$ and $X$:
#
# $P(H\mid X) \propto P(X \mid H) \cdot P(H)$.
#
# If we let $X$ stand for an observation, and $H$ for a hypothesis, then what this says is: "The **probability that a hypothesis is true after making an observation** (called the *posterior*) is proportional to its **probability before that observation** (called the *prior*) multiplied by **the probability of making that observation if the hypothesis is true** (called the *likelihood*). An important feature of Bayesian inference for our mark-and-recapture problem is that if you make a sequence of observations, you can iteratively apply Bayes' theorem to keep updating your posterior belief.
#
# So, to do Bayesian inference you need to be able to:
#
# 1. Specify a prior distribution over hypotheses, in the absence of observational data.
# 2. For a given hypothesis and observation, compute the likelihood, i.e., the probability of having made that observation.
# 3. Re-normalize the posterior distribution, so that it sums to 1.
# ## Our Prior Distribution, and Normalization
#
# The normalization step is often the hard part. In our case, we're going to simplify things by using a prior distribution that assigns non-zero probability to only a finite set of hypotheses. Normalization, then, is just a matter of rescaling the posterior probability vector so that it always sums to 1.
#
# E.g., the above animation uses a prior $p(k) = \frac{1}{350}$ for population size $k$ between 100 and 450, and assigns 0 probability otherwise. I.e., before making observations we believe it to be equally likely that the total population size is any value between 100 and 450.
# ## The Likelihood
#
# Suppose we've previously made several captures, and there are $10$ animals that have been captured once, $5$ that have been captured twice, and none that have been captured more than twice. We *don't* know how many have been captured zero times, but a hypothesis that there are $k$ animals in total says that there are $k-15$ such animals.
#
# So, rephrasing the hypothesis: "The number of animals that have been captured zero, one, and two times is $k-15$, $10$, and $5$", and if we then capture again (sampling without replacement), the number of the newly captured animals that have previously been captured zero, one, or two times follows a [multivariate hypergeometric distribution](https://en.wikipedia.org/wiki/Hypergeometric_distribution#Multivariate_hypergeometric_distribution), so this gives us our likelihood.
#
# (Quick probability distribution recap: suppose you have an urn with $N_1$ white balls and $N_2$ black balls, and you sample $M$ times with replacement. The binomial distribution is the probability distribution over the number of white balls you will observe. If you sample instead **without** replacement, the number of white balls sampled follows a hypergeometric distribution. The multivariate hypergeometric distribution is the generalization to more than two colours of balls, and is the distribution over the vector of counts for each colour that you will observe.)
#
# **03/01/2020 Edit**: Since writing this article, I've realised that the number of animals captured that had previously been captured one, two, three, etc. times does not give any information about the population size. All of the information comes from the ratio of seen-before to never-seen-before. I'll explain why at the end of the article, but leave it unedited otherwise.
# ## Some Code
# First we need a way to compute the likelihood of a given observation for a multivariate hypergeometric distribution. Unfortunately, Scipy only implements the univariate case. Fortunately, we can build the multivariate case out of recursive univariate hypergeometric distributions, i.e., by first computing the likelihood of the number of zero vs greater than zero previous captures, then the number of one vs greater than one, etc.
# +
import numpy as np
from scipy.stats import hypergeom
# The two input vectors hold the number of individuals in the population and
# in the latest capture that have been observed 0, 1, 2, etc. times. Every
# entry is known except the population's never-observed count, which is
# supplied by each hypothesis.
def multi_hypergeom_likelihood(observation_vector, population_vector):
    """Multivariate hypergeometric pmf built from chained univariate pmfs.

    Peels off one category at a time: the univariate hypergeometric
    probability of the first count ("first colour vs the rest"), multiplied
    by the same computation applied to the remaining categories. A
    single-category vector has probability 1.
    """
    likelihood = 1.0
    obs = list(observation_vector)
    pop = list(population_vector)
    while len(obs) > 1:
        likelihood *= hypergeom.pmf(obs[0], sum(pop), pop[0], sum(obs))
        obs, pop = obs[1:], pop[1:]
    return likelihood
# -
# Now let's set up a population and a prior belief over its size.
# +
true_population_size = 350
# A uniform prior distribution over candidate population sizes 100..449.
candidate_population_sizes = np.arange(100, 450)
belief = np.ones_like(candidate_population_sizes, dtype=np.float32)
belief /= belief.sum()  # normalize so the prior sums to 1
# This is to keep track of how many times each member of the population has been seen.
# Note that for convenience we also record the counts for individuals that have never been
# captured before. This information is *not* used when computing likelihoods, since it is
# 'unknown' and implied by the hypothesis.
population_capture_counts = np.zeros(true_population_size, dtype=np.int32)
# -
# Here we crudely simulate the process of capturing animals.
def capture(true_population_size):
    """Simulate one capture event.

    Draws a uniform random capture size between 30 and 60 (inclusive) and
    returns the indices of that many distinct captured individuals.
    """
    n_caught = np.random.randint(30, 61)
    caught_indices = np.random.choice(
        true_population_size,
        replace=False,
        size=n_caught
    )
    return caught_indices
# And here is some code for computing the likelihood for each hypothesis and updating the posterior over hypotheses.
def update_belief(candidate_pop_sizes, belief, population_capture_counts, captured_indices):
    """Bayesian update of the posterior over population size after one capture.

    For each hypothesized size k, computes the multivariate hypergeometric
    likelihood of the observed previous-capture counts in this capture event
    and multiplies it into `belief` (in place), then renormalizes.
    """
    max_capture_count = np.max(population_capture_counts)
    captured_prev_capture_counts = population_capture_counts[captured_indices]
    # Compute a vector of the number of individuals in the population
    # / latest capture that have been previously observed 0, 1, 2, etc.
    # times. Each of these is a known quantity, except for the
    # number of animals in the population that have previously
    # not been observed. This value will be filled in according to each
    # hypothesis before computing likelihoods.
    observation_vector = [sum(captured_prev_capture_counts == i) for i in range(max_capture_count + 1)]
    true_population_vector = [sum(population_capture_counts == i) for i in range(max_capture_count + 1)]
    for k_i, k in enumerate(candidate_pop_sizes):
        hypothesized_pop_vector = np.copy(true_population_vector)
        # Under hypothesis "population = k", the never-seen count is k minus
        # everyone seen at least once.
        hypothesized_pop_vector[0] = k - np.sum(true_population_vector[1:])
        likelihood = multi_hypergeom_likelihood(observation_vector, hypothesized_pop_vector)
        belief[k_i] *= likelihood
    # Impossible hypotheses can produce nan/inf; zero them before renormalizing.
    belief[~np.isfinite(belief)] = 0
    belief /= belief.sum()
    return belief
# Now let's simulate eight rounds of capture, mark, and release, and plot the posterior belief over the population size at each step.
# +
import matplotlib.pyplot as plt
# There will be eight capture events; after each one we update and plot the
# posterior over the population size.
for observation_i in range(8):
    # Capture some animals
    captured_idx = capture(true_population_size)
    # Update posterior based on how many times each had been seen before
    belief = update_belief(
        candidate_population_sizes,
        belief,
        population_capture_counts,
        captured_idx
    )
    # Update the population counts
    population_capture_counts[captured_idx] += 1
    # Plot the posterior
    print(f'Capture event number {observation_i + 1}')
    plt.plot(candidate_population_sizes, belief)
    plt.xlabel('Population size'); plt.ylabel('Posterior belief'); plt.show()
# -
# ## 03/01/2020 edit: The number of times an animal has been seen adds no further information
# It turns out that during a capture event, the number of previously seen animals that had been previously seen once, twice, three times, etc., does not give any additional information about the total population size. In retrospect, I'm not sure why I believed that it would; it seems intuitive that it wouldn't.
#
# To simplify the discussion, let's forget the capture-recapture example and think of the conceptually simpler problem of sampling coloured balls from urns.
#
# Consider an urn containing balls of $c$ different colours, where there are $K_i$ balls of colour index $i$, and the total number of balls is $N = \sum_{i=1}^c K_i$.
#
# Consider the case that the $K_i$ are known for $i > 1$, and $K_1$ is unknown.
#
# Suppose we sample without replacement $n$ times, and in our sample there are $k_i$ balls of colour $i$, and so $n = \sum_{i=1}^c k_i$.
#
# If we have a prior over $K_1$, we can use the probability of having seen this sample given different values of $K_1$ to update our state of belief over its value. This is analogous to what we did in the article above.
#
# This likelihood comes from a multivariate hypergeometric distribution, and is given by:
#
# $p_1(k_1, \ldots, k_c; K_1) = \frac{ \prod_{i=1}^c {K_i \choose k_i} }{ N \choose n } $.
#
# What if instead of computing the likelihood of observing the given number of balls of each colour \{$k_1, \ldots, k_c$\}, we instead just computed the likelihood of having observed the given number of balls that are colour 1 in the sample? This likelihood comes from a univariate hypergeometric distribution, and is given by:
#
# $p_2(k_1, \ldots, k_c; K_1) = \frac{ {K_1 \choose k_1} {N - K_1 \choose n-k_1} }{ N \choose n } $.
#
# But the ratio of these two is
#
# $\frac{p_2(k_1, \ldots, k_c; K_1)}{p_1(k_1, \ldots, k_c; K_1)} = \frac{ {K_1 \choose k_1} {N - K_1 \choose n-k_1} }{ \prod_{i=1}^c {K_i \choose k_i} } = \frac{ { \sum_{i=2}^c K_i \choose \sum_{i=2}^c k_i} }{ \prod_{i=2}^c {K_i \choose k_i} },$
#
# which does not depend on $K_1$; the effect of using the multivariate likelihood over the univariate is just to multiply by a constant, having no effect on the posterior.
| _notebooks/2020-01-02-iterated-mark-and-recapture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Word2Vec
# +
# Если Вы запускаете ноутбук на colab или kaggle,
# выполните следующие строчки, чтобы подгрузить библиотеку dlnlputils:
# # !git clone https://github.com/Samsung-IT-Academy/stepik-dl-nlp.git && pip install -r stepik-dl-nlp/requirements.txt
# import sys; sys.path.append('./stepik-dl-nlp')
# +
# %load_ext autoreload
# %autoreload 2
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import torch
from torch import nn
from torch.nn import functional as F
import dlnlputils
from dlnlputils.data import tokenize_corpus, build_vocabulary, texts_to_token_ids, \
PaddedSequenceDataset, Embeddings
from dlnlputils.pipeline import train_eval_loop, predict_with_model, init_random_seed
from dlnlputils.visualization import plot_vectors
init_random_seed()
# -
# ## Загрузка данных и подготовка корпуса
# +
# If you run this notebook on Colab or Kaggle, prepend ./stepik-dl-nlp to the path
full_dataset = list(pd.read_csv('./datasets/nyt-ingredients-snapshot-2015.csv')['input'].dropna())
random.shuffle(full_dataset)
# 70/30 train/test split.
TRAIN_VAL_SPLIT = int(len(full_dataset) * 0.7)
train_source = full_dataset[:TRAIN_VAL_SPLIT]
test_source = full_dataset[TRAIN_VAL_SPLIT:]
print("Обучающая выборка", len(train_source))
print("Тестовая выборка", len(test_source))
print()
print('\n'.join(train_source[:10]))
# -
# tokenize the corpus
train_tokenized = tokenize_corpus(train_source)
test_tokenized = tokenize_corpus(test_source)
print('\n'.join(' '.join(sent) for sent in train_tokenized[:10]))
# build the vocabulary: drop words in >90% of documents or seen fewer than 5 times
vocabulary, word_doc_freq = build_vocabulary(train_tokenized, max_doc_freq=0.9, min_count=5, pad_word='<PAD>')
print("Размер словаря", len(vocabulary))
print(list(vocabulary.items())[:10])
# +
# map tokens to their vocabulary ids
train_token_ids = texts_to_token_ids(train_tokenized, vocabulary)
test_token_ids = texts_to_token_ids(test_tokenized, vocabulary)
print('\n'.join(' '.join(str(t) for t in sent)
                for sent in train_token_ids[:10]))
# -
plt.hist([len(s) for s in train_token_ids], bins=20);
plt.title('Гистограмма длин предложений');
# Pad/trim every sentence to a fixed length of 20 tokens.
MAX_SENTENCE_LEN = 20
train_dataset = PaddedSequenceDataset(train_token_ids,
                                      np.zeros(len(train_token_ids)),
                                      out_len=MAX_SENTENCE_LEN)
test_dataset = PaddedSequenceDataset(test_token_ids,
                                     np.zeros(len(test_token_ids)),
                                     out_len=MAX_SENTENCE_LEN)
print(train_dataset[0])
# ## Алгоритм обучения - Skip Gram Negative Sampling
#
# **Skip Gram** - предсказываем соседние слова по центральному слову
#
# **Negative Sampling** - аппроксимация softmax
#
# $$ W, D \in \mathbb{R}^{Vocab \times EmbSize} $$
#
# $$ \sum_{CenterW_i} P(CtxW_{-2}, CtxW_{-1}, CtxW_{+1}, CtxW_{+2} | CenterW_i; W, D) \rightarrow \max_{W,D} $$
#
# $$ P(CtxW_{-2}, CtxW_{-1}, CtxW_{+1}, CtxW_{+2} | CenterW_i; W, D) = \prod_j P(CtxW_j | CenterW_i; W, D) $$
#
# $$ P(CtxW_j | CenterW_i; W, D) = \frac{e^{w_i \cdot d_j}} { \sum_{j=1}^{|V|} e^{w_i \cdot d_j}} = softmax \simeq \frac{e^{w_i \cdot d_j^+}} { \sum_{j=1}^{k} e^{w_i \cdot d_j^-}}, \quad k \ll |V| $$
# +
def make_diag_mask(size, radius):
    """Square size x size float mask with two bands of width `radius` along the main diagonal.

    Entry (i, j) is 1.0 when 0 < |i - j| <= radius (the diagonal itself is
    excluded), and 0.0 elsewhere.
    """
    positions = torch.arange(size)
    distance = (positions.view(-1, 1) - positions.view(1, -1)).abs()
    in_band = (distance <= radius) & (distance > 0)
    return in_band.float()
make_diag_mask(10, 3)
# +
class SkipGramNegativeSamplingTrainer(nn.Module):
    """Skip-gram word2vec trainer with negative sampling; forward() returns the loss."""
    def __init__(self, vocab_size, emb_size, sentence_len, radius=5, negative_samples_n=5):
        super().__init__()
        self.vocab_size = vocab_size
        self.negative_samples_n = negative_samples_n
        # Center-word embeddings; index 0 is the padding token and is kept at zero.
        self.center_emb = nn.Embedding(self.vocab_size, emb_size, padding_idx=0)
        self.center_emb.weight.data.uniform_(-1.0 / emb_size, 1.0 / emb_size)
        self.center_emb.weight.data[0] = 0
        # Context-word embeddings, initialized the same way.
        self.context_emb = nn.Embedding(self.vocab_size, emb_size, padding_idx=0)
        self.context_emb.weight.data.uniform_(-1.0 / emb_size, 1.0 / emb_size)
        self.context_emb.weight.data[0] = 0
        # Mask selecting (center, context) position pairs within `radius` of each other.
        self.positive_sim_mask = make_diag_mask(sentence_len, radius)
    def forward(self, sentences):
        """sentences - Batch x MaxSentLength - token identifiers"""
        batch_size = sentences.shape[0]
        center_embeddings = self.center_emb(sentences)  # Batch x MaxSentLength x EmbSize
        # Score similarity against the actual neighbouring words.
        positive_context_embs = self.context_emb(sentences).permute(0, 2, 1)  # Batch x EmbSize x MaxSentLength
        positive_sims = torch.bmm(center_embeddings, positive_context_embs)  # Batch x MaxSentLength x MaxSentLength
        positive_probs = torch.sigmoid(positive_sims)
        # Push up the estimated probability of these co-occurring word pairs.
        positive_mask = self.positive_sim_mask.to(positive_sims.device)
        positive_loss = F.binary_cross_entropy(positive_probs * positive_mask,
                                               positive_mask.expand_as(positive_probs))
        # Draw random "negative" words.
        negative_words = torch.randint(1, self.vocab_size,
                                       size=(batch_size, self.negative_samples_n),
                                       device=sentences.device)  # Batch x NegSamplesN
        negative_context_embs = self.context_emb(negative_words).permute(0, 2, 1)  # Batch x EmbSize x NegSamplesN
        negative_sims = torch.bmm(center_embeddings, negative_context_embs)  # Batch x MaxSentLength x NegSamplesN
        # Push down the estimated probability of these word pairs co-occurring.
        negative_loss = F.binary_cross_entropy_with_logits(negative_sims,
                                                           negative_sims.new_zeros(negative_sims.shape))
        return positive_loss + negative_loss
def no_loss(pred, target):
    """Dummy criterion for training loops whose model already returns its own loss."""
    del target  # unused: the model's forward output *is* the loss
    return pred
# -
# ## Training
# Embedding size 100, context radius 5, 25 negative samples per sentence.
trainer = SkipGramNegativeSamplingTrainer(len(vocabulary), 100, MAX_SENTENCE_LEN,
                                          radius=5, negative_samples_n=25)
# `train_eval_loop` is project code (not shown here); the model computes its
# own loss, hence the `no_loss` dummy criterion.
best_val_loss, best_model = train_eval_loop(trainer,
                                            train_dataset,
                                            test_dataset,
                                            no_loss,
                                            lr=1e-2,
                                            epoch_n=2,
                                            batch_size=8,
                                            device='cpu',
                                            early_stopping_patience=10,
                                            max_batches_per_epoch_train=2000,
                                            max_batches_per_epoch_val=len(test_dataset),
                                            lr_scheduler_ctor=lambda optim: torch.optim.lr_scheduler.ReduceLROnPlateau(optim, patience=1, verbose=True))
# If you run this notebook on colab or kaggle, prepend ./stepik-dl-nlp to the path
torch.save(trainer.state_dict(), 'models/sgns.pth')
# If you run this notebook on colab or kaggle, prepend ./stepik-dl-nlp to the path
trainer.load_state_dict(torch.load('models/sgns.pth'))
# ## Exploring the learned vectors
# `Embeddings` and `plot_vectors` are project helpers (not shown in this file).
# The center-word table is used as the final embedding matrix.
embeddings = Embeddings(trainer.center_emb.weight.detach().cpu().numpy(), vocabulary)
embeddings.most_similar('chicken')
# presumably cake - cacao + cheese analogy; confirm against the helper's source
embeddings.analogy('cake', 'cacao', 'cheese')
test_words = ['salad', 'fish', 'salmon', 'sauvignon', 'beef', 'pork', 'steak', 'beer', 'cake', 'coffee', 'sausage', 'wine', 'merlot', 'zinfandel', 'trout', 'chardonnay', 'champagne', 'cacao']
test_vectors = embeddings.get_vectors(*test_words)
print(test_vectors.shape)
fig, ax = plt.subplots()
fig.set_size_inches((10, 10))
plot_vectors(test_vectors, test_words, how='svd', ax=ax)
# ## Training Word2Vec with Gensim
import gensim

# NOTE(review): `size=` and `iter=` are gensim < 4.0 parameter names
# (renamed to `vector_size=` / `epochs=` in 4.0), and `wv.vocab` was removed
# in 4.0 — this cell assumes a pinned gensim 3.x environment.
word2vec = gensim.models.Word2Vec(sentences=train_tokenized, size=100,
                                  window=5, min_count=5, workers=4,
                                  sg=1, iter=10)
word2vec.wv.most_similar('chicken')

# Keep only the test words that actually made it into the gensim vocabulary
# (min_count can drop rare words).
gensim_words = [w for w in test_words if w in word2vec.wv.vocab]
gensim_vectors = np.stack([word2vec.wv[w] for w in gensim_words])
fig, ax = plt.subplots()
fig.set_size_inches((10, 10))
# BUG FIX: labels must be the filtered `gensim_words`, not `test_words` —
# otherwise labels and points are misaligned whenever a word is out of vocab.
plot_vectors(gensim_vectors, gensim_words, how='svd', ax=ax)
# ## Loading a pretrained Word2Vec
#
# Sources of ready-made vectors:
#
# https://rusvectores.org/ru/ - for Russian
#
# https://wikipedia2vec.github.io/wikipedia2vec/pretrained/ - many languages
import gensim.downloader as api

available_models = api.info()['models'].keys()
print('\n'.join(available_models))
pretrained = api.load('word2vec-google-news-300')  # > 1.5 GB!
pretrained.most_similar('cheese')
pretrained.most_similar(positive=['man', 'queen'], negative=['king'])

# Keep only the test words present in the pretrained vocabulary
# (`.vocab` is the gensim < 4.0 API).
pretrained_words = [w for w in test_words if w in pretrained.vocab]
pretrained_vectors = np.stack([pretrained[w] for w in pretrained_words])
fig, ax = plt.subplots()
fig.set_size_inches((10, 10))
# BUG FIX: labels must be the filtered `pretrained_words`, not `test_words` —
# otherwise each point can be labeled with the wrong word.
plot_vectors(pretrained_vectors, pretrained_words, how='svd', ax=ax)
# ## Заключение
#
# * Реализовали Skip Gram Negative Sampling на PyTorch
# * Обучили на корпусе рецептов
# * Сходство слов модель выучила неплохо
# * Для аналогий мало данных
# * Обучили SGNS с помощью библиотеки Gensim
# * Загрузили веса Word2Vec, полученные с помощью большого корпуса (GoogleNews)
# * Списки похожих слов отличаются!
# * Аналогии работают
| task2_word_embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import scipy.sparse as sp_sparse
import scanpy as sc
import numpy as np
from anndata import AnnData
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
# Sample sheet: maps each subject to its clinical group.
sample_info = pd.read_csv('[PATH]/info/HTO_barcode_info.csv')
subject2clinical = {s:c for s, c in zip(sample_info.subject_id, sample_info.clinical)}
# Tissue ids come in pairs — two ids map to each tissue.
tid2tissue = {1: 'PFC',
              2: 'PFC',
              3: 'medulla',
              4: 'medulla',
              5: 'choroidplexus',
              6: 'choroidplexus'}
# +
# Cell-level metadata: attach the clinical label per cell, strip any suffix
# after '_' from the tissue name, and normalize the grouping columns to
# categorical dtype (so later groupby produces the full category product).
df_meta = pd.read_csv('../processed_data/clean_data/metadata.csv', index_col = 0)
df_meta['clinical'] = [subject2clinical[s] for s in df_meta.subject_id]
df_meta.tissue = [t.split('_')[0] for t in df_meta.tissue]
df_meta.tissue = df_meta.tissue.astype('category')
df_meta.subject_id = df_meta.subject_id.astype('category')
df_meta.celltype = df_meta.celltype.astype('category')
df_meta.pid = df_meta.pid.astype('category')
features = ['celltype', 'subject_id', 'pid', 'tissue', 'tissue_id', 'clinical']
df_meta = df_meta[features]
# -
from collections import Counter
Counter(pd.read_csv('../processed_data/clean_data/metadata.csv', index_col = 0).pid)
# +
def get_freq_mat(df_count):
    ## input: df_count, assumed indexed by [(pid, ct)]
    """Reshape a (pid, ..., celltype)-indexed count Series into matrices.

    Assumes the index enumerates all cell types for each sample block, in a
    fixed order. Rows whose counts are all zero are dropped. Returns
    (count_mat, freq_mat, logfreq_mat, index_mat) where freq_mat rows sum
    to 1 and logfreq_mat = log10(freq + 1e-5).
    """
    celltype_count = len({key[-1] for key in df_count.index})
    index_mat = df_count.index.values.reshape(-1, celltype_count)
    # sanity check: every row of the reshaped index shares one sample id
    for row in index_mat:
        assert len({key[0] for key in row}) == 1
    count_mat = df_count.values.reshape((-1, celltype_count))
    keep = count_mat.sum(axis=1) > 0
    count_mat = count_mat[keep]
    index_mat = index_mat[keep]
    freq_mat = count_mat / count_mat.sum(axis=1, keepdims=True)
    logfreq_mat = np.log10(freq_mat + 1e-5)
    return count_mat, freq_mat, logfreq_mat, index_mat
# Lookup tables: pid -> subject -> clinical group.
id2pid = sorted(set(df_meta.pid))  # NOTE(review): overwritten from index_mat below
pid2subject = {p:s for s, p in zip(df_meta.subject_id, df_meta.pid)}
id2clinical = np.array([subject2clinical[pid2subject[p]] for p in id2pid])  # NOTE(review): also overwritten below
# Cell counts per (pid, tissue_id, celltype); any surviving column works for .count()
df_count = df_meta.copy()
df_count = df_count.groupby(['pid', 'tissue_id', 'celltype']).count().iloc[:, 0]
df_count = df_count.fillna(0)
count_mat, freq_mat, logfreq_mat, index_mat = get_freq_mat(df_count)
# Identity vectors aligned with the rows/columns of the matrices above.
id2ct = np.array([i[-1] for i in index_mat[0, :]])
id2pid = np.array([i[0] for i in index_mat[:, 0]])
id2subject = np.array([pid2subject[p] for p in id2pid])
id2clinical = np.array([subject2clinical[pid2subject[i[0]]] for i in index_mat[:, 0]])
id2tissue_id = np.array([int(i[1]) for i in index_mat[:, 0]])
id2tissue = np.array([tid2tissue[int(i[1])] for i in index_mat[:, 0]])
# +
#freq_mat.ravel(),
# Long-format table for the seaborn plots below; clinical labels keep only
# the part before '_'.
df_plot = pd.DataFrame({'freq': freq_mat.ravel(),
                        'celltype': [i[-1] for i in index_mat.ravel()],
                        'clinical': [subject2clinical[pid2subject[i[0]]].split('_')[0] for i in index_mat.ravel()],
                        'tissue': [tid2tissue[int(i[1])] for i in index_mat.ravel()]
                        })
# +
import seaborn as sns

# One panel per tissue: strip plot (individual samples, gray) overlaid on
# width-scaled violins split by clinical group.
fig, axes = plt.subplots(3, 1, figsize = (12, 6), sharex=True, sharey=True)
for ip, t in enumerate(['PFC', 'medulla', 'choroidplexus']):
    tmp = df_plot[df_plot.tissue == t]
    ax = sns.stripplot(x="celltype", y="freq", size = 3, hue = 'clinical',
                       data=tmp, ax = axes[ip], color = 'gray', jitter=0.25, dodge=True)
    ax = sns.violinplot(x="celltype", y="freq", hue="clinical", scale = 'width',
                        data=tmp, palette="muted", ax = axes[ip], cut = 0, width = .75)
    plt.sca(ax)
    # nicer display name for the choroid plexus panel
    if t != 'choroidplexus':
        plt.title(t, fontsize = 12)
    else:
        plt.title('choroid plexus', fontsize = 12)
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontsize = 12)
    # keep the legend only on the first panel
    if ip != 0:
        ax.get_legend().remove()
    else:
        plt.legend()
# shared y-axis label for all three panels
fig.text(-0.02, 0.5, 'cell fraction', va='center',
         rotation='vertical', fontsize = 12)
plt.tight_layout()
plt.savefig('../figs/cell_fraction_all_violin.pdf', bbox_inches='tight')
plt.savefig('../figs/png/cell_fraction_all_violin.png', dpi = 600, bbox_inches='tight')
plt.show()
# -
| celltype_freq/analysis_cell_counting-violin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kevincong95/cs231n-emotiw/blob/master/notebooks/audio/1.0-la-audio_openl3_hyperparam_tuning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="D9CnBuS7q9lT" colab_type="code" colab={}
from __future__ import print_function
# + id="3Jb5s6L0qJoj" colab_type="code" outputId="95ff4b3b-5322-4627-c756-1dd0370a8bc0" colab={"base_uri": "https://localhost:8080/", "height": 35}
# %tensorflow_version 1.x
# + id="-ND1xWvpqKMU" colab_type="code" colab={}
import tensorflow as tf
import tensorflow
# + id="3oKgScP1lO-w" colab_type="code" outputId="960ea8a2-fbf1-4ed5-b655-28f73fce9d75" colab={"base_uri": "https://localhost:8080/", "height": 35}
print(tf.__version__)
# + id="z2q8ZK_gSsEF" colab_type="code" outputId="8347f329-cbea-4266-d6b7-976f1a0ea02e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install hyperas
# + id="h5ph7aKdeAqz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5f34ebb0-3860-4739-91cf-a4c9e0891971"
import numpy as np
from tensorflow import keras
from hyperopt import Trials, STATUS_OK, tpe
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils
from hyperas import optim
from hyperas.distributions import choice, uniform
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
# + id="T1vqT7vGSRWG" colab_type="code" colab={}
# TO DO
import numpy as np
def data():
    """
    Data providing function:
    This function is separated from create_model() so that hyperopt
    won't reload data for each evaluation run.

    Returns:
        (x_train, y_train, x_test, y_test) loaded from files on Google
        Drive; the "test" split returned here is actually the validation
        split.

    NOTE(review): the .pkl files are read with np.load(allow_pickle=True),
    which assumes they were written by numpy (np.save) — confirm the
    producer; unpickling is only safe for trusted files.
    """
    X_train = np.load('/content/drive/My Drive/Machine-Learning-Projects/cs231n-project/notebooks/audio-final/audio-pickle-all-X-openl3-train.pkl', allow_pickle=True)
    Y_train = np.load('/content/drive/My Drive/Machine-Learning-Projects/cs231n-project/notebooks/audio-final/audio-pickle-all-Y-openl3-train.pkl' , allow_pickle=True)
    Y_val = np.load('/content/drive/My Drive/Machine-Learning-Projects/cs231n-project/notebooks/audio-final/audio-pickle-all-Y-openl3-val.pkl' , allow_pickle=True)
    X_val = np.load('/content/drive/My Drive/Machine-Learning-Projects/cs231n-project/notebooks/audio-final/audio-pickle-all-X-openl3-val.pkl' , allow_pickle=True)
    x_train, y_train, x_test, y_test = X_train , Y_train , X_val , Y_val
    return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test):
    """
    Model providing function for hyperas:

    Builds a CNN (over the time axis) + BiLSTM classifier on top of OpenL3
    audio embeddings and trains it briefly. Hyperas textually replaces the
    double-curly-bracket expressions with sampled hyperparameter values.

    Return value is a dictionary with the customary hyperopt keys:
    - loss: numeric evaluation metric to be minimized (negative best val acc)
    - status: STATUS_OK
    - model: the trained Keras model, so it can be reused later.
    """
    inputs = keras.Input(shape=[None,6144])
    # CNN Portion: convolves on the time domain, i.e. it is NOT time distributed.
    x = tf.keras.layers.Conv1D(64, 2, activation='selu')(inputs)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.MaxPooling1D(pool_size=2, strides=1, padding='valid')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.Conv1D(512, 2, activation='selu')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.MaxPooling1D(pool_size=2, strides=1, padding='valid')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.Conv1D(512, 2, activation='selu')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.MaxPooling1D(pool_size=2, strides=1, padding='valid')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    # Recurrent Portion
    x = tf.keras.layers.Bidirectional(keras.layers.LSTM(10, return_sequences=True, input_shape=[None, 6144] , dropout=0.2 , activation='selu'))(x)
    x = tf.keras.layers.Bidirectional(keras.layers.LSTM(5))(x)
    x = tf.keras.layers.Dense(32 , activation='selu')(x)
    x = tf.keras.layers.Dropout(0.4)(x)
    outputs = tf.keras.layers.Dense(3 , activation='softmax')(x)
    # Define Hyperparams and Compile. The initial learning rate is sampled by
    # hyperas from U(1e-6, 1e-4).
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate={{uniform(1e-6, 1e-4)}},
        decay_steps=10000,
        decay_rate=0.9)
    model = keras.Model(inputs=inputs, outputs=outputs)
    # BUG FIX: the original built an Adam optimizer and immediately overwrote
    # it with Adagrad; the dead Adam assignment is removed (Adagrad was the
    # optimizer actually in effect, so behavior is unchanged).
    opt = keras.optimizers.Adagrad(learning_rate=lr_schedule)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=opt , metrics=['accuracy'])
    result = model.fit(x_train , y_train , epochs=10 , batch_size=32, validation_data=(x_test , y_test))
    # get the highest validation accuracy of the training epochs.
    # NOTE: 'val_acc' is the TF 1.x history key (this notebook pins
    # %tensorflow_version 1.x); newer tf.keras uses 'val_accuracy'.
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
# + id="xtlmcXRooWjP" colab_type="code" outputId="afb90289-f9be-44e0-8060-aec3aab5b5aa" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Run the hyperas search (5 evaluations) and keep the best model.
best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials() , notebook_name='audio_openl3_hyperparam_tuning')
X_train, Y_train, X_test, Y_test = data()
print("Evaluation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
# BUG FIX: the path literal started with a stray quote character
# ("'/content/...), and the best model was never actually written to disk.
save_path_final = "/content/drive/My Drive/Machine-Learning-Projects/cs231n-project/models/openl3-cnn-lstm-hyperparam-tuning.h5"
best_model.save(save_path_final)
print("Best performing model chosen hyper-parameters:")
print(best_run)
| notebooks/audio/1.0-la-audio_openl3_hyperparam_tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.chdir('../')

import DeepPurpose.DTI as models
from DeepPurpose.utils import *
from DeepPurpose.dataset import *
import Processing.dataset_filter as processors
import Processing.testing as testing

if not os.path.exists('./result/DeepDTA'):
    os.makedirs('./result/DeepDTA')
# -
# The three rounds r1/r2/r3 are evaluated identically, so loop instead of
# copy-pasting the same three statements (same behavior, same order).
# NOTE(review): `error_bad_lines` is deprecated in pandas >= 1.3
# (replaced by on_bad_lines); kept for compatibility with the pinned env.
for run in ('r1', 'r2', 'r3'):
    df = pd.read_csv('./data/' + run + '/title_' + run + '_test45021.csv', sep = ',', error_bad_lines=False)
    X_drug, X_target, y = df['Drug'].values, df['Target'].values, df['Label'].values
    testing.batch_predict('./result/DeepDTA/' + run + '/model_' + run + '_320k_random_err_uniform_3_100epochs',
                          run + ' random_err 320k', X_drug, X_target, y)
| Processing/DeepDTA-kdki-random_err-uniform-3-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/desmond-rn/projet-inverse-2d/blob/master/src/notebook/Regression90x90.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="qxCUfRNWGGvi"
# L'objectif est de prédire la position et la hauteur d'un saut de densité (créneau) $\rho$, à partir de $E_l, E_d, E_r, E_u$ (energie des photons), $F_l, F_d, F_r, F_u$ (flux des photons), et $T_l, T_d, T_r, T_u$ (temperature du milieu) sur les bords du domaine en tout temps.
# + id="vU6rE5DnlATL" colab_type="code" colab={}
# %reset -f
# + colab_type="code" id="JtAq0O5XGELH" colab={}
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pandas as pd
import keras
import random
import sklearn
from ast import literal_eval as l_eval
# + id="34nIRiVYjixy" colab_type="code" colab={}
from keras import layers
from keras import models
from keras import utils
from keras import losses
from keras import metrics
from keras import optimizers
from keras import backend as K
# NOTE(review): this import shadows `keras.metrics` imported just above —
# from here on, the name `metrics` refers to sklearn.metrics.
from sklearn import metrics
# + id="_613bT0QlATU" colab_type="code" colab={}
np.set_printoptions(precision = 3)
# + [markdown] colab_type="text" id="iNXZiUPPFgj7"
# # PREPARATION
# + [markdown] colab_type="text" id="lCNMvubIgyJW"
# ## Chargement des données
# + id="pCjoUMLNoH5p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b3f1a78e-c230-4588-ef3f-f2a93bc03455"
""" DEPUIS GOOGLE DRIVE """
# from google.colab import drive
# drive.mount('/gdrive')
# + id="bfhzPvyEom1Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="91a8c813-9d2f-4612-83e1-0b5652a2aa0f"
# %cd ~
# %cd /gdrive/My\ Drive
# %cd Colab\ Notebooks/Projet\ inverse/data/PQT
print()
# %ls
# + [markdown] colab_type="text" id="B51nSgjiGGn5"
# Les donnees spatiales et temporelles (qui contiennent $\rho$, $E$, $F$, et $T$)
# + id="eiTf-qS0bRtI" colab_type="code" colab={}
""" Fonction pour concatener des dataframes"""
def join_dataframes(parts):
df = pd.read_parquet("df_rho_"+parts[0]+".pqt")
for part in parts[1:]:
tmp = pd.read_parquet("df_rho_"+part+".pqt")
df = pd.concat([df, tmp], ignore_index=True)
return df
# + [markdown] id="lKsgynJny7k6" colab_type="text"
# - **PART 1**: la hauteur vaut 1, on a 9 positions [0.2, 0.5, 0.8] x [0.2, 0.5, 0.8] et 12 sources
#
# - **PART 2**: la hauteur vaut 10, on a 9 positions [0.2, 0.5, 0.8] x [0.2, 0.5, 0.8] et 12 sources
# - **PART 3**: la hauteur vaut 10, on a 16 positions [0.35, 0.45, 0.55, 0.65] x [0.35, 0.45, 0.55, 0.65] et 12 sources
# - **PART 4**: la hauteur vaut 10, on a 4 positions [0.25, 0.75] x [0.25, 0.75] et 12 sources
#
# - **PART 5**: la hauteur vaut 4, on a 9 positions [0.35, 0.5, 0.65] x [0.35, 0.5, 0.65] et 12 sources
#
# - **PART 6**: la hauteur vaut 7, on a 6 positions [0.2, 0.4] x [0.2, 0.4, 0.6, 0.8] et 12 sources
#
#
# + [markdown] id="o2ProToG8vvJ" colab_type="text"
# - **PART 10**: la hauteur vaut 10, on a 4 positions [0.3, 0.7] x [0.3, 0.7] et 4 sources
# - **PART 11**: la hauteur vaut 10, on a 40 positions [0.200, 0.232] x [0.2 ... 0.8] et 4 sources
# - **PART 12**: la hauteur vaut 10, on a 40 positions [0.263, 0.295] x [0.2 ... 0.8] et 4 sources
# - **PART 13**: la hauteur vaut 10, on a 40 positions [0.326, 0.358] x [0.2 ... 0.8] et 4 sources
# - **PART 14**: la hauteur vaut 10, on a 40 positions [0.389, 0.421] x [0.2 ... 0.8] et 4 sources
# - **PART 15**: la hauteur vaut 10, on a 40 positions [0.453, 0.484] x [0.2 ... 0.8] et 4 sources
# - **PART 16**: la hauteur vaut 10, on a 40 positions [0.516, 0.547] x [0.2 ... 0.8] et 4 sources
# - **PART 17**: la hauteur vaut 10, on a 40 positions [0.579, 0.611] x [0.2 ... 0.8] et 4 sources
# - **PART 18**: la hauteur vaut 10, on a 40 positions [0.642, 0.674] x [0.2 ... 0.8] et 4 sources
# - **PART 19**: la hauteur vaut 10, on a 40 positions [0.705, 0.737] x [0.2 ... 0.8] et 4 sources
# - **PART 20**: la hauteur vaut 10, on a 40 positions [0.768, 0.800] x [0.2 ... 0.8] et 4 sources
#
#
#
# + id="sUDefv_LZiMW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="40eafaa9-a904-4d4c-aa32-c69c09d04674"
df = join_dataframes(["part2", "part3", "part4"])
df.head(2)
# + id="JIkwLfHT9Dak" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="5f705248-9f45-4f86-c075-9ec9a46312ff"
""" Dans les dataframes de 10 a 20, on a seulement 4 sources, ce qui necssite un traitement special """
parts = ["part"+str(i) for i in range(10, 21)]
print("la datafarme 'df_prime' contient:", parts)
print()
df_prime = join_dataframes(parts)
df_prime.head(2)
# + [markdown] colab_type="text" id="99gWfb-KtxHq"
# Préréquis pour cet apprentissage. Toutes les données doivent etre similaires sur un certain nombre de leurs paramètres.
# + id="_vZfDviWlATr" colab_type="code" colab={}
t_f = 0.0075
x_min = 0
x_max = 1
for i in range(len(df)):
assert df.loc[i, 't_f'] == t_f
assert df.loc[i, 'E_0_expr'] == "0.01372*(5^4)"
# etc...
assert df.loc[i, 'x_min'] == x_min
assert df.loc[i, 'x_max'] == x_max
# + [markdown] colab_type="text" id="7vAh6z14br_a"
# ## Visualisation
# + colab_type="code" id="c1-xpr7mHr_A" colab={}
""" Visualisons de la densite sur le domaine """
# pour placer les indices entre 1 et N
def set_ticks(ax):
    """Relabel the axis ticks so grid indices read 1..N / 1..M instead of 0-based.

    NOTE(review): relies on module-level globals N and M (grid dimensions);
    they are not defined in this part of the file — confirm they exist
    before this is called.
    """
    spacing = 20  # one tick every 20 grid cells
    ax.set_xticks(np.arange(0, N, spacing))
    ax.set_xticklabels(np.arange(1, N+1, spacing))
    ax.set_yticks(np.arange(0, M, spacing))
    ax.set_yticklabels(np.arange(1, M+1, spacing))
def min_max(mat, dim=2):
    """Return the NaN-ignoring (min, max) of `mat`, reduced over its first
    `dim` axes; remaining trailing axes (if any) are kept, so the result
    broadcasts against `mat` for normalization."""
    reduced_axes = tuple(range(dim))
    return np.nanmin(mat, axis=reduced_axes), np.nanmax(mat, axis=reduced_axes)
def plot_output(ax, df, index, cmap="viridis"):
    """Draw the density field rho of simulation `index` on `ax` as an image,
    scaled to [min, max] of the data and to the domain's physical extent."""
    rho = np.stack(df.loc[index, 'rho'])  # list of rows -> 2-D array
    rho_min, rho_max = min_max(rho)
    # print("rho:", (rho_min, rho_max))
    img = ax.imshow(rho,
                    origin='lower',
                    cmap=cmap,
                    interpolation='none',
                    aspect='auto',
                    vmin=rho_min, vmax=rho_max,
                    extent=[df.loc[index, "x_min"], df.loc[index, "x_max"], df.loc[index, "y_min"], df.loc[index, "y_max"]])
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_title("densité", size="x-large")
# + colab_type="code" id="KKtwWvMLHr_L" colab={}
""" Visualisons des signaux sur les bords du domaine """
def plot_input(ax, df, index, cmap="viridis", signal="E"):
    """Plot the four boundary traces of `signal` (suffixes _u/_d/_l/_r) for
    simulation `index` on the cross of a 3x3 axes grid: top/bottom panels
    are space-vs-time images, left/right panels are time-vs-space images."""
    # build the waves: stack per-time-step boundary records into 2-D arrays
    up = np.stack(df.loc[index, signal+"_u"])
    down = np.stack(df.loc[index, signal+"_d"])
    left = np.stack(df.loc[index, signal+"_l"])
    right = np.stack(df.loc[index, signal+"_r"])
    x_min = df.loc[index, "x_min"]
    x_max = df.loc[index, "x_max"]
    y_min = df.loc[index, "y_min"]
    y_max = df.loc[index, "y_max"]
    t_min = df.loc[index, "t_0"]
    t_max = df.loc[index, "t_f"]
    ax[0, 1].imshow(up, aspect='auto', origin="lower", cmap=cmap, extent=[x_min, x_max, t_min, t_max])
    ax[2, 1].imshow(down, aspect='auto', origin="lower", cmap=cmap, extent=[x_min, x_max, t_min, t_max])
    ax[1, 0].imshow(left.T, aspect='auto', origin="lower", cmap=cmap, extent=[t_min, t_max, y_min, y_max])
    ax[1, 2].imshow(right.T, aspect='auto', origin="lower", cmap=cmap, extent=[t_min, t_max, y_min, y_max])
    ax[1, 0].set_ylabel('y')
    # BUG FIX: these two labels were attached to ax[0, 2], a corner axis that
    # plot_io() removes from the figure; the right-hand panel is ax[1, 2]
    # (the titles below already target ax[1, 2]).
    ax[1, 2].set_ylabel('y')
    ax[0, 1].set_xlabel('x')
    ax[2, 1].set_xlabel('x')
    ax[0, 1].set_ylabel('t')
    ax[2, 1].set_ylabel('t')
    ax[1, 0].set_xlabel('t')
    ax[1, 2].set_xlabel('t')
    ax[0, 1].set_title(signal+' en haut',size="medium", y=0.99)
    ax[2, 1].set_title(signal+' en bas',size="medium", y=0.99)
    ax[1, 0].set_title(signal+' à gauche',size="medium", y=0.99)
    ax[1, 2].set_title(signal+' à droite',size="medium", y=0.99)
# + colab_type="code" id="uFn_xccAHr_O" colab={}
def plot_io(ax, df, index, signal, cmap="viridis"):
    """Arrange a 3x3 axes grid: boundary signals on the cross (plot_input),
    the density field in the center (plot_output); corner axes are removed."""
    for i in range(3):
        for j in range(3):
            if (i == 1 or j == 1):
                ax[i, j].set_aspect("equal")
            else:
                ax[i, j].remove()  # drop the unused corners
    plot_input(ax, df, index, signal=signal, cmap=cmap)
    plot_output(ax[1,1], df, index, cmap=cmap)
# + id="W3rchLEoNWZD" colab_type="code" colab={}
def plot_all_sources(df, index_list, signal, base_fig_size, cmap="viridis"):
    """Draw one plot_io panel per simulation in `index_list`, side by side.

    NOTE(review): the `cmap` parameter is accepted but ignored — panels
    alternate between hard-coded "viridis" and "inferno"; confirm intent.
    """
    fig,ax = plt.subplots(nrows=3, ncols=3*len(index_list), figsize=(base_fig_size[0]*len(index_list), base_fig_size[1]))
    for i, index in enumerate(index_list):
        # alternate colormaps so adjacent panels are visually distinct
        if i%2 == 0:
            plot_io(ax[:, 3*i:3*(i+1)], df, index, signal=signal, cmap="viridis")
        else:
            plot_io(ax[:, 3*i:3*(i+1)], df, index, signal=signal, cmap="inferno")
    plt.suptitle("observation de rho et de "+signal+" pour les indices "+str(index_list), size="xx-large", y=1.02)
    plt.tight_layout();
# + colab_type="code" id="uPFYFOFEHr_T" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="02efac33-302e-45aa-9997-7001049b735c"
plot_all_sources(df, index_list=[4, 5, 6, 7], signal="E", base_fig_size=(8, 8), cmap="viridis")
# + id="-6y6NIAtOowE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="6f2160d2-4a9d-4dab-a7a1-bc417b6980e5"
plot_all_sources(df, index_list=[52, 53, 54, 55], signal="E", base_fig_size=(8, 8), cmap="viridis")
# + [markdown] id="CkBpt4uj41JL" colab_type="text"
# ## Création des inputs X
# + [markdown] id="txy0h4islAT6" colab_type="text"
# Pour les dataframes de 1 a 6, il faut **12** simulations pour avoir un sample complet. Mais pour les dataframes de 10 a 20, il faut **4** simulations pour avoir un sample complet.
# + id="58vtqZsTqwo7" colab_type="code" colab={}
def resample(input, shape_output):
    """Downsample `input` along its first axis to `shape_output` by keeping
    every step-th row, where step = len(input) // shape_output[0].

    NOTE: positions past the end of `input` (possible when step truncates)
    are left uninitialized, matching np.empty semantics.
    """
    n_in = input.shape[0]
    n_out = shape_output[0]
    output = np.empty(shape_output, dtype=float)
    step = n_in // n_out
    src = 0
    for dst in range(n_out):
        if src < n_in:
            output[dst] = input[src]
        src += step
    return output
# + id="hNyKGz1KrJMx" colab_type="code" colab={}
# """ Generation les inputs X : 1er methode """
# """ On a 12 sources reparties sur 3 bords (haut, gauche et bas) et en entree du reseau de neuronnes le signal sur la droite"""
# n_sources = 12
# n_samples = len(df) // n_sources
# shape_X = (df.loc[0, "step_count"] // 10, df.loc[0, "N"]) # car N = M
# X = np.empty(shape=(n_samples, n_sources*shape_X[0], shape_X[1], 3), dtype=np.float32)
# for i in range(0, len(df), n_sources):
# E = resample(np.stack(df.loc[i, 'E_r']), shape_X)
# F = resample(np.stack(df.loc[i, 'F_r']), shape_X)
# T = resample(np.stack(df.loc[i, 'T_r']), shape_X)
# rho = df.loc[i, 'rho_expr']
# for j in range(1, n_sources):
# E = np.vstack([E, resample(np.stack(df.loc[i+j, 'E_r']), shape_X)])
# F = np.vstack([F, resample(np.stack(df.loc[i+j, 'F_r']), shape_X)])
# T = np.vstack([T, resample(np.stack(df.loc[i+j, 'T_r']), shape_X)])
# assert rho == df.loc[i+j, 'rho_expr']
# index = i // n_sources
# X[index, :, :, 0] = E
# X[index, :, :, 1] = F
# X[index, :, :, 2] = T
# print("X shape =", X.shape)
# + id="9CGKutzREH-c" colab_type="code" colab={}
# """ Generation les inputs X: 2eme methode """
# """ Pour constrire les entrees X a partir de 'df'. Chaque sample correspond a 'n_sources' et les sources sur la gauche commencent a 'start'"""
# shape_X = (df.loc[0, "step_count"] // 15, df.loc[0, "N"]) # car N = M
# def make_data_X(df, n_sources, start):
# n_samples = len(df) // n_sources
# X = np.zeros(shape=(n_samples, 12*shape_X[0], shape_X[1], 3), dtype=np.float32)
# index = 0
# for i in range(0, len(df), n_sources):
# E1 = resample(np.stack(df.loc[i+start, 'E_d']), shape_X)
# E2 = resample(np.stack(df.loc[i+start, 'E_r']), shape_X)
# E3 = resample(np.stack(df.loc[i+start, 'E_u']), shape_X)
# F1 = resample(np.stack(df.loc[i+start, 'F_d']), shape_X)
# F2 = resample(np.stack(df.loc[i+start, 'F_r']), shape_X)
# F3 = resample(np.stack(df.loc[i+start, 'F_u']), shape_X)
# T1 = resample(np.stack(df.loc[i+start, 'T_d']), shape_X)
# T2 = resample(np.stack(df.loc[i+start, 'T_r']), shape_X)
# T3 = resample(np.stack(df.loc[i+start, 'T_u']), shape_X)
# rho = df.loc[i+start, 'rho_expr']
# assert df.loc[i+start, 'E_l_expr'] != "neumann"
# assert df.loc[i+start, 'E_d_expr'] == "neumann"
# assert df.loc[i+start, 'E_r_expr'] == "neumann"
# assert df.loc[i+start, 'E_u_expr'] == "neumann"
# for j in range(start+1, start+4):
# E1 = np.vstack([E1, resample(np.stack(df.loc[i+j, 'E_d']), shape_X)])
# E2 = np.vstack([E2, resample(np.stack(df.loc[i+j, 'E_r']), shape_X)])
# E3 = np.vstack([E3, resample(np.stack(df.loc[i+j, 'E_u']), shape_X)])
# F1 = np.vstack([F1, resample(np.stack(df.loc[i+j, 'F_d']), shape_X)])
# F2 = np.vstack([F2, resample(np.stack(df.loc[i+j, 'F_r']), shape_X)])
# F3 = np.vstack([F3, resample(np.stack(df.loc[i+j, 'F_u']), shape_X)])
# T1 = np.vstack([T1, resample(np.stack(df.loc[i+j, 'T_d']), shape_X)])
# T2 = np.vstack([T2, resample(np.stack(df.loc[i+j, 'T_r']), shape_X)])
# T3 = np.vstack([T3, resample(np.stack(df.loc[i+j, 'T_u']), shape_X)])
# assert rho == df.loc[i+j, 'rho_expr']
# assert df.loc[i+j, 'E_l_expr'] != "neumann"
# X[index, :, :, 0] = np.vstack([E1, E2, E3])
# X[index, :, :, 1] = np.vstack([F1, F2, F3])
# X[index, :, :, 2] = np.vstack([T1, T2, T3])
# index += 1
# return X
# + id="OQziRYVTGibo" colab_type="code" colab={}
""" Generation les inputs X: 3eme methode """
""" On utilise les signaux sur les quatre bords """
resample_rate = 0.1
time_size = int(df.loc[0, "step_count"] * resample_rate) # On garde 10% des iterations (40 sur 404)
space_size = df.loc[0, "N"] # On conserve tout l'espace (N = M = 90)
shape_X = (time_size, space_size) # Taille d'un input X sans tenir compte des 4*4*3 canaux a suivre
def make_data_X(df, n_sources, start):
n_samples = len(df) // n_sources
X = np.zeros(shape=(n_samples, shape_X[0], shape_X[1], 4, 4, 3), dtype=np.float32)
index = 0
for i in range(0, len(df), n_sources):
rho = df.loc[i+start, 'rho_expr']
assert df.loc[i+start, 'E_l_expr'] != "neumann"
assert df.loc[i+start, 'E_d_expr'] == "neumann"
assert df.loc[i+start, 'E_r_expr'] == "neumann"
assert df.loc[i+start, 'E_u_expr'] == "neumann"
for j in range(start+0, start+4):
for k, signal in enumerate(["E", "F", "T"]):
X[index, :, :, 0, j-start, k] = resample(np.stack(df.loc[i+j, signal+'_l']), shape_X)
X[index, :, :, 1, j-start, k] = resample(np.stack(df.loc[i+j, signal+'_d']), shape_X)
X[index, :, :, 2, j-start, k] = resample(np.stack(df.loc[i+j, signal+'_r']), shape_X)
X[index, :, :, 3, j-start, k] = resample(np.stack(df.loc[i+j, signal+'_u']), shape_X)
assert rho == df.loc[i+j, 'rho_expr']
assert df.loc[i+j, 'E_l_expr'] != "neumann"
assert index == i//n_sources + i%n_sources
index += 1
return X
# + id="gOMPl5utDwWG" colab_type="code" colab={}
""" On a seulement 4 sources (mais on fait comme si il y avait 12) et en entree du reseau de neuronnes le signal sur les 3 autres bords """
X = make_data_X(df, 12, 4)
# print("X shape =", X.shape)
# + id="NFtq-EIEGxw7" colab_type="code" colab={}
""" Sur les dataframes de 10 a 20, on a effectivement 4 sources ici, il n'ya pas de simulation inutile ici """
X_prime = make_data_X(df_prime, 4, 0)
# print("X_prime shape =", X_prime.shape)
# + [markdown] id="Ke5ENnwZlAUJ" colab_type="text"
# ## Creations des outputs y
# + id="CJwqagYKJcEd" colab_type="code" colab={}
""" 2 eme methode, on a en realite 4 sources """
len_y = 2
def make_data_y(df, n_sources, start):
    """Build the regression targets: one row of `len_y` values per source.

    Every group of `n_sources` consecutive rows of `df` describes the same
    source; the target is taken from the evaluated 'rho_attr' of that group.
    """
    n_targets = len(df) // n_sources
    y = np.zeros(shape=(n_targets, len_y), dtype=np.float32)
    for index, i in enumerate(range(0, len(df), n_sources)):
        attr = df.loc[i + start, 'rho_attr']
        # All simulations of a group must share the same source attributes.
        for j in range(start + 1, start + 4):
            assert attr == df.loc[i + j, 'rho_attr']
        attr = l_eval(attr)[0]
        # Keep only the first two components (earlier variants also used a
        # third one).
        y[index] = [attr[0], attr[1]]
    return y
# + id="XhXLsWnhKigy" colab_type="code" colab={}
""" 1er partie du jeu des donnees y: 12 sources """
y = make_data_y(df, 12, 4)
# + id="twu0H7L35_j3" colab_type="code" colab={}
""" 2eme partie du jeu des donnees y: 4 sources """
y_prime = make_data_y(df_prime, 4, 0)
# print("y shape =", y_prime.shape)
# + [markdown] id="VOWJE-HgKED7" colab_type="text"
# ## Combinaison des jeux de donnees
# + [markdown] id="9THXM3pa5ZLs" colab_type="text"
# Pour finir, on combine les deux jeux de donnees pour effectivement former X et y
# + id="wUrFyUEwHsLR" colab_type="code" colab={}
""" Liberation de la RAM avant de continuer """
del df
del df_prime
# + id="QfMELSt4FMIe" colab_type="code" colab={}
""" A la fin, on combine les deux """
X = np.vstack([X_prime, X])
# + id="8SJimB4a7jg3" colab_type="code" colab={}
""" On fait une simple normalisation des donnnees. Division par le maximum de valeur absolue de X """
""" 1er et 2eme aproche """
# min_E, max_E = min_max(np.abs(X[:, :, :, 0]), 3)
# min_F, max_F = min_max(np.abs(X[:, :, :, 1]), 3)
# min_T, max_T = min_max(np.abs(X[:, :, :, 2]), 3)
# X[:, :, :, 0] = X[:, :, :, 0] / max_E
# X[:, :, :, 1] = X[:, :, :, 1] / max_F
# X[:, :, :, 2] = X[:, :, :, 2] / max_T
""" 3eme approche """
min_E, max_E = min_max(np.abs(X[:, :, :, :, :, 0]), 3)
min_F, max_F = min_max(np.abs(X[:, :, :, :, :, 1]), 3)
min_T, max_T = min_max(np.abs(X[:, :, :, :, :, 2]), 3)
X[:, :, :, :, :, 0] = X[:, :, :, :, :, 0] / max_E
X[:, :, :, :, :, 1] = X[:, :, :, :, :, 1] / max_F
X[:, :, :, :, :, 2] = X[:, :, :, :, :, 2] / max_T
# + id="dY_IfX-uJrUM" colab_type="code" colab={}
""" On reshape X sous la forme (time_size, space_size, 4*4*3) """
X = X.reshape((len(X), shape_X[0], shape_X[1], 4*4*3))
# + id="YJg44GbgJrJt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="62b5e0ab-eb24-478b-bffe-bf73e9e56e0f"
n_samples = len(X)
print("X shape =", X.shape)
# + id="B4lrEinOrGXB" colab_type="code" colab={}
""" verifier qu'aucun des X n'est completement null """
for i in range(n_samples):
assert np.all(X[i] == 0) == False
# + id="TCdCtaEoKiqB" colab_type="code" colab={}
""" A la fin, on combine les deux """
y = np.vstack([y_prime, y])
# + id="gIxdAqnUIQ5_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5c672b97-45a2-4550-b496-8cb8470a48c9"
n_targets = len(y)
print("y shape =", y.shape)
# + [markdown] id="wRQ5KTLOlAUk" colab_type="text"
# ## Séparation des données train, test et val
# + id="losdBCe1-RpU" colab_type="code" colab={}
""" On fait une permutation des entrees et sorties """
shuffle = np.arange(n_samples)
random.shuffle(shuffle)
X_copy = X.copy()
y_copy = y.copy()
for i in range(n_samples):
X_copy[i] = X[shuffle[i]]
y_copy[i] = y[shuffle[i]]
X = X_copy
y = y_copy
# + id="VrxeQ1TblAUm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c8ae6a98-0610-43e8-a4de-38254a0f2a04"
len_train, len_val = 7*len(X)//10, 2*len(X)//10
X_train = X[:len_train]
X_val = X[len_train:len_train+len_val]
X_test = X[len_train+len_val:]
y_train = y[:len_train]
y_val = y[len_train:len_train+len_val]
y_test = y[len_train+len_val:]
print("X shapes =", np.shape(X_train), np.shape(X_val), np.shape(X_test))
print("y shapes =", np.shape(y_train), np.shape(y_val), np.shape(y_test))
# + id="DzYej95karrk" colab_type="code" colab={}
# """ Sauvegarde de ce jeu de donnees pour un prochain apprentissage """
# np.save("dataset2_samples.npy", X)
# np.save("dataset2_labels.npy", y)
# + [markdown] id="hjukTxjUlAUr" colab_type="text"
# # APPRENTISSAGE
# + [markdown] id="QWnwry9dUzyk" colab_type="text"
# ## Configuration du modèle
# + id="3_STaXV3i0w2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 561} outputId="619c3212-3364-4da2-eb86-7a6051f521e8"
""" Un modèle convolutif """
model = models.Sequential(name="Convoluted_DRNN")
kernel_size = (3, 6)
strides = (1, 1)
model.add(layers.Conv2D(64, kernel_size, strides=strides, activation='relu', input_shape=(shape_X[0], shape_X[1], 4*4*3)))
model.add(layers.Conv2D(32, kernel_size, strides=strides, activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, kernel_size, strides=strides, activation='relu'))
model.add(layers.Conv2D(64, kernel_size, strides=strides, activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(128, kernel_size, strides=strides, activation='relu'))
model.add(layers.Conv2D(128, kernel_size, strides=strides, activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(500, activation='relu', activity_regularizer=keras.regularizers.l2(1e-5)))
# model.add(layers.Dense(500, activation='relu'))
model.add(layers.Dense(50, activation='relu', activity_regularizer=keras.regularizers.l2(1e-5)))
# model.add(layers.Dense(50, activation='relu'))
model.add(layers.Dense(len_y, activation='linear'))
model.summary()
# + [markdown] id="cNhLkiMumSys" colab_type="text"
# Pour la compilation:
# - on utlise l'optimiseur Adam
# - on utlise la `mse` pour fonction loss
# - on observe le score r2
# + id="2N9K_GfhEFi0" colab_type="code" colab={}
""" Pour calculer le score r^2"""
def r2_score(y_true, y_pred):
    """Coefficient of determination R^2 computed with Keras backend ops.

    R^2 = 1 - SS_res / SS_tot; K.epsilon() guards against division by zero.
    """
    ss_residual = K.sum(K.square(y_true - y_pred))
    ss_total = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_residual / (ss_total + K.epsilon())
# + id="3_1MeR4_mKoU" colab_type="code" colab={}
model.compile(optimizer=keras.optimizers.Adam(1e-5),
loss=keras.losses.mse,
# metrics=[keras.metrics.mean_absolute_error]
# metrics=[keras.metrics.mean_absolute_percentage_error]
# metrics=[keras.metrics.cosine_proximity]
metrics=[r2_score]
)
# + id="D1rLlhVmOCTu" colab_type="code" colab={}
""" Pour une decroissance du taux d'apprentissage """
def scheduler(epoch, lr):
    """Learning-rate schedule: 0.001 on the first epoch, then 80% of the
    previous rate at every subsequent epoch (exponential decay)."""
    return 0.001 if epoch == 0 else 0.8 * lr
# NOTE: this callback is defined but only used if uncommented in the fit
# calls below.
callback = keras.callbacks.LearningRateScheduler(scheduler)
# + id="Tze2I2AOmX7M" colab_type="code" colab={}
# history = model.fit(X_train,
#                     y_train,
#                     epochs=20,
#                     batch_size=len(X_train)//5+1,
#                     # callbacks=[callback],
#                     validation_data=(X_val, y_val))
# + id="0ncYCe8zEzVA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e57a2fd0-e7cb-477a-c81d-dcc4be6f8ab4"
""" Technique d'aumentation du batch size """
def fit_increase(batch_sizes):
    """Train `model` in several stages, one per entry of `batch_sizes`,
    increasing the batch size between stages.

    The last `model.fit` History object is stored in the global `history`
    so the training curves can be plotted afterwards.
    """
    global history
    for i, bs in enumerate(batch_sizes):
        # Report progress against the actual number of stages; the original
        # print hard-coded "/ 4" and was wrong for other list lengths.
        print("STEP " + str(i), "/ " + str(len(batch_sizes)), "- BATCH SIZE =", bs)
        history = model.fit(X_train,
                            y_train,
                            epochs=15,
                            batch_size=bs,
                            # callbacks=[callback],
                            validation_data=(X_val, y_val))
        print()
bbs = len(X_train) // 100 # base batch size (smallest)
# Four training stages with batch sizes 1x, 5x, 10x and 20x the base size.
fit_increase([bbs*1, bbs*5, bbs*10, bbs*20])
# + [markdown] id="z2qprGHPPJUf" colab_type="text"
# Verifions le taux d'apprentissage final.
# + id="t8LGeaDnPL8p" colab_type="code" colab={}
# """ devrait donner 0.002 * 0.95^(n_epoch-1) """
# round(model.optimizer.lr.numpy(), 5)
# + [markdown] id="mS5F5p6WC0wL" colab_type="text"
# Observons la decroissance de la loss
# + id="glQU6RacnMdB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="d7f26c54-cbee-4cc9-ff5e-63d9fe3d40f7"
# Plot loss and R^2 ("accuracy") curves from the last training run.
history_dict = history.history
loss = history.history['loss']
val_loss = history.history['val_loss']
# acc = history_dict['cosine_proximity']
# val_acc = history_dict['val_cosine_proximity']
acc = history_dict['r2_score']
val_acc = history_dict['val_r2_score']
epochs = range(1, len(loss)+1)
ticks = range(1, len(loss)+1, 2)
fig,(ax0,ax1)=plt.subplots(1,2,figsize=(10, 3))
ax0.plot(epochs, loss, 'b', label='training loss')
ax0.plot(epochs, val_loss, 'g.', label='validation loss')
ax0.set_xticks(ticks)
ax0.set_xlabel('epochs')
ax0.set_ylabel('loss')
ax0.legend();
ax1.plot(epochs, acc, 'b', label='training accuracy')
ax1.plot(epochs, val_acc, 'g.', label='validation accuracy')
ax1.set_xticks(ticks)
ax1.set_xlabel('epochs')
ax1.set_ylabel('accuracy')
ax1.legend();
plt.suptitle("Training and validation loss and accuracy", y=1.1, fontsize='xx-large')
plt.tight_layout()
# + id="suic5-nwGbMU" colab_type="code" colab={}
# """ Sauvegardons les bons modeles qu'on obtient """
# model.save('goodModel4')
# + [markdown] id="bOAk_vJQEbx6" colab_type="text"
# ## Quelques predictions
# + id="syfpIpvIEcaL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="682f32d6-2472-4682-ed31-652e745a6b46"
hat_y_test = model.predict(X_test)
# + id="xv3ZE2qlsMYx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dbdc64f7-2be6-48a8-b9dd-5e58b6cf4624"
# Sanity check on the range of the predicted values (min_max defined earlier).
print("min max pour les prediction:", min_max(hat_y_test, 1)[0], min_max(hat_y_test, 1)[1])
# + id="AOtOPDldEkmE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="bfb550f6-2783-4362-a5d0-ef96ffcd65ba"
def print_prediction(hat_y_test, nb):
    """Print `nb` (target, prediction) pairs sampled at regular intervals
    over the test set (uses the module-level split globals)."""
    start = len_train + len_val  # index of the first test row in the full dataset
    step = (n_targets - start) // nb
    for i in range(0, n_targets - start, step):
        print("target: ", y_test[i])
        print("prediction:", hat_y_test[i], "\n")
# print_prediction(hat_y_test, len(X_test))
print_prediction(hat_y_test, 20)
# + id="sYifAp-3Us9d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="2035f53c-2801-4fe7-e59a-26fd7258326b"
""" Le score r2 d'apres Keras """
# evaluate() returns [loss, r2_score]; index 1 is the metric.
print("r2 score on predictions: %.2f"%(100*model.evaluate(X_test, y_test)[1]), "%")
# + [markdown] id="fGKbGINGgGbR" colab_type="text"
# Deffinissons une mesure d'accuracy.
#
# On suppose la prediction correcte si elle est suffisament proche du label:
# - au **dizième** près pour la position (suivant x ou y)
# - à l'**unité** près pour la hauteur
# + id="5c7idcq8h6gG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="eac92bae-c746-460a-aaa9-7b12433ed2d6"
""" Calculons l'accuracy """
condition1 = (abs(y_test[:, 0] - hat_y_test[:, 0]) <= .1) # postition x
condition2 = (abs(y_test[:, 1] - hat_y_test[:, 1]) <= .1) # postition y
# condition3 = (abs(y_test[:, 2] - hat_y_test[:, 2]) <= 1) # hauteur
acc_pos_x = 100 * len(hat_y_test[condition1]) / len(hat_y_test)
acc_pos_y = 100 * len(hat_y_test[condition2]) / len(hat_y_test)
# acc_hgt = 100 * len(hat_y_test[condition3]) / len(hat_y_test)
# acc = 100 * len(hat_y_test[condition1 & condition2 & condition3]) / len(hat_y_test)
print("position x accuracy: %.2f"%acc_pos_x, "%")
print("position y accuracy: %.2f"%acc_pos_y, "%")
# print("height accuracy: %.2f"%acc_hgt, "%")
# print()
# print("total accuracy: %.2f"%acc, "%")
# + [markdown] id="yqY96hSoOPXC" colab_type="text"
# ## Conclusion
| src/notebook/Regression90x90.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="NpJd3dlOCStH"
# <a href="https://colab.research.google.com/github/magenta/ddsp/blob/master/ddsp/colab/tutorials/0_processor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="hMqWDc_m6rUC"
#
# ##### Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
#
#
#
# + colab={} colab_type="code" id="VNhgka4UKNjf"
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + [markdown] colab_type="text" id="ZFIqwYGbZ-df"
# # DDSP Processor Demo
#
# This notebook provides an introduction to the signal `Processor()` object. The main object type in the DDSP library, it is the base class used for Synthesizers and Effects, which share the methods:
#
# * `get_controls()`: inputs -> controls.
# * `get_signal()`: controls -> signal.
# * `__call__()`: inputs -> signal. (i.e. `get_signal(**get_controls())`)
#
# Where:
# * `inputs` is a variable number of tensor arguments (depending on processor). Often the outputs of a neural network.
# * `controls` is a dictionary of tensors scaled and constrained specifically for the processor
# * `signal` is an output tensor (usually audio or control signal for another processor)
#
# Let's see why this is a helpful approach by looking at the specific example of the `Additive()` synthesizer processor.
# + cellView="form" colab={} colab_type="code" id="21w0_tyszEtN"
#@title Install and import dependencies
# !pip install -qU ddsp
# Ignore a bunch of deprecation warnings
import warnings
warnings.filterwarnings("ignore")
import ddsp
import ddsp.training
from ddsp.colab.colab_utils import play, specplot, DEFAULT_SAMPLE_RATE
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
sample_rate = DEFAULT_SAMPLE_RATE # 16000
# + [markdown] colab_type="text" id="AiCIG1x5bxkh"
# # Example: additive synthesizer
#
# The additive synthesizer models a sound as a linear combination of harmonic sinusoids. Amplitude envelopes are generated with 50% overlapping hann windows. The final audio is cropped to n_samples.
# + [markdown] colab_type="text" id="59gA5RKMHiS3"
# ## `__init__()`
#
# All member variables are initialized in the constructor, which makes it easy to change them as hyperparameters using the [gin](https://github.com/google/gin-config) dependency injection library. All processors also have a `name` that is used by `ProcessorGroup()`.
# + colab={} colab_type="code" id="mtNivsWq3qtW"
# Synthesis length: 1000 control frames, 64 audio samples per frame.
n_frames = 1000
hop_size = 64
n_samples = n_frames * hop_size
# Create a synthesizer object.
additive_synth = ddsp.synths.Additive(n_samples=n_samples,
                                      sample_rate=sample_rate,
                                      name='additive_synth')
# + [markdown] colab_type="text" id="vuDIPhc58ZQI"
#
# ## `get_controls()`
#
# The outputs of a neural network are often not properly scaled and constrained. The `get_controls` method gives a dictionary of valid control parameters based on neural network outputs.
#
#
# + [markdown] colab_type="text" id="xFPeXRPG6I44"
# **3 inputs (amps, hd, f0)**
# * `amplitude`: Amplitude envelope of the synthesizer output.
# * `harmonic_distribution`: Normalized amplitudes of each harmonic.
# * `fundamental_frequency`: Frequency in Hz of base oscillator
#
#
# + colab={} colab_type="code" id="4v4q5NuM4JWf"
# Generate some arbitrary inputs.
# Amplitude [batch, n_frames, 1].
# Make amplitude linearly decay over time.
amps = np.linspace(1.0, -3.0, n_frames)
amps = amps[np.newaxis, :, np.newaxis]
# Harmonic Distribution [batch, n_frames, n_harmonics].
# Make harmonics decrease linearly with frequency.
n_harmonics = 30
harmonic_distribution = (np.linspace(-2.0, 2.0, n_frames)[:, np.newaxis] +
                         np.linspace(3.0, -3.0, n_harmonics)[np.newaxis, :])
harmonic_distribution = harmonic_distribution[np.newaxis, :, :]
# Fundamental frequency in Hz [batch, n_frames, 1].
f0_hz = 440.0 * np.ones([1, n_frames, 1], dtype=np.float32)
# + colab={} colab_type="code" id="KVTMi2dX5yFe"
# Plot it!
# NOTE: these values are deliberately "raw" network-style outputs; they are
# constrained by get_controls() in the next cell.
time = np.linspace(0, n_samples / sample_rate, n_frames)
plt.figure(figsize=(18, 4))
plt.subplot(131)
plt.plot(time, amps[0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Amplitude')
plt.subplot(132)
plt.plot(time, harmonic_distribution[0, :, :])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Harmonic Distribution')
plt.subplot(133)
plt.plot(time, f0_hz[0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
_ = plt.title('Fundamental Frequency')
# + [markdown] colab_type="text" id="ORU6bwfWRDks"
# Consider the plots above as outputs of a neural network. These outputs violate the synthesizer's expectations:
# * Amplitude is not >= 0 (avoids phase shifts)
# * Harmonic distribution is not normalized (factorizes timbre and amplitude)
# * Fundamental frequency * n_harmonics > nyquist frequency (440 * 30 > 8000), which will lead to [aliasing](https://en.wikipedia.org/wiki/Aliasing).
#
# + colab={} colab_type="code" id="zrYgCcby_xZg"
# Scale/constrain the raw inputs into valid synthesizer controls.
controls = additive_synth.get_controls(amps, harmonic_distribution, f0_hz)
print(controls.keys())
# + colab={} colab_type="code" id="lnzqmowPB5Lu"
# Now let's see what they look like...
time = np.linspace(0, n_samples / sample_rate, n_frames)
plt.figure(figsize=(18, 4))
plt.subplot(131)
plt.plot(time, controls['amplitudes'][0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Amplitude')
plt.subplot(132)
plt.plot(time, controls['harmonic_distribution'][0, :, :])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Harmonic Distribution')
plt.subplot(133)
plt.plot(time, controls['f0_hz'][0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
_ = plt.title('Fundamental Frequency')
# + [markdown] colab_type="text" id="wVgLs8_BRuxz"
# Notice that
# * Amplitudes are now all positive
# * The harmonic distribution sums to 1.0
# * All harmonics that are above the Nyquist frequency now have an amplitude of 0.
# + [markdown] colab_type="text" id="nbY9iIbdDljR"
# The amplitudes and harmonic distribution are scaled by an "exponentiated sigmoid" function (`ddsp.core.exp_sigmoid`). There is nothing particularly special about this function (other functions can be specified as `scale_fn=` during construction), but it has several nice properties:
# * Output scales logarithmically with input (as does human perception of loudness).
# * Centered at 0, with max and min in reasonable range for normalized neural network outputs.
# * Max value of 2.0 to prevent signal getting too loud.
# * Threshold value of 1e-7 for numerical stability during training.
# + colab={} colab_type="code" id="r6-dnQ2NEM1P"
x = tf.linspace(-10.0, 10.0, 1000)
y = ddsp.core.exp_sigmoid(x)
plt.figure(figsize=(18, 4))
plt.subplot(121)
plt.plot(x, y)
plt.subplot(122)
_ = plt.semilogy(x, y)
# + [markdown] colab_type="text" id="z32JcedbIM9J"
# ## `get_signal()`
#
# Synthesizes audio from controls.
# + colab={} colab_type="code" id="k-lraCkcIjyK"
# Synthesize from already-validated controls.
audio = additive_synth.get_signal(**controls)
play(audio)
specplot(audio)
# + [markdown] colab_type="text" id="LSvRsjJUVxOA"
# ## `__call__()`
#
# Synthesizes audio directly from the raw inputs. `get_controls()` is called internally to turn them into valid control parameters.
# + colab={} colab_type="code" id="DBIeyZLLI-RO"
audio = additive_synth(amps, harmonic_distribution, f0_hz)
play(audio)
specplot(audio)
# + [markdown] colab_type="text" id="7j5XCUJkK9WZ"
# # Example: Just for fun...
# Let's run another example where we tweak some of the controls...
# + colab={} colab_type="code" id="2uN7x3wqLBcD"
## Some weird control envelopes...
# Amplitude [batch, n_frames, 1].
amps = np.ones([n_frames]) * -5.0
amps[:50] += np.linspace(0, 7.0, 50)
amps[50:200] += 7.0
amps[200:900] += (7.0 - np.linspace(0.0, 7.0, 700))
amps *= np.abs(np.cos(np.linspace(0, 2*np.pi * 10.0, n_frames)))
amps = amps[np.newaxis, :, np.newaxis]
# Harmonic Distribution [batch, n_frames, n_harmonics].
n_harmonics = 20
harmonic_distribution = np.ones([n_frames, 1]) * np.linspace(1.0, -1.0, n_harmonics)[np.newaxis, :]
for i in range(n_harmonics):
harmonic_distribution[:, i] = 1.0 - np.linspace(i * 0.09, 2.0, 1000)
harmonic_distribution[:, i] *= 5.0 * np.abs(np.cos(np.linspace(0, 2*np.pi * 0.1 * i, n_frames)))
if i % 2 != 0:
harmonic_distribution[:, i] = -3
harmonic_distribution = harmonic_distribution[np.newaxis, :, :]
# Fundamental frequency in Hz [batch, n_frames, 1].
f0_hz = np.ones([n_frames]) * 200.0
f0_hz[:100] *= np.linspace(2, 1, 100)**2
f0_hz[200:1000] += 20 * np.sin(np.linspace(0, 8.0, 800) * 2 * np.pi * np.linspace(0, 1.0, 800)) * np.linspace(0, 1.0, 800)
f0_hz = f0_hz[np.newaxis, :, np.newaxis]
# Get valid controls
controls = additive_synth.get_controls(amps, harmonic_distribution, f0_hz)
# + colab={} colab_type="code" id="ovBa3pkUMrAC"
# Plot!
# Visualize the constrained controls produced by get_controls() above.
time = np.linspace(0, n_samples / sample_rate, n_frames)
plt.figure(figsize=(18, 4))
plt.subplot(131)
plt.plot(time, controls['amplitudes'][0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Amplitude')
plt.subplot(132)
plt.plot(time, controls['harmonic_distribution'][0, :, :])
plt.xticks([0, 1, 2, 3, 4])
plt.title('Harmonic Distribution')
plt.subplot(133)
plt.plot(time, controls['f0_hz'][0, :, 0])
plt.xticks([0, 1, 2, 3, 4])
_ = plt.title('Fundamental Frequency')
# + colab={} colab_type="code" id="Xf7Vc3UtNQ87"
audio = additive_synth.get_signal(**controls)
play(audio)
specplot(audio)
| ddsp/colab/tutorials/0_processor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:biobombe]
# language: R
# name: conda-env-biobombe-r
# ---
# # Compare Top Feature Performance with k = 200
#
# **<NAME>, 2019**
#
# Here, I compare the results of aggregating the top 200 features for each algorithm (and the top 200 features for all algorithms combined) with the models originally trained using k = 200.
#
# I also compare results using the top 1 feature for all algorithms and 200 randomized features from all algorithms.
# +
suppressPackageStartupMessages(library(readr))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(cowplot))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(ggrepel))
source(file.path("scripts", "viz_util.R"))
# -
genes <- c("TP53", "PTEN", "PIK3CA", "KRAS", "TTN")
# +
# Load all mutation classification results
mut_path <- file.path("results", "top_feature_classification")
top_feature_df <- load_results(results_path = mut_path,
process_output = FALSE,
uses_top_features = TRUE)
top_feature_df <- top_feature_df[['metrics']]
top_feature_df <- top_feature_df %>% dplyr::filter(data_type == 'cv')
top_feature_df$gene_or_cancertype <- factor(top_feature_df$gene_or_cancertype, levels = genes)
print(dim(top_feature_df))
head(top_feature_df, 2)
# +
# Load all mutation classification results
mut_path <- file.path("results", "mutation")
full_mutation_df <- load_results(results_path = mut_path, process_output = FALSE)
# Extract raw_metrics BEFORE full_mutation_df is overwritten by its
# 'metrics' element on the next line — the order matters here.
full_mutation_raw_df <- full_mutation_df[['raw_metrics']]
full_mutation_df <- full_mutation_df[['metrics']]
# +
# Process mutation results: restrict both data frames to cross-validation
# rows for the genes of interest (full models fixed at k = 200).
full_mutation_df <- full_mutation_df %>%
    dplyr::filter(z_dim == 200,
                  gene_or_cancertype %in% genes,
                  data_type == 'cv')
full_mutation_raw_df <- full_mutation_raw_df %>%
    dplyr::filter(gene_or_cancertype %in% genes,
                  data_type == 'cv')
# Fix factor orderings so panels plot in a consistent order.
full_mutation_df$gene_or_cancertype <- factor(full_mutation_df$gene_or_cancertype, levels = genes)
full_mutation_df$signal <- factor(full_mutation_df$signal, levels = c("signal", "shuffled"))
full_mutation_raw_df$gene_or_cancertype <- factor(full_mutation_raw_df$gene_or_cancertype, levels = genes)
full_mutation_raw_df$signal <- factor(full_mutation_raw_df$signal, levels = c("signal", "shuffled"))
# Average AUROC/AUPR within each group. Fix: `algorithm` was listed twice in
# the original group_by() call; the redundant duplicate is removed (grouping
# is unchanged).
full_mutation_df <- full_mutation_df %>%
    dplyr::group_by(algorithm,
                    data_type,
                    gene_or_cancertype,
                    signal,
                    z_dim) %>%
    summarise(mean_auroc = mean(auroc),
              mean_aupr = mean(aupr))
# -
source(file.path("scripts", "viz_util.R"))
panel_a_auroc <- plot_top_features(top_feature_df,
full_mutation_df,
full_mutation_raw_df,
auroc_or_aupr = 'AUROC')
panel_a_auroc
# +
panel_b_aupr <- plot_top_features(top_feature_df,
full_mutation_df,
full_mutation_raw_df,
auroc_or_aupr = 'AUPR')
panel_b_aupr
# +
# Now, build a custom figure legend
# -
# Dummy data used only to generate a legend. NOTE: cbind() with a character
# vector coerces every column to character, which is fine for this purpose.
custom_data <- as.data.frame(cbind(c(1, 3, 5, 7), c(5, 3, 6, 7), c("A", "B", "C", "D")))
colnames(custom_data) <- c('one', 'two', 'three')
custom_data
# +
# Throwaway plot whose only purpose is to supply a custom point-shape legend
# for the final multipanel figure.
custom_gg <- ggplot(custom_data,
                    aes(x = one, y = two)) +
    geom_point(aes(shape = three),
               alpha = 0.8) +
    scale_shape_manual(name = "Points",
                       values = c(21, 24, 23, 25),
                       labels = c("Real\n(k = 200)", "Permuted\n(k = 200)",
                                  "Real\n(random 200)",
                                  "Real\n(top 1)")) +
    guides(shape = guide_legend(override.aes = list(fill = c("grey20",
                                                             "grey20",
                                                             "lightblue",
                                                             "yellow"),
                                                    color = c("white",
                                                              "white",
                                                              "black",
                                                              "black"),
                                                    size = 3),
                                keywidth = 1,
                                keyheight = 1.5,
                                # Fix: the guide_legend() argument is
                                # `default.unit`; `default.unity` was a typo
                                # silently swallowed by `...`.
                                default.unit = "mm")) +
    theme(legend.position = "right",
          legend.title = element_text(size = 10),
          legend.text = element_text(size = 7.5),
          legend.key.size = unit(1, "lines"))
custom_gg
# +
# Create multipanel plot
# Stack panels a and b, then attach the extracted legends on the right.
alg_legend <- cowplot::get_legend(panel_b_aupr)
custom_legend <- cowplot::get_legend(custom_gg)
main_plot <- (
    cowplot::plot_grid(
        panel_a_auroc + theme(legend.position = 'none') + xlab(""),
        panel_b_aupr + theme(legend.position = "none") + xlab(""),
        labels = c("a", "b"),
        ncol = 1,
        nrow = 2
    )
)
# Empty ggdraw() slots act as vertical spacing around the two legends.
full_legend <- (
    cowplot::plot_grid(
        ggdraw(),
        alg_legend,
        custom_legend,
        ggdraw(),
        ncol = 1,
        nrow = 4,
        rel_heights = c(0.9, 1, 1.6, 0.9)
    )
)
main_plot = cowplot::plot_grid(main_plot,
                               full_legend,
                               rel_widths = c(1, 0.15),
                               ncol = 2)
main_plot
# -
# Save the figure in both raster (.png) and vector (.pdf) formats.
for(extension in c('.png', '.pdf')) {
    sup_file <- paste0("supplemental_tcga_top_feature_summary", extension)
    sup_file <- file.path("figures", sup_file)
    cowplot::save_plot(filename = sup_file,
                       plot = main_plot,
                       base_height = 170,
                       base_width = 170,
                       units = "mm")
}
| 9.tcga-classify/visualize-top-features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Filters & Multivalue Columns
import numpy as np
import pandas as pd
data = pd.read_csv("data.csv")
data.head()
# ### Filtering by row or column index
# filter() selects COLUMNS by default (axis=1): exact names, regex, substring.
data.filter(items=["Item", "Unit"]).head()
data.filter(regex='^R').head()
data.filter(like='it').head()
# ### Same rules for index when axis 0 is specified
data.filter(items=[0,3,5], axis=0)
# ### Dealing with multivalue columns
import random
# 10 rows of 5 random monthly sales figures each.
x = [[random.randrange(1,10) for a in range(5)] for b in range(10)]
x
# NOTE(review): "<NAME>" placeholders come from anonymization; names.split()
# must yield as many entries as len(x) (10) for the constructor to work.
names = "<NAME> <NAME>"
df = pd.DataFrame({"Name" : names.split(), "Sales" : x})
df
# ### To simply split
df2 = pd.DataFrame(df['Sales'].values.tolist(), columns=['May', 'June',
                                                         'July', 'August', 'Sept'])
df2
# ### Alternative method
pd.concat([df['Sales'].apply(pd.Series), df["Name"]], axis=1)
| Section 3/Code/3.2-Filters_Splitting_Multivalue_columns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Postcards of Parliament From A Digital Flâneur
# + [markdown] slideshow={"slide_type": "slide"}
# __Flâneur__, *noun*, A man who saunters around observing society.
# + [markdown] slideshow={"slide_type": "-"}
# *The flâneur wandered in the shopping arcades, but he did not give in to the temptations of consumerism; the arcade was primarily a pathway to a rich sensory experience — and only then a temple of consumption. His goal was to observe, to bathe in the crowd, taking in its noises, its chaos, its heterogeneity, its cosmopolitanism. Occasionally, he would narrate what he saw — surveying both his private self and the world at large — in the form of short essays for daily newspapers.*
#
# __The Death of the Cyberflâneur__, <NAME>, New York Times, Sunday Review, February 4, 2012
# + [markdown] slideshow={"slide_type": "slide"}
# # APIs
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Using the APIs
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Packages Make Life Easier (?)
# +
import mnis
import datetime
# Create a date for the analysis
d = datetime.date.today()
# Download the full data for MPs serving on the given date as a list
# (only the first member record is displayed here).
mnis.getCommonsMembersOn(d)[0]
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating Custom `pandas` Data Reader Packages
# -
import pd_datareader_nhs.nhs_digital_ods as ods
# + slideshow={"slide_type": "fragment"}
# Search the NHS Digital ODS catalogue by label text.
ods.search(string='Prison', field='Label')
# + slideshow={"slide_type": "subslide"}
dd=ods.download('eprison')
dd.head()
# + [markdown] slideshow={"slide_type": "slide"}
# # Package Issues
# + [markdown] slideshow={"slide_type": "fragment"}
# - development
# + [markdown] slideshow={"slide_type": "fragment"}
# - building up example and reusable recipes
# + [markdown] slideshow={"slide_type": "fragment"}
# - ownership and production quality (participation in development)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Notebooks as Open / Shared Recipes
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # But How Do I Share *Working* Examples?
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## BinderHub Build Sequence
#
# *"[P]hilosophically similar to Heroku Build Packs"*
# + [markdown] slideshow={"slide_type": "subslide"}
# - `requirements.txt`
# - python packages
# - `environment.yml`
# - conda environment specification
# - `apt.txt`
# - debian packages that should be installed (latest version of Ubuntu)
# + [markdown] slideshow={"slide_type": "subslide"}
# - `postBuild`
# - arbitrary commands to be run after the whole repository has been built
# - `REQUIRE`
# - Julia packages
# - `Dockerfile`
# - treated as a regular Dockerfile. __The presence of a Dockerfile will cause all other building behavior to not be triggered.__
# + [markdown] slideshow={"slide_type": "slide"}
# ## Building a Local Docker Image From a Github Repository
# + [markdown] slideshow={"slide_type": "-"}
# ```bash
# pip3 install jupyter-repo2docker
#
# jupyter-repo2docker --image-name psychemedia/parlihacks --no-run https://github.com/psychemedia/parlihacks
#
#
# docker push psychemedia/parlihacks
# ```
# -
# ## Creating Simple Service APIs
# + [markdown] slideshow={"slide_type": "fragment"}
# In terminal:
#
# ```
# jupyter kernelgateway --KernelGatewayApp.api='kernel_gateway.notebook_http' --KernelGatewayApp.seed_uri='./SimpleAPI2.ipynb' --port 8899
# ```
# + slideshow={"slide_type": "fragment"}
import requests
# Query the local Jupyter kernel-gateway API started above.
# Fix: the host was written as "1192.168.3.11", which is not a valid
# address; the intended private LAN host is 192.168.3.11.
requests.get('http://192.168.3.11:8899/demo/role/worker').json()
# + slideshow={"slide_type": "subslide"}
requests.get('http://192.168.3.11:8899/demo/name/jo').json()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Possible DIY Service Types
# + [markdown] slideshow={"slide_type": "fragment"}
# - *data servers*: for example, API defined against a CSV file or simple SQLite3 database
# + [markdown] slideshow={"slide_type": "fragment"}
# - *known entity taggers*: for example, identify members mentioned within a text
# + [markdown] slideshow={"slide_type": "fragment"}
# - *classifiers*: for example, attempt to associate a text with a particular topic, or subject expert who can best handle a particular question, relative to a particular trained classifier / model
# + [markdown] slideshow={"slide_type": "slide"}
# ## Reporting Patterns
#
# - generate report for a single MP about their constituency
# - as a standalone item
# - in comparison to all other consituencies nationally
# - in comparison to a subset of other constitutencies eg neighbouring, similar
# + [markdown] slideshow={"slide_type": "fragment"}
# - generate a report over all consituencies nationally
# + [markdown] slideshow={"slide_type": "fragment"}
# - generate a report of wards within a particular constituency
# -
| notebooks/ShowNtell_nov17.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install pandas --upgrade
import numpy as np
import pandas as pd
# ### Download Titanic dataset
# <b>Download link: </b>https://www.kaggle.com/c/titanic/data
titanic = pd.read_csv('data/titanic.csv')
titanic.head()
# Drop the mostly-missing Cabin column.
titanic = titanic.drop(columns=['Cabin'])
titanic.head()
# Count missing ages.
np.sum(titanic['Age'].isna())
# #### Group data by port of embarkation
# * C = Cherbourg
# * Q = Queenstown
# * S = Southampton
port = titanic.groupby('Embarked').aggregate(np.mean)
port
port[['Age','Fare']]
# <b>How many people survived and what is the percentage?</b>
# count_nonzero on PassengerId works as a row count because IDs start at 1.
total = np.count_nonzero(titanic['PassengerId'])
survived = np.count_nonzero(titanic['Survived'])
percent_survived = (survived / total) * 100
print('Total = %i' %total)
print('Survived = %i' %survived)
# NOTE(review): %i truncates the percentage to an integer.
print('Percentage Survived = %i' %percent_survived)
# <b> Female Survivors </b>
females = titanic[titanic['Sex'] == 'female']
females.head()
females_survived = np.count_nonzero(females['Survived'])
print('Females who Survived = %i' %females_survived)
# <b>Male Survivors</b>
males = titanic[titanic['Sex'] == 'male']
males_survived = np.count_nonzero(males['Survived'])
print('Males who Survived = %i' %males_survived)
# <b> Out of the suvivors how many belonged to a particular passenger class </b>
# NOTE(review): `survivors` is computed but never used below; the groupby is
# applied to the full `titanic` frame (count_nonzero on 0/1 Survived still
# counts survivors per class).
survivors = titanic[titanic['Survived'] == 1]
Pclass = titanic.groupby('Pclass').aggregate(np.count_nonzero)
Pclass[['Survived']]
# ##### Use np.sum() to get total number of survivors
# We had earlier used np.count_nonzero() on the Survived column
np.sum(Pclass['Survived'])
| data/external/numpy-working-with-multidimensional-data/02/demos/m03_demo02_NumpyAndPandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../../')
# %run setup.py install
os.chdir(owd)
# %matplotlib inline
import pyross
import numpy as np
import matplotlib.pyplot as plt
# # Introduction: Forecast for SEAIRQ model with stochastic parameters
# In this notebook, we consider the SEAIRQ model.
#
# We assume that the parameters
#
# * $\beta$ (probability of infection on contact),
# * $\gamma_{E}$ (rate of progression for exposed individual to class A),
# * $\gamma_{AA}$ (rate of progression from class A to asymptomatic infective class),
# * $\gamma_{AS}$ (rate of progression from class A to symptomatic infective class),
# * $\gamma_{I_a}$ (rate of recovery for asymptomatic infected individuals), and
# * $\gamma_{I_s}$ (rate of recovery for symptomatic infected individuals)
# * $ \tau_S$ (quarantining rate for susceptibles)
# * $ \tau_E$ (quarantining rate for exposed)
# * $ \tau_A$ (quarantining rate for A)
# * $ \tau_{I_a}$ (quarantining rate for asymptomatic infectives)
# * $ \tau_{I_s}$ (quarantining rate for symptomatic infectives)
#
# are not known exactly, but rather are characterized by an 11D Gaussian distribution with known mean and covariance matrix. The Gaussian distribution function is truncated, i.e. set to zero if any parameter is $< 0$.
#
# **We now illustrate how uncertainties in the parameters affect the predictions of the SEAIRQ model.**
#
# For this we simulate the SEAIRQ model $N_s = 500$ times; for each simulation the above parameters are sampled from a given 11D Gaussian distribution. The resulting 500 trajectories are shown together with their mean, standard deviation, median, and 5 as well as 95 percentiles.
#
# We perform this analysis for the deterministic SEAIRQ model.
# # Define model parameters and initialise pyross.forecast.SEAIRQ
# +
M = 1 # the SEAIRQ model we consider has no age structure
Ni = 50000*np.ones(M) # so there is only one age group
N = np.sum(Ni) # and the total population is the size of this age group
E0 = np.array([0]) # no exposed individuals initially
A0 = np.array([1]) # one individual in the activated class seeds the epidemic
Ia0 = np.array([0]) # the SEAIRQ model we consider has only one kind of infective
Is0 = np.array([20]) # we take these to be symptomatic
Q0 = np.array([0]) # nobody quarantined at t = 0
R0 = np.array([0]) # and assume there are no recovered individuals initially
# Bug fix: the previous expression omitted A0 (and Q0), so the compartments did
# not sum to N.  The initial susceptibles follow from S+E+A+Ia+Is+Q+R = N.
S0 = N-(E0+A0+Ia0+Is0+Q0+R0)
# there is no contact structure
def contactMatrix(t):
    """Time-independent contact matrix: the M x M identity (no mixing structure)."""
    return np.eye(M)
# duration of simulation and number of output datapoints
Tf = 500; Nt = Tf + 1
# These parameters we consider exact
fsa = 1 # the self-isolation parameter
tE = 0.00 # rate E -> Q
tA = 0.00 # rate A -> Q
tIa = 0.00 # rate Ia -> Q
tIs = 0.05 # rate Is -> Q
# These are the parameters that we sample stochastically (their means):
alpha = 0.0 # fraction of asymptomatic infectives
beta = 0.2 # infection rate
gIa = 0.1 # recovery rate of asymptomatic infectives
gIs = 0.1 # recovery rate of symptomatic infectives
gE = 0.04 # recovery rate of E
gA = 0.2 # rate to go from A to Ia
# Order of rows/columns in the 6x6 covariance matrix, matching the
# assignments below:
#   alpha, beta, gIa, gIs, gA, gE
# (an earlier comment wrongly listed 11 parameters including the tau's)
cov = np.zeros([6,6],dtype=float)
cov[0,0] = 0*alpha**2 # cov(alpha, alpha) = Var(alpha)
cov[1,1] = 0.1*beta**2 # cov(beta, beta) = Var(beta)
cov[2,2] = 0.01*gIa**2 # cov(gIa,gIa) = Var(gIa)
cov[3,3] = 0.01*gIs**2 # cov(gIs,gIs) = Var(gIs)
cov[4,4] = 0.01*gA**2 # cov(gA, gA) = Var(gA)
cov[5,5] = 0.01*gE**2 # cov(gE, gE) = Var(gE)
#
cov[1,5] = 0.01*beta*gE # cov(beta, gE)
cov[5,1] = cov[1,5] # covariance matrix is symmetric
#
cov[2,3] = cov[2,2] # cov(gIa, gIs)
cov[3,2] = cov[2,3]
# Parameters handed to pyross.forecast.SEAIRQ.
# Bug fix: the dict literal previously contained the key 'gE' twice.
parameters = {'alpha': alpha, 'beta': beta,
              'gE': gE, 'gA': gA,
              'gIa': gIa, 'gIs': gIs, 'fsa': fsa,
              'tE': tE, 'tA': tA, 'tIa': tIa, 'tIs': tIs,
              'cov': cov
              }
# Initialise pyross forecast module
model_forecast = pyross.forecast.SEAIRQ(parameters, M, Ni)
# Number of simulations over which we average
Ns = 500
# +
# Define a function which we use below to plot simulation results
def plot_trajectories(result,
                      percentile=-1,
                      plot_index=4,      # which time series should be plotted?
                      filename='None'):  # set filename for saving figures
    """Plot the sampled trajectories in `result` for one SEAIRQ compartment.

    Parameters
    ----------
    result : dict
        Simulation output; the keys used are 'X' (sampled trajectories),
        't' (times), 'X_mean' and 'X_std'.
    percentile : float
        If > 0, a second figure shows the [percentile, 100-percentile]
        band and the median of the sampled trajectories.
    plot_index : int
        Compartment to plot: 0=S, 1=E, 2=A (asymptomatic, infectious),
        3=Ia (asymptomatic infectives), 4=Is (symptomatic infectives), 5=Q.
    filename : str
        If not 'None', figures are saved as '<filename>_trajs.png' and
        '<filename>_trajs2.png' and closed instead of kept open.
    """
    if plot_index == 0:
        title = 'Susceptibles'
        ylabel = r'$N_S$'
    elif plot_index == 1:
        title = 'Exposed'
        ylabel = r'$N_{E}$'
    elif plot_index == 2:
        title = r'Asymptomatic, infectious (A)'
        ylabel = r'$N_{A}$'
    elif plot_index == 3:
        title = 'Asymptomatic infectives'
        ylabel = r'$N_{I,a}$'
    elif plot_index == 4:
        title = 'Symptomatic infectives'
        ylabel = r'$N_{I,s}$'
    elif plot_index == 5:
        title = 'Quarantined'
        ylabel = r'$N_{Q}$'
    else:
        # Bug fix: the message previously claimed the valid range was 0..3,
        # although the chain above accepts 0..5.
        raise RuntimeError("plot_index should be 0, 1, 2, 3, 4, or 5.")
    #
    fontsize = 25
    #
    trajectories = result['X']
    t_arr = result['t']
    traj_mean = result['X_mean']
    traj_std = result['X_std']
    #
    # Figure 1: every sampled trajectory plus the mean +/- one std deviation
    #
    fig, ax = plt.subplots(1, 1, figsize=(7, 5))
    ax.set_title(title,
                 y=1.05,
                 fontsize=fontsize)
    for i, e in enumerate(trajectories):
        ax.plot(t_arr, e[plot_index],
                alpha=0.15,
                )
    ax.fill_between(t_arr, traj_mean[plot_index] - traj_std[plot_index],
                    traj_mean[plot_index] + traj_std[plot_index],
                    alpha=0.7,
                    color='limegreen',
                    label='Std deviation')
    ax.plot(t_arr, traj_mean[plot_index] - traj_std[plot_index],
            alpha=1,
            label='Std deviation',
            lw=1.5,
            ls='--',
            color='black')
    ax.plot(t_arr, traj_mean[plot_index] + traj_std[plot_index],
            alpha=1,
            #label='Std deviation',
            lw=1.5,
            ls='--',
            color='black')
    ax.plot(t_arr, traj_mean[plot_index],
            alpha=1,
            lw=2,
            color='black',
            label='Mean')
    ax.set_xlim(np.min(t_arr), np.max(t_arr))
    ax.set_ylabel(ylabel, fontsize=fontsize)
    ax.set_xlabel(r'$t$ [days]', fontsize=fontsize)
    ax.legend(loc='upper right', fontsize=18)
    plt.show(fig)  # NOTE(review): plt.show(fig) is deprecated in newer matplotlib
    if filename != 'None':
        fig.savefig(filename + '_trajs.png', bbox_inches='tight', dpi=100)
        plt.close(fig)
    #
    # Figure 2 (optional): percentile band and median
    #
    if percentile > 0:
        percentiles_lower = np.percentile(trajectories[:, plot_index], percentile, axis=0)
        percentiles_upper = np.percentile(trajectories[:, plot_index], 100 - percentile, axis=0)
        percentiles_median = np.percentile(trajectories[:, plot_index], 50, axis=0)
        print("In the following plot, red dashed lines denote {0} and {1} percentiles of the numerical data:".format(percentile,
                                                                                                                     100 - percentile))
        fig, ax = plt.subplots(1, 1, figsize=(7, 5))
        ax.set_title(title,
                     y=1.05,
                     fontsize=fontsize)
        for i, e in enumerate(trajectories):
            ax.plot(t_arr, e[plot_index],
                    alpha=0.15,
                    )
        ax.fill_between(t_arr, percentiles_lower,
                        percentiles_upper,
                        alpha=0.1,
                        color='red',
                        label='Percentiles')
        ax.plot(t_arr, percentiles_lower,
                alpha=1,
                lw=2,
                label='Percentiles',
                ls='--',
                color='red',
                )
        ax.plot(t_arr, percentiles_upper,
                alpha=1,
                lw=2,
                color='red',
                ls='--',
                )
        ax.plot(t_arr, percentiles_median,
                alpha=1,
                lw=2,
                color='red',
                label='Median')
        ax.plot(t_arr, traj_mean[plot_index],
                alpha=1,
                lw=2,
                color='black',
                label='Mean')
        ax.set_xlim(np.min(t_arr), np.max(t_arr))
        ax.set_ylabel(ylabel, fontsize=fontsize)
        ax.set_xlabel(r'$t$ [days]', fontsize=fontsize)
        ax.legend(loc='upper right', fontsize=18)
        plt.show(fig)
        if filename != 'None':
            fig.savefig(filename + '_trajs2.png', bbox_inches='tight', dpi=100)
            plt.close(fig)
# Define a function which we use below to plot parameters used for simulations
def plot_sample_parameters(result,
                           filename='None'): # set filename for saving figures
    """Scatter-plot the sampled (beta, gamma_E) parameter pairs from `result`."""
    #
    fontsize=25
    #
    # Scatterplot of used parameters
    #
    sample_parameters = result['sample_parameters'].T
    beta = result['beta']
    gE = result['gE']
    gIa = result['gIa']  # NOTE(review): unused below
    gIs = result['gIs']  # NOTE(review): unused below
    #
    title = r'Samples for stochastic $\beta$, $\gamma_{E}$'
    labelx = r'$\beta $'
    labely = r'$\gamma_{E}$'
    x_mean = beta
    y_mean = gE
    labelx_mean = r'$\langle \beta \rangle$'
    labely_mean = r'$\langle \gamma_{E} \rangle$'
    # Row indices into sample_parameters for the x/y axes.
    # NOTE(review): if the sampling order matches the covariance setup above
    # (alpha, beta, gIa, gIs, gA, gE), index 4 would be gA, not gE — confirm
    # that gE really is row 4 of result['sample_parameters'].T.
    data_index_x = 1
    data_index_y = 4
    fig, ax = plt.subplots(1,1,figsize=(7,5))
    ax.set_title(title,y=1.05,fontsize=fontsize)
    ax.axvline(x_mean,color='limegreen',ls='--',lw=2,label=labelx_mean)
    ax.axhline(y_mean,color='dodgerblue',ls='--',lw=2,label=labely_mean)
    ax.scatter(sample_parameters[data_index_x], sample_parameters[data_index_y] ,
               label='sampled data',
               color='black',s=10) #, c = truth)
    ax.set_xlabel(labelx,fontsize=fontsize)
    ax.set_ylabel(labely,fontsize=fontsize)
    ax.set_xlim(0,1.05*np.max(sample_parameters[data_index_x]))
    ax.set_ylim(0,1.05*np.max(sample_parameters[data_index_y]))
    ax.legend(loc='best',fontsize=15)
    plt.show(fig)
    if filename != 'None':
        fig.savefig(filename + '_samples1.png', bbox_inches='tight',dpi=100)
        plt.close(fig)
#
# -
# # Forecast based on deterministic model
# +
result = model_forecast.simulate(S0, E0, A0, Ia0, Is0, Q0,
contactMatrix, Tf, Nt,
verbose=True,
Ns=Ns)
plot_trajectories(result,
plot_index = 2,
percentile=5,
)
plot_trajectories(result,
# filename='forecast_deterministic',
percentile=5,
)
plot_trajectories(result,
plot_index = 5,
percentile=5,
)
plot_sample_parameters(result)
# -
| examples/forecast/ex03 - SEAIRQ.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:alert]
# language: python
# name: conda-env-alert-py
# ---
# # Notebook Conf
# %load_ext autoreload
# %autoreload 2
# # Import Libraries
# +
from pathlib import Path
import pandas as pd
import re
import numpy as np
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances
import gensim.downloader as api
PATH_PROJ = Path.home() / 'project/intent-classification'
PATH_DATA = PATH_PROJ
# -
# # Processing Data
# ## string clean
df = pd.read_csv(PATH_DATA / 'data.csv', usecols=['Intent', 'Questions'])
df = df.rename(columns={'Intent': 'intent', 'Questions': 'query'})
df.dropna(inplace=True)
df = df.drop(df[df.intent == 'Late fee waiver for credit card'].index)
def string_clean(text):
    """Lower-case *text* and strip every character that is not a-z, 0-9
    or whitespace (digits are kept; no stemming is performed)."""
    lowered = text.lower()
    return re.sub(r'[^a-z0-9\s]', '', lowered)
df['query'] = df['query'].apply(string_clean)
# ## tokenize
# +
# import nltk
# nltk.download('punkt')
# -
def tokenize(wd):
    """Tokenize *wd* with NLTK's word_tokenize and re-join the tokens
    with single spaces (normalises whitespace and splits off punctuation)."""
    return ' '.join(word_tokenize(wd))
df['query'] = df['query'].apply(tokenize)
# ## lemmatizer and stopwords
# +
import spacy
nlp = spacy.load("en_core_web_sm")
from spacy.lang.en.stop_words import STOP_WORDS
stop_words = list(STOP_WORDS)
# -
df['query'] = df['query'].apply(lambda x:' '.join([token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words]))
# # Modeling
# ## word2vec
word2vec = api.load("word2vec-google-news-300")
# +
# request for easicredit late fee waiver
# -
def get_sentence_vec(sentence, word2vec, idf=None):
    """Embed a whitespace-tokenised sentence as a single 300-d vector.

    Tokens absent from the embedding vocabulary are dropped.  Without
    *idf* the remaining word vectors are averaged; with *idf* (a
    word -> weight mapping, missing words weighted 0.0) a weighted sum
    is returned instead.  A sentence with no known tokens maps to the
    zero vector.
    """
    known = [w for w in sentence.split() if w in word2vec.vocab]
    if not known:
        return np.zeros((300, ), dtype='float32')
    vectors = word2vec[known]
    if idf is None:
        return vectors.mean(axis=0)
    weights = np.array([idf.get(w, 0.0) for w in known])
    # (1 x n) @ (n x 300) -> idf-weighted sum of the word vectors
    return np.matmul(weights.reshape(1, -1), vectors).reshape(-1)
def get_sentence_centre(sentence_list, score_list ,word2vec, num_features):
    """Mean of the (idf-weighted) sentence vectors; used as a cluster centroid.

    NOTE(review): raises ZeroDivisionError when sentence_list is empty.
    """
    emb = np.zeros((num_features, ), dtype='float32')
    sentence_count = 0
    for sentence, score in zip(sentence_list,score_list):
        sentence_count += 1
        emb = emb + get_sentence_vec(sentence, word2vec,idf = score)
    return emb / sentence_count
# ## cluster leave one out
def get_cluster_centre(df, intent_list, word2vec):
    """Return {intent: centroid vector} over the rows of *df*, one entry
    per intent in *intent_list*."""
    result = {}
    for intent in intent_list:
        df_tmp = df[df.intent == intent]
        # 300 matches the word2vec-google-news-300 embedding dimension
        sentence_centre_tmp = get_sentence_centre(df_tmp['query'].values,df_tmp['score'].values ,word2vec, 300)
        result[intent] = sentence_centre_tmp
    return result
def get_tfidf_for_words(text):
    """Map each word of *text* to its tf-idf score under the fitted vectorizer.

    NOTE(review): relies on the module-level `vectorizer` and
    `feature_names`, which are re-assigned inside the leave-one-out loop
    below — confirm call order before reusing this helper elsewhere.
    """
    # get matrix use todense(), get ndarray use toarray()
    tfidf_matrix= vectorizer.transform([text]).todense()
    # column indices of the words that occur in `text`
    feature_index = tfidf_matrix[0,:].nonzero()[1]
    # create list: [(word1, score1), (word2, score2), ...]
    tfidf_scores = zip([feature_names[i] for i in feature_index], [tfidf_matrix[0, x] for x in feature_index])
    # return dict: {word1: tfidf1, word2: tfidf2, ...}
    return dict(tfidf_scores)
# +
# embed_mtx = np.array([
# word2vec[w] if w in word2vec.vocab else [0]*300 for w in feature_names
# ])
# np.matmul( np.asarray(vectorizer.transform(df['query']).todense()), embed_mtx).shape
# -
# Leave-one-out evaluation: for every query, refit tf-idf on the remaining
# rows, rebuild the per-intent centroids, and record the cosine distance of
# the held-out query to each centroid.
intent_list = df.intent.unique().tolist()
sentence_distance = []
for ind in df.index:
    sentence_distance_tmp = []
    query = df.loc[ind, 'query']
    df_data = df.drop(ind)
    ## get tf-idf score
    # get a different idf score each time the query is taken out
    vectorizer = TfidfVectorizer()
    vectorizer.fit(df_data['query'])
    # feature_names to be used to calculate word2vec.vocab coverage
    # NOTE(review): get_feature_names() is removed in scikit-learn >= 1.2;
    # newer versions need get_feature_names_out().
    feature_names = vectorizer.get_feature_names()
    # NOTE(review): applies to the full `df`, not `df_data`; index alignment
    # drops the held-out row on assignment, but `df_data['query'].apply(...)`
    # would be clearer and avoid scoring the held-out query.
    df_data['score'] = df['query'].apply(get_tfidf_for_words)
    print(df_data.head())  # NOTE(review): debug output inside the loop
    # get tfidf score to be used in weighted word vector
    query_score = get_tfidf_for_words(query)
    sentence_centre_dic = get_cluster_centre(df_data, intent_list, word2vec)
    for intent in intent_list:
        sentence_distance_tmp.append(cosine_distances(get_sentence_vec(query, word2vec,query_score).reshape(1,-1),
                                                      sentence_centre_dic[intent].reshape(1,-1)).item())
    sentence_distance.append(sentence_distance_tmp)
df_sentence_distance = pd.DataFrame(sentence_distance, columns=intent_list)
df_sentence_distance.head()
df.reset_index(drop=True, inplace=True)
df_result_2 = pd.concat([df, df_sentence_distance], axis=1)
df_result_2.head()
# check accuracy
df_tmp = df_result_2.copy()
df_tmp.set_index(['intent', 'query'], inplace=True)
df_tmp['cluster'] = df_tmp.idxmin(axis=1)
df_tmp.reset_index(inplace=True)
df_tmp['correct'] = (df_tmp.cluster == df_tmp.intent)
sum(df_tmp.correct) / len(df_tmp)
# output features
df_result_2.to_csv('data_leave_one_out.csv')
list(df_result_2.columns.values)
# ## check embedding coverage
# check embedding coverage
[x for x in feature_names if x not in word2vec.vocab]
print(len([x for x in feature_names if x in word2vec.vocab]) / len(feature_names))
| 03-weighted_cluster_leave_one_out_score.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# name: python3
# ---
# # Investigate the concepts available in the database (mimic-iv)
#
# The concepts are available from the mimic-code github repo.
# +
import getpass
import json
import math
import os
import psycopg2
import pandas as pd
import time
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from configobj import ConfigObj
from multiprocessing import Pool, RLock
from tqdm import tqdm
from typing import Tuple
from projects.utils import *
from projects.common import *
# +
def save_dsv(path: str, data: pd.DataFrame):
    """Write *data* to *path* as a '$'-separated file, creating parent dirs.

    Empty strings are written for missing values and the index is dropped.
    """
    parent = os.path.dirname(path)
    os.makedirs(parent, exist_ok=True)
    data.to_csv(path, na_rep='', sep='$', index=False)
def create_patient_info(dtype=int):
    """Empty patient record: parallel 'UID' (int) and 'Value' (*dtype*) arrays."""
    empty_uids = np.array([], dtype=int)
    empty_values = np.array([], dtype=dtype)
    return {'UID': empty_uids, 'Value': empty_values}
def sort_patient_table(x: dict):
    """In place, reorder every column of *x* so that 'UID' is ascending."""
    order = np.argsort(x['UID'])
    for column in x:
        x[column] = x[column][order]
# +
db_dir = os.path.abspath('') + "/../../../db"
(query_schema_core,
query_schema_hosp,
query_schema_icu,
query_schema_derived,
conn) = connect_to_database(db_dir)
# -
# Table for icustays:
# ['subject_id', 'hadm_id', 'stay_id', 'first_careunit', 'last_careunit', 'intime', 'outtime', 'los']
#
# Table for transfers:
# ['subject_id', 'hadm_id', 'transfer_id', 'eventtype', 'careunit', 'intime', 'outtime']
#
# Table for patients:
# ['subject_id', 'gender', 'anchor_age', 'anchor_year', 'anchor_year_group', 'dod']
#
# Table for admissions:
# ['subject_id', 'hadm_id', 'admittime', 'dischtime', 'deathtime', 'admission_type', 'admission_location', 'discharge_location', 'insurance', 'language', 'marital_status', 'ethnicity', 'edregtime', 'edouttime', 'hospital_expire_flag']
# +
patients_df = get_database_table_as_dataframe(conn, query_schema_core, 'patients')
admissions_df = get_database_table_as_dataframe(conn, query_schema_core, 'admissions')
transfers_df = get_database_table_as_dataframe(conn, query_schema_core, 'transfers').sort_values(by=['intime', 'outtime'])
icustays_df = get_database_table_as_dataframe(conn, query_schema_icu, 'icustays').sort_values(by=['intime', 'outtime'])
assert len(patients_df.to_numpy()[:, 0]) == len(np.unique(patients_df.to_numpy()[:, 0]))
assert len(admissions_df.to_numpy()[:, 1]) == len(np.unique(admissions_df.to_numpy()[:, 1]))
assert len(icustays_df.to_numpy()[:, 2]) == len(np.unique(icustays_df.to_numpy()[:, 2]))
patients_list = patients_df['subject_id'].tolist()
admissions_list = admissions_df['hadm_id'].tolist()
# -
_CAREUNITS = ['Coronary Care Unit (CCU)',
'Cardiac Vascular Intensive Care Unit (CVICU)']
custom_icustays_list = [i[1]['stay_id']
for i in icustays_df.iterrows()
if i[1]['first_careunit'] in _CAREUNITS or i[1]['last_careunit'] in _CAREUNITS]
# # Concept 1 : icustay_detail
#
# +
_table = 'icustay_detail'
df = get_database_table_as_dataframe(conn, query_schema_derived, _table)
# +
# uid_info = {idx: name
# for idx, name in enumerate(df.columns.to_list() +
# ['first_careunit', 'last_careunit'])}
# uid_info_path = os.path.abspath('') + "/../../../" + UID_INFO_PATH
# os.remove(uid_info_path)
# with open(uid_info_path, 'w+') as f:
# json.dump(uid_info, f)
# for df_i in tqdm(df.iterrows(), total=len(df)):
# df_row = df_i[1]
# if df_row['stay_id'] in custom_icustays_list:
# info_dict = create_patient_info()
# c = 0
# for i, j in zip(uid_info, df_row):
# info_dict['UID'] = np.append(info_dict['UID'], i)
# info_dict['Value'] = np.append(info_dict['Value'], j)
# c = i
# c += 1
# info_dict['UID'] = np.append(info_dict['UID'], c)
# j = icustays_df.loc[icustays_df['stay_id'] ==
# df_row['stay_id']]['first_careunit'].item()
# info_dict['Value'] = np.append(info_dict['Value'], j)
# c += 1
# info_dict['UID'] = np.append(info_dict['UID'], c)
# j = icustays_df.loc[icustays_df['stay_id'] ==
# df_row['stay_id']]['last_careunit'].item()
# info_dict['Value'] = np.append(info_dict['Value'], j)
# save_path = os.path.join(STRUCTURED_EXPORT_DIR,
# 'info_'+str(df_i[1]['stay_id'])+'.dsv')
# save_dsv(save_path, pd.DataFrame(info_dict))
| mimic-iv/projects/notebooks/03_investigate_concept_icustay_detail.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
# [](https://colab.research.google.com/github/hpcgarage/pyboot-g2s3/blob/master/supplemental/market--soln.ipynb)
#
# # Exercises: Market-clearing prices
#
# ## (Sample solutions)
#
# In this problem, you will simulate a simple economic market of buyers and sellers who wish to trade a certain product. This problem assesses your ability to translate a procedure into efficient code using elementary Python. There are two (2) exercises worth a total of ten (10) points.
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# This notebook includes some simple plots to help you understand and debug the output. Run the cell below now, which will define the plotting routines. (You do not need to understand the code inside this cell.)
#
# > The test cells in this file compare against data that is included in the repository containing this code. If you are running in some environment (e.g., Google Colab) and need this data, it is available here: https://cse6040.gatech.edu/datasets/market.zip
# + inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
import matplotlib.pyplot as plt
# %matplotlib inline
# Adapted from: https://matplotlib.org/gallery/lines_bars_and_markers/barchart.html
def autolabel(rects, xpos='center', ax=None):
    """Write each bar's height just above it.

    *xpos* ('center', 'right' or 'left') controls where along the bar the
    label sits; *ax* defaults to the current axes.
    """
    if ax is None:
        ax = plt.gca()
    xpos = xpos.lower()  # normalize the case of the parameter
    alignments = {'center': 'center', 'right': 'left', 'left': 'right'}
    width_fractions = {'center': 0.5, 'right': 0.57, 'left': 0.43}  # x_txt = x + w*off
    for bar in rects:
        height = bar.get_height()
        label_x = bar.get_x() + bar.get_width() * width_fractions[xpos]
        ax.text(label_x, 1.01 * height, '{}'.format(height),
                ha=alignments[xpos], va='bottom')
def viz_market(market):
    """Bar-plot a market: one labelled bar per person, buyers blue, sellers gray."""
    x = range(len(market))
    y = [p for _, p in market]
    is_buyer = [t == 'buyer' for t, _ in market]
    colors = ['blue' if is_blue else 'gray' for is_blue in is_buyer]  # NOTE(review): unused
    def split_filter(x, f):
        # Partition x into (elements where f is True, elements where f is False).
        x_true = [xi for xi, fi in zip(x, f) if fi]
        x_false = [xi for xi, fi in zip(x, f) if not fi]
        return x_true, x_false
    x_buyers, x_sellers = split_filter(x, is_buyer)
    y_buyers, y_sellers = split_filter(y, is_buyer)
    buyer_bars = plt.bar(x_buyers, y_buyers, color='blue', label='buyers')
    seller_bars = plt.bar(x_sellers, y_sellers, color='lightgray', label='sellers')
    plt.xlabel('Person ID')
    plt.title('Price ($)')
    plt.legend()
    autolabel(buyer_bars)
    autolabel(seller_bars)
def fn(fn_base, dirname='./market-data/'):
    """Return the path to *fn_base* inside *dirname*.

    Raises NotADirectoryError if *dirname* does not exist.  (The previous
    `assert` would be silently skipped under `python -O`.)
    """
    from os.path import isdir
    if not isdir(dirname):
        raise NotADirectoryError(dirname)
    return '{}{}'.format(dirname, fn_base)
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# ## A simple economic market
#
# Consider the following model of an **_(economic) market_**.
#
# **Sellers and buyers.** Suppose there are $n$ people, who wish to trade some product, like coffee mugs. The people come in two types: **_sellers_**, who have a coffee mug and wish to sell it, and **_buyers_**, who do not have coffee mugs but wish to acquire one.
#
# **Product values and prices.** Each person values a coffee mug differently. That means each seller wants to sell his or her coffee mug at one price, while each buyer wants to buy at (possibly) some other price. These prices might differ from person to person, that is, different sellers want to sell at different prices, and similarly for buyers.
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# **Example.** Suppose $n=13$ people, of whom $n_s=9$ are sellers and $n_b=4$ buyers. Each seller and buyer values the product, a single coffee mug, at some price.
#
# The code cell below defines this market, which is stored in a list named `market_demo`. Each person `i` is represented by `market_demo[i] = (who_i, price_i)`, a tuple where `who_i` is either `'seller'` or `'buyer'`, and `price_i` is the value that person `i` has assigned to a coffee mug that he or she wishes to sell or buy.
#
# Run this code cell now to "create" this market.
# + inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
market_demo = [('seller', 1.5), ('seller', 2.0), ('seller', 1.0),
('seller', 1.5), ('seller', 4.5), ('seller', 1.0),
('seller', 3.5), ('seller', 3.0), ('buyer', 2.0),
('buyer', 2.0), ('buyer', 3.5), ('seller', 2.5),
('buyer', 1.0)]
print(market_demo)
plt.figure(figsize=(10, 5))
viz_market(market_demo)
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# Observe that there are 9 sellers, shown as light gray bars, and 4 buyers, shown as darker blue bars. Each bar is labeled by the corresponding person's price. For example, persons 0 and 3 wish to sell their coffee mugs for `$1.50`, whereas person 10 is willing to pay `$3.50`, while person 12 is only willing to pay `$1.00`.
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# ## The market-clearing price
#
# Now suppose buyers and sellers start trading their coffee mugs, according to the following procedure:
#
# 1. Initially, all buyers and sellers are "unmatched."
# 2. Pick any unmatched buyer with the highest price. Similarly, pick any unmatched seller with the lowest price.
# 3. A **_match_** occurs between these two **if** the buyer's price is at least as large as the seller's price. If there is a match, they exchange coffee mugs. This transaction is also called a **_trade_**. Once these two people have executed this trade, they are no longer part of the market.
# 4. Repeat this matching process (steps 2 and 3) among the remaining buyers and sellers, until no matches remain.
#
# > What if there is a tie, meaning there are multiple unmatched buyers with the same highest price or multiple unmatched sellers with the same lowest price? In this case, the buyer or seller is selected arbitrarily.
#
# For instance, go back to the `market_demo` example.
# - Initially, there are no matches.
# - The buyer with the highest price is Person 10, whose value is `$3.50`.
# - There are two sellers with the same lowest price, Persons 2 and 5, who both value coffee mugs at `$1.00`. Pick either one; let's say, Person 2. Thus, Persons 10 and 2 are "matched." They drop out of the market.
# - Among remaining buyers, both Persons 8 and 9 have the highest price, who each have a value of `$2.00`. So one of these will be matched against Person 5; let's say it's Person 8. Persons 5 and 8 are matched and drop out.
# - Then Person 9 will be matched against either Persons 0 or 3, who have the same price of `$1.50`; let's say it's Person 0.
#
# Here is what we have so far:
#
# | Buyer \$ | Seller \$ | Who? |
# |:--------:|:---------:|:------------------:|
# | 3.5 | 1.0 | $10 \Leftarrow 2$ |
# | 2.0 | 1.0 | $8 \Leftarrow 5$ |
# | 2.0 | 1.5 | $9 \Leftarrow 0$ |
# + [markdown] inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
# As it happens, that is the last possible trade in this market! Person 12 is the only unmatched buyer, but his or her value is only `$1.00`. By contrast, the next unmatched seller is Person 3 with a value of `$1.50`. Since the asking price of `$1.50` exceeds the buyer's price of `$1.00`, they cannot trade. We say the market has **reached equilibrium.**
# + [markdown] inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
# **The (market-)clearing price**. Looking at the trades, consider the highest selling price, which in the preceding example was `$1.50`. We refer to this value as the **_market-clearing price_**, or just **_clearing price_**: it is the selling price at which the market has reached an equilibrium and no further trades are possible.
#
# > There are other possible definitions of clearing price, but for this problem, please use this one.
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# ## Exercises
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# **Exercise 0** (2 points). You do not need to write any code in this exercise. However, you do need to read some code, for which you get a "almost-free" 2 points! (You **must** submit the problem to the autograder to get these two points.)
#
# To help you get started, we are giving you one function called `p, n = analyze_market(m)` that implements the procedure above. Given a market `m`, it returns two values: the clearing price `p` and the number of trades `t`. If no trades are possible at all, it returns `p=0` and `t=0` (both integers, even though `p` could be fractional in general).
#
# Read the code for `analyze_market()` and convince yourself that it implements the trading procedure described previously. As a reminder (and to save you some scrolling), here is that trading procedure, repeated verbatim:
#
# 1. Initially, all buyers and sellers are "unmatched."
# 2. Pick any unmatched buyer with the highest price. Similarly, pick any unmatched seller with the lowest price.
# 3. A **_match_** occurs between these two **if** the buyer's price is at least as large as the seller's price. If there is a match, they exchange coffee mugs. This transaction is also called a **_trade_**. Once these two people have executed this trade, they are no longer part of the market.
# 4. Repeat this matching process (steps 2 and 3) among the remaining buyers and sellers, until no matches remain.
#
# > What if there is a tie, meaning there are multiple unmatched buyers with the same highest price or multiple unmatched sellers with the same lowest price? In this case, the buyer or seller is selected arbitrarily.
# + inputHidden=false nbgrader={"grade": true, "grade_id": "ex0_freebie", "locked": true, "points": "2", "solution": false} outputHidden=false
# Test cell: `ex0_freebie` (2 points)
def analyze_market(market, verbose=False):
    """Run the matching procedure on *market* and summarise the outcome.

    Repeatedly pairs the highest-priced unmatched buyer with the
    lowest-priced unmatched seller while the buyer's price covers the
    seller's.  (Pairing the k-th highest bid with the k-th lowest ask is
    equivalent to that round-by-round rule.)

    Returns (clearing_price, num_trades): the selling price of the final
    trade and how many trades occurred, or (0, 0) if no trade is possible.
    """
    bids = sorted((price for who, price in market if who == 'buyer'), reverse=True)
    asks = sorted(price for who, price in market if who == 'seller')
    trades = []
    for bid, ask in zip(bids, asks):
        if bid < ask:  # best remaining buyer cannot afford cheapest seller
            break
        trades.append((bid, ask))
    if verbose: print(trades)
    if trades:
        return trades[-1][1], len(trades)
    return 0, 0
clearing_price_demo, num_trades_demo = analyze_market(market_demo, verbose=True)
print("The clearing price is ${:.2f}.".format(clearing_price_demo))
print("There were {} trades.".format(num_trades_demo))
print("\n(Passed!)")
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# **Creating a random market.** For the next few exercises, we'll need a function that can create a random market.
#
# The function `create_random_market(num_people, prob_buyer, max_price)` will randomly generate a market in the form of a list of tuples formatted just like `market_demo`, above. Its parameters are:
#
# - `num_people`: The number of people in the market
# - `prob_buyer`: The probability that a given person should be a buyer, rather than a seller.
# - `max_price`: The maximum value that can be assigned to any buyer or seller.
#
# Each value (price) will be an integer drawn uniformly at random from the _closed_ interval `[1, max_price]`, that is, inclusive of 1 and `max_price`.
#
# Recall that `market_demo` looked like the following (run the next cell):
# + inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
print(market_demo)
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# And here is the `create_random_market` function; as a quick demo, run it a few times.
# + inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
def random_person(prob_buyer, max_price):
    """Draw one random market participant as a ('buyer'|'seller', price) tuple.

    The role is 'buyer' with probability `prob_buyer`; the price is an
    integer drawn uniformly from the closed interval [1, max_price].
    """
    from random import uniform, randrange
    # Decide the role first, then draw the price (same RNG call order as before).
    if uniform(0, 1) < prob_buyer:
        role = 'buyer'
    else:
        role = 'seller'
    price = randrange(1, max_price + 1)
    return (role, price)

def create_random_market(num_people, prob_buyer, max_price):
    """Generate a random market: a list of `num_people` (role, price) tuples."""
    market = []
    for _ in range(num_people):
        market.append(random_person(prob_buyer, max_price))
    return market
# Demo: build a small random market (8 people, ~30% buyers, prices in [1, 10])
# and visualize it with `viz_market` (defined earlier in the notebook).
market_demo2 = create_random_market(8, 0.3, 10)
viz_market(market_demo2)
# + [markdown] inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
# **Exercise 1** (2 points). Next, you will implement a function that simulates market trading.
#
# In particular, you will define this function:
#
# ```python
# def simulate(prob_buyer, num_people, max_price, num_trials):
# ...
# ```
#
# One input is `num_trials`, which is the number of simulation trials to run. In each trial, your code should:
#
# - Randomly generate a market with `num_people` people, where the probability of being a buyer is `prob_buyer` and the maximum price for any buyer or seller is `max_price`, drawn uniformly at random from 1 to `max_price`, inclusive. _Hint: Use a function that appears earlier in this notebook._
# - Call `analyze_market()` to analyze that market.
# - Record the clearing price and number of trades as a tuple (pair).
# - Return all of these pairs in a list.
#
# For example,
#
# ```python
# simulate(0.5, 8, 10, 5)
# ```
#
# might return the list of pairs, `[(10, 2), (3, 3), (10, 2), (9, 1), (15, 2)]`, which has one entry per trial and 5 trials in all, and each entry is a (clearing price, number of trades) pair. (This is just an example of the format of the output; since the markets will be generated randomly, you will see different values.)
#
# There are two test cells, so you can get partial credit. The first is just a quick demo; if your simulation is on the right track, you should see that when there are many more sellers than buyers, then the clearing price is low; and in the opposite scenario, the clearing price will be high.
# + inputHidden=false outputHidden=false
def simulate(prob_buyer, num_people, max_price, num_trials):
    """Run `num_trials` independent market simulations.

    Each trial generates a random market (via `create_random_market`) and
    analyzes it (via `analyze_market`); the result is a list with one
    (clearing_price, num_trades) pair per trial.
    """
    ### BEGIN SOLUTION
    def run_one_trial():
        # One trial: random market in, (clearing price, trade count) out.
        random_market = create_random_market(num_people, prob_buyer, max_price)
        return analyze_market(random_market)
    return [run_one_trial() for _ in range(num_trials)]
    ### END SOLUTION
# + inputHidden=false nbgrader={"grade": true, "grade_id": "ex1_0_quick_run", "locked": true, "points": "1", "solution": false} outputHidden=false
# Test cell 0: `ex1_0_quick_run` (1 point)
def avg_clearing_price(sim_results):
    """Mean clearing price over a list of (price, num_trades) pairs."""
    total = sum(price for price, _ in sim_results)
    return total / len(sim_results)

def avg_trades(sim_results):
    """Mean number of trades over a list of (price, num_trades) pairs."""
    total = sum(trades for _, trades in sim_results)
    return total / len(sim_results)
def viz_counts(values, counts, norm=True, ax=None, title=''):
    """Draw a bar chart of `counts` vs `values` on `ax` (default: current axes).

    When `norm` is true, bars show each count as a fraction of the total,
    and the y-label reports the total number of trials. Returns the bar
    container from `ax.bar`.
    """
    if ax is None:
        ax = plt.gca()
    if norm:
        total = sum(counts)
        heights = [count / total for count in counts]
        ylabel = 'Fraction ({} trials)'.format(total)
    else:
        heights = counts
        ylabel = 'Counts'
    bars = ax.bar(values, heights)
    ax.set_xlabel('Values')
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    return bars
def get_counts_sorted_by_value(x, values=None):
    """Tally the items of `x`.

    Returns a pair (values, counts). If `values` is None, the distinct
    items of `x` are returned in ascending order; otherwise counts are
    reported for exactly the given `values` (0 for values never seen).
    """
    from collections import Counter
    tally = Counter(x)
    if values is None:
        sorted_values = sorted(tally)
        return sorted_values, [tally[v] for v in sorted_values]
    values = list(values)
    return values, [tally.get(v, 0) for v in values]
# Simulation parameters: market size, price cap, and Monte Carlo trial count.
NUM_PEOPLE = 100
MAX_PRICE = 10
NUM_TRIALS = 1000
# Expected buyers per case: buyer-scarce, buyer-heavy, and balanced markets.
CASES__AVG_BUYERS = [10, 90, 50]
all_results = []
for avg_buyers in CASES__AVG_BUYERS:
    # Run the student's simulator for this buyer/seller mix.
    results = simulate(avg_buyers/NUM_PEOPLE, NUM_PEOPLE, MAX_PRICE, NUM_TRIALS)
    print("""
==================================================
Consider a market of {} people, of whom about {} are buyers on average.
Suppose a given product is worth at most ${} to any buyer or seller.
Then, {} trials of your simulation suggests that ...
""".format(NUM_PEOPLE, avg_buyers, MAX_PRICE, NUM_TRIALS))
    print("* the average clearing price is ${}; and".format(avg_clearing_price(results)))
    print("* the average number of trades is {}.".format(avg_trades(results)))
    all_results.append(results)
    # Histogram the observed clearing prices and trade counts for plotting.
    price_values, price_counts = get_counts_sorted_by_value([p for p, _ in results],
                                                            values=range(0, MAX_PRICE+1))
    trade_values, trade_counts = get_counts_sorted_by_value([t for _, t in results],
                                                            values=range(0, (NUM_PEOPLE+2)//2))
    # Side-by-side bar charts: clearing-price distribution and trade-count distribution.
    fig, ax = plt.subplots(1, 2, figsize=(12, 4))
    subtitle = '\nn={} people, b={} buyers (expected), max price={}'.format(NUM_PEOPLE, avg_buyers, MAX_PRICE)
    viz_counts(price_values, price_counts, ax=ax[0], title='Prices{}'.format(subtitle))
    viz_counts(trade_values, trade_counts, ax=ax[1], title='Trades{}'.format(subtitle))
print("\n(Passed!)")
# + inputHidden=false nbgrader={"grade": true, "grade_id": "ex1_1_random_tests", "locked": true, "points": "1", "solution": false} outputHidden=false
# Test cell: `ex1_1_random_tests` (1 point)
def check_dist(dist, obs, title=None):
    """Two-sample Kolmogorov-Smirnov check that `obs` matches `dist`.

    `dist` is a DataFrame indexed by value with a 'freq' column of expected
    frequencies. A synthetic sample of roughly len(obs) points is expanded
    from `dist` and compared against `obs`; raises AssertionError when the
    KS p-value is <= 0.1.
    """
    from scipy.stats import kstest, ks_2samp
    from numpy import array
    # Expand the reference distribution into a synthetic observation set.
    exp_obs = []
    for value, freq in zip(dist.index, dist['freq']):
        exp_obs.extend([value] * int(freq * len(obs)))
    D, p = ks_2samp(obs, array(exp_obs))
    if title is not None:  # Verbose mode
        print("{}: D={}, p={}".format(title, D, p))
    assert p > 0.1, "There is something fishy about the values produced by your simulation. Keep trying! (D={}, p={})".format(D, p)
def read_dist(filepath):
    """Load a reference distribution from CSV.

    The file must contain 'value' and 'freq' columns; the result is indexed
    by 'value' and gains a cumulative-frequency column 'cfreq'.
    """
    from pandas import read_csv
    table = read_csv(filepath).set_index('value')
    table['cfreq'] = table['freq'].cumsum()
    return table
def check_sim_results(avg_buyers, results, title=None):
    """Validate simulated (price, trades) pairs against stored reference CSVs.

    NOTE(review): relies on the globals `NUM_PEOPLE`/`MAX_PRICE` and on the
    helper `fn` (defined elsewhere in this notebook) to locate the files.
    """
    # Clearing prices must be consistent with the reference price distribution...
    prices_dist = read_dist(fn('prices--n{}--eb{}--p{}.csv'.format(NUM_PEOPLE, avg_buyers, MAX_PRICE)))
    prices_obs = [p for p, _ in results]
    check_dist(prices_dist, prices_obs, title)
    # ...and trade counts with the reference trade-count distribution.
    trades_dist = read_dist(fn('trades--n{}--eb{}--p{}.csv'.format(NUM_PEOPLE, avg_buyers, MAX_PRICE)))
    trades_obs = [t for _, t in results]
    check_dist(trades_dist, trades_obs, title)
# Re-run the simulation for each buyer/seller mix and statistically validate it.
NUM_PEOPLE = 100
MAX_PRICE = 10
NUM_TRIALS = 1000
CASES__AVG_BUYERS = [10, 90, 50]
for avg_buyers in CASES__AVG_BUYERS:
    results = simulate(avg_buyers/NUM_PEOPLE, NUM_PEOPLE, MAX_PRICE, NUM_TRIALS)
    # One (price, trades) pair is required per trial.
    assert len(results) == NUM_TRIALS, "Did not return the correct number of results."
    check_sim_results(avg_buyers, results, title='avg_buyers={}'.format(avg_buyers))
print("\n(Passed!)")
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# **Timing.** Let's measure how long it takes to run `analyze_market()` for a "large" market, i.e., one with many people.
# + inputHidden=false nbgrader={"grade": false, "locked": false, "solution": false} outputHidden=false
market_for_timing = create_random_market(20000, 0.5, 100)
# %timeit analyze_market(market_for_timing)
# + [markdown] inputHidden=false nbgrader={"grade": false, "locked": true, "solution": false} outputHidden=false
# **Exercise 2** (6 points). The `analyze_market()` function is slow. Come up with a more efficient implementation.
#
# There will be both a correctness test (1 point) and a timing test (5 points). The timing test below will check that your implementation is at least **50 times faster** than `analyze_market()` on an input comparable to `market_for_timing`, as generated above. There is no partial credit for anything that does not beat this threshold.
#
# > _Hint._ You may be tempted to start with the `analyze_market()` but replace lists with Numpy arrays. While that may work (we didn't try it), our sample solution handily beats the target threshold using only standard Python (no Numpy!) but with a better algorithm. In particular, rather than literally implementing the trading procedure, as `analyze_market()` does, see if you can find a different way to perform the same analysis.
# + inputHidden=false outputHidden=false
def analyze_market_faster(market, verbose=False):
    """Compute the same (clearing_price, num_trades) as `analyze_market`, faster.

    Rather than simulating trades one at a time (which repeatedly rescans
    the price lists), sort bids descending and asks ascending and pair them
    rank-for-rank: the k-th trade happens iff the k-th highest bid is at
    least the k-th lowest ask. This is O(n log n) instead of O(n^2).
    """
    ### BEGIN SOLUTION
    buyer_prices = sorted((price for who, price in market if who == 'buyer'), reverse=True)
    seller_prices = sorted(price for who, price in market if who == 'seller')
    # Keep only rank-matched pairs that can actually trade (bid >= ask).
    matches = [(b, s) for b, s in zip(buyer_prices, seller_prices) if b >= s]
    if verbose: print(matches)
    # The clearing price is the ask of the final (marginal) trade.
    # Fixed: return int 0 (not float 0.0) when there are no trades, matching
    # the baseline `analyze_market`, which returns int(0).
    clearing_price = matches[-1][1] if matches else 0
    return clearing_price, len(matches)
    ### END SOLUTION
# Sanity check: the fast version must agree with the baseline on the demo market.
clearing_price0, trades0 = analyze_market(market_demo, verbose=True)
clearing_price1, trades1 = analyze_market_faster(market_demo, verbose=True)
print("Baseline: The clearing price is ${} with {} trades.".format(clearing_price0, trades0))
print("Your method: The clearing price is ${} with {} trades.".format(clearing_price1, trades1))
# + nbgrader={"grade": false, "locked": false, "solution": false}
# This code cell times your method and is here for debugging purposes
# %timeit analyze_market_faster(market_for_timing)
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# The following two cells are the tests for Exercise 2 that you must pass to get points.
# + inputHidden=false nbgrader={"grade": true, "grade_id": "analyze_market_faster_correctness", "locked": true, "points": "1", "solution": false} outputHidden=false
# Test cell 0: `analyze_market_faster_correctness` (1 point)
import random
print("Checking corner cases...")
# NOTE(review): market entries elsewhere are (role, price); these tuples look
# reversed, so both implementations see zero buyers/sellers and only the
# no-trade path is exercised -- confirm whether ('buyer', 1) was intended.
assert analyze_market([]) == analyze_market_faster([])
assert analyze_market([(1, 'buyer')]) == analyze_market_faster([(1, 'buyer')])
assert analyze_market([(1, 'seller')]) == analyze_market_faster([(1, 'seller')])
print("Checking random cases...")
for trial_num in range(1, 11): # 10 random trials
    # Draw a random market configuration (size, buyer fraction, price cap).
    num_people_for_testing = random.randrange(1, 10)
    avg_buyers_for_testing = random.randrange(num_people_for_testing+1)
    prob_buyer_for_testing = avg_buyers_for_testing / num_people_for_testing
    max_price_for_testing = random.randrange(5, 20)
    print("Trial {}: {} people, {} buyers (expected; prob={}), max price of ${}".format(trial_num,
                                                                                        num_people_for_testing,
                                                                                        avg_buyers_for_testing,
                                                                                        prob_buyer_for_testing,
                                                                                        max_price_for_testing))
    market_for_testing = create_random_market(num_people_for_testing, prob_buyer_for_testing, max_price_for_testing)
    # The fast implementation must agree exactly with the naive baseline.
    naive_result = analyze_market(market_for_testing)
    your_result = analyze_market_faster(market_for_testing)
    assert your_result == naive_result, "Results do not match on this market (`market_for_testing`):\n\t{}.\nYour result is {} while the baseline produces {}.".format(market_for_testing, your_result, naive_result)
print("\n(Passed!)")
# + inputHidden=false nbgrader={"grade": true, "grade_id": "analyze_market_faster_speed", "locked": true, "points": "5", "solution": false} outputHidden=false
# Test cell 1: `analyze_market_faster_speed` (5 points)
print("Checking random cases again...")
for trial_num in range(1, 11): # 10 random trials
    num_people_for_testing = random.randrange(1, 10)
    avg_buyers_for_testing = random.randrange(num_people_for_testing+1)
    prob_buyer_for_testing = avg_buyers_for_testing / num_people_for_testing
    max_price_for_testing = random.randrange(5, 20)
    print("Trial {}: {} people, {} buyers (expected; prob={}), max price of ${}".format(trial_num,
                                                                                        num_people_for_testing,
                                                                                        avg_buyers_for_testing,
                                                                                        prob_buyer_for_testing,
                                                                                        max_price_for_testing))
    market_for_testing = create_random_market(num_people_for_testing, prob_buyer_for_testing, max_price_for_testing)
    naive_result = analyze_market(market_for_testing)
    your_result = analyze_market_faster(market_for_testing)
    assert your_result == naive_result, "Results do not match on this market (`market_for_testing`):\n\t{}.\nYour result is {} while the baseline produces {}.".format(market_for_testing, your_result, naive_result)
# Time both implementations on a large (20,000-person) market.
market_for_timing2 = create_random_market(20000, 0.5, 100)
print("Timing the naive method...")
# t_naive = %timeit -o analyze_market(market_for_timing2)
print("\nTiming your method...")
# t_you = %timeit -o analyze_market_faster(market_for_timing2)
# NOTE: `t_naive` and `t_you` are bound by the `%timeit -o` magics above,
# which jupytext renders as comments -- this cell only runs inside Jupyter.
speedup = t_naive.average / t_you.average
print("\nYour method is {:.1f}x faster than the baseline.".format(speedup))
assert speedup >= 50, "Sorry, not yet fast enough!"
print("\n(Passed!)")
# + [markdown] nbgrader={"grade": false, "locked": true, "solution": false}
# **Fin!** That's the end of this problem. Don't forget to try running it from top-to-bottom to make sure that it works, and then be sure to submit it and verify that it also passes the autograder.
| supplemental/market--soln.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Road Hazard Data
# cd "Stan's Datasets Notebooks"
import pandas as pd
import numpy as np
import camelot
import os
import glob
import json
import re
# +
# Extract every low-clearance/bridge table from the Coach-Net PDF.
lc_n_bridges = camelot.read_pdf('https://member.coach-net.com/members/pdfs/lowclearancebridgeinfo.pdf', pages='all')
lc_n_bridges
# -
# Export each detected table as its own CSV (one file per page/table).
lc_n_bridges.export("./Raw POI Factory Datasets/Low Clearances and Bridges/lowclearancebridgeinfo.csv", f='csv')
lc_n_bridges[0]
lc_n_bridges[0].df
# Drop the page-1 table's CSV (presumably not wanted in the merged data -- confirm).
os.remove('./Raw POI Factory Datasets/Low Clearances and Bridges/lowclearancebridgeinfo-page-1-table-1.csv')
os.chdir("./Raw POI Factory Datasets/Low Clearances and Bridges/")
extension = 'csv'
# Merge the remaining per-page CSVs into a single combined file.
lc_n_bridges = [i for i in glob.glob('*.{}'.format(extension))]
lc_n_bridges_csv = pd.concat([pd.read_csv(f) for f in lc_n_bridges])
lc_n_bridges_csv.to_csv("lowclearanceandbridgeinfo.csv", index=False, encoding='utf-8-sig')
# +
lc_n_bridges = pd.read_csv('lowclearanceandbridgeinfo.csv')
lc_n_bridges.head()
# -
# Clean up the intermediate per-page CSVs.
filelist = glob.glob('lowclearancebridgeinfo-page-*')
for filename in filelist:
    os.remove(filename)
# Add empty coordinate columns, to be filled in manually below.
lc_n_bridges['Longitude'] = np.nan
lc_n_bridges['Latitude'] = np.nan
lc_n_bridges.head()
# +
# Give the parsed height columns meaningful names.
lc_n_bridges = lc_n_bridges.rename(columns={'Height': 'Ft', 'Posted': 'Posted Ft', 'Height.1': 'Posted Inches'})
lc_n_bridges.head()
# -
lc_n_bridges[lc_n_bridges['State'] == 'AL']
lc_n_bridges[lc_n_bridges['Location'] == 'Mobile-Bankhead Tunnel']
# Manually geocode the Bankhead Tunnel row (index 380).
lc_n_bridges.at[380,'Longitude'] = -88.030960
lc_n_bridges[lc_n_bridges['Location'] == 'Mobile-Bankhead Tunnel']
lc_n_bridges.at[380,'Latitude'] = 30.692998
lc_n_bridges[lc_n_bridges['Location'] == 'Mobile-Bankhead Tunnel']
# +
# Posted clearance for the same row.
lc_n_bridges.at[380,'Posted Ft'] = 12.0
lc_n_bridges.at[380,'Posted Inches'] = 0.0
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# +
# Coordinates for row 381.
lc_n_bridges.at[381,'Longitude'] = -87.916574
lc_n_bridges.at[381,'Latitude'] = 30.666192
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# -
# Fixed: DataFrame.append was deprecated in pandas 1.4 and removed in pandas
# 2.0; append single rows with pd.concat instead.
def _append_row(df, row):
    """Return `df` with one dict `row` appended, reindexed from 0."""
    return pd.concat([df, pd.DataFrame([row])], ignore_index=True)

lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': 'US 90',
                                          'Location': 'Spanish Fort-Battleship Parkway',
                                          'Ft': 13.0, 'Inches': 6.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -87.986697,
                                          'Latitude': 30.678119})
lc_n_bridges[lc_n_bridges['State'] == 'AL']
lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': 'Industrial Rd',
                                          'Location': 'Jackson-Near River Rd',
                                          'Ft': 11.0, 'Inches': 0.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -87.904922,
                                          'Latitude': 31.495218})
lc_n_bridges[lc_n_bridges['State'] == 'AL']
lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': 'County Rd 40',
                                          'Location': 'Ozark - Jodie Parker Rd',
                                          'Ft': 8.0, 'Inches': 0.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -85.659372,
                                          'Latitude': 31.472883})
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# Fill in posted height and coordinates for existing row 372.
lc_n_bridges.at[372,'Posted Ft'] = 10.0
lc_n_bridges.at[372,'Posted Inches'] = 0.0
lc_n_bridges.at[372,'Longitude'] = -86.627767
lc_n_bridges.at[372,'Latitude'] = 31.829544
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# Correct the highway/location text and add coordinates for row 371.
lc_n_bridges.at[371, 'HWY #'] = 'County Rd 30'
lc_n_bridges.at[371, 'Location'] = 'Seale - Carver Rd/30 under Hwy 431'
lc_n_bridges.at[371,'Longitude'] = -85.178744
lc_n_bridges.at[371,'Latitude'] = 32.294212
lc_n_bridges[lc_n_bridges['State'] == 'AL']
lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': '2nd Ave',
                                          'Location': 'Phenix - S. Railroad St/2nd Ave',
                                          'Ft': 8.0, 'Inches': 0.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -84.996882,
                                          'Latitude': 32.477346})
lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': '4th Ave',
                                          'Location': 'Phenix - 1803 4th Ave',
                                          'Ft': 8.0, 'Inches': 0.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -84.998566,
                                          'Latitude': 32.477232})
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# Posted height and coordinates for row 373.
lc_n_bridges.at[373,'Posted Ft'] = 12.0
lc_n_bridges.at[373,'Posted Inches'] = 6.0
lc_n_bridges.at[373,'Longitude'] = -86.207025
lc_n_bridges.at[373,'Latitude'] = 32.538458
lc_n_bridges[lc_n_bridges['State'] == 'AL']
lc_n_bridges = _append_row(lc_n_bridges, {'State': 'AL', 'HWY #': 'County Rd 18',
                                          'Location': 'Forkland',
                                          'Ft': 8.0, 'Inches': 0.0, 'Posted Ft': np.nan,
                                          'Posted Inches': np.nan, 'Longitude': -87.830738,
                                          'Latitude': 32.549558})
lc_n_bridges[lc_n_bridges['State'] == 'AL']
# # Scraping with Selenium
# !pip install selenium
# Load site credentials from a local (untracked) JSON file.
with open('st_credentials.json') as creds:
    credentials = json.load(creds)
# +
import time
from selenium import webdriver
# Scratch examples kept for reference (NOTE(review): find_element_by_name was
# removed in Selenium 4.3 -- newer code uses driver.find_element(By.NAME, ...)).
# driver = webdriver.Chrome('/home/seek/Documents/GitHub/chromedriver') # Optional argument, if not specified will search path.
# driver.get('http://www.google.com/');
# time.sleep(5) # Let the user actually see something!
# search_box = driver.find_element_by_name('q')
# search_box.send_keys('Chrome<PASSWORD>')
# search_box.submit()
# time.sleep(5) # Let the user actually see something!
# driver.quit()
# +
# driver = webdriver.Chrome()
# driver.get("http://www.python.org")
# assert "Python" in driver.title
# elem = driver.find_element_by_name("q")
# elem.clear()
# elem.send_keys("pycon")
# elem.send_keys(Keys.RETURN)
# assert "No results found." not in driver.page_source
# driver.close()
# -
# ls
# +
# Log in to the SurfaceTracks member area using the stored credentials.
driver = webdriver.Chrome()
driver.get("https://www.surfacetracks.com/amember/login")
# NOTE(review): find_element_by_name/by_xpath were removed in Selenium 4.3;
# on newer Selenium use driver.find_element(By.NAME, ...) / (By.XPATH, ...).
user_element = driver.find_element_by_name("amember_login")
pass_element = driver.find_element_by_name("amember_pass")
user_element.send_keys(credentials['amember_login'])
pass_element.send_keys(credentials['amember_pass'])
driver.find_element_by_xpath("//input[@value='Login']").click()
# -
driver.get('https://www.surfacetracks.com/plus/get-feature.php?id=107368')
# +
# NOTE(review): this path mixes shell-escaped spaces and a Windows-style
# `.exe` name inside a macOS path -- it almost certainly does not exist as
# written; verify the actual chromedriver location before running.
driver = webdriver.Chrome(executable_path="/Users/connorheraty/Desktop/data-science/Stan\'s\ Datasets\ Notebooks//chromedriver.exe")
# +
from selenium import webdriver
# NOTE(review): __file__ is not defined inside a Jupyter notebook, so this
# cell raises NameError when run interactively -- os.getcwd() would work.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DRIVER_BIN = os.path.join(PROJECT_ROOT, "chromedriver.exe")
browser = webdriver.Chrome(executable_path = DRIVER_BIN)
browser.get('http://www.baidu.com/')
# -
# dirname('chromedriver.exe') is '' -- abspath('') is just the CWD.
PROJECT_ROOT = os.path.abspath(os.path.dirname('chromedriver.exe'))
DRIVER_BIN = os.path.join(PROJECT_ROOT, "chromedriver.exe")
DRIVER_BIN
browser = webdriver.Chrome(executable_path = DRIVER_BIN)
| Notebooks/stan_notebooks/Road Hazards.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Template
# ## String Template Class in Python
# ```
# In String module, Template Class allows us to create simplified syntax for output specification.
#
# The format uses placeholder names formed by $ with valid Python identifiers (alphanumeric characters and underscores).
#
# Surrounding the placeholder with braces allows it to be followed by more alphanumeric letters with no intervening spaces. Writing $$ creates a single escaped $:
#
# ```
# ## Example
# A simple Python template example
from string import Template

# Build a template containing a single $x placeholder.
t = Template('x is $x')
# Fill in x=1 and print the rendered string ("x is 1").
print(t.substitute(x=1))
# ## Example
# +
from string import Template

def Main():
    """Render a tiny shopping cart with string.Template and print its total."""
    line_tmpl = Template('$qty x $item = $price')
    cart = [dict(item='Coke', price=8, qty=2)]
    total = 0
    print('Cart:')
    for entry in cart:
        # Each cart entry supplies the $qty/$item/$price placeholders.
        print(line_tmpl.substitute(entry))
        total += entry['price']
    print('Total: ' + str(total))

if __name__ == '__main__':
    Main()
| AdvanceTopics/Template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="7IwfzELSnSaj"
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
# + id="pQMOASnPnlsv"
# Download (if needed) and load the FashionMNIST training split as tensors.
training_data = datasets.FashionMNIST(
    root='data',
    train=True,
    download=True,
    transform=ToTensor(),
)
# + id="TdJVokj3oBxR"
# Matching test split.
test_data = datasets.FashionMNIST(
    root='data',
    train=False,
    download=True,
    transform=ToTensor()
)
# + id="MXi-FEuboNEN"
batch_size = 64
# + id="Ju2ZSHHsoTme"
# Wrap the datasets in DataLoaders that yield (images, labels) mini-batches.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
# + id="A8A0wg8QoePY"
# Peek at a single batch to confirm tensor shapes.
for X, y in train_dataloader:
    print('Shape of X [N, C, H, W]:', X.shape)
    print('Shape of y:', y.shape)
    break
# + id="lW7Vgx7JorxT"
# Prefer a GPU when one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Device:', device)
# + id="f6YGXL5PpAXW"
class NeuralNetwork(nn.Module):
    """Simple MLP classifier for flattened 28x28 images (10 output classes)."""

    def __init__(self) -> None:
        super().__init__()
        # Flatten (N, 1, 28, 28) images into (N, 784) vectors.
        self.flatten = nn.Flatten()
        # Two 512-wide hidden layers with ReLU; the final layer emits logits.
        layers = [
            nn.Linear(28 * 28, 512), nn.ReLU(),
            nn.Linear(512, 512), nn.ReLU(),
            nn.Linear(512, 10),
        ]
        self.linear_relu_stack = nn.Sequential(*layers)

    def forward(self, x):
        """Return (N, 10) class logits for a batch of images."""
        flat = self.flatten(x)
        return self.linear_relu_stack(flat)
# + id="l_w1gG9opp2u"
# Instantiate the model on the selected device and show its layer summary.
model = NeuralNetwork().to(device)
print(model)
# + id="VjnYu4dYpvyA"
# Cross-entropy loss over the 10 class logits; plain SGD optimizer.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# + id="9ykogui8p_q1"
def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch, printing the loss every 100 batches.

    Uses the module-level `device` to place each batch.
    """
    num_examples = len(dataloader.dataset)
    model.train()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        inputs = inputs.to(device)
        targets = targets.to(device)
        # Forward pass and loss.
        loss = loss_fn(model(inputs), targets)
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            seen = batch_idx * len(inputs)
            print('loss {:7f} [{}/{}]'.format(loss.item(), seen, num_examples))
# + id="8STlHZVIr8-R"
def test(dataloader, model, loss_fn):
    """Evaluate `model` on `dataloader`; print accuracy and mean loss.

    Uses the module-level `device` to place each batch.
    """
    num_examples = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    total_loss = 0.0
    num_correct = 0.0
    with torch.no_grad():  # inference only -- no gradient tracking needed
        for inputs, targets in dataloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            logits = model(inputs)
            total_loss += loss_fn(logits, targets).item()
            num_correct += (logits.argmax(1) == targets).type(torch.float).sum().item()
    avg_loss = total_loss / num_batches
    accuracy = num_correct / num_examples
    print('Test error: \n Accuracy: {:.1f}, Avg loss: {:8f}'.format(100 * accuracy, avg_loss))
# + id="oROcv-E7tGbr"
# Train for a few epochs, evaluating on the test split after each one.
epochs = 5
for t in range(epochs):
    print('Epoch {} \n----------'.format(t + 1))
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print('Done')
# + id="L1JbV2q2tF58"
# Persist only the learned weights (state_dict), not the whole module object.
torch.save(model.state_dict(), 'model.pth')
# + id="7k41m1QYt5YO"
# !ls
# + id="kfbBBQALt52S"
# Rebuild the architecture and load the saved weights into it.
model_load = NeuralNetwork()
model_load.load_state_dict(torch.load('model.pth'))
# + id="FrWU6GTEuD6f"
# Human-readable FashionMNIST class names, indexed by label id.
classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]
# Predict the class of a single test image with the reloaded model.
model_load.eval()
x, y = test_data[1][0], test_data[1][1]
with torch.no_grad():
    pred = model_load(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
# + id="deArtuX5uS3R"
| pytorch/pytorch_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploring and Processing Data - Part 1
import pandas as pd
import numpy as np
import os
# # Import Data
# set the path of the raw data (repository layout: ../data/raw/{train,test}.csv)
raw_data_path = os.path.join(os.path.pardir,'data','raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
# Read the Titanic train/test splits, using PassengerId as the index.
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
type(train_df)
# # Basic Structure
train_df.info()
test_df.info()
# Tag test rows with a sentinel Survived value so train+test can be combined
# and separated again later.
test_df['Survived'] = -888
df = pd.concat((train_df, test_df), axis=0)
# Fixed: `df.info` without parentheses only displays the bound method; call it.
df.info()
df.head(20)
df.loc[5:10]
df.iloc[5:10]
# Fixed: `.loc` slices columns by *label*, and the columns here are strings,
# so `df.loc[5:10, 3:8]` raises; positional column slicing needs `.iloc`.
df.iloc[5:10, 3:8]
male_passengers = df.loc[df.Sex == 'male',:]
print('Number of male passengers: {0}'.format(len(male_passengers)))
# # Summary Statistics
df.describe()
# numerical feature
# Centrality measures
print('Mean fare: {0}'.format(df.Fare.mean()))
# Fixed typo in the printed label: 'Medina' -> 'Median'.
print('Median fare: {0}'.format(df.Fare.median()))
# dispersion measures
print('Min fare: {0}'.format(df.Fare.min())) # minimum fare
print('Max fare: {0}'.format(df.Fare.max())) # maximum fare
print('Fare range: {0}'.format(df.Fare.max() - df.Fare.min())) # Fare Range
print('25th Percentile: {0}'.format(df.Fare.quantile(.25))) # 25 percentile
print('50th Percentile: {0}'.format(df.Fare.quantile(.50))) # 50 percentile
print('75th Percentile: {0}'.format(df.Fare.quantile(.75))) # 75 percentile
print('Variance: {0}'.format(df.Fare.var())) # variance
print('Standard Devation: {0}'.format(df.Fare.std()))
df.Fare.plot(kind='box')
df.describe(include='all')
df.Sex.value_counts()
df.Sex.value_counts(normalize=True)*100 # percentage share per sex
df[df.Survived != -888].Survived.value_counts()
df.Pclass.value_counts()
df.Pclass.value_counts().plot(kind="bar")
df.Pclass.value_counts().plot(kind="bar", rot = 0, title='Class wise passenger count', color='c');
# # Distributions
df.Age.plot(kind='hist', title='histogram for Age', color='c');
df.Age.plot(kind='hist', title='histogram for Age', color='c', bins=20);
df.Age.plot(kind='kde', title='Density plot for age', color='c');
df.Fare.plot(kind='hist', title='histogram for Fare', color='c', bins=20);
print('skewness for age : {0:.2f}'.format(df.Age.skew()))
print('skewness for Fare : {0:.2f}'.format(df.Fare.skew()))
df.plot.scatter(x='Age', y='Fare', title='Scatter plot : Age vs Fare');
df.plot.scatter(x='Age', y='Fare', title='Scatter plot : Age vs Fare', alpha=0.2);
df.plot.scatter(x='Pclass', y='Fare', title='Scatter plot : Passenger class vs Fare', alpha=0.15);
# # Grouping & Aggregations
df.groupby('Sex').Age.median()
df.groupby('Pclass').Fare.median()
df.groupby('Pclass').Age.median()
# Fixed: selecting multiple columns from a groupby requires a *list*;
# tuple-style `['Fare', 'Age']` indexing was deprecated and removed in pandas 2.0.
df.groupby(['Pclass'])[['Fare', 'Age']].median()
df.groupby(['Pclass']).agg({'Fare' : 'mean', 'Age': 'median'})
# Named aggregations: each (output_name, function) tuple renames the result column.
aggregations = {
    'Fare': [
        ('mean_Fare', 'mean'),
        ('median_Fare', 'median'),
        ('max_Fare', max),
        ('min_Fare', min)
    ],
    'Age': [
        ('median_Age', 'median'),
        ('min_Age', min),
        ('max_Age', max),
        ('range_Age', lambda x: max(x) - min(x))
    ]
}
df.groupby('Pclass').agg(aggregations)
df.groupby(['Pclass','Embarked']).agg(aggregations)
pd.crosstab(df.Sex,df.Pclass)
df.pivot_table(index='Sex', columns='Pclass', values='Age', aggfunc='mean')
df.groupby(['Sex', 'Pclass']).Age.mean()
df.groupby(['Sex', 'Pclass']).Age.mean().unstack() # same as the pivot table above
# # Data Munging : Working with missing values
df.info()
# ## Feature : Embarked
# Which rows are missing an embarkation port?
df[df.Embarked.isnull()]
df.Embarked.value_counts()
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Embarked)
df[df.Embarked.isnull()]
# Median fare by (class, port) helps pick a plausible port for the missing rows.
df.groupby(['Pclass', 'Embarked']).Fare.median()
df.loc[62, 'Embarked'] = 'C'
df.loc[62]
df.loc[830]
df.loc[830, 'Embarked'] = 'C'
df.info()
# ## Feature Fare
df[df.Fare.isnull()]
# Impute the missing fare with the median for the same class and port.
median_fare = df.loc[(df.Pclass == 3) & (df.Embarked == 'S'), 'Fare'].median()
print(median_fare)
df.Fare.fillna(median_fare, inplace=True)
df.info()
# # Feature : Age
df[df.Age.isnull()]
df.Age.plot(kind='hist', bins=20, color='c')
df.Age.mean()
df.groupby('Sex').Age.median()
df[df.Age.notnull()].boxplot('Age', 'Sex');
# Function to extract the title from the name
def GetTitle(name):
    """Extract the honorific from a 'Last, Title. First' style name.

    Returns the title lower-cased and stripped, e.g. 'mr' for
    'Braund, Mr. Owen Harris'.
    """
    after_comma = name.split(',')[1]
    raw_title, _, _ = after_comma.partition('.')
    return raw_title.strip().lower()
# use map function to apply the function on each Name value row i
df.Name.map(lambda x : GetTitle(x)) # alternatively you can use : df.Name.map(GetTitle)
# Inspect the distinct raw titles present in the data.
df.Name.map(lambda x : GetTitle(x)).unique()
# Function to extract the title from the name
def GetTitle(name):
    """Map the honorific in `name` to one of a few normalized titles.

    Raw titles are grouped into {Mr, Mrs, Miss, Master, Sir, Lady, Officer};
    an unrecognized raw title raises KeyError.
    """
    title_group = {
        'mr': 'Mr', 'mrs': 'Mrs', 'miss': 'Miss', 'master': 'Master',
        'don': 'Sir', 'rev': 'Sir', 'sir': 'Sir', 'jonkheer': 'Sir',
        'dr': 'Officer', 'major': 'Officer', 'col': 'Officer', 'capt': 'Officer',
        'mme': 'Mrs', 'ms': 'Mrs', 'mlle': 'Miss',
        'lady': 'Lady', 'the countess': 'Lady', 'dona': 'Lady',
    }
    after_comma = name.split(',')[1]
    raw_title = after_comma.split('.')[0].strip().lower()
    return title_group[raw_title]
# Apply the normalized-title extractor to every passenger name.
df['Title'] = df.Name.map(lambda x: GetTitle(x))
df.head()
df[df.Age.notnull()].boxplot('Age', 'Title');
# Impute missing ages with the median age of each passenger's title group.
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median, inplace=True)
df.info()
# # Working with Outliers
# use a histogram to understand the Age distribution
df.Age.plot(kind='hist', bins=20, color='c');
df.loc[df.Age > 70]
# ## Fare
# histogram for fare
df.Fare.plot(kind='hist', title='histogram for Fare', bins=20, color='c');
# box plot to identify outliers
df.Fare.plot(kind='box');
# look into the outliers
df.loc[df.Fare == df.Fare.max()]
# log(1 + Fare) tames the heavy right tail.
LogFare = np.log(df.Fare + 1.0)
LogFare.plot(kind='hist', color='c', bins=20);
# binning
pd.qcut(df.Fare, 4)
pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']) #discretization
pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']).value_counts().plot(kind='bar', color='c', rot=0);
df['Fare_bin'] = pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high'])
df.head()
# # Feature Engineering
# ## Feature : Age State ( Adult or Child)
df['AgeState'] = np.where(df.Age >= 18, 'Adult', 'Child')
df.AgeState.value_counts()
# ## Feature : Family Size
# family size = siblings/spouses + parents/children + the passenger themselves
df['FamilySize'] = df.Parch + df.SibSp + 1
df['FamilySize'].plot(kind='hist', color='c');
# further explore this family with max family members
df.loc[df.FamilySize == df.FamilySize.max()]
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].FamilySize)
# ## Feature : isMother
# a lady aged more than 18 who has Parch > 0 and is married (not Miss)
df['IsMother'] = np.where(((df.Sex == 'female') & (df.Parch > 0) & (df.Age > 18) & (df.Title != 'Miss')), 1, 0)
df.loc[df.IsMother == 1]
# Crosstab with IsMother
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].IsMother)
# ## Deck
df.Cabin
df.Cabin.unique()
df.loc[df.Cabin == 'T']
# Treat the lone 'T' cabin as missing.
df.loc[df.Cabin == 'T', 'Cabin'] = np.NaN
df.Cabin.unique()
# extract first character of Cabin string to the deck
def get_deck(cabin):
    """Return the deck letter (first character of the cabin code, upper-cased),
    or 'Z' when the cabin is missing/NaN.

    Fixed: np.where on scalar arguments returns a 0-d numpy array; returning a
    plain Python str keeps the mapped 'Deck' column a clean string column.
    """
    return str(cabin)[0].upper() if pd.notnull(cabin) else 'Z'
# Map each cabin to its deck letter ('Z' for unknown).
df['Deck'] = df['Cabin'].map(lambda x : get_deck(x))
# check counts
df.Deck.value_counts()
# use crosstab to look into survived feature cabin wise
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Deck)
# info command
df.info()
# sex -> binary indicator
df['IsMale'] = np.where(df.Sex == 'male', 1, 0)
# one-hot encode the categorical columns Deck, Pclass, Title, Fare_bin, Embarked, AgeState
df = pd.get_dummies(df,columns=['Deck', 'Pclass','Title', 'Fare_bin', 'Embarked','AgeState'])
df.info()
# drop columns that are no longer needed after feature extraction
df.drop(['Cabin','Name','Ticket','Parch','SibSp','Sex'], axis=1, inplace=True)
# reorder columns so Survived (the target) comes first
columns = [column for column in df.columns if column != 'Survived']
columns = ['Survived'] + columns
df = df[columns]
df.info()
df.head()
# ## Save Processed Dataset
processed_data_path = os.path.join(os.path.pardir,'data','processed')
write_train_path = os.path.join(processed_data_path, 'train.csv')
write_test_path = os.path.join(processed_data_path, 'test.csv')
# train data: rows with a real Survived label
df.loc[df.Survived != -888].to_csv(write_train_path)
# test data: sentinel rows, written without the Survived column
columns = [column for column in df.columns if column != 'Survived']
df.loc[df.Survived == -888, columns].to_csv(write_test_path)
# # Building the data processing script
# Path where the %%writefile magic below saves the generated processing script.
get_processed_data_script_file = os.path.join(os.path.pardir,'src','data','get_processed_data.py')
# +
# %%writefile $get_processed_data_script_file
import numpy as np
import pandas as pd
import os
def read_data():
    """Load the raw train/test CSVs and return them as a single DataFrame.

    Test rows get a sentinel Survived value of -888 so the combined frame
    can be split back into train and test after processing.
    """
    raw_dir = os.path.join(os.path.pardir, 'data', 'raw')
    train_path = os.path.join(raw_dir, 'train.csv')
    test_path = os.path.join(raw_dir, 'test.csv')
    # PassengerId becomes the index in both frames.
    train = pd.read_csv(train_path, index_col='PassengerId')
    test = pd.read_csv(test_path, index_col='PassengerId')
    # Mark test rows with the sentinel label before stacking.
    test['Survived'] = -888
    return pd.concat((train, test), axis=0)
def process_data(df):
    """Run the full feature-engineering pipeline and return a model-ready frame.

    Same steps, in the same order, as the original method chain — written as
    explicit sequential assignments for easier step-by-step debugging.
    """
    # Title must come first: the age imputation below groups by it.
    out = df.assign(Title=lambda x: x.Name.map(get_title))
    out = fill_missing_values(out)
    # Binned fare plus age-derived features.
    out = out.assign(Fare_Bin=lambda x: pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']))
    out = out.assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
    out = out.assign(FamilySize=lambda x: x.Parch + x.SibSp + 1)
    out = out.assign(IsMother=lambda x: np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss')), 1, 0))
    # Deck: null the lone 'T' cabin, then take the first cabin letter.
    out = out.assign(Cabin=lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
    out = out.assign(Deck=lambda x: x.Cabin.map(get_deck))
    # Encode sex and one-hot encode the categoricals.
    out = out.assign(IsMale=lambda x: np.where(x.Sex == 'male', 1, 0))
    out = pd.get_dummies(out, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
    # Drop raw columns that engineered features have replaced.
    out = out.drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
    return reorder_columns(out)
# Raw title -> normalised category. Module-level constant so the dict is
# built once instead of on every call (get_title runs once per passenger).
_TITLE_GROUP = {
    'mr': 'Mr',
    'mrs': 'Mrs',
    'miss': 'Miss',
    'master': 'Master',
    'don': 'Sir',
    'rev': 'Sir',
    'dr': 'Officer',
    'mme': 'Mrs',
    'ms': 'Mrs',
    'major': 'Officer',
    'lady': 'Lady',
    'sir': 'Sir',
    'mlle': 'Miss',
    'col': 'Officer',
    'capt': 'Officer',
    'the countess': 'Lady',
    'jonkheer': 'Sir',
    'dona': 'Lady',
}


def get_title(name):
    """Map a raw passenger name to a normalised title category.

    Names are shaped like 'Last, Title. First ...'; the token between the
    first comma and the following period is the raw title.

    Raises KeyError for titles not in the lookup (same contract as before).
    """
    first_name_with_title = name.split(',')[1]
    title = first_name_with_title.split('.')[0]
    return _TITLE_GROUP[title.strip().lower()]
def get_deck(cabin):
    """Return the deck letter (first character of the cabin code, upper-cased).

    Missing cabins (None/NaN) map to the sentinel deck 'Z'.
    """
    # A conditional expression instead of np.where: np.where on a scalar
    # eagerly evaluates str(nan)[0] for missing cabins and returns a 0-d
    # numpy array; this returns a plain str and only touches valid cabins.
    return str(cabin)[0].upper() if pd.notnull(cabin) else 'Z'
def fill_missing_values(df):
    """Impute missing Embarked, Fare and Age values and return the frame.

    Uses column assignment rather than ``Series.fillna(..., inplace=True)``:
    the inplace form is chained assignment, which raises a FutureWarning on
    modern pandas and stops updating the frame entirely under copy-on-write.
    """
    # Embarked: the missing rows are most consistent with 'C' (from the EDA).
    df['Embarked'] = df['Embarked'].fillna('C')
    # Fare: median fare of the matching passenger segment (3rd class,
    # embarked at Southampton). Computed after the Embarked fill, as before.
    median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
    df['Fare'] = df['Fare'].fillna(median_fare)
    # Age: median age per title group.
    title_age_median = df.groupby('Title').Age.transform('median')
    df['Age'] = df['Age'].fillna(title_age_median)
    return df
def reorder_columns(df):
    """Return the frame with the Survived column moved to the front.

    The relative order of every other column is preserved.
    """
    feature_cols = [c for c in df.columns if c != 'Survived']
    return df[['Survived'] + feature_cols]
def write_data(df):
    """Write the processed frame out as separate train and test CSVs.

    Train rows are those with a real Survived label; test rows carry the
    -888 sentinel and are written without the Survived column. The
    PassengerId index is written in both files.
    """
    out_dir = os.path.join(os.path.pardir, 'data', 'processed')
    train_path = os.path.join(out_dir, 'train.csv')
    test_path = os.path.join(out_dir, 'test.csv')
    is_test = df.Survived == -888
    # Train split keeps every column, including the label.
    df[~is_test].to_csv(train_path)
    # Test split drops the sentinel label column.
    feature_cols = [c for c in df.columns if c != 'Survived']
    df[is_test][feature_cols].to_csv(test_path)
# Script entry point: read raw data, engineer features, write processed CSVs.
if __name__ == '__main__':
    df = read_data()
    df = process_data(df)
    write_data(df)
# -
# Execute the script written by the %%writefile cell above.
# !python $get_processed_data_script_file
# Read the processed files back to verify the script's output.
train_df = pd.read_csv(write_train_path)
train_df.info()
test_df = pd.read_csv(write_test_path)
test_df.info()
# ## Advanced visualization using MatPlotLib
import matplotlib.pyplot as plt
# %matplotlib inline
# Quick histogram; trailing semicolon suppresses the text repr in the notebook.
plt.hist(df.Age);
# Same histogram with explicit bin count and colour.
plt.hist(df.Age, bins=20, color='c')
plt.show()
# State-machine (pyplot) API: title and axis labels set on the current figure.
plt.hist(df.Age, bins=20, color='c')
plt.title('Histogram : Age')
plt.xlabel('Bins')
plt.ylabel('Counts')
plt.show()
# Object-oriented API: same plot via an explicit Axes object.
f , ax = plt.subplots()
ax.hist(df.Age, bins=20, color='c')
ax.set_title('Histogram : Age')
ax.set_xlabel('Bins')
ax.set_ylabel('Counts')
plt.show()
# +
# Two side-by-side histograms on one figure.
f , (ax1, ax2) = plt.subplots(1, 2 , figsize=(14,3))
ax1.hist(df.Fare, bins=20, color='c')
ax1.set_title('Histogram : Fare')
ax1.set_xlabel('Bins')
ax1.set_ylabel('Counts')
ax2.hist(df.Age, bins=20, color='tomato')
ax2.set_title('Histogram : Age')
ax2.set_xlabel('Bins')
ax2.set_ylabel('Counts')
plt.show()
# +
# 3x2 grid: two histograms, two boxplots, one scatter, one empty slot.
f , ax_arr = plt.subplots(3 , 2 , figsize=(14,7))
# Plot 1 : Fare histogram
ax_arr[0,0].hist(df.Fare, bins=20, color='c')
ax_arr[0,0].set_title('Histogram : Fare')
ax_arr[0,0].set_xlabel('Bins')
ax_arr[0,0].set_ylabel('Counts')
# Plot 2 : Age histogram
ax_arr[0,1].hist(df.Age, bins=20, color='c')
ax_arr[0,1].set_title('Histogram : Age')
ax_arr[0,1].set_xlabel('Bins')
ax_arr[0,1].set_ylabel('Counts')
# Plot 3 : Fare boxplot (title fixed — it previously said 'Boxplot : Age'
# even though the data plotted here is Fare)
ax_arr[1,0].boxplot(df.Fare.values)
ax_arr[1,0].set_title('Boxplot : Fare')
ax_arr[1,0].set_xlabel('Fare')
ax_arr[1,0].set_ylabel('Fare')
# Plot 4 : Age boxplot
ax_arr[1,1].boxplot(df.Age.values)
ax_arr[1,1].set_title('Boxplot : Age')
ax_arr[1,1].set_xlabel('Age')
ax_arr[1,1].set_ylabel('Age')
# Plot 5 : Age vs Fare scatter
ax_arr[2,0].scatter(df.Age, df.Fare, color='c', alpha=0.15)
ax_arr[2,0].set_title('Scatter Plot : Age vs Fare')
ax_arr[2,0].set_xlabel('Age')
ax_arr[2,0].set_ylabel('Fare')
# Hide the unused sixth slot and tidy the spacing.
ax_arr[2, 1].axis('off')
plt.tight_layout()
plt.show()
# -
| notebooks/2.0-ps-exploring-and-processing-data.ipynb |